/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "insn-config.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "addresses.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, machine_mode,
                                                   const_rtx, machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, machine_mode,
                                             const_rtx, machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, machine_mode,
                                                const_rtx, machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, machine_mode, const_rtx,
                                          machine_mode, unsigned int);
rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep [MAX_MODE_INT + 1][MAX_MODE_INT + 1];
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  value_type *base,
                                                  size_t i, value_type x)
{
  if (base == array.stack)
    {
      if (i < LOCAL_ELEMS)
        {
          base[i] = x;
          return base;
        }
      gcc_checking_assert (i == LOCAL_ELEMS);
      /* A previous iteration might also have moved from the stack to the
         heap, in which case the heap array will already be big enough.  */
      if (vec_safe_length (array.heap) <= i)
        vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }
  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}
/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    value_type *base,
                                                    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
         we're likely to want most.  It also allows for the SEQUENCE
         code below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
        if (format[i] == 'e')
          {
            value_type subx = T::get_value (x->u.fld[i].rt_rtx);
            if (__builtin_expect (end < LOCAL_ELEMS, true))
              base[end++] = subx;
            else
              base = add_single_to_queue (array, base, end++, subx);
          }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
        {
          value_type subx = T::get_value (x->u.fld[i].rt_rtx);
          if (__builtin_expect (end < LOCAL_ELEMS, true))
            base[end++] = subx;
          else
            base = add_single_to_queue (array, base, end++, subx);
        }
      else if (format[i] == 'E')
        {
          unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
          rtx *vec = x->u.fld[i].rt_rtvec->elem;
          if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
            for (unsigned int j = 0; j < length; j++)
              base[end++] = T::get_value (vec[j]);
          else
            for (unsigned int j = 0; j < length; j++)
              base = add_single_to_queue (array, base, end++,
                                          T::get_value (vec[j]));
          if (code == SEQUENCE && end == length)
            /* If the subrtxes of the sequence fill the entire array then
               we know that no other parts of a containing insn are queued.
               The caller is therefore iterating over the sequence as a
               PATTERN (...), so we also want the patterns of the
               subinstructions.  */
            for (unsigned int j = 0; j < length; j++)
              {
                typename T::rtx_type x = T::get_rtx (base[j]);
                if (INSN_P (x))
                  base[j] = T::get_value (PATTERN (x));
              }
        }
  return end - orig_end;
}
template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}
template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;
template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;
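
/* Illustrative usage sketch (not part of the original file): these
   iterators are normally driven through the FOR_EACH_SUBRTX family of
   macros from rtl-iter.h.  The helper below is hypothetical and only
   demonstrates the idiom; it counts the REG subexpressions of X.  */
#if 0
static unsigned int
count_reg_subrtxes (const_rtx x)
{
  unsigned int count = 0;
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (REG_P (*iter))
      count++;
  return count;
}
#endif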
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

int
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
/* Compute an approximation for the offset between the register
   FROM and TO for the current function, as it was at the start
   of the routine.  */

static HOST_WIDE_INT
get_initial_register_offset (int from, int to)
{
#ifdef ELIMINABLE_REGS
  static const struct elim_table_t
  {
    const int from;
    const int to;
  } table[] = ELIMINABLE_REGS;
  HOST_WIDE_INT offset1, offset2;
  unsigned int i, j;

  if (to == from)
    return 0;

  /* It is not safe to call INITIAL_ELIMINATION_OFFSET
     before the reload pass.  We need to give at least
     an estimation for the resulting frame size.  */
  if (! reload_completed)
    {
      offset1 = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset1 = - offset1;
#endif
      if (to == STACK_POINTER_REGNUM)
        return offset1;
      else if (from == STACK_POINTER_REGNUM)
        return - offset1;
      else
        return 0;
    }

  for (i = 0; i < ARRAY_SIZE (table); i++)
    if (table[i].from == from)
      {
        if (table[i].to == to)
          {
            INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                        offset1);
            return offset1;
          }
        for (j = 0; j < ARRAY_SIZE (table); j++)
          {
            if (table[j].to == to
                && table[j].from == table[i].to)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return offset1 + offset2;
              }
            if (table[j].from == to
                && table[j].to == table[i].to)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return offset1 - offset2;
              }
          }
      }
    else if (table[i].to == from)
      {
        if (table[i].from == to)
          {
            INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                        offset1);
            return - offset1;
          }
        for (j = 0; j < ARRAY_SIZE (table); j++)
          {
            if (table[j].to == to
                && table[j].from == table[i].from)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return - offset1 + offset2;
              }
            if (table[j].from == to
                && table[j].to == table[i].from)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return - offset1 - offset2;
              }
          }
      }

  /* If the requested register combination was not found,
     try a different more simple combination.  */
  if (from == ARG_POINTER_REGNUM)
    return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
  else if (to == ARG_POINTER_REGNUM)
    return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
  else if (from == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
  else if (to == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
  else
    return 0;

#else
  HOST_WIDE_INT offset;

  if (to == from)
    return 0;

  if (reload_completed)
    {
      INITIAL_FRAME_POINTER_OFFSET (offset);
    }
  else
    {
      offset = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset = - offset;
#endif
    }

  if (to == STACK_POINTER_REGNUM)
    return offset;
  else if (from == STACK_POINTER_REGNUM)
    return - offset;
  else
    return 0;
#endif
}
/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
                         ? tree_to_shwi (DECL_SIZE_UNIT (decl))
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        {
#ifdef RED_ZONE_SIZE
          HOST_WIDE_INT red_zone_size = RED_ZONE_SIZE;
#else
          HOST_WIDE_INT red_zone_size = 0;
#endif
          HOST_WIDE_INT stack_boundary = PREFERRED_STACK_BOUNDARY
                                         / BITS_PER_UNIT;
          HOST_WIDE_INT low_bound, high_bound;

          if (size == 0)
            size = GET_MODE_SIZE (mode);

          if (x == frame_pointer_rtx)
            {
              if (FRAME_GROWS_DOWNWARD)
                {
                  high_bound = STARTING_FRAME_OFFSET;
                  low_bound  = high_bound - get_frame_size ();
                }
              else
                {
                  low_bound  = STARTING_FRAME_OFFSET;
                  high_bound = low_bound + get_frame_size ();
                }
            }
          else if (x == hard_frame_pointer_rtx)
            {
              HOST_WIDE_INT sp_offset
                = get_initial_register_offset (STACK_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);
              HOST_WIDE_INT ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = sp_offset - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = sp_offset + red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else if (x == stack_pointer_rtx)
            {
              HOST_WIDE_INT ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               STACK_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else
            {
              /* We assume that accesses are safe to at least the
                 next stack boundary.
                 Examples are varargs and __builtin_return_address.  */
#if ARGS_GROW_DOWNWARD
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + stack_boundary;
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - crtl->args.size - stack_boundary;
#else
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - stack_boundary;
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + crtl->args.size + stack_boundary;
#endif
            }

          if (offset >= low_bound && offset <= high_bound - size)
            return 0;
          return 1;
        }
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
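
/* Illustrative usage sketch (hypothetical, not part of the original
   file): a pass that wants to hoist a load speculatively could use
   rtx_addr_can_trap_p to check that the address is provably safe.  */
#if 0
static bool
safe_to_speculate_load_p (const_rtx mem)
{
  gcc_assert (MEM_P (mem));
  return !rtx_addr_can_trap_p (XEXP (mem, 0));
}
#endif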
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

int
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
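
/* Illustrative usage sketch (hypothetical, not part of the original
   file): for (const (plus (symbol_ref "x") (const_int 12))), split_const
   stores the SYMBOL_REF in *BASE_OUT and (const_int 12) in *OFFSET_OUT;
   for any other X it stores X itself and const0_rtx.  */
#if 0
static HOST_WIDE_INT
constant_offset_of (rtx addr)
{
  rtx base, offset;
  split_const (addr, &base, &offset);
  return INTVAL (offset);
}
#endif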
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == LABEL_REF_LABEL (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

      /* These are kept unique for a given value.  */
    CASE_CONST_ANY:
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
                    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
                   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Return true if REG is set or clobbered inside INSN.  */

int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
        if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
          return 1;

      return 0;
    }

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return 1;

  return set_of (reg, insn) != NULL_RTX;
}
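
/* Illustrative usage sketch (hypothetical, not part of the original
   file): a register holds the same value at B as at A if no intervening
   insn sets or clobbers it.  */
#if 0
static bool
value_survives_p (rtx reg, const rtx_insn *a, const rtx_insn *b)
{
  return !reg_set_between_p (reg, a, b);
}
#endif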
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  const rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;

  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
/* Add all hard registers in X to *PSET.  */
static void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
        add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}
/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *)data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}
/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
        IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}
/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or a SET whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In common case
                 only single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach set first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL;
              break;

            default:
              return NULL;
            }
        }
    }
  return set;
}
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
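
/* Illustrative usage sketch (hypothetical, not part of the original
   file): the usual pattern in passes that only handle simple
   assignments is to go through single_set and bail out on NULL.  */
#if 0
static rtx
source_of_simple_move (rtx_insn *insn)
{
  rtx set = single_set (insn);
  return set ? SET_SRC (set) : NULL_RTX;
}
#endif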
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      int c0 = INTVAL (XVECEXP (par, 0, 0));
      HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
          return 0;
      return
        simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                               offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const rtx_insn *insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

bool
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return false;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
           || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
               && x_regno == ARG_POINTER_REGNUM)
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return true;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return true;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return false;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return true;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return true;
        }
    }
  return false;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

  If the item being stored in or clobbered is a SUBREG of a hard register,
  the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
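
/* Illustrative usage sketch (hypothetical, not part of the original
   file): note_stores walks only the stored-into locations of a pattern,
   so collecting every hard register written by an insn is a one-liner
   with the record_hard_reg_sets callback defined above.  */
#if 0
static void
hard_regs_written_by (const rtx_insn *insn, HARD_REG_SET *pset)
{
  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
}
#endif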
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}
/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
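
/* Illustrative usage sketch (hypothetical, not part of the original
   file): passing a null DATUM matches any note of the given kind, which
   is the common way to test for the presence of, say, a REG_EQUAL note.  */
#if 0
static bool
has_constant_reg_equal_note_p (const rtx_insn *insn)
{
  rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
  return note && CONSTANT_P (XEXP (note, 0));
}
#endif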
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const rtx_insn *insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}
/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}
/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));
  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
                                       datum, REG_NOTES (insn));
}
/* Add a register note like NOTE to INSN.  */

void
add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
{
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
  else
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
}
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (as_a <rtx_insn *> (insn));
      break;
    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.  */

void
remove_reg_equal_equiv_notes (rtx_insn *insn)
{
  rtx *loc;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        *loc = XEXP (*loc, 1);
      else
        loc = &XEXP (*loc, 1);
    }
}
/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  if (!df)
    return;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx_insn *insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

bool
in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return true;

  return false;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
{
  rtx_expr_list *temp = *listp;
  rtx_expr_list *prev = NULL;

  while (temp)
    {
      if (node == temp->element ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
{
  rtx_insn_list *temp = *listp;
  rtx_insn_list *prev = NULL;

  while (temp)
    {
      if (node == temp->insn ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state, and thus no
   instructions or register uses should be moved or combined across them.
   This includes only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case CALL:
    case MEM:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_insn_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_insn_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Nonzero if X contains any volatile memory references,
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_refs_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_refs_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Similar to above, except that it also rejects register pre- and post-
   incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case VAR_LOCATION:
      return 0;

    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
         when some combination can't be done.  If we see one, don't think
         that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
    case CALL:
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (side_effects_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (side_effects_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);
  switch (code)
    {
      /* Handle these cases quickly.  */
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST:
    case PC:
    case CC0:
    case REG:
    case SCRATCH:
      return 0;

    case UNSPEC:
      return targetm.unspec_may_trap_p (x, flags);

    case UNSPEC_VOLATILE:
    case ASM_OPERANDS:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
          && MEM_VOLATILE_P (x)
          && XEXP (x, 0) == stack_pointer_rtx)
        return 1;
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
             reference; moving it out of context such as when moving code
             when optimizing, might cause its address to become invalid.  */
          code_changed
          || !MEM_NOTRAP_P (x))
        {
          HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
          return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
                                        GET_MODE (x), code_changed);
        }
      return 0;

      /* Division by a non-constant might trap.  */
    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      if (HONOR_SNANS (x))
        return 1;
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
        return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
        return 1;
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
         certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
        break;
      /* ??? There is no machine independent way to check for tests that trap
         when COMPARE is used, though many targets do make this distinction.
         For instance, sparc uses CCFPE for compares which generate exceptions
         and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (x))
        return 1;
      /* But often the compare has some CC mode, so check operand
         modes as well.  */
      if (HONOR_NANS (XEXP (x, 0))
          || HONOR_NANS (XEXP (x, 1)))
        return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (x))
        return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (XEXP (x, 0))
          || HONOR_SNANS (XEXP (x, 1)))
        return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
        return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
        return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (may_trap_p_1 (XEXP (x, i), flags))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = 0; j < XVECLEN (x, i); j++)
            if (may_trap_p_1 (XVECEXP (x, i, j), flags))
              return 1;
        }
    }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}
/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
	if (s->d == 1)
	  return *s->ip;
	else
	  return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      ((set (reg:SI) (mem:SI (%fp - 7)))
       (set (reg:QI) (mem:QI (%fp - 7))))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}
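/* Illustrative sketch (an assumption, not original code): hoisting moves
   an insn into a new context, so it must use the stronger predicate; an
   access that cannot trap in place may still fault once moved, as the
   comment above explains.  The helper name is hypothetical.  */

static bool ATTRIBUTE_UNUSED
example_safe_to_hoist_p (rtx pat)
{
  return !may_trap_or_fault_p (pat) && !side_effects_p (pat);
}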
/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

int
inequality_comparisons_p (const_rtx x)
{
  const char *fmt;
  int len, i;
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case REG:  case SCRATCH:  case PC:  case CC0:
    CASE_CONST_ANY:
    case CONST:  case LABEL_REF:  case SYMBOL_REF:
      return 0;

    case LT:  case LTU:  case GT:  case GTU:
    case LE:  case LEU:  case GE:  case GEU:
      return 1;

    default:
      break;
    }

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
	{
	  if (inequality_comparisons_p (XEXP (x, i)))
	    return 1;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (inequality_comparisons_p (XVECEXP (x, i, j)))
	      return 1;
	}
    }

  return 0;
}
/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.

   ALL_REGS is true if we want to replace all REGs equal to FROM, not just
   those pointer-equal ones.  */

rtx
replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
{
  int i, j;
  const char *fmt;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (all_regs
      && REG_P (x)
      && REG_P (from)
      && REGNO (x) == REGNO (from))
    {
      gcc_assert (GET_MODE (x) == GET_MODE (from));
      return to;
    }
  else if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_subreg (GET_MODE (x), new_rtx,
			       GET_MODE (SUBREG_REG (x)),
			       SUBREG_BYTE (x));
	  gcc_assert (x);
	}
      else
	SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					new_rtx, GET_MODE (XEXP (x, 0)));
	  gcc_assert (x);
	}
      else
	XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
					   from, to, all_regs);
    }

  return x;
}
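/* Usage note (illustrative, not original text): because replace_rtx edits
   X in place, a caller that must preserve sharing copies first, e.g.

       rtx tem = replace_rtx (copy_rtx (pat), from_reg, to_reg, false);

   PAT, FROM_REG and TO_REG are hypothetical names.  */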
/* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
   the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */

void
replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
{
  /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
  rtx x = *loc;
  if (JUMP_TABLE_DATA_P (x))
    {
      x = PATTERN (x);
      rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
      int len = GET_NUM_ELEM (vec);
      for (int i = 0; i < len; ++i)
	{
	  rtx ref = RTVEC_ELT (vec, i);
	  if (XEXP (ref, 0) == old_label)
	    {
	      XEXP (ref, 0) = new_label;
	      if (update_label_nuses)
		{
		  ++LABEL_NUSES (new_label);
		  --LABEL_NUSES (old_label);
		}
	    }
	}
      return;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by the iterator because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
    JUMP_LABEL (x) = new_label;

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
    {
      rtx *loc = *iter;
      if (rtx x = *loc)
	{
	  if (GET_CODE (x) == SYMBOL_REF
	      && CONSTANT_POOL_ADDRESS_P (x))
	    {
	      rtx c = get_pool_constant (x);
	      if (rtx_referenced_p (old_label, c))
		{
		  /* Create a copy of constant C; replace the label inside
		     but do not update LABEL_NUSES because uses in constant pool
		     are not counted.  */
		  rtx new_c = copy_rtx (c);
		  replace_label (&new_c, old_label, new_label, false);

		  /* Add the new constant NEW_C to constant pool and replace
		     the old reference to constant by new reference.  */
		  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
		  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
		}
	    }

	  if ((GET_CODE (x) == LABEL_REF
	       || GET_CODE (x) == INSN_LIST)
	      && XEXP (x, 0) == old_label)
	    {
	      XEXP (x, 0) = new_label;
	      if (update_label_nuses)
		{
		  ++LABEL_NUSES (new_label);
		  --LABEL_NUSES (old_label);
		}
	    }
	}
    }
}
void
replace_label_in_insn (rtx_insn *insn, rtx old_label, rtx new_label,
		       bool update_label_nuses)
{
  rtx insn_as_rtx = insn;
  replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
  gcc_checking_assert (insn_as_rtx == insn);
}
/* Return true if X is referenced in BODY.  */

bool
rtx_referenced_p (const_rtx x, const_rtx body)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, body, ALL)
    if (const_rtx y = *iter)
      {
	/* Check if a label_ref Y refers to label X.  */
	if (GET_CODE (y) == LABEL_REF
	    && LABEL_P (x)
	    && LABEL_REF_LABEL (y) == x)
	  return true;

	if (rtx_equal_p (x, y))
	  return true;

	/* If Y is a reference to pool constant traverse the constant.  */
	if (GET_CODE (y) == SYMBOL_REF
	    && CONSTANT_POOL_ADDRESS_P (y))
	  iter.substitute (get_pool_constant (y));
      }
  return false;
}
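/* Illustrative sketch (an assumption, not original code): before deleting
   a code label, a pass can check that an insn no longer mentions it;
   rtx_referenced_p also walks into constant-pool entries.  The helper
   name is hypothetical.  */

static bool ATTRIBUTE_UNUSED
example_insn_uses_label_p (rtx_insn *insn, rtx label)
{
  return rtx_referenced_p (label, PATTERN (insn));
}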
/* If INSN is a tablejump return true and store the label (before jump table) to
   *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const rtx_insn *insn, rtx *labelp, rtx_jump_table_data **tablep)
{
  rtx label;
  rtx_insn *table;

  if (!JUMP_P (insn))
    return false;

  label = JUMP_LABEL (insn);
  if (label != NULL_RTX && !ANY_RETURN_P (label)
      && (table = NEXT_INSN (as_a <rtx_insn *> (label))) != NULL_RTX
      && JUMP_TABLE_DATA_P (table))
    {
      if (labelp)
	*labelp = label;
      if (tablep)
	*tablep = as_a <rtx_jump_table_data *> (table);
      return true;
    }
  return false;
}
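/* Illustrative sketch (an assumption, not original code): a typical use
   of tablejump_p when a pass needs the explicit target list of a jump.
   The helper name is hypothetical.  */

static int ATTRIBUTE_UNUSED
example_count_table_entries (const rtx_insn *insn)
{
  rtx_jump_table_data *table;
  if (tablejump_p (insn, NULL, &table))
    return GET_NUM_ELEM (table->get_labels ());
  return 0;
}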
/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
		&& CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
	      || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
	  && computed_jump_p_1 (XEXP (x, i)))
	return 1;

      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (computed_jump_p_1 (XVECEXP (x, i, j)))
	    return 1;
    }

  return 0;
}
/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const rtx_insn *insn)
{
  int i;
  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
	return 0;

      if (GET_CODE (pat) == PARALLEL)
	{
	  int len = XVECLEN (pat, 0);
	  int has_use_labelref = 0;

	  for (i = len - 1; i >= 0; i--)
	    if (GET_CODE (XVECEXP (pat, 0, i)) == USE
		&& (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
		    == LABEL_REF))
	      {
		has_use_labelref = 1;
		break;
	      }

	  if (! has_use_labelref)
	    for (i = len - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (pat, 0, i)) == SET
		  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
		  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
		return 1;
	}
      else if (GET_CODE (pat) == SET
	       && SET_DEST (pat) == pc_rtx
	       && computed_jump_p_1 (SET_SRC (pat)))
	return 1;
    }
  return 0;
}
/* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
   the equivalent add insn and pass the result to FN, using DATA as the
   final argument.  */

static int
for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
{
  rtx x = XEXP (mem, 0);
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
	int size = GET_MODE_SIZE (GET_MODE (mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (size, GET_MODE (r1));
	return fn (mem, x, r1, r1, c, data);
      }

    case PRE_DEC:
    case POST_DEC:
      {
	int size = GET_MODE_SIZE (GET_MODE (mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (-size, GET_MODE (r1));
	return fn (mem, x, r1, r1, c, data);
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
	rtx r1 = XEXP (x, 0);
	rtx add = XEXP (x, 1);
	return fn (mem, x, r1, add, NULL, data);
      }

    default:
      gcc_unreachable ();
    }
}
/* Traverse *LOC looking for MEMs that have autoinc addresses.
   For each such autoinc operation found, call FN, passing it
   the innermost enclosing MEM, the operation itself, the RTX modified
   by the operation, two RTXs (the second may be NULL) that, once
   added, represent the value to be held by the modified RTX
   afterwards, and DATA.  FN is to return 0 to continue the
   traversal or any other value to have it returned to the caller of
   for_each_inc_dec.  */

int
for_each_inc_dec (rtx x,
		  for_each_inc_dec_fn fn,
		  void *data)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx mem = *iter;
      if (mem
	  && MEM_P (mem)
	  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
	{
	  int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
	  if (res != 0)
	    return res;
	  iter.skip_subrtxes ();
	}
    }
  return 0;
}
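/* Illustrative sketch (an assumption, not original code): a callback
   matching for_each_inc_dec_fn that merely counts autoinc operations.
   DATA points to an int counter; returning 0 continues the traversal.
   A caller would write, e.g.:

       int n = 0;
       for_each_inc_dec (PATTERN (insn), example_count_inc_dec, &n);  */

static int ATTRIBUTE_UNUSED
example_count_inc_dec (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
		       rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
		       rtx srcoff ATTRIBUTE_UNUSED, void *data)
{
  ++*(int *) data;
  return 0;
}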
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if ((tem = regno_use_in (regno, XEXP (x, i))))
	    return tem;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
	    return tem;
    }

  return NULL_RTX;
}
/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The more
   positive the value, the stronger the preference for being the first
   operand.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always become the second operand.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -10;
  if (code == CONST_WIDE_INT)
    return -9;
  if (code == CONST_DOUBLE)
    return -8;
  if (code == CONST_FIXED)
    return -8;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
	return -7;
      if (code == CONST_WIDE_INT)
	return -6;
      if (code == CONST_DOUBLE)
	return -5;
      if (code == CONST_FIXED)
	return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
	return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
	 of objects.  Prefer pointer objects over non pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
	  || (MEM_P (op) && MEM_POINTER (op)))
	return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
	 This helps to make things linear.  In particular,
	 (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
	 operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
	 is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
	return 1;
      /* FALLTHRU */

    default:
      return 0;
    }
}
/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

bool
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
	  < commutative_operand_precedence (y));
}
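/* Illustrative sketch (an assumption, not original code): canonicalizing
   the operands of a commutative operation before emitting it.  After the
   swap, a constant or other low-precedence operand is second, e.g.
   (plus (reg) (const_int)) rather than (plus (const_int) (reg)).  */

static void ATTRIBUTE_UNUSED
example_canonicalize_operands (rtx *op0, rtx *op1)
{
  if (swap_commutative_operands_p (*op0, *op1))
    std::swap (*op0, *op1);
}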
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */
int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:  case POST_INC:
    case PRE_DEC:  case POST_DEC:
    case PRE_MODIFY:  case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
	return 1;
    default:
      break;
    }
  return 0;
}
/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */
int
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return 0;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
	    return 1;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (in, i) - 1; j >= 0; j--)
	  if (loc == &XVECEXP (in, i, j)
	      || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
	    return 1;
    }
  return 0;
}
/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

unsigned int
subreg_lsb_1 (machine_mode outer_mode,
	      machine_mode inner_mode,
	      unsigned int subreg_byte)
{
  unsigned int bitpos;
  unsigned int byte;
  unsigned int word;

  /* A paradoxical subreg begins at bit position 0.  */
  if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
    return 0;

  if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    /* If the subreg crosses a word boundary ensure that
       it also begins and ends on a word boundary.  */
    gcc_assert (!((subreg_byte % UNITS_PER_WORD
		   + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
		  && (subreg_byte % UNITS_PER_WORD
		      || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));

  if (WORDS_BIG_ENDIAN)
    word = (GET_MODE_SIZE (inner_mode)
	    - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
  else
    word = subreg_byte / UNITS_PER_WORD;
  bitpos = word * BITS_PER_WORD;

  if (BYTES_BIG_ENDIAN)
    byte = (GET_MODE_SIZE (inner_mode)
	    - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
  else
    byte = subreg_byte % UNITS_PER_WORD;
  bitpos += byte * BITS_PER_UNIT;

  return bitpos;
}
/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

unsigned int
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
		       SUBREG_BYTE (x));
}
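/* Worked example (illustrative, not original text): on a little-endian
   target with 4-byte words, (subreg:QI (reg:SI) 3) has SUBREG_BYTE 3, so
   word = 3/4 = 0 and byte = 3%4 = 3, and the subreg begins at bit
   3 * 8 = 24.  On a fully big-endian target the same QImode subreg of an
   SImode register gives byte = (4 - (3 + 1)) % 4 = 0, so it begins at
   bit 0.  */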
/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.

   Rather than considering one particular inner register (and thus one
   particular "outer" register) in isolation, this function really uses
   XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
   function does not check whether adding INFO->offset to XREGNO gives
   a valid hard register; even if INFO->offset + XREGNO is out of range,
   there might be another register of the same type that is in range.
   Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
   register, since that can depend on things like whether the final
   register number is even or odd.  Callers that want to check whether
   this particular subreg can be replaced by a simple (reg ...) should
   use simplify_subreg_regno.  */

void
subreg_get_info (unsigned int xregno, machine_mode xmode,
		 unsigned int offset, machine_mode ymode,
		 struct subreg_info *info)
{
  int nregs_xmode, nregs_ymode;
  int mode_multiple, nregs_multiple;
  int offset_adj, y_offset, y_offset_adj;
  int regsize_xmode, regsize_ymode;
  bool rknown;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  rknown = false;

  /* If there are holes in a non-scalar mode in registers, we expect
     that it is made up of its units concatenated together.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      machine_mode xmode_unit;

      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
		  == (GET_MODE_NUNITS (xmode)
		      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs[xregno][xmode]
		  == (hard_regno_nregs[xregno][xmode_unit]
		      * GET_MODE_NUNITS (xmode)));

      /* You can only ask for a SUBREG of a value with holes in the middle
	 if you don't cross the holes.  (Such a SUBREG should be done by
	 picking a different register class, or doing it in memory if
	 necessary.)  An example of a value with holes is XCmode on 32-bit
	 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
	 3 for each part, but in memory it's two 128-bit parts.
	 Padding is assumed to be at the end (not necessarily the 'high part')
	 of each unit.  */
      if ((offset / GET_MODE_SIZE (xmode_unit) + 1
	   < GET_MODE_NUNITS (xmode))
	  && (offset / GET_MODE_SIZE (xmode_unit)
	      != ((offset + GET_MODE_SIZE (ymode) - 1)
		  / GET_MODE_SIZE (xmode_unit))))
	{
	  info->representable_p = false;
	  rknown = true;
	}
    }
  else
    nregs_xmode = hard_regno_nregs[xregno][xmode];

  nregs_ymode = hard_regno_nregs[xregno][ymode];

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown
      && offset == 0
      && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
	 actual hard registers than the original register, we must
	 return a negative offset so that we find the proper highpart
	 of the register.  */
      if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
	  ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
	info->offset = nregs_xmode - nregs_ymode;
      else
	info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
      && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
    {
      regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
      regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
      if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
	{
	  info->representable_p = false;
	  info->nregs
	    = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
	  info->offset = offset / regsize_xmode;
	  return;
	}
      if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
	{
	  info->representable_p = false;
	  info->nregs
	    = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
	  info->offset = offset / regsize_xmode;
	  return;
	}
      /* It's not valid to extract a subreg of mode YMODE at OFFSET that
	 would go outside of XMODE.  */
      if (!rknown
	  && GET_MODE_SIZE (ymode) + offset > GET_MODE_SIZE (xmode))
	{
	  info->representable_p = false;
	  info->nregs = nregs_ymode;
	  info->offset = offset / regsize_xmode;
	  return;
	}
      /* Quick exit for the simple and common case of extracting whole
	 subregisters from a multiregister value.  */
      /* ??? It would be better to integrate this into the code below,
	 if we can generalize the concept enough and figure out how
	 odd-sized modes can coexist with the other weird cases we support.  */
      if (!rknown
	  && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
	  && regsize_xmode == regsize_ymode
	  && (offset % regsize_ymode) == 0)
	{
	  info->representable_p = true;
	  info->nregs = nregs_ymode;
	  info->offset = offset / regsize_ymode;
	  gcc_assert (info->offset + info->nregs <= nregs_xmode);
	  return;
	}
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
    {
      info->representable_p = true;
      rknown = true;

      if (offset == 0 || nregs_xmode == nregs_ymode)
	{
	  info->offset = 0;
	  info->nregs = nregs_ymode;
	  return;
	}
    }

  /* This should always pass, otherwise we don't know how to verify
     the constraint.  These conditions may be relaxed but
     subreg_regno_offset would need to be redesigned.  */
  gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);

  if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
      && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
    {
      HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
      HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
      HOST_WIDE_INT off_low = offset & (ysize - 1);
      HOST_WIDE_INT off_high = offset & ~(ysize - 1);
      offset = (xsize - ysize - off_high) | off_low;
    }
  /* The XMODE value can be seen as a vector of NREGS_XMODE
     values.  The subreg must represent a lowpart of given field.
     Compute what field it is.  */
  offset_adj = offset;
  offset_adj -= subreg_lowpart_offset (ymode,
				       mode_for_size (GET_MODE_BITSIZE (xmode)
						      / nregs_xmode,
						      MODE_INT, 0));

  /* Size of ymode must not be greater than the size of xmode.  */
  mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
  gcc_assert (mode_multiple != 0);

  y_offset = offset / GET_MODE_SIZE (ymode);
  y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
  nregs_multiple = nregs_xmode / nregs_ymode;

  gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((mode_multiple % nregs_multiple) == 0);

  if (!rknown)
    {
      info->representable_p
	= (!(y_offset_adj % (mode_multiple / nregs_multiple)));
      rknown = true;
    }
  info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
  info->nregs = nregs_ymode;
}
/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */
unsigned int
subreg_regno_offset (unsigned int xregno, machine_mode xmode,
		     unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}
/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */
bool
subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
			       unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}
/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
		       unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

#ifdef CANNOT_CHANGE_MODE_CLASS
  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
      /* We can use mode change in LRA for some transformations.  */
      && ! lra_in_progress)
    return -1;
#endif

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && xregno == FRAME_POINTER_REGNUM)
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM
      /* We should convert hard stack register in LRA if it is
	 necessary.  */
      && ! lra_in_progress)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how complex FP arguments are passed
     on IA-64 and should be fixed.  See PR target/49226.  */
  if (!HARD_REGNO_MODE_OK (yregno, ymode)
      && HARD_REGNO_MODE_OK (xregno, xmode))
    return -1;

  return (int) yregno;
}
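/* Illustrative sketch (an assumption, not original code): rewriting
   (subreg:YMODE (reg:XMODE XREGNO) OFFSET) as a plain hard register when
   simplify_subreg_regno allows it.  The helper name is hypothetical.  */

static rtx ATTRIBUTE_UNUSED
example_fold_hard_subreg (machine_mode ymode, unsigned int xregno,
			  machine_mode xmode, unsigned int offset)
{
  int yregno = simplify_subreg_regno (xregno, xmode, offset, ymode);
  return yregno >= 0 ? gen_rtx_REG (ymode, yregno) : NULL_RTX;
}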
/* Return the final regno that a subreg expression refers to.  */
unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
				     GET_MODE (subreg),
				     SUBREG_BYTE (x),
				     GET_MODE (x));
  return ret;
}
/* Return the number of registers that a subreg expression refers
   to.  */
unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}

/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */

unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
		   &info);
  return info.nregs;
}
struct parms_set_data
{
  int nregs;
  HARD_REG_SET regs;
};

/* Helper function for noticing stores to parameter registers.  */
static void
parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct parms_set_data *const d = (struct parms_set_data *) data;
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
      && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
    {
      CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
      d->nregs--;
    }
}
/* Look backward for first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */
rtx_insn *
find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
{
  struct parms_set_data parm;
  rtx p;
  rtx_insn *before, *first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
	&& REG_P (XEXP (XEXP (p, 0), 0)))
      {
	gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

	/* We only care about registers which can hold function
	   arguments.  */
	if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
	  continue;

	SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
	parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
	 another.  Stop in that case.  */
      if (CALL_P (before))
	break;

      /* Our caller must either ensure that we will find all sets
	 (in case code has not been optimized yet), or guard against
	 possible labels by setting BOUNDARY to the preceding
	 CODE_LABEL.  */
      if (LABEL_P (before))
	{
	  gcc_assert (before == boundary);
	  break;
	}

      if (INSN_P (before))
	{
	  int nregs_old = parm.nregs;
	  note_stores (PATTERN (before), parms_set, &parm);
	  /* If we found something that did not set a parameter reg,
	     we're done.  Do not keep going, as that might result
	     in hoisting an insn before the setting of a pseudo
	     that is used by the hoisted insn.  */
	  if (nregs_old != parm.nregs)
	    first_set = before;
	  else
	    break;
	}
    }
  return first_set;
}
/* Return true if we should avoid inserting code between INSN and preceding
   call instruction.  */

bool
keep_with_call_p (const rtx_insn *insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
	  && fixed_regs[REGNO (SET_DEST (set))]
	  && general_operand (SET_SRC (set), VOIDmode))
	return true;
      if (REG_P (SET_SRC (set))
	  && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
	  && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	return true;
      /* There may be a stack pop just after the call and before the store
	 of the return register.  Search for the actual store when deciding
	 if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
	{
	  /* This CONST_CAST is okay because next_nonnote_insn just
	     returns its argument and we assign it to a const_rtx
	     variable.  */
	  const rtx_insn *i2
	    = next_nonnote_insn (const_cast<rtx_insn *> (insn));
	  if (i2 && keep_with_call_p (i2))
	    return true;
	}
    }
  return false;
}
/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);
  rtx_jump_table_data *table;

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &table))
    {
      rtvec vec = table->get_labels ();
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
	if (XEXP (RTVEC_ELT (vec, i), 0) == label)
	  return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}
/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   X appears as operand OPNO in an expression with code OUTER_CODE.
   SPEED specifies whether costs optimized for speed or size should
   be returned.  */

int
rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
	  int opno, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;
  int factor;

  if (x == 0)
    return 0;

  if (GET_MODE (x) != VOIDmode)
    mode = GET_MODE (x);

  /* A size N times larger than UNITS_PER_WORD likely needs N times as
     many insns, taking N times as long.  */
  factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
  if (factor == 0)
    factor = 1;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      /* Multiplication has time-complexity O(N*N), where N is the
	 number of units (translated from digits) when using
	 schoolbook long multiplication.  */
      total = factor * factor * COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      /* Similarly, complexity for schoolbook long division.  */
      total = factor * factor * COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    case SET:
      /* A SET doesn't have a mode, so let's look at the SET_DEST to get
	 the mode for the factor.  */
      mode = GET_MODE (SET_DEST (x));
      factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
      if (factor == 0)
	factor = 1;
      /* FALLTHRU */
    default:
      total = factor * COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
	 the mode, the more expensive it is.  */
      if (! MODES_TIEABLE_P (mode, GET_MODE (SUBREG_REG (x))))
	return COSTS_N_INSNS (2 + factor);
      break;

    default:
      if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
	return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), mode, code, i, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);

  return total;
}
/* Fill in the structure C with information about both speed and size rtx
   costs for X, which is operand OPNO in an expression with code OUTER.  */

void
get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
		   struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, mode, outer, opno, true);
  c->size = rtx_cost (x, mode, outer, opno, false);
}
/* Return cost of address expression X.
   Expect that X is a properly formed address reference.

   The SPEED parameter specifies whether costs optimized for speed
   or size should be returned.  */

int
address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for cost of various unusual addresses, such as operands
     of push instruction.  It is not worthwhile to complicate writing
     of the target hook by such cases.  */

  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, mode, as, speed);
}
/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
{
  return rtx_cost (x, Pmode, MEM, 0, speed);
}
/* Return the bits of X that may be nonzero when X is used in MODE.  */

unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, machine_mode mode)
{
  return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
}

/* Return the number of leading bits of X known to equal its sign bit
   when X is used in MODE.  */

unsigned int
num_sign_bit_copies (const_rtx x, machine_mode mode)
{
  return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
}
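/* Worked example (illustrative, not original text): for
   (and:SI (reg:SI) (const_int 255)), nonzero_bits in SImode returns 0xff,
   so bits 31..8 are known zero.  Since the sign bit is therefore known to
   be zero, num_sign_bit_copies returns 24: bits 31..8 all equal the sign
   bit, while bit 7 may vary.  */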
/* Return true if nonzero_bits1 might recurse into both operands
   of X.  */

static inline bool
nonzero_bits_binary_arith_p (const_rtx x)
{
  if (!ARITHMETIC_P (x))
    return false;
  switch (GET_CODE (x))
    {
    case AND:  case XOR:  case IOR:
    case UMIN:  case UMAX:  case SMIN:  case SMAX:
    case PLUS:  case MINUS:  case MULT:
    case DIV:  case UDIV:  case MOD:  case UMOD:
      return true;
    default:
      return false;
    }
}
/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, machine_mode mode, const_rtx known_x,
		     machine_mode known_mode,
		     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (nonzero_bits_binary_arith_p (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));

      /* Check the second level.  */
      if (nonzero_bits_binary_arith_p (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return nonzero_bits1 (x, mode, x1, mode,
			      cached_nonzero_bits (x1, mode, known_x,
						   known_mode, known_ret));

      if (nonzero_bits_binary_arith_p (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}
/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior

/* Given an expression, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is
   an arithmetic operation, we can do better.  */

static unsigned HOST_WIDE_INT
nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
	       machine_mode known_mode,
	       unsigned HOST_WIDE_INT known_ret)
{
  unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
  unsigned HOST_WIDE_INT inner_nz;
  enum rtx_code code;
  machine_mode inner_mode;
  unsigned int mode_width = GET_MODE_PRECISION (mode);

  /* For floating-point and vector values, assume all bits are needed.  */
  if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return nonzero;

  /* If X is wider than MODE, use its mode instead.  */
  if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
    {
      mode = GET_MODE (x);
      nonzero = GET_MODE_MASK (mode);
      mode_width = GET_MODE_PRECISION (mode);
    }

  if (mode_width > HOST_BITS_PER_WIDE_INT)
    /* Our only callers in this case look for single bit values.  So
       just return the mode mask.  Those tests will then be false.  */
    return nonzero;

  /* If MODE is wider than X, but both are a single word for both the host
     and target machines, we can compute this from which bits of the
     object might be nonzero in its own mode, taking into account the fact
     that on many CISC machines, accessing an object in a wider mode
     causes the high-order bits to become undefined.  So they are
     not known to be zero.  */

  if (!WORD_REGISTER_OPERATIONS
      && GET_MODE (x) != VOIDmode
      && GET_MODE (x) != mode
      && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
      && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
      && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
    {
      nonzero &= cached_nonzero_bits (x, GET_MODE (x),
				      known_x, known_mode, known_ret);
      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
      return nonzero;
    }

  /* Please keep nonzero_bits_binary_arith_p above in sync with
     the code in the switch below.  */
  code = GET_CODE (x);
  switch (code)
    {
    case REG:
#if defined(POINTERS_EXTEND_UNSIGNED)
      /* If pointers extend unsigned and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be zero.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
	  && REG_POINTER (x)
	  && !targetm.have_ptr_extend ())
	nonzero &= GET_MODE_MASK (ptr_mode);
#endif

      /* Include declared information about alignment of pointers.  */
      /* ??? We don't properly preserve REG_POINTER changes across
	 pointer-to-integer casts, so we can't trust it except for
	 things that we know must be pointers.  See execute/960116-1.c.  */
      if ((x == stack_pointer_rtx
	   || x == frame_pointer_rtx
	   || x == arg_pointer_rtx)
	  && REGNO_POINTER_ALIGN (REGNO (x)))
	{
	  unsigned HOST_WIDE_INT alignment
	    = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;

#ifdef PUSH_ROUNDING
	  /* If PUSH_ROUNDING is defined, it is possible for the
	     stack to be momentarily aligned only to that amount,
	     so we pick the least alignment.  */
	  if (x == stack_pointer_rtx && PUSH_ARGS)
	    alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
			     alignment);
#endif

	  nonzero &= ~(alignment - 1);
	}

      {
	unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
	rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
						  known_mode, known_ret,
						  &nonzero_for_hook);

	if (new_rtx)
	  nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
						   known_mode, known_ret);

	return nonzero_for_hook;
      }

    case CONST_INT:
      /* If X is negative in MODE, sign-extend the value.  */
      if (SHORT_IMMEDIATES_SIGN_EXTEND && INTVAL (x) > 0
	  && mode_width < BITS_PER_WORD
	  && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1)))
	     != 0)
	return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);

      return UINTVAL (x);

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* In many, if not most, RISC machines, reading a byte from memory
	 zeros the rest of the register.  Noticing that fact saves a lot
	 of extra zero-extends.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
	nonzero &= GET_MODE_MASK (GET_MODE (x));
#endif
      break;

    case EQ:  case NE:
    case UNEQ:  case LTGT:
    case GT:  case GTU:  case UNGT:
    case LT:  case LTU:  case UNLT:
    case GE:  case GEU:  case UNGE:
    case LE:  case LEU:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If this produces an integer result, we know which bits are set.
	 Code here used to clear bits outside the mode of X, but that is
	 now done above.  */
      /* Mind that MODE is the mode the caller wants to look at this
	 operation in, and not the actual operation mode.  We can wind
	 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
	 that describes the results of a vector compare.  */
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
	  && mode_width <= HOST_BITS_PER_WIDE_INT)
	nonzero = STORE_FLAG_VALUE;
      break;

    case NEG:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
	 and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
	  == GET_MODE_PRECISION (GET_MODE (x)))
	nonzero = 1;
#endif

      if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
	nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
      break;

    case ABS:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
	 and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
	  == GET_MODE_PRECISION (GET_MODE (x)))
	nonzero = 1;
#endif
      break;

    case TRUNCATE:
      nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
				       known_x, known_mode, known_ret)
		  & GET_MODE_MASK (mode));
      break;

    case ZERO_EXTEND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
	nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
      break;

    case SIGN_EXTEND:
      /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
	 Otherwise, show all the bits in the outer mode but not the inner
	 may be nonzero.  */
      inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
	{
	  inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
	  if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
	    inner_nz |= (GET_MODE_MASK (mode)
			 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
	}

      nonzero &= inner_nz;
      break;

    case AND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret)
		 & cached_nonzero_bits (XEXP (x, 1), mode,
					known_x, known_mode, known_ret);
      break;

    case XOR:   case IOR:
    case UMIN:  case UMAX:  case SMIN:  case SMAX:
      {
	unsigned HOST_WIDE_INT nonzero0
	  = cached_nonzero_bits (XEXP (x, 0), mode,
				 known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero0) != nonzero)
	  nonzero &= nonzero0
		     | cached_nonzero_bits (XEXP (x, 1), mode,
					    known_x, known_mode, known_ret);
      }
      break;

    case PLUS:  case MINUS:
    case MULT:
    case DIV:   case UDIV:
    case MOD:   case UMOD:
      /* We can apply the rules of arithmetic to compute the number of
	 high- and low-order zero bits of these operations.  We start by
	 computing the width (position of the highest-order nonzero bit)
	 and the number of low-order zero bits for each value.  */
      {
	unsigned HOST_WIDE_INT nz0
	  = cached_nonzero_bits (XEXP (x, 0), mode,
				 known_x, known_mode, known_ret);
	unsigned HOST_WIDE_INT nz1
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);
	int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
	int width0 = floor_log2 (nz0) + 1;
	int width1 = floor_log2 (nz1) + 1;
	int low0 = floor_log2 (nz0 & -nz0);
	int low1 = floor_log2 (nz1 & -nz1);
	unsigned HOST_WIDE_INT op0_maybe_minusp
	  = nz0 & (HOST_WIDE_INT_1U << sign_index);
	unsigned HOST_WIDE_INT op1_maybe_minusp
	  = nz1 & (HOST_WIDE_INT_1U << sign_index);
	unsigned int result_width = mode_width;
	int result_low = 0;

	switch (code)
	  {
	  case PLUS:
	    result_width = MAX (width0, width1) + 1;
	    result_low = MIN (low0, low1);
	    break;
	  case MINUS:
	    result_low = MIN (low0, low1);
	    break;
	  case MULT:
	    result_width = width0 + width1;
	    result_low = low0 + low1;
	    break;
	  case DIV:
	    if (width1 == 0)
	      break;
	    if (!op0_maybe_minusp && !op1_maybe_minusp)
	      result_width = width0;
	    break;
	  case UDIV:
	    if (width1 == 0)
	      break;
	    result_width = width0;
	    break;
	  case MOD:
	    if (width1 == 0)
	      break;
	    if (!op0_maybe_minusp && !op1_maybe_minusp)
	      result_width = MIN (width0, width1);
	    result_low = MIN (low0, low1);
	    break;
	  case UMOD:
	    if (width1 == 0)
	      break;
	    result_width = MIN (width0, width1);
	    result_low = MIN (low0, low1);
	    break;
	  default:
	    gcc_unreachable ();
	  }

	if (result_width < mode_width)
	  nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;

	if (result_low > 0)
	  nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
      }
      break;

    case ZERO_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
	nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
      break;

    case SUBREG:
      /* If this is a SUBREG formed for a promoted variable that has
	 been zero-extended, we know that at least the high-order bits
	 are zero, though others might be too.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
	nonzero = GET_MODE_MASK (GET_MODE (x))
		  & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
					 known_x, known_mode, known_ret);

      inner_mode = GET_MODE (SUBREG_REG (x));
      /* If the inner mode is a single word for both the host and target
	 machines, we can compute this from which bits of the inner
	 object might be nonzero.  */
      if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
	  && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
	{
	  nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
					  known_x, known_mode, known_ret);

#ifdef LOAD_EXTEND_OP
	  /* If this is a typical RISC machine, we only have to worry
	     about the way loads are extended.  */
	  if (WORD_REGISTER_OPERATIONS
	      && ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
		   ? val_signbit_known_set_p (inner_mode, nonzero)
		   : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
		  || !MEM_P (SUBREG_REG (x))))
#endif
	    {
	      /* On many CISC machines, accessing an object in a wider mode
		 causes the high-order bits to become undefined.  So they are
		 not known to be zero.  */
	      if (GET_MODE_PRECISION (GET_MODE (x))
		  > GET_MODE_PRECISION (inner_mode))
		nonzero |= (GET_MODE_MASK (GET_MODE (x))
			    & ~GET_MODE_MASK (inner_mode));
	    }
	}
      break;

    case ASHIFTRT:
    case LSHIFTRT:
    case ASHIFT:
    case ROTATE:
      /* The nonzero bits are in two classes: any bits within MODE
	 that aren't in GET_MODE (x) are always significant.  The rest of the
	 nonzero bits are those that are significant in the operand of
	 the shift when shifted the appropriate number of bits.  This
	 shows that high-order bits are cleared by the right shift and
	 low-order bits by left shifts.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
	{
	  machine_mode inner_mode = GET_MODE (x);
	  unsigned int width = GET_MODE_PRECISION (inner_mode);
	  int count = INTVAL (XEXP (x, 1));
	  unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
	  unsigned HOST_WIDE_INT op_nonzero
	    = cached_nonzero_bits (XEXP (x, 0), mode,
				   known_x, known_mode, known_ret);
	  unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
	  unsigned HOST_WIDE_INT outer = 0;

	  if (mode_width > width)
	    outer = (op_nonzero & nonzero & ~mode_mask);

	  if (code == LSHIFTRT)
	    inner >>= count;
	  else if (code == ASHIFTRT)
	    {
	      inner >>= count;

	      /* If the sign bit may have been nonzero before the shift, we
		 need to mark all the places it could have been copied to
		 by the shift as possibly nonzero.  */
	      if (inner & (HOST_WIDE_INT_1U << (width - 1 - count)))
		inner |= ((HOST_WIDE_INT_1U << count) - 1)
			 << (width - count);
	    }
	  else if (code == ASHIFT)
	    inner <<= count;
	  else
	    inner = ((inner << (count % width)
		      | (inner >> (width - (count % width)))) & mode_mask);

	  nonzero &= (outer | inner);
	}
      break;

    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CLRSB:
      /* This is at most the number of bits in the mode minus 1.  */
      nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
	unsigned HOST_WIDE_INT nonzero_true
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero_true) != nonzero)
	  nonzero &= nonzero_true
		     | cached_nonzero_bits (XEXP (x, 2), mode,
					    known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}

/* See the macro definition above.  */
#undef cached_num_sign_bit_copies
/* Return true if num_sign_bit_copies1 might recurse into both operands
   of X.  */

static inline bool
num_sign_bit_copies_binary_arith_p (const_rtx x)
{
  if (!ARITHMETIC_P (x))
    return false;
  switch (GET_CODE (x))
    {
    case IOR:  case AND:  case XOR:
    case SMIN:  case SMAX:  case UMIN:  case UMAX:
    case PLUS:  case MINUS:  case MULT:
      return true;
    default:
      return false;
    }
}
/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, machine_mode mode, const_rtx known_x,
			    machine_mode known_mode,
			    unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (num_sign_bit_copies_binary_arith_p (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));

      /* Check the second level.  */
      if (num_sign_bit_copies_binary_arith_p (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x1, mode,
				cached_num_sign_bit_copies (x1, mode, known_x,
							    known_mode,
							    known_ret));

      if (num_sign_bit_copies_binary_arith_p (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}
/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE; if MODE is
   VOIDmode, X will be used in its own mode.  The returned value will always
   be between 1 and the number of bits in MODE.  */

static unsigned int
num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x,
		      machine_mode known_mode,
		      unsigned int known_ret)
{
  enum rtx_code code = GET_CODE (x);
  unsigned int bitwidth = GET_MODE_PRECISION (mode);
  int num0, num1, result;
  unsigned HOST_WIDE_INT nonzero;

  /* If we weren't given a mode, use the mode of X.  If the mode is still
     VOIDmode, we don't know anything.  Likewise if one of the modes is
     floating-point.  */

  if (mode == VOIDmode)
    mode = GET_MODE (x);

  if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return 1;

  /* For a smaller object, just ignore the high bits.  */
  if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
    {
      num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
					 known_x, known_mode, known_ret);
      return MAX (1,
		  num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
    }

  if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
    {
      /* If this machine does not do all register operations on the entire
	 register and MODE is wider than the mode of X, we can say nothing
	 at all about the high-order bits.  */
      if (!WORD_REGISTER_OPERATIONS)
	return 1;

      /* Likewise on machines that do, if the mode of the object is smaller
	 than a word and loads of that size don't sign extend, we can say
	 nothing about the high order bits.  */
      if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
#ifdef LOAD_EXTEND_OP
	  && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
#endif
	  )
	return 1;
    }

  /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
     the code in the switch below.  */
  switch (code)
    {
    case REG:

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* If pointers extend signed and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be sign bit copies.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
	  && mode == Pmode && REG_POINTER (x)
	  && !targetm.have_ptr_extend ())
	return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
#endif

      {
	unsigned int copies_for_hook = 1, copies = 1;
	rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
							 known_mode, known_ret,
							 &copies_for_hook);

	if (new_rtx)
	  copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
					       known_mode, known_ret);

	if (copies > 1 || copies_for_hook > 1)
	  return MAX (copies, copies_for_hook);

	/* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
      }
      break;

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* Some RISC machines sign-extend all loads of smaller than a word.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
	return MAX (1, ((int) bitwidth
			- (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
#endif
      break;

    case CONST_INT:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

    case SUBREG:
      /* If this is a SUBREG for a promoted object that is sign-extended
	 and we are looking at it in a wider mode, we know that at least the
	 high-order bits are known to be sign bit copies.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					     known_x, known_mode, known_ret);
	  return MAX ((int) bitwidth
		      - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
		      num0);
	}

      /* For a smaller object, just ignore the high bits.  */
      if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
					     known_x, known_mode, known_ret);
	  return
	    MAX (1, (num0
		     - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
			      - bitwidth)));
	}

#ifdef LOAD_EXTEND_OP
      /* For paradoxical SUBREGs on machines where all register operations
	 affect the entire register, just look inside.  Note that we are
	 passing MODE to the recursive call, so the number of sign bit copies
	 will remain relative to that mode, not the inner mode.  */

      /* This works only if loads sign extend.  Otherwise, if we get a
	 reload for the inner part, it may be loaded from the stack, and
	 then we lose all sign bit copies that existed before the store
	 to the stack.  */

      if (WORD_REGISTER_OPERATIONS
	  && paradoxical_subreg_p (x)
	  && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
	  && MEM_P (SUBREG_REG (x)))
	return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					   known_x, known_mode, known_ret);
#endif
      break;

    case SIGN_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1)))
	return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
      break;

    case SIGN_EXTEND:
      return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
	      + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
					    known_x, known_mode, known_ret));

    case TRUNCATE:
      /* For a smaller object, just ignore the high bits.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
					 known_x, known_mode, known_ret);
      return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
				    - bitwidth)));

    case NOT:
      return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);

    case ROTATE:       case ROTATERT:
      /* If we are rotating left by a number of bits less than the number
	 of sign bit copies, we can just subtract that amount from the
	 number.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < (int) bitwidth)
	{
	  num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					     known_x, known_mode, known_ret);
	  return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
				 : (int) bitwidth - INTVAL (XEXP (x, 1))));
	}
      break;

    case NEG:
      /* In general, this subtracts one sign bit copy.  But if the value
	 is known to be positive, the number of sign bit copies is the
	 same as that of the input.  Finally, if the input has just one bit
	 that might be nonzero, all the bits are copies of the sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return num0 > 1 ? num0 - 1 : 1;

      nonzero = nonzero_bits (XEXP (x, 0), mode);
      if (nonzero == 1)
	return bitwidth;

      if (num0 > 1
	  && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
	num0--;

      return num0;

    case IOR:   case AND:   case XOR:
    case SMIN:  case SMAX:  case UMIN:  case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
	 MIN and MAX operations always return one of the operands.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
);
5025 /* If num1 is clearing some of the top bits then regardless of
5026 the other term, we are guaranteed to have at least that many
5027 high-order zero bits. */
5030 && bitwidth
<= HOST_BITS_PER_WIDE_INT
5031 && CONST_INT_P (XEXP (x
, 1))
5032 && (UINTVAL (XEXP (x
, 1))
5033 & (HOST_WIDE_INT_1U
<< (bitwidth
- 1))) == 0)
5036 /* Similarly for IOR when setting high-order bits. */
5039 && bitwidth
<= HOST_BITS_PER_WIDE_INT
5040 && CONST_INT_P (XEXP (x
, 1))
5041 && (UINTVAL (XEXP (x
, 1))
5042 & (HOST_WIDE_INT_1U
<< (bitwidth
- 1))) != 0)
5045 return MIN (num0
, num1
);
5047 case PLUS
: case MINUS
:
5048 /* For addition and subtraction, we can have a 1-bit carry. However,
5049 if we are subtracting 1 from a positive number, there will not
5050 be such a carry. Furthermore, if the positive number is known to
5051 be 0 or 1, we know the result is either -1 or 0. */
5053 if (code
== PLUS
&& XEXP (x
, 1) == constm1_rtx
5054 && bitwidth
<= HOST_BITS_PER_WIDE_INT
)
5056 nonzero
= nonzero_bits (XEXP (x
, 0), mode
);
5057 if (((HOST_WIDE_INT_1U
<< (bitwidth
- 1)) & nonzero
) == 0)
5058 return (nonzero
== 1 || nonzero
== 0 ? bitwidth
5059 : bitwidth
- floor_log2 (nonzero
) - 1);
5062 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
5063 known_x
, known_mode
, known_ret
);
5064 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
5065 known_x
, known_mode
, known_ret
);
5066 result
= MAX (1, MIN (num0
, num1
) - 1);
5071 /* The number of bits of the product is the sum of the number of
5072 bits of both terms. However, unless one of the terms if known
5073 to be positive, we must allow for an additional bit since negating
5074 a negative number can remove one sign bit copy. */
5076 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
5077 known_x
, known_mode
, known_ret
);
5078 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
5079 known_x
, known_mode
, known_ret
);
5081 result
= bitwidth
- (bitwidth
- num0
) - (bitwidth
- num1
);
5083 && (bitwidth
> HOST_BITS_PER_WIDE_INT
5084 || (((nonzero_bits (XEXP (x
, 0), mode
)
5085 & (HOST_WIDE_INT_1U
<< (bitwidth
- 1))) != 0)
5086 && ((nonzero_bits (XEXP (x
, 1), mode
)
5087 & (HOST_WIDE_INT_1U
<< (bitwidth
- 1)))
5091 return MAX (1, result
);
5094 /* The result must be <= the first operand. If the first operand
5095 has the high bit set, we know nothing about the number of sign
5097 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
5099 else if ((nonzero_bits (XEXP (x
, 0), mode
)
5100 & (HOST_WIDE_INT_1U
<< (bitwidth
- 1))) != 0)
5103 return cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
5104 known_x
, known_mode
, known_ret
);
5107 /* The result must be <= the second operand. If the second operand
5108 has (or just might have) the high bit set, we know nothing about
5109 the number of sign bit copies. */
5110 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
5112 else if ((nonzero_bits (XEXP (x
, 1), mode
)
5113 & (HOST_WIDE_INT_1U
<< (bitwidth
- 1))) != 0)
5116 return cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
5117 known_x
, known_mode
, known_ret
);
5120 /* Similar to unsigned division, except that we have to worry about
5121 the case where the divisor is negative, in which case we have
5123 result
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
5124 known_x
, known_mode
, known_ret
);
5126 && (bitwidth
> HOST_BITS_PER_WIDE_INT
5127 || (nonzero_bits (XEXP (x
, 1), mode
)
5128 & (HOST_WIDE_INT_1U
<< (bitwidth
- 1))) != 0))
5134 result
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
5135 known_x
, known_mode
, known_ret
);
5137 && (bitwidth
> HOST_BITS_PER_WIDE_INT
5138 || (nonzero_bits (XEXP (x
, 1), mode
)
5139 & (HOST_WIDE_INT_1U
<< (bitwidth
- 1))) != 0))
5145 /* Shifts by a constant add to the number of bits equal to the
5147 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
5148 known_x
, known_mode
, known_ret
);
5149 if (CONST_INT_P (XEXP (x
, 1))
5150 && INTVAL (XEXP (x
, 1)) > 0
5151 && INTVAL (XEXP (x
, 1)) < GET_MODE_PRECISION (GET_MODE (x
)))
5152 num0
= MIN ((int) bitwidth
, num0
+ INTVAL (XEXP (x
, 1)));
5157 /* Left shifts destroy copies. */
5158 if (!CONST_INT_P (XEXP (x
, 1))
5159 || INTVAL (XEXP (x
, 1)) < 0
5160 || INTVAL (XEXP (x
, 1)) >= (int) bitwidth
5161 || INTVAL (XEXP (x
, 1)) >= GET_MODE_PRECISION (GET_MODE (x
)))
5164 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
5165 known_x
, known_mode
, known_ret
);
5166 return MAX (1, num0
- INTVAL (XEXP (x
, 1)));
5169 num0
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
5170 known_x
, known_mode
, known_ret
);
5171 num1
= cached_num_sign_bit_copies (XEXP (x
, 2), mode
,
5172 known_x
, known_mode
, known_ret
);
5173 return MIN (num0
, num1
);
5175 case EQ
: case NE
: case GE
: case GT
: case LE
: case LT
:
5176 case UNEQ
: case LTGT
: case UNGE
: case UNGT
: case UNLE
: case UNLT
:
5177 case GEU
: case GTU
: case LEU
: case LTU
:
5178 case UNORDERED
: case ORDERED
:
5179 /* If the constant is negative, take its 1's complement and remask.
5180 Then see how many zero bits we have. */
5181 nonzero
= STORE_FLAG_VALUE
;
5182 if (bitwidth
<= HOST_BITS_PER_WIDE_INT
5183 && (nonzero
& (HOST_WIDE_INT_1U
<< (bitwidth
- 1))) != 0)
5184 nonzero
= (~nonzero
) & GET_MODE_MASK (mode
);
5186 return (nonzero
== 0 ? bitwidth
: bitwidth
- floor_log2 (nonzero
) - 1);
5192 /* If we haven't been able to figure it out by one of the above rules,
5193 see if some of the high-order bits are known to be zero. If so,
5194 count those bits and return one less than that amount. If we can't
5195 safely compute the mask for this mode, always return BITWIDTH. */
5197 bitwidth
= GET_MODE_PRECISION (mode
);
5198 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
5201 nonzero
= nonzero_bits (x
, mode
);
5202 return nonzero
& (HOST_WIDE_INT_1U
<< (bitwidth
- 1))
5203 ? 1 : bitwidth
- floor_log2 (nonzero
) - 1;
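
/* A worked example of the CONST_INT rule above, for concreteness
   (illustrative values, not from any particular target): if X is
   (const_int -4) used in SImode, BITWIDTH is 32 and NONZERO is
   0xfffffffc.  The sign bit is set, so the value is complemented and
   remasked to 3; floor_log2 (3) is 1, giving 32 - 1 - 1 = 30.  Bits
   31..2 of -4 are indeed all copies of the sign bit.  */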
/* Calculate the rtx_cost of a single instruction.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
insn_rtx_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.
     We can't use single_set since we only have the pattern.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      if (set)
		return 0;
	      set = x;
	    }
	}
      if (!set)
	return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
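
/* For example (illustrative RTL), a pattern
   (set (reg:SI 0) (plus:SI (reg:SI 1) (const_int 8)))
   is costed as the set_src_cost of the PLUS in SImode, while a
   PARALLEL containing two SETs yields 0, since there is no single
   set whose source could be costed.  */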
/* Returns estimate on cost of computing SEQ.  */

unsigned
seq_cost (const rtx_insn *seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
	cost += set_rtx_cost (set, speed);
      else
	cost++;
    }

  return cost;
}
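
/* So a sequence of single-set insns costs the sum of their
   set_rtx_cost values, and any insn without a single set (a multi-set
   PARALLEL, for instance) is charged a flat 1.  (Illustrative
   summary of the loop above.)  */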
/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonizing it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */

rtx
canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
			rtx_insn **earliest,
			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx_insn *prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;

  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	 && op1 == CONST0_RTX (GET_MODE (op0))
	 && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || !NONJUMP_INSN_P (prev)
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}

      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (!REG_P (op0))
	break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
	  || !NONJUMP_INSN_P (prev)
	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
	  /* In cfglayout mode, there do not have to be labels at the
	     beginning of a block, or jumps at the end, so the previous
	     conditions would not stop us when we reach bb boundary.  */
	  || BLOCK_FOR_INSN (prev) != bb)
	break;

      set = set_of (op0, prev);

      if (set
	  && (GET_CODE (set) != SET
	      || !rtx_equal_p (SET_DEST (set), op0)))
	break;

      /* If this is setting OP0, get what it sets it to if it looks
	 relevant.  */
      if (set)
	{
	  machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* ??? We may not combine comparisons done in a CCmode with
	     comparisons not done in a CCmode.  This is to aid targets
	     like Alpha that have an IEEE compliant EQ instruction, and
	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
	     actually artificial, simply to prevent the combination, but
	     should not affect other platforms.

	     However, we must allow VOIDmode comparisons to match either
	     CCmode or non-CCmode comparison, because some ports have
	     modeless comparisons inside branch patterns.

	     ??? This mode check should perhaps look more like the mode check
	     in simplify_comparison in combine.  */
	  if (((GET_MODE_CLASS (mode) == MODE_CC)
	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
	      && mode != VOIDmode
	      && inner_mode != VOIDmode)
	    break;
	  if (GET_CODE (SET_SRC (set)) == COMPARE
	      || (((code == NE
		    || (code == LT
			&& val_signbit_known_set_p (inner_mode,
						    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		    || (code == LT
			&& SCALAR_FLOAT_MODE_P (inner_mode)
			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			    REAL_VALUE_NEGATIVE (fsfv)))
#endif
		    ))
		  && COMPARISON_P (SET_SRC (set))))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && val_signbit_known_set_p (inner_mode,
						     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     ))
		   && COMPARISON_P (SET_SRC (set)))
	    {
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else if ((code == EQ || code == NE)
		   && GET_CODE (SET_SRC (set)) == XOR)
	    /* Handle sequences like:

	       (set op0 (xor X Y))
	       ...(eq|ne op0 (const_int 0))...

	       in which case:

	       (eq op0 (const_int 0)) reduces to (eq X Y)
	       (ne op0 (const_int 0)) reduces to (ne X Y)

	       This is the form used by MIPS16, for example.  */
	    x = SET_SRC (set);
	  else
	    break;
	}

      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;

      if (x)
	{
	  /* If the caller is expecting the condition to be valid at INSN,
	     make sure X doesn't change before INSN.  */
	  if (valid_at_insn_p)
	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
	      break;
	  if (COMPARISON_P (x))
	    code = GET_CODE (x);
	  if (reverse_code)
	    {
	      code = reversed_comparison_code (x, prev);
	      if (code == UNKNOWN)
		return 0;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }

  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
      && CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
	{
	case LE:
	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
	    code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
	case GE:
	  if ((const_val & max_val)
	      != (HOST_WIDE_INT_1U
		  << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
	    code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
	  break;

	case LEU:
	  if (uconst_val < max_val)
	    code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
	  break;

	default:
	  break;
	}
    }

  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
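
/* Two concrete canonicalizations performed above (illustrative):
   (le (reg:SI 0) (const_int 4)) becomes (lt (reg:SI 0) (const_int 5))
   as long as 4 is not the maximum signed value in the mode, and
   (geu (reg:SI 0) (const_int 1)) becomes
   (gtu (reg:SI 0) (const_int 0)).  */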
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
	       int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
				 allow_cc_mode, valid_at_insn_p);
}
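
/* For instance (illustrative RTL), given a jump whose pattern is
   (set (pc) (if_then_else (ne (reg:SI 0) (const_int 0))
			   (label_ref L) (pc)))
   the returned condition is the canonicalized (ne ...).  If the
   (label_ref L) sits in the else arm instead, the condition is
   reversed before canonicalization.  */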
/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  machine_mode mode, in_mode;

  for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
       in_mode = GET_MODE_WIDER_MODE (mode))
    for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
	 mode = GET_MODE_WIDER_MODE (mode))
      {
	machine_mode i;

	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
	   extends to the next widest mode.  */
	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
		    || GET_MODE_WIDER_MODE (mode) == in_mode);

	/* We are in in_mode.  Count how many bits outside of mode
	   have to be copies of the sign-bit.  */
	for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
	  {
	    machine_mode wider = GET_MODE_WIDER_MODE (i);

	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
		/* We can only check sign-bit copies starting from the
		   top-bit.  In order to be able to check the bits we
		   have already seen we pretend that subsequent bits
		   have to be sign-bit copies too.  */
		|| num_sign_bit_copies_in_rep [in_mode][mode])
	      num_sign_bit_copies_in_rep [in_mode][mode]
		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
	  }
      }
}
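
/* As an illustration: on a hypothetical target whose
   TARGET_MODE_REP_EXTENDED (SImode, DImode) hook returns SIGN_EXTEND,
   the loop above records num_sign_bit_copies_in_rep[DImode][SImode]
   == 32, i.e. all 32 bits of a DImode value above the SImode part
   must be copies of the sign bit.  */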
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}
/* Return true if RTX code CODE has a single sequence of zero or more
   "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
   entry in that case.  */

static bool
setup_reg_subrtx_bounds (unsigned int code)
{
  const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
  unsigned int i = 0;
  for (; format[i] != 'e'; ++i)
    {
      if (!format[i])
	/* No subrtxes.  Leave start and count as 0.  */
	return true;
      if (format[i] == 'E' || format[i] == 'V')
	return false;
    }

  /* Record the sequence of 'e's.  */
  rtx_all_subrtx_bounds[code].start = i;
  do
    ++i;
  while (format[i] == 'e');
  rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
  /* rtl-iter.h relies on this.  */
  gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);

  for (; format[i]; ++i)
    if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
      return false;

  return true;
}
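
/* For example, PLUS has format "ee", so its bounds become
   { start = 0, count = 2 }; IF_THEN_ELSE ("eee") gets count 3, the
   maximum the iterator supports; and PARALLEL ("E") is rejected here
   and handled by the UCHAR_MAX fallback in init_rtlanal below.  */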
/* Initialize rtx_all_subrtx_bounds.  */
void
init_rtlanal (void)
{
  unsigned int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      if (!setup_reg_subrtx_bounds (i))
	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
      if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
    }

  init_num_sign_bit_copies_in_rep ();
}
/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}
/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */

int
low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
	return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
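
/* Two sample values, assuming a 32-bit mode: m == 0x3f gives
   exact_log2 (0x40) == 6, a 6-bit low-order field; m == 0x30 gives
   exact_log2 (0x31) == -1, since the selected field does not start
   at bit 0.  */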
/* Return the mode of MEM's address.  */

machine_mode
get_address_mode (rtx mem)
{
  machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return mode;
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}
/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.

   TODO: This function needs to be rewritten to work on any size
   integer.  */

void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
	{
	  /* In this case the CONST_INT holds both target words.
	     Extract the bits from it into two word-sized pieces.
	     Sign extend each half to HOST_WIDE_INT.  */
	  unsigned HOST_WIDE_INT low, high;
	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
	  unsigned bits_per_word = BITS_PER_WORD;

	  /* Set sign_bit to the most significant bit of a word.  */
	  sign_bit = 1;
	  sign_bit <<= bits_per_word - 1;

	  /* Set mask so that all bits of the word are set.  We could
	     have used 1 << BITS_PER_WORD instead of basing the
	     calculation on sign_bit.  However, on machines where
	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
	     compiler warning, even though the code would never be
	     executed.  */
	  mask = sign_bit << 1;
	  mask--;

	  /* Set sign_extend as any remaining bits.  */
	  sign_extend = ~mask;

	  /* Pick the lower word and sign-extend it.  */
	  low = INTVAL (value);
	  low &= mask;
	  if (low & sign_bit)
	    low |= sign_extend;

	  /* Pick the higher word, shifted to the least significant
	     bits, and sign-extend it.  */
	  high = INTVAL (value);
	  high >>= bits_per_word - 1;
	  high >>= 1;
	  high &= mask;
	  if (high & sign_bit)
	    high |= sign_extend;

	  /* Store the words in the target machine order.  */
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = GEN_INT (high);
	      *second = GEN_INT (low);
	    }
	  else
	    {
	      *first = GEN_INT (low);
	      *second = GEN_INT (high);
	    }
	}
      else
	{
	  /* The rule for using CONST_INT for a wider mode
	     is that we regard the value as signed.
	     So sign-extend it.  */
	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = high;
	      *second = value;
	    }
	  else
	    {
	      *first = value;
	      *second = high;
	    }
	}
    }
  else if (GET_CODE (value) == CONST_WIDE_INT)
    {
      /* All of this is scary code and needs to be converted to
	 properly work with any size integer.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	}
      else
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	}
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
	{
	  *first = const0_rtx;
	  *second = value;
	}
      else
	{
	  *first = value;
	  *second = const0_rtx;
	}
    }
  else if (GET_MODE (value) == VOIDmode
	   /* This is the old way we did CONST_DOUBLE integers.  */
	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
	 So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
	}
      else
	{
	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
	}
    }
  else
    {
      long l[2];

      /* Note, this converts the REAL_VALUE_TYPE to the target's
	 format, splits up the floating point double and outputs
	 exactly 32 bits of it into each of l[0] and l[1] --
	 not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);

      /* If 32 bits is an entire word for the target, but not for the host,
	 then sign-extend on the host so that the number will look the same
	 way on the host that it would on the target.  See for instance
	 simplify_unary_operation.  The #if is needed to avoid compiler
	 warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
	{
	  if (l[0] & ((long) 1 << 31))
	    l[0] |= ((unsigned long) (-1) << 32);
	  if (l[1] & ((long) 1 << 31))
	    l[1] |= ((unsigned long) (-1) << 32);
	}
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}
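
/* A concrete case (illustrative): with 32-bit words and a 64-bit
   HOST_WIDE_INT, splitting (const_int 0x100000003) yields a low word
   of 3 and a high word of 1, so a little-endian target gets *FIRST ==
   (const_int 3) and *SECOND == (const_int 1); a big-endian target
   gets the opposite order.  */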
/* Return true if X is a sign_extract or zero_extract from the least
   significant bit.  */

static bool
lsb_bitfield_op_p (rtx x)
{
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
    {
      machine_mode mode = GET_MODE (XEXP (x, 0));
      HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));

      return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
    }
  return false;
}
/* Strip outer address "mutations" from LOC and return a pointer to the
   inner value.  If OUTER_CODE is nonnull, store the code of the innermost
   stripped expression there.

   "Mutations" either convert between modes or apply some kind of
   extension, truncation or alignment.  */

rtx *
strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
  for (;;)
    {
      enum rtx_code code = GET_CODE (*loc);
      if (GET_RTX_CLASS (code) == RTX_UNARY)
	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
	   used to convert between pointer sizes.  */
	loc = &XEXP (*loc, 0);
      else if (lsb_bitfield_op_p (*loc))
	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
	   acts as a combined truncation and extension.  */
	loc = &XEXP (*loc, 0);
      else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
	/* (and ... (const_int -X)) is used to align to X bytes.  */
	loc = &XEXP (*loc, 0);
      else if (code == SUBREG
	       && !OBJECT_P (SUBREG_REG (*loc))
	       && subreg_lowpart_p (*loc))
	/* (subreg (operator ...) ...) inside and is used for mode
	   conversion too.  */
	loc = &SUBREG_REG (*loc);
      else
	{
	  if (outer_code)
	    *outer_code = code;
	  return loc;
	}
    }
}
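
/* As an example (illustrative RTL), given the address
   (and:DI (subreg:DI (plus:SI (reg:SI 0) (reg:SI 1)) 0) (const_int -16))
   the AND (an alignment) and the lowpart SUBREG (a mode conversion)
   are both stripped, a pointer to the PLUS is returned, and
   *OUTER_CODE, if supplied, is set to PLUS.  */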
/* Return true if CODE applies some kind of scale.  The scaled value is
   the first operand and the scale is the second.  */

static bool
binary_scale_code_p (enum rtx_code code)
{
  return (code == MULT
	  || code == ASHIFT
	  /* Needed by ARM targets.  */
	  || code == ASHIFTRT
	  || code == LSHIFTRT
	  || code == ROTATE
	  || code == ROTATERT);
}
/* If *INNER can be interpreted as a base, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_base_term (rtx *inner)
{
  if (GET_CODE (*inner) == LO_SUM)
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}
/* If *INNER can be interpreted as an index, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_index_term (rtx *inner)
{
  /* At present, only constant scales are allowed.  */
  if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}
/* Set the segment part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->segment);
  info->segment = loc;
  info->segment_term = inner;
}

/* Set the base part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_base (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->base);
  info->base = loc;
  info->base_term = inner;
}

/* Set the index part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_index (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->index);
  info->index = loc;
  info->index_term = inner;
}

/* Set the displacement part of address INFO to LOC, given that INNER
   is the constant term.  */

static void
set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->disp);
  info->disp = loc;
  info->disp_term = inner;
}
/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
   rest of INFO accordingly.  */

static void
decompose_incdec_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  /* These addresses are only valid when the size of the addressed
     value is known.  */
  gcc_checking_assert (info->mode != VOIDmode);
}
/* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
   of INFO accordingly.  */

static void
decompose_automod_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  rtx plus = XEXP (*info->inner, 1);
  gcc_assert (GET_CODE (plus) == PLUS);

  info->base_term2 = &XEXP (plus, 0);
  gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));

  rtx *step = &XEXP (plus, 1);
  rtx *inner_step = strip_address_mutations (step);
  if (CONSTANT_P (*inner_step))
    set_address_disp (info, step, inner_step);
  else
    set_address_index (info, step, inner_step);
}
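
/* For example (illustrative RTL),
   (pre_modify (reg:DI sp) (plus:DI (reg:DI sp) (const_int -16)))
   ends up with BASE and BASE_TERM pointing at the first stack-pointer
   use, BASE_TERM2 at the copy inside the PLUS, and DISP at the
   constant step.  */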
/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}
/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, machine_mode mode, addr_space_t as,
	  enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
	 or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
	return base_p ? 1 : -1;
    }
  return 0;
}
/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
					ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
	set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
	set_address_segment (info, loc, inner);
      else
	{
	  /* The only other possibilities are a base or an index.  */
	  rtx *base_term = get_base_term (inner);
	  rtx *index_term = get_index_term (inner);
	  gcc_assert (base_term || index_term);
	  if (!base_term)
	    set_address_index (info, loc, index_term);
	  else if (!index_term)
	    set_address_base (info, loc, base_term);
	  else
	    {
	      gcc_assert (base_term == index_term);
	      ops[out] = loc;
	      inner_ops[out] = base_term;
	      ++out;
	    }
	}
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
	 the base.  If we were confident that another term was the base
	 or index, treat the remaining operand as the other kind.  */
      if (!info->base)
	set_address_base (info, ops[0], inner_ops[0]);
      else
	set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
		    GET_CODE (*ops[1]))
	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
		       GET_CODE (*ops[0])))
	{
	  set_address_base (info, ops[0], inner_ops[0]);
	  set_address_index (info, ops[1], inner_ops[1]);
	}
      else
	{
	  set_address_base (info, ops[1], inner_ops[1]);
	  set_address_index (info, ops[0], inner_ops[0]);
	}
    }
  else
    gcc_assert (out == 0);
}
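
/* As an illustration, decomposing
   (plus (plus (mult (reg i) (const_int 4)) (reg b)) (const_int 8))
   classifies (const_int 8) as the displacement and the MULT as the
   index (a scaled term can only be an index), which leaves (reg b),
   usable as either, to become the base.  (Illustrative RTL.)  */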
/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
		   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}
/* Describe address operand LOC in INFO.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}
/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
		     MEM_ADDR_SPACE (x), MEM);
}
/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
		     info->addr_outer_code);
}
/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}
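
/* Sample scales: (mult (reg) (const_int 4)) and
   (ashift (reg) (const_int 2)) both yield 4, while a bare register
   index yields 1.  (Illustrative RTL.)  */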
/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}
/* Return true if RTL X contains a SYMBOL_REF.  */

bool
contains_symbol_ref_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter))
      return true;

  return false;
}
/* Return true if RTL X contains a SYMBOL_REF or LABEL_REF.  */

bool
contains_symbolic_reference_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
      return true;

  return false;
}
/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (const_rtx x)
{
  if (!targetm.have_tls)
    return false;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
      return true;
  return false;
}