/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "insn-config.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "addresses.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);
static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, machine_mode,
                                                   const_rtx, machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, machine_mode,
                                             const_rtx, machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, machine_mode,
                                                const_rtx, machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, machine_mode, const_rtx,
                                          machine_mode, unsigned int);
rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
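
/* Illustrative note (added commentary, not from the original source): on a
   target where TARGET_MODE_REP_EXTENDED (QImode, SImode) is SIGN_EXTEND, an
   SImode value may only be narrowed to QImode when the bits above QImode's
   sign bit are already copies of that sign bit; the table entry for
   [SImode][QImode] records how many such high-order sign-bit copies are
   required before the mode switch is safe.  */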
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  value_type *base,
                                                  size_t i, value_type x)
{
  if (base == array.stack)
    {
      if (i < LOCAL_ELEMS)
        {
          base[i] = x;
          return base;
        }
      gcc_checking_assert (i == LOCAL_ELEMS);
      /* A previous iteration might also have moved from the stack to the
         heap, in which case the heap array will already be big enough.  */
      if (vec_safe_length (array.heap) <= i)
        vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }
  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}
/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    value_type *base,
                                                    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
         we're likely to want most.  It also allows for the SEQUENCE
         handling below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
        if (format[i] == 'e')
          {
            value_type subx = T::get_value (x->u.fld[i].rt_rtx);
            if (__builtin_expect (end < LOCAL_ELEMS, true))
              base[end++] = subx;
            else
              base = add_single_to_queue (array, base, end++, subx);
          }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
        {
          value_type subx = T::get_value (x->u.fld[i].rt_rtx);
          if (__builtin_expect (end < LOCAL_ELEMS, true))
            base[end++] = subx;
          else
            base = add_single_to_queue (array, base, end++, subx);
        }
      else if (format[i] == 'E')
        {
          unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
          rtx *vec = x->u.fld[i].rt_rtvec->elem;
          if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
            for (unsigned int j = 0; j < length; j++)
              base[end++] = T::get_value (vec[j]);
          else
            for (unsigned int j = 0; j < length; j++)
              base = add_single_to_queue (array, base, end++,
                                          T::get_value (vec[j]));
          if (code == SEQUENCE && end == length)
            /* If the subrtxes of the sequence fill the entire array then
               we know that no other parts of a containing insn are queued.
               The caller is therefore iterating over the sequence as a
               PATTERN (...), so we also want the patterns of the
               subinstructions.  */
            for (unsigned int j = 0; j < length; j++)
              {
                typename T::rtx_type x = T::get_rtx (base[j]);
                if (INSN_P (x))
                  base[j] = T::get_value (PATTERN (x));
              }
        }
  return end - orig_end;
}
template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;
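
/* Illustrative usage sketch (added commentary, not from the original source):
   clients do not normally call the generic_subrtx_iterator members directly;
   they use the FOR_EACH_SUBRTX* macros from rtl-iter.h, e.g.

     subrtx_iterator::array_type array;
     FOR_EACH_SUBRTX (iter, array, x, ALL)
       if (MEM_P (*iter))
         return true;

   contains_mem_rtx_p below is exactly this pattern.  */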
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
/* Compute an approximation for the offset between the register
   FROM and TO for the current function, as it was at the start
   of the routine.  */

static HOST_WIDE_INT
get_initial_register_offset (int from, int to)
{
  static const struct elim_table_t
    {
      const int from;
      const int to;
    } table[] = ELIMINABLE_REGS;
  HOST_WIDE_INT offset1, offset2;
  unsigned int i, j;

  if (to == from)
    return 0;

  /* It is not safe to call INITIAL_ELIMINATION_OFFSET
     before the reload pass.  We need to give at least
     an estimation for the resulting frame size.  */
  if (! reload_completed)
    {
      offset1 = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset1 = - offset1;
#endif
      if (to == STACK_POINTER_REGNUM)
        return offset1;
      else if (from == STACK_POINTER_REGNUM)
        return - offset1;
      else
        return 0;
    }

  for (i = 0; i < ARRAY_SIZE (table); i++)
    if (table[i].from == from)
      {
        if (table[i].to == to)
          {
            INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                        offset1);
            return offset1;
          }
        for (j = 0; j < ARRAY_SIZE (table); j++)
          {
            if (table[j].to == to
                && table[j].from == table[i].to)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return offset1 + offset2;
              }
            if (table[j].from == to
                && table[j].to == table[i].to)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return offset1 - offset2;
              }
          }
      }
    else if (table[i].to == from)
      {
        if (table[i].from == to)
          {
            INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                        offset1);
            return - offset1;
          }
        for (j = 0; j < ARRAY_SIZE (table); j++)
          {
            if (table[j].to == to
                && table[j].from == table[i].from)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return - offset1 + offset2;
              }
            if (table[j].from == to
                && table[j].to == table[i].from)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return - offset1 - offset2;
              }
          }
      }

  /* If the requested register combination was not found,
     try a different more simple combination.  */
  if (from == ARG_POINTER_REGNUM)
    return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
  else if (to == ARG_POINTER_REGNUM)
    return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
  else if (from == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
  else if (to == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
  else
    return 0;
}
/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x) && !SYMBOL_REF_FUNCTION_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
                         ? tree_to_shwi (DECL_SIZE_UNIT (decl))
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        {
#ifdef RED_ZONE_SIZE
          HOST_WIDE_INT red_zone_size = RED_ZONE_SIZE;
#else
          HOST_WIDE_INT red_zone_size = 0;
#endif
          HOST_WIDE_INT stack_boundary = PREFERRED_STACK_BOUNDARY
                                         / BITS_PER_UNIT;
          HOST_WIDE_INT low_bound, high_bound;

          if (size == 0)
            size = GET_MODE_SIZE (mode);

          if (x == frame_pointer_rtx)
            {
              if (FRAME_GROWS_DOWNWARD)
                {
                  high_bound = STARTING_FRAME_OFFSET;
                  low_bound  = high_bound - get_frame_size ();
                }
              else
                {
                  low_bound  = STARTING_FRAME_OFFSET;
                  high_bound = low_bound + get_frame_size ();
                }
            }
          else if (x == hard_frame_pointer_rtx)
            {
              HOST_WIDE_INT sp_offset
                = get_initial_register_offset (STACK_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);
              HOST_WIDE_INT ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = sp_offset - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = sp_offset + red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else if (x == stack_pointer_rtx)
            {
              HOST_WIDE_INT ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               STACK_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else
            {
              /* We assume that accesses are safe to at least the
                 next stack boundary.
                 Examples are varargs and __builtin_return_address.  */
#if ARGS_GROW_DOWNWARD
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + stack_boundary;
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - crtl->args.size - stack_boundary;
#else
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - stack_boundary;
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + crtl->args.size + stack_boundary;
#endif
            }

          if (offset >= low_bound && offset <= high_bound - size)
            return 0;
          return 1;
        }
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a const unspec without offset.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && GET_CODE (XEXP (x, 1)) == CONST
          && GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
          && offset == 0)
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the case above, it can cause a trap.  */
  return 1;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
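
/* Illustrative note (added commentary, not from the original source): a
   typical caller asks whether a whole MEM may trap, e.g.

     if (MEM_P (x) && rtx_addr_can_trap_p (XEXP (x, 0)))
       ... treat the access as potentially trapping ...

   which simply forwards to rtx_addr_can_trap_p_1 with no offset, no known
   size and VOIDmode.  */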
/* Return true if X contains a MEM subrtx.  */

bool
contains_mem_rtx_p (rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (MEM_P (*iter))
      return true;

  return false;
}
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the case above, might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
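
/* Illustrative note (added commentary, not from the original source): for
   X = (const (plus (symbol_ref "foo") (const_int 16))) this stores the
   SYMBOL_REF in *BASE_OUT and (const_int 16) in *OFFSET_OUT; for any other
   X it stores X itself and const0_rtx.  */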
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == label_ref_label (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

    CASE_CONST_ANY:
      /* These are kept unique for a given value.  */
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'E')
      {
        int j;
        for (j = XVECLEN (in, i) - 1; j >= 0; j--)
          if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
            return 1;
      }
    else if (fmt[i] == 'e'
             && reg_mentioned_p (reg, XEXP (in, i)))
      return 1;

  return 0;
}
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;

  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
                    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
                   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Return true if REG is set or clobbered inside INSN.  */

int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
        if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
          return true;

      return false;
    }

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return true;

  /* There are no REG_INC notes for SP autoinc.  */
  if (reg == stack_pointer_rtx && INSN_P (insn))
    {
      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
        {
          rtx mem = *iter;
          if (mem
              && MEM_P (mem)
              && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
            {
              if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
                return true;
              iter.skip_subrtxes ();
            }
        }
    }

  return set_of (reg, insn) != NULL_RTX;
}
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
/* Add all hard registers in X to *PSET.  */
void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
        add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}
/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *)data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}
/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
        IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}
/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SET whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In common case
                 only single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach set first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    return NULL;
                  set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL;
              break;

            default:
              return NULL;
            }
        }
    }
  return set;
}
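
/* Illustrative note (added commentary, not from the original source): callers
   normally use the single_set wrapper from rtl.h, which handles the common
   case of a pattern that is a bare SET directly and only falls back to
   single_set_2 for PARALLELs and other composite patterns.  */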
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      int c0 = INTVAL (XVECEXP (par, 0, 0));
      HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
          return 0;
      return
        simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                               offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
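
/* Illustrative note (added commentary, not from the original source): the
   simplest case recognized here is a register copied to itself, e.g.
   (set (reg:SI 100) (reg:SI 100)); the VEC_SELECT case additionally treats a
   hard-register lane shuffle that selects consecutive elements already in
   place as a no-op.  */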
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const rtx_insn *insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

bool
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return false;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
           || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
               && x_regno == ARG_POINTER_REGNUM)
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return true;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return true;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return false;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
            return true;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return true;
        }
    }
  return false;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case.  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
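
/* Illustrative usage sketch (added commentary, not from the original source):
   a pass that wants every store destination in an insn passes a small
   callback, e.g.

     static void
     note_dest (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
     {
       if (REG_P (dest))
         bitmap_set_bit ((bitmap) data, REGNO (dest));
     }

     ...
     note_stores (PATTERN (insn), note_dest, some_bitmap);

   record_hard_reg_sets above is an in-tree example of the same pattern.  */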
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const rtx_insn *insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}
/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
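
/* Illustrative note (added commentary, not from the original source): the
   usual call passes NULL_RTX as DATUM to fetch the first note of a given
   kind, e.g. find_reg_note (insn, REG_EQUAL, NULL_RTX), as noop_move_p does
   above.  */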
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const rtx_insn *insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}
/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}
/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));
  switch (kind)
    {
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
                                       datum, REG_NOTES (insn));
}
/* Add a register note like NOTE to INSN.  */

void
add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
{
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
  else
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
}
/* Duplicate NOTE and return the copy.  */

rtx
duplicate_reg_note (rtx note)
{
  reg_note kind = REG_NOTE_KIND (note);

  if (GET_CODE (note) == INT_LIST)
    return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
  else if (GET_CODE (note) == EXPR_LIST)
    return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
  else
    return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
}
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx_insn *insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (insn);
      break;
    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
   Return true if any note has been removed.  */

bool
remove_reg_equal_equiv_notes (rtx_insn *insn)
{
  rtx *loc;
  bool ret = false;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        {
          *loc = XEXP (*loc, 1);
          ret = true;
        }
      else
        loc = &XEXP (*loc, 1);
    }
  return ret;
}
/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx_insn *insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

bool
in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return true;

  return false;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
{
  rtx_expr_list *temp = *listp;
  rtx_expr_list *prev = NULL;

  while (temp)
    {
      if (node == temp->element ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
{
  rtx_insn_list *temp = *listp;
  rtx_insn_list *prev = NULL;

  while (temp)
    {
      if (node == temp->insn ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state instructions, and thus no
   instructions or register uses should be moved or combined across them.
   This includes only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case UNSPEC_VOLATILE:
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_insn_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_insn_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Nonzero if X contains any volatile memory references
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_refs_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_refs_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Similar to above, except that it also rejects register pre- and post-
   incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
         when some combination can't be done.  If we see one, don't think
         that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
    case CALL:
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (side_effects_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (side_effects_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero value means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  code = GET_CODE (x);
2731 /* Handle these cases quickly. */
2743 return targetm
.unspec_may_trap_p (x
, flags
);
2745 case UNSPEC_VOLATILE
:
2751 return MEM_VOLATILE_P (x
);
2753 /* Memory ref can trap unless it's a static var or a stack slot. */
2755 /* Recognize specific pattern of stack checking probes. */
2756 if (flag_stack_check
2757 && MEM_VOLATILE_P (x
)
2758 && XEXP (x
, 0) == stack_pointer_rtx
)
2760 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2761 reference; moving it out of context such as when moving code
2762 when optimizing, might cause its address to become invalid. */
2764 || !MEM_NOTRAP_P (x
))
2766 HOST_WIDE_INT size
= MEM_SIZE_KNOWN_P (x
) ? MEM_SIZE (x
) : 0;
2767 return rtx_addr_can_trap_p_1 (XEXP (x
, 0), 0, size
,
2768 GET_MODE (x
), code_changed
);
2773 /* Division by a non-constant might trap. */
2778 if (HONOR_SNANS (x
))
2780 if (SCALAR_FLOAT_MODE_P (GET_MODE (x
)))
2781 return flag_trapping_math
;
2782 if (!CONSTANT_P (XEXP (x
, 1)) || (XEXP (x
, 1) == const0_rtx
))
2787 /* An EXPR_LIST is used to represent a function call. This
2788 certainly may trap. */
2797 /* Some floating point comparisons may trap. */
2798 if (!flag_trapping_math
)
2800 /* ??? There is no machine independent way to check for tests that trap
2801 when COMPARE is used, though many targets do make this distinction.
2802 For instance, sparc uses CCFPE for compares which generate exceptions
2803 and CCFP for compares which do not generate exceptions. */
2806 /* But often the compare has some CC mode, so check operand
2808 if (HONOR_NANS (XEXP (x
, 0))
2809 || HONOR_NANS (XEXP (x
, 1)))
2815 if (HONOR_SNANS (x
))
2817 /* Often comparison is CC mode, so check operand modes. */
2818 if (HONOR_SNANS (XEXP (x
, 0))
2819 || HONOR_SNANS (XEXP (x
, 1)))
2824 /* Conversion of floating point might trap. */
2825 if (flag_trapping_math
&& HONOR_NANS (XEXP (x
, 0)))
2832 /* These operations don't trap even with floating point. */
2836 /* Any floating arithmetic may trap. */
2837 if (SCALAR_FLOAT_MODE_P (GET_MODE (x
)) && flag_trapping_math
)
2841 fmt
= GET_RTX_FORMAT (code
);
2842 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2846 if (may_trap_p_1 (XEXP (x
, i
), flags
))
2849 else if (fmt
[i
] == 'E')
2852 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
2853 if (may_trap_p_1 (XVECEXP (x
, i
, j
), flags
))
/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}

/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo (struct S *s)
      {
        if (s->d == 1)
          return *s->ip;
        else
          return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      if ((reg:SI) == 1)
        (set (reg:SI) (mem:SI (%fp - 7)))
      else
        (set (reg:QI) (mem:QI (%fp - 7)))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}
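/* Illustrative sketch only, not part of GCC: a pass that wants to execute
   the value computed by a single_set insn unconditionally in a new context
   (for example when hoisting it above a branch) might guard the
   transformation as below, since moving the computation changes the
   context of any memory access in it.  The helper name is hypothetical.  */
#if 0
static bool
example_safe_to_hoist_p (rtx_insn *insn)
{
  rtx set = single_set (insn);
  return (set != NULL_RTX
          && !side_effects_p (SET_SRC (set))
          && !may_trap_or_fault_p (SET_SRC (set)));
}
#endif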
2914 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2915 i.e., an inequality. */
2918 inequality_comparisons_p (const_rtx x
)
2922 const enum rtx_code code
= GET_CODE (x
);
2950 len
= GET_RTX_LENGTH (code
);
2951 fmt
= GET_RTX_FORMAT (code
);
2953 for (i
= 0; i
< len
; i
++)
2957 if (inequality_comparisons_p (XEXP (x
, i
)))
2960 else if (fmt
[i
] == 'E')
2963 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
2964 if (inequality_comparisons_p (XVECEXP (x
, i
, j
)))
/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.

   ALL_REGS is true if we want to replace all REGs equal to FROM, not just
   those pointer-equal ones.  */
2982 replace_rtx (rtx x
, rtx from
, rtx to
, bool all_regs
)
2990 /* Allow this function to make replacements in EXPR_LISTs. */
2997 && REGNO (x
) == REGNO (from
))
2999 gcc_assert (GET_MODE (x
) == GET_MODE (from
));
3002 else if (GET_CODE (x
) == SUBREG
)
3004 rtx new_rtx
= replace_rtx (SUBREG_REG (x
), from
, to
, all_regs
);
3006 if (CONST_INT_P (new_rtx
))
3008 x
= simplify_subreg (GET_MODE (x
), new_rtx
,
3009 GET_MODE (SUBREG_REG (x
)),
3014 SUBREG_REG (x
) = new_rtx
;
3018 else if (GET_CODE (x
) == ZERO_EXTEND
)
3020 rtx new_rtx
= replace_rtx (XEXP (x
, 0), from
, to
, all_regs
);
3022 if (CONST_INT_P (new_rtx
))
3024 x
= simplify_unary_operation (ZERO_EXTEND
, GET_MODE (x
),
3025 new_rtx
, GET_MODE (XEXP (x
, 0)));
3029 XEXP (x
, 0) = new_rtx
;
3034 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
3035 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
3038 XEXP (x
, i
) = replace_rtx (XEXP (x
, i
), from
, to
, all_regs
);
3039 else if (fmt
[i
] == 'E')
3040 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
3041 XVECEXP (x
, i
, j
) = replace_rtx (XVECEXP (x
, i
, j
),
3042 from
, to
, all_regs
);
/* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
   the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */

void
replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
{
  /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
  rtx x = *loc;
  if (JUMP_TABLE_DATA_P (x))
    {
      rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
      int len = GET_NUM_ELEM (vec);
      for (int i = 0; i < len; ++i)
        {
          rtx ref = RTVEC_ELT (vec, i);
          if (XEXP (ref, 0) == old_label)
            {
              XEXP (ref, 0) = new_label;
              if (update_label_nuses)
                {
                  ++LABEL_NUSES (new_label);
                  --LABEL_NUSES (old_label);
                }
            }
        }
      return;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by the iterator because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
    JUMP_LABEL (x) = new_label;

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
    {
      rtx *loc = *iter;
      if (rtx x = *loc)
        {
          if (GET_CODE (x) == SYMBOL_REF
              && CONSTANT_POOL_ADDRESS_P (x))
            {
              rtx c = get_pool_constant (x);
              if (rtx_referenced_p (old_label, c))
                {
                  /* Create a copy of constant C; replace the label inside
                     but do not update LABEL_NUSES because uses in constant pool
                     are not counted.  */
                  rtx new_c = copy_rtx (c);
                  replace_label (&new_c, old_label, new_label, false);

                  /* Add the new constant NEW_C to constant pool and replace
                     the old reference to constant by new reference.  */
                  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
                  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
                }
            }

          if ((GET_CODE (x) == LABEL_REF
               || GET_CODE (x) == INSN_LIST)
              && XEXP (x, 0) == old_label)
            {
              XEXP (x, 0) = new_label;
              if (update_label_nuses)
                {
                  ++LABEL_NUSES (new_label);
                  --LABEL_NUSES (old_label);
                }
            }
        }
    }
}
void
replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
                       rtx_insn *new_label, bool update_label_nuses)
{
  rtx insn_as_rtx = insn;
  replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
  gcc_checking_assert (insn_as_rtx == insn);
}
/* Return true if X is referenced in BODY.  */

bool
rtx_referenced_p (const_rtx x, const_rtx body)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, body, ALL)
    if (const_rtx y = *iter)
      {
        /* Check if a label_ref Y refers to label X.  */
        if (GET_CODE (y) == LABEL_REF
            && LABEL_P (x)
            && label_ref_label (y) == x)
          return true;

        if (rtx_equal_p (x, y))
          return true;

        /* If Y is a reference to pool constant traverse the constant.  */
        if (GET_CODE (y) == SYMBOL_REF
            && CONSTANT_POOL_ADDRESS_P (y))
          iter.substitute (get_pool_constant (y));
      }
  return false;
}
/* If INSN is a tablejump return true and store the label (before jump table) to
   *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
             rtx_jump_table_data **tablep)
{
  if (!JUMP_P (insn))
    return false;

  rtx target = JUMP_LABEL (insn);
  if (target == NULL_RTX || ANY_RETURN_P (target))
    return false;

  rtx_insn *label = as_a <rtx_insn *> (target);
  rtx_insn *table = next_insn (label);
  if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
    return false;

  if (labelp)
    *labelp = label;
  if (tablep)
    *tablep = as_a <rtx_jump_table_data *> (table);
  return true;
}
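/* Illustrative sketch only, not part of GCC: a typical use of tablejump_p
   is to recover the dispatch table of a jump so that its label vector can
   be inspected.  The function name is hypothetical.  */
#if 0
static int
example_count_table_targets (const rtx_insn *insn)
{
  rtx_jump_table_data *table;
  if (!tablejump_p (insn, NULL, &table))
    return 0;
  return GET_NUM_ELEM (table->get_labels ());
}
#endif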
3184 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
3185 constant that is not in the constant pool and not in the condition
3186 of an IF_THEN_ELSE. */
3189 computed_jump_p_1 (const_rtx x
)
3191 const enum rtx_code code
= GET_CODE (x
);
3208 return ! (GET_CODE (XEXP (x
, 0)) == SYMBOL_REF
3209 && CONSTANT_POOL_ADDRESS_P (XEXP (x
, 0)));
3212 return (computed_jump_p_1 (XEXP (x
, 1))
3213 || computed_jump_p_1 (XEXP (x
, 2)));
3219 fmt
= GET_RTX_FORMAT (code
);
3220 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
3223 && computed_jump_p_1 (XEXP (x
, i
)))
3226 else if (fmt
[i
] == 'E')
3227 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
3228 if (computed_jump_p_1 (XVECEXP (x
, i
, j
)))
3235 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3237 Tablejumps and casesi insns are not considered indirect jumps;
3238 we can recognize them by a (use (label_ref)). */
3241 computed_jump_p (const rtx_insn
*insn
)
3246 rtx pat
= PATTERN (insn
);
3248 /* If we have a JUMP_LABEL set, we're not a computed jump. */
3249 if (JUMP_LABEL (insn
) != NULL
)
3252 if (GET_CODE (pat
) == PARALLEL
)
3254 int len
= XVECLEN (pat
, 0);
3255 int has_use_labelref
= 0;
3257 for (i
= len
- 1; i
>= 0; i
--)
3258 if (GET_CODE (XVECEXP (pat
, 0, i
)) == USE
3259 && (GET_CODE (XEXP (XVECEXP (pat
, 0, i
), 0))
3262 has_use_labelref
= 1;
3266 if (! has_use_labelref
)
3267 for (i
= len
- 1; i
>= 0; i
--)
3268 if (GET_CODE (XVECEXP (pat
, 0, i
)) == SET
3269 && SET_DEST (XVECEXP (pat
, 0, i
)) == pc_rtx
3270 && computed_jump_p_1 (SET_SRC (XVECEXP (pat
, 0, i
))))
3273 else if (GET_CODE (pat
) == SET
3274 && SET_DEST (pat
) == pc_rtx
3275 && computed_jump_p_1 (SET_SRC (pat
)))
3283 /* MEM has a PRE/POST-INC/DEC/MODIFY address X. Extract the operands of
3284 the equivalent add insn and pass the result to FN, using DATA as the
3288 for_each_inc_dec_find_inc_dec (rtx mem
, for_each_inc_dec_fn fn
, void *data
)
3290 rtx x
= XEXP (mem
, 0);
3291 switch (GET_CODE (x
))
3296 int size
= GET_MODE_SIZE (GET_MODE (mem
));
3297 rtx r1
= XEXP (x
, 0);
3298 rtx c
= gen_int_mode (size
, GET_MODE (r1
));
3299 return fn (mem
, x
, r1
, r1
, c
, data
);
3305 int size
= GET_MODE_SIZE (GET_MODE (mem
));
3306 rtx r1
= XEXP (x
, 0);
3307 rtx c
= gen_int_mode (-size
, GET_MODE (r1
));
3308 return fn (mem
, x
, r1
, r1
, c
, data
);
3314 rtx r1
= XEXP (x
, 0);
3315 rtx add
= XEXP (x
, 1);
3316 return fn (mem
, x
, r1
, add
, NULL
, data
);
/* Traverse *LOC looking for MEMs that have autoinc addresses.
   For each such autoinc operation found, call FN, passing it
   the innermost enclosing MEM, the operation itself, the RTX modified
   by the operation, two RTXs (the second may be NULL) that, once
   added, represent the value to be held by the modified RTX
   afterwards, and DATA.  FN is to return 0 to continue the
   traversal or any other value to have it returned to the caller of
   for_each_inc_dec.  */

int
for_each_inc_dec (rtx x,
                  for_each_inc_dec_fn fn,
                  void *data)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx mem = *iter;
      if (mem
          && MEM_P (mem)
          && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
        {
          int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
          if (res != 0)
            return res;
          iter.skip_subrtxes ();
        }
    }
  return 0;
}
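/* Illustrative sketch only, not part of GCC: a minimal for_each_inc_dec
   callback that merely counts the auto-increment addresses found.  The
   callback receives the enclosing MEM, the autoinc expression, the rtx
   being modified and the addends described above; returning 0 continues
   the traversal.  Names are hypothetical.  */
#if 0
static int
example_count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                       rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
                       rtx srcoff ATTRIBUTE_UNUSED, void *data)
{
  ++*(int *) data;
  return 0;
}

static int
example_autoinc_count (rtx x)
{
  int count = 0;
  for_each_inc_dec (x, example_count_autoinc, &count);
  return count;
}
#endif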
3356 /* Searches X for any reference to REGNO, returning the rtx of the
3357 reference found if any. Otherwise, returns NULL_RTX. */
3360 regno_use_in (unsigned int regno
, rtx x
)
3366 if (REG_P (x
) && REGNO (x
) == regno
)
3369 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
3370 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
3374 if ((tem
= regno_use_in (regno
, XEXP (x
, i
))))
3377 else if (fmt
[i
] == 'E')
3378 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
3379 if ((tem
= regno_use_in (regno
, XVECEXP (x
, i
, j
))))
3386 /* Return a value indicating whether OP, an operand of a commutative
3387 operation, is preferred as the first or second operand. The more
3388 positive the value, the stronger the preference for being the first
3392 commutative_operand_precedence (rtx op
)
3394 enum rtx_code code
= GET_CODE (op
);
3396 /* Constants always become the second operand. Prefer "nice" constants. */
3397 if (code
== CONST_INT
)
3399 if (code
== CONST_WIDE_INT
)
3401 if (code
== CONST_DOUBLE
)
3403 if (code
== CONST_FIXED
)
3405 op
= avoid_constant_pool_reference (op
);
3406 code
= GET_CODE (op
);
3408 switch (GET_RTX_CLASS (code
))
3411 if (code
== CONST_INT
)
3413 if (code
== CONST_WIDE_INT
)
3415 if (code
== CONST_DOUBLE
)
3417 if (code
== CONST_FIXED
)
3422 /* SUBREGs of objects should come second. */
3423 if (code
== SUBREG
&& OBJECT_P (SUBREG_REG (op
)))
3428 /* Complex expressions should be the first, so decrease priority
3429 of objects. Prefer pointer objects over non pointer objects. */
3430 if ((REG_P (op
) && REG_POINTER (op
))
3431 || (MEM_P (op
) && MEM_POINTER (op
)))
3435 case RTX_COMM_ARITH
:
3436 /* Prefer operands that are themselves commutative to be first.
3437 This helps to make things linear. In particular,
3438 (and (and (reg) (reg)) (not (reg))) is canonical. */
3442 /* If only one operand is a binary expression, it will be the first
3443 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3444 is canonical, although it will usually be further simplified. */
3448 /* Then prefer NEG and NOT. */
3449 if (code
== NEG
|| code
== NOT
)
/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

int
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
          < commutative_operand_precedence (y));
}
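/* Illustrative sketch only, not part of GCC: the usual idiom built on
   swap_commutative_operands_p is to reorder the operands of a commutative
   operation before constructing it, so that for example a constant always
   ends up as the second operand.  The helper name is hypothetical.  */
#if 0
static rtx
example_canonical_plus (machine_mode mode, rtx op0, rtx op1)
{
  if (swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);
  return gen_rtx_PLUS (mode, op0, op1);
}
#endif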
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */

int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
        return 1;
    default:
      break;
    }
  return 0;
}
3490 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3492 loc_mentioned_in_p (rtx
*loc
, const_rtx in
)
3501 code
= GET_CODE (in
);
3502 fmt
= GET_RTX_FORMAT (code
);
3503 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
3507 if (loc
== &XEXP (in
, i
) || loc_mentioned_in_p (loc
, XEXP (in
, i
)))
3510 else if (fmt
[i
] == 'E')
3511 for (j
= XVECLEN (in
, i
) - 1; j
>= 0; j
--)
3512 if (loc
== &XVECEXP (in
, i
, j
)
3513 || loc_mentioned_in_p (loc
, XVECEXP (in
, i
, j
)))
/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

unsigned int
subreg_lsb_1 (machine_mode outer_mode,
              machine_mode inner_mode,
              unsigned int subreg_byte)
{
  unsigned int bitpos;
  unsigned int byte;
  unsigned int word;

  /* A paradoxical subreg begins at bit position 0.  */
  if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
    return 0;

  if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    /* If the subreg crosses a word boundary ensure that
       it also begins and ends on a word boundary.  */
    gcc_assert (!((subreg_byte % UNITS_PER_WORD
                   + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
                  && (subreg_byte % UNITS_PER_WORD
                      || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));

  if (WORDS_BIG_ENDIAN)
    word = (GET_MODE_SIZE (inner_mode)
            - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
  else
    word = subreg_byte / UNITS_PER_WORD;
  bitpos = word * BITS_PER_WORD;

  if (BYTES_BIG_ENDIAN)
    byte = (GET_MODE_SIZE (inner_mode)
            - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
  else
    byte = subreg_byte % UNITS_PER_WORD;
  bitpos += byte * BITS_PER_UNIT;

  return bitpos;
}
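/* Worked example (illustrative only): on a target where UNITS_PER_WORD is 4
   and neither words nor bytes are big-endian, subreg_lsb_1 (SImode, DImode, 4)
   gives word = 1 and byte = 0, so the subreg starts at bit 32 of the inner
   value; with both WORDS_BIG_ENDIAN and BYTES_BIG_ENDIAN set, the same byte
   offset selects the low-order part and the result is bit 0.  */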
/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

unsigned int
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
                       SUBREG_BYTE (x));
}
3571 /* Return the subreg byte offset for a subreg whose outer value has
3572 OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
3573 there are LSB_SHIFT *bits* between the lsb of the outer value and the
3574 lsb of the inner value. This is the inverse of the calculation
3575 performed by subreg_lsb_1 (which converts byte offsets to bit shifts). */
3578 subreg_size_offset_from_lsb (unsigned int outer_bytes
,
3579 unsigned int inner_bytes
,
3580 unsigned int lsb_shift
)
3582 /* A paradoxical subreg begins at bit position 0. */
3583 if (outer_bytes
> inner_bytes
)
3585 gcc_checking_assert (lsb_shift
== 0);
3589 gcc_assert (lsb_shift
% BITS_PER_UNIT
== 0);
3590 unsigned int lower_bytes
= lsb_shift
/ BITS_PER_UNIT
;
3591 unsigned int upper_bytes
= inner_bytes
- (lower_bytes
+ outer_bytes
);
3592 if (WORDS_BIG_ENDIAN
&& BYTES_BIG_ENDIAN
)
3594 else if (!WORDS_BIG_ENDIAN
&& !BYTES_BIG_ENDIAN
)
3598 unsigned int lower_word_part
= lower_bytes
& -UNITS_PER_WORD
;
3599 unsigned int upper_word_part
= upper_bytes
& -UNITS_PER_WORD
;
3600 if (WORDS_BIG_ENDIAN
)
3601 return upper_word_part
+ (lower_bytes
- lower_word_part
);
3603 return lower_word_part
+ (upper_bytes
- upper_word_part
);
3607 /* Fill in information about a subreg of a hard register.
3608 xregno - A regno of an inner hard subreg_reg (or what will become one).
3609 xmode - The mode of xregno.
3610 offset - The byte offset.
3611 ymode - The mode of a top level SUBREG (or what may become one).
3612 info - Pointer to structure to fill in.
3614 Rather than considering one particular inner register (and thus one
3615 particular "outer" register) in isolation, this function really uses
3616 XREGNO as a model for a sequence of isomorphic hard registers. Thus the
3617 function does not check whether adding INFO->offset to XREGNO gives
3618 a valid hard register; even if INFO->offset + XREGNO is out of range,
3619 there might be another register of the same type that is in range.
3620 Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
3621 register, since that can depend on things like whether the final
3622 register number is even or odd. Callers that want to check whether
3623 this particular subreg can be replaced by a simple (reg ...) should
3624 use simplify_subreg_regno. */
3627 subreg_get_info (unsigned int xregno
, machine_mode xmode
,
3628 unsigned int offset
, machine_mode ymode
,
3629 struct subreg_info
*info
)
3631 unsigned int nregs_xmode
, nregs_ymode
;
3633 gcc_assert (xregno
< FIRST_PSEUDO_REGISTER
);
3635 unsigned int xsize
= GET_MODE_SIZE (xmode
);
3636 unsigned int ysize
= GET_MODE_SIZE (ymode
);
3637 bool rknown
= false;
3639 /* If the register representation of a non-scalar mode has holes in it,
3640 we expect the scalar units to be concatenated together, with the holes
3641 distributed evenly among the scalar units. Each scalar unit must occupy
3642 at least one register. */
3643 if (HARD_REGNO_NREGS_HAS_PADDING (xregno
, xmode
))
3645 nregs_xmode
= HARD_REGNO_NREGS_WITH_PADDING (xregno
, xmode
);
3646 unsigned int nunits
= GET_MODE_NUNITS (xmode
);
3647 machine_mode xmode_unit
= GET_MODE_INNER (xmode
);
3648 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno
, xmode_unit
));
3649 gcc_assert (nregs_xmode
3651 * HARD_REGNO_NREGS_WITH_PADDING (xregno
, xmode_unit
)));
3652 gcc_assert (hard_regno_nregs
[xregno
][xmode
]
3653 == hard_regno_nregs
[xregno
][xmode_unit
] * nunits
);
3655 /* You can only ask for a SUBREG of a value with holes in the middle
3656 if you don't cross the holes. (Such a SUBREG should be done by
3657 picking a different register class, or doing it in memory if
3658 necessary.) An example of a value with holes is XCmode on 32-bit
3659 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3660 3 for each part, but in memory it's two 128-bit parts.
3661 Padding is assumed to be at the end (not necessarily the 'high part')
3663 if ((offset
/ GET_MODE_SIZE (xmode_unit
) + 1 < nunits
)
3664 && (offset
/ GET_MODE_SIZE (xmode_unit
)
3665 != ((offset
+ ysize
- 1) / GET_MODE_SIZE (xmode_unit
))))
3667 info
->representable_p
= false;
3672 nregs_xmode
= hard_regno_nregs
[xregno
][xmode
];
3674 nregs_ymode
= hard_regno_nregs
[xregno
][ymode
];
3676 /* Paradoxical subregs are otherwise valid. */
3677 if (!rknown
&& offset
== 0 && ysize
> xsize
)
3679 info
->representable_p
= true;
3680 /* If this is a big endian paradoxical subreg, which uses more
3681 actual hard registers than the original register, we must
3682 return a negative offset so that we find the proper highpart
3685 We assume that the ordering of registers within a multi-register
3686 value has a consistent endianness: if bytes and register words
3687 have different endianness, the hard registers that make up a
3688 multi-register value must be at least word-sized. */
3689 if (REG_WORDS_BIG_ENDIAN
)
3690 info
->offset
= (int) nregs_xmode
- (int) nregs_ymode
;
3693 info
->nregs
= nregs_ymode
;
3697 /* If registers store different numbers of bits in the different
3698 modes, we cannot generally form this subreg. */
3699 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno
, xmode
)
3700 && !HARD_REGNO_NREGS_HAS_PADDING (xregno
, ymode
)
3701 && (xsize
% nregs_xmode
) == 0
3702 && (ysize
% nregs_ymode
) == 0)
3704 int regsize_xmode
= xsize
/ nregs_xmode
;
3705 int regsize_ymode
= ysize
/ nregs_ymode
;
3707 && ((nregs_ymode
> 1 && regsize_xmode
> regsize_ymode
)
3708 || (nregs_xmode
> 1 && regsize_ymode
> regsize_xmode
)))
3710 info
->representable_p
= false;
3711 info
->nregs
= CEIL (ysize
, regsize_xmode
);
3712 info
->offset
= offset
/ regsize_xmode
;
3715 /* It's not valid to extract a subreg of mode YMODE at OFFSET that
3716 would go outside of XMODE. */
3717 if (!rknown
&& ysize
+ offset
> xsize
)
3719 info
->representable_p
= false;
3720 info
->nregs
= nregs_ymode
;
3721 info
->offset
= offset
/ regsize_xmode
;
3724 /* Quick exit for the simple and common case of extracting whole
3725 subregisters from a multiregister value. */
3726 /* ??? It would be better to integrate this into the code below,
3727 if we can generalize the concept enough and figure out how
3728 odd-sized modes can coexist with the other weird cases we support. */
3730 && WORDS_BIG_ENDIAN
== REG_WORDS_BIG_ENDIAN
3731 && regsize_xmode
== regsize_ymode
3732 && (offset
% regsize_ymode
) == 0)
3734 info
->representable_p
= true;
3735 info
->nregs
= nregs_ymode
;
3736 info
->offset
= offset
/ regsize_ymode
;
3737 gcc_assert (info
->offset
+ info
->nregs
<= (int) nregs_xmode
);
3742 /* Lowpart subregs are otherwise valid. */
3743 if (!rknown
&& offset
== subreg_lowpart_offset (ymode
, xmode
))
3745 info
->representable_p
= true;
3748 if (offset
== 0 || nregs_xmode
== nregs_ymode
)
3751 info
->nregs
= nregs_ymode
;
3756 /* Set NUM_BLOCKS to the number of independently-representable YMODE
3757 values there are in (reg:XMODE XREGNO). We can view the register
3758 as consisting of this number of independent "blocks", where each
3759 block occupies NREGS_YMODE registers and contains exactly one
3760 representable YMODE value. */
3761 gcc_assert ((nregs_xmode
% nregs_ymode
) == 0);
3762 unsigned int num_blocks
= nregs_xmode
/ nregs_ymode
;
3764 /* Calculate the number of bytes in each block. This must always
3765 be exact, otherwise we don't know how to verify the constraint.
3766 These conditions may be relaxed but subreg_regno_offset would
3767 need to be redesigned. */
3768 gcc_assert ((xsize
% num_blocks
) == 0);
3769 unsigned int bytes_per_block
= xsize
/ num_blocks
;
3771 /* Get the number of the first block that contains the subreg and the byte
3772 offset of the subreg from the start of that block. */
3773 unsigned int block_number
= offset
/ bytes_per_block
;
3774 unsigned int subblock_offset
= offset
% bytes_per_block
;
3778 /* Only the lowpart of each block is representable. */
3779 info
->representable_p
3781 == subreg_size_lowpart_offset (ysize
, bytes_per_block
));
3785 /* We assume that the ordering of registers within a multi-register
3786 value has a consistent endianness: if bytes and register words
3787 have different endianness, the hard registers that make up a
3788 multi-register value must be at least word-sized. */
3789 if (WORDS_BIG_ENDIAN
!= REG_WORDS_BIG_ENDIAN
)
3790 /* The block number we calculated above followed memory endianness.
3791 Convert it to register endianness by counting back from the end.
3792 (Note that, because of the assumption above, each block must be
3793 at least word-sized.) */
3794 info
->offset
= (num_blocks
- block_number
- 1) * nregs_ymode
;
3796 info
->offset
= block_number
* nregs_ymode
;
3797 info
->nregs
= nregs_ymode
;
3800 /* This function returns the regno offset of a subreg expression.
3801 xregno - A regno of an inner hard subreg_reg (or what will become one).
3802 xmode - The mode of xregno.
3803 offset - The byte offset.
3804 ymode - The mode of a top level SUBREG (or what may become one).
3805 RETURN - The regno offset which would be used. */
3807 subreg_regno_offset (unsigned int xregno
, machine_mode xmode
,
3808 unsigned int offset
, machine_mode ymode
)
3810 struct subreg_info info
;
3811 subreg_get_info (xregno
, xmode
, offset
, ymode
, &info
);
3815 /* This function returns true when the offset is representable via
3816 subreg_offset in the given regno.
3817 xregno - A regno of an inner hard subreg_reg (or what will become one).
3818 xmode - The mode of xregno.
3819 offset - The byte offset.
3820 ymode - The mode of a top level SUBREG (or what may become one).
3821 RETURN - Whether the offset is representable. */
3823 subreg_offset_representable_p (unsigned int xregno
, machine_mode xmode
,
3824 unsigned int offset
, machine_mode ymode
)
3826 struct subreg_info info
;
3827 subreg_get_info (xregno
, xmode
, offset
, ymode
, &info
);
3828 return info
.representable_p
;
3831 /* Return the number of a YMODE register to which
3833 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3835 can be simplified. Return -1 if the subreg can't be simplified.
3837 XREGNO is a hard register number. */
3840 simplify_subreg_regno (unsigned int xregno
, machine_mode xmode
,
3841 unsigned int offset
, machine_mode ymode
)
3843 struct subreg_info info
;
3844 unsigned int yregno
;
3846 #ifdef CANNOT_CHANGE_MODE_CLASS
3847 /* Give the backend a chance to disallow the mode change. */
3848 if (GET_MODE_CLASS (xmode
) != MODE_COMPLEX_INT
3849 && GET_MODE_CLASS (xmode
) != MODE_COMPLEX_FLOAT
3850 && REG_CANNOT_CHANGE_MODE_P (xregno
, xmode
, ymode
)
3851 /* We can use mode change in LRA for some transformations. */
3852 && ! lra_in_progress
)
3856 /* We shouldn't simplify stack-related registers. */
3857 if ((!reload_completed
|| frame_pointer_needed
)
3858 && xregno
== FRAME_POINTER_REGNUM
)
3861 if (FRAME_POINTER_REGNUM
!= ARG_POINTER_REGNUM
3862 && xregno
== ARG_POINTER_REGNUM
)
3865 if (xregno
== STACK_POINTER_REGNUM
3866 /* We should convert hard stack register in LRA if it is
3868 && ! lra_in_progress
)
3871 /* Try to get the register offset. */
3872 subreg_get_info (xregno
, xmode
, offset
, ymode
, &info
);
3873 if (!info
.representable_p
)
3876 /* Make sure that the offsetted register value is in range. */
3877 yregno
= xregno
+ info
.offset
;
3878 if (!HARD_REGISTER_NUM_P (yregno
))
3881 /* See whether (reg:YMODE YREGNO) is valid.
3883 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3884 This is a kludge to work around how complex FP arguments are passed
3885 on IA-64 and should be fixed. See PR target/49226. */
3886 if (!HARD_REGNO_MODE_OK (yregno
, ymode
)
3887 && HARD_REGNO_MODE_OK (xregno
, xmode
))
3890 return (int) yregno
;
3893 /* Return the final regno that a subreg expression refers to. */
3895 subreg_regno (const_rtx x
)
3898 rtx subreg
= SUBREG_REG (x
);
3899 int regno
= REGNO (subreg
);
3901 ret
= regno
+ subreg_regno_offset (regno
,
3909 /* Return the number of registers that a subreg expression refers
3912 subreg_nregs (const_rtx x
)
3914 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x
)), x
);
3917 /* Return the number of registers that a subreg REG with REGNO
3918 expression refers to. This is a copy of the rtlanal.c:subreg_nregs
3919 changed so that the regno can be passed in. */
3922 subreg_nregs_with_regno (unsigned int regno
, const_rtx x
)
3924 struct subreg_info info
;
3925 rtx subreg
= SUBREG_REG (x
);
3927 subreg_get_info (regno
, GET_MODE (subreg
), SUBREG_BYTE (x
), GET_MODE (x
),
3932 struct parms_set_data
3938 /* Helper function for noticing stores to parameter registers. */
3940 parms_set (rtx x
, const_rtx pat ATTRIBUTE_UNUSED
, void *data
)
3942 struct parms_set_data
*const d
= (struct parms_set_data
*) data
;
3943 if (REG_P (x
) && REGNO (x
) < FIRST_PSEUDO_REGISTER
3944 && TEST_HARD_REG_BIT (d
->regs
, REGNO (x
)))
3946 CLEAR_HARD_REG_BIT (d
->regs
, REGNO (x
));
3951 /* Look backward for first parameter to be loaded.
3952 Note that loads of all parameters will not necessarily be
3953 found if CSE has eliminated some of them (e.g., an argument
3954 to the outer function is passed down as a parameter).
3955 Do not skip BOUNDARY. */
3957 find_first_parameter_load (rtx_insn
*call_insn
, rtx_insn
*boundary
)
3959 struct parms_set_data parm
;
3961 rtx_insn
*before
, *first_set
;
3963 /* Since different machines initialize their parameter registers
3964 in different orders, assume nothing. Collect the set of all
3965 parameter registers. */
3966 CLEAR_HARD_REG_SET (parm
.regs
);
3968 for (p
= CALL_INSN_FUNCTION_USAGE (call_insn
); p
; p
= XEXP (p
, 1))
3969 if (GET_CODE (XEXP (p
, 0)) == USE
3970 && REG_P (XEXP (XEXP (p
, 0), 0))
3971 && !STATIC_CHAIN_REG_P (XEXP (XEXP (p
, 0), 0)))
3973 gcc_assert (REGNO (XEXP (XEXP (p
, 0), 0)) < FIRST_PSEUDO_REGISTER
);
3975 /* We only care about registers which can hold function
3977 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p
, 0), 0))))
3980 SET_HARD_REG_BIT (parm
.regs
, REGNO (XEXP (XEXP (p
, 0), 0)));
3984 first_set
= call_insn
;
3986 /* Search backward for the first set of a register in this set. */
3987 while (parm
.nregs
&& before
!= boundary
)
3989 before
= PREV_INSN (before
);
3991 /* It is possible that some loads got CSEed from one call to
3992 another. Stop in that case. */
3993 if (CALL_P (before
))
3996 /* Our caller needs either ensure that we will find all sets
3997 (in case code has not been optimized yet), or take care
3998 for possible labels in a way by setting boundary to preceding
4000 if (LABEL_P (before
))
4002 gcc_assert (before
== boundary
);
4006 if (INSN_P (before
))
4008 int nregs_old
= parm
.nregs
;
4009 note_stores (PATTERN (before
), parms_set
, &parm
);
4010 /* If we found something that did not set a parameter reg,
4011 we're done. Do not keep going, as that might result
4012 in hoisting an insn before the setting of a pseudo
4013 that is used by the hoisted insn. */
4014 if (nregs_old
!= parm
.nregs
)
4023 /* Return true if we should avoid inserting code between INSN and preceding
4024 call instruction. */
4027 keep_with_call_p (const rtx_insn
*insn
)
4031 if (INSN_P (insn
) && (set
= single_set (insn
)) != NULL
)
4033 if (REG_P (SET_DEST (set
))
4034 && REGNO (SET_DEST (set
)) < FIRST_PSEUDO_REGISTER
4035 && fixed_regs
[REGNO (SET_DEST (set
))]
4036 && general_operand (SET_SRC (set
), VOIDmode
))
4038 if (REG_P (SET_SRC (set
))
4039 && targetm
.calls
.function_value_regno_p (REGNO (SET_SRC (set
)))
4040 && REG_P (SET_DEST (set
))
4041 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
)
4043 /* There may be a stack pop just after the call and before the store
4044 of the return register. Search for the actual store when deciding
4045 if we can break or not. */
4046 if (SET_DEST (set
) == stack_pointer_rtx
)
4048 /* This CONST_CAST is okay because next_nonnote_insn just
4049 returns its argument and we assign it to a const_rtx
4052 = next_nonnote_insn (const_cast<rtx_insn
*> (insn
));
4053 if (i2
&& keep_with_call_p (i2
))
4060 /* Return true if LABEL is a target of JUMP_INSN. This applies only
4061 to non-complex jumps. That is, direct unconditional, conditional,
4062 and tablejumps, but not computed jumps or returns. It also does
4063 not apply to the fallthru case of a conditional jump. */
4066 label_is_jump_target_p (const_rtx label
, const rtx_insn
*jump_insn
)
4068 rtx tmp
= JUMP_LABEL (jump_insn
);
4069 rtx_jump_table_data
*table
;
4074 if (tablejump_p (jump_insn
, NULL
, &table
))
4076 rtvec vec
= table
->get_labels ();
4077 int i
, veclen
= GET_NUM_ELEM (vec
);
4079 for (i
= 0; i
< veclen
; ++i
)
4080 if (XEXP (RTVEC_ELT (vec
, i
), 0) == label
)
4084 if (find_reg_note (jump_insn
, REG_LABEL_TARGET
, label
))
/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   X appears as operand OPNO in an expression with code OUTER_CODE.
   SPEED specifies whether costs optimized for speed or size should
   be returned.  */

int
rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
          int opno, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;
  int factor;

  if (x == 0)
    return 0;

  if (GET_MODE (x) != VOIDmode)
    mode = GET_MODE (x);

  /* A size N times larger than UNITS_PER_WORD likely needs N times as
     many insns, taking N times as long.  */
  factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
4122 /* Compute the default costs of certain things.
4123 Note that targetm.rtx_costs can override the defaults. */
4125 code
= GET_CODE (x
);
4129 /* Multiplication has time-complexity O(N*N), where N is the
4130 number of units (translated from digits) when using
4131 schoolbook long multiplication. */
4132 total
= factor
* factor
* COSTS_N_INSNS (5);
4138 /* Similarly, complexity for schoolbook long division. */
4139 total
= factor
* factor
* COSTS_N_INSNS (7);
4142 /* Used in combine.c as a marker. */
4146 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4147 the mode for the factor. */
4148 mode
= GET_MODE (SET_DEST (x
));
4149 factor
= GET_MODE_SIZE (mode
) / UNITS_PER_WORD
;
4154 total
= factor
* COSTS_N_INSNS (1);
4164 /* If we can't tie these modes, make this expensive. The larger
4165 the mode, the more expensive it is. */
4166 if (! MODES_TIEABLE_P (mode
, GET_MODE (SUBREG_REG (x
))))
4167 return COSTS_N_INSNS (2 + factor
);
4171 if (MODES_TIEABLE_P (mode
, GET_MODE (XEXP (x
, 0))))
4178 if (targetm
.rtx_costs (x
, mode
, outer_code
, opno
, &total
, speed
))
4183 /* Sum the costs of the sub-rtx's, plus cost of this operation,
4184 which is already in total. */
4186 fmt
= GET_RTX_FORMAT (code
);
4187 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
4189 total
+= rtx_cost (XEXP (x
, i
), mode
, code
, i
, speed
);
4190 else if (fmt
[i
] == 'E')
4191 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
4192 total
+= rtx_cost (XVECEXP (x
, i
, j
), mode
, code
, i
, speed
);
/* Fill in the structure C with information about both speed and size rtx
   costs for X, which is operand OPNO in an expression with code OUTER.  */

void
get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
                   struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, mode, outer, opno, true);
  c->size = rtx_cost (x, mode, outer, opno, false);
}
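/* Illustrative sketch only, not part of GCC: rtx_cost is normally reached
   through wrappers such as set_src_cost; a pass choosing between two
   equivalent source expressions might compare them as below.  The helper
   name is hypothetical.  */
#if 0
static rtx
example_cheaper_src (rtx a, rtx b, machine_mode mode, bool speed)
{
  return (rtx_cost (a, mode, SET, 1, speed)
          <= rtx_cost (b, mode, SET, 1, speed)) ? a : b;
}
#endif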
/* Return cost of address expression X.
   Expect that X is properly formed address reference.

   SPEED parameter specifies whether costs optimized for speed or size
   should be returned.  */

int
address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for cost of various unusual addresses, such as operands
     of push instruction.  It is not worthwhile to complicate writing
     of the target hook by such cases.  */
  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, mode, as, speed);
}

/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
{
  return rtx_cost (x, Pmode, MEM, 0, speed);
}
unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, machine_mode mode)
{
  return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
}

unsigned int
num_sign_bit_copies (const_rtx x, machine_mode mode)
{
  return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
}
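/* Illustrative sketch only, not part of GCC: a typical consumer of
   nonzero_bits checks whether a mask can clear any bit that may actually
   be set, for example to prove that an AND is redundant.  The helper name
   is hypothetical.  */
#if 0
static bool
example_and_is_redundant_p (rtx reg, machine_mode mode,
                            unsigned HOST_WIDE_INT mask)
{
  /* If every possibly-nonzero bit of REG is already within MASK,
     (and REG MASK) computes REG itself.  */
  return (nonzero_bits (reg, mode) & ~mask) == 0;
}
#endif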
4249 /* Return true if nonzero_bits1 might recurse into both operands
4253 nonzero_bits_binary_arith_p (const_rtx x
)
4255 if (!ARITHMETIC_P (x
))
4257 switch (GET_CODE (x
))
4279 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4280 It avoids exponential behavior in nonzero_bits1 when X has
4281 identical subexpressions on the first or the second level. */
4283 static unsigned HOST_WIDE_INT
4284 cached_nonzero_bits (const_rtx x
, machine_mode mode
, const_rtx known_x
,
4285 machine_mode known_mode
,
4286 unsigned HOST_WIDE_INT known_ret
)
4288 if (x
== known_x
&& mode
== known_mode
)
4291 /* Try to find identical subexpressions. If found call
4292 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4293 precomputed value for the subexpression as KNOWN_RET. */
4295 if (nonzero_bits_binary_arith_p (x
))
4297 rtx x0
= XEXP (x
, 0);
4298 rtx x1
= XEXP (x
, 1);
4300 /* Check the first level. */
4302 return nonzero_bits1 (x
, mode
, x0
, mode
,
4303 cached_nonzero_bits (x0
, mode
, known_x
,
4304 known_mode
, known_ret
));
4306 /* Check the second level. */
4307 if (nonzero_bits_binary_arith_p (x0
)
4308 && (x1
== XEXP (x0
, 0) || x1
== XEXP (x0
, 1)))
4309 return nonzero_bits1 (x
, mode
, x1
, mode
,
4310 cached_nonzero_bits (x1
, mode
, known_x
,
4311 known_mode
, known_ret
));
4313 if (nonzero_bits_binary_arith_p (x1
)
4314 && (x0
== XEXP (x1
, 0) || x0
== XEXP (x1
, 1)))
4315 return nonzero_bits1 (x
, mode
, x0
, mode
,
4316 cached_nonzero_bits (x0
, mode
, known_x
,
4317 known_mode
, known_ret
));
4320 return nonzero_bits1 (x
, mode
, known_x
, known_mode
, known_ret
);
4323 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4324 We don't let nonzero_bits recur into num_sign_bit_copies, because that
4325 is less useful. We can't allow both, because that results in exponential
4326 run time recursion. There is a nullstone testcase that triggered
4327 this. This macro avoids accidental uses of num_sign_bit_copies. */
4328 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4330 /* Given an expression, X, compute which bits in X can be nonzero.
4331 We don't care about bits outside of those defined in MODE.
4333 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4334 an arithmetic operation, we can do better. */
4336 static unsigned HOST_WIDE_INT
4337 nonzero_bits1 (const_rtx x
, machine_mode mode
, const_rtx known_x
,
4338 machine_mode known_mode
,
4339 unsigned HOST_WIDE_INT known_ret
)
4341 unsigned HOST_WIDE_INT nonzero
= GET_MODE_MASK (mode
);
4342 unsigned HOST_WIDE_INT inner_nz
;
4344 machine_mode inner_mode
;
4345 unsigned int mode_width
= GET_MODE_PRECISION (mode
);
4347 /* For floating-point and vector values, assume all bits are needed. */
4348 if (FLOAT_MODE_P (GET_MODE (x
)) || FLOAT_MODE_P (mode
)
4349 || VECTOR_MODE_P (GET_MODE (x
)) || VECTOR_MODE_P (mode
))
4352 /* If X is wider than MODE, use its mode instead. */
4353 if (GET_MODE_PRECISION (GET_MODE (x
)) > mode_width
)
4355 mode
= GET_MODE (x
);
4356 nonzero
= GET_MODE_MASK (mode
);
4357 mode_width
= GET_MODE_PRECISION (mode
);
4360 if (mode_width
> HOST_BITS_PER_WIDE_INT
)
4361 /* Our only callers in this case look for single bit values. So
4362 just return the mode mask. Those tests will then be false. */
4365 /* If MODE is wider than X, but both are a single word for both the host
4366 and target machines, we can compute this from which bits of the
4367 object might be nonzero in its own mode, taking into account the fact
4368 that on many CISC machines, accessing an object in a wider mode
4369 causes the high-order bits to become undefined. So they are
4370 not known to be zero. */
4372 if (!WORD_REGISTER_OPERATIONS
4373 && GET_MODE (x
) != VOIDmode
4374 && GET_MODE (x
) != mode
4375 && GET_MODE_PRECISION (GET_MODE (x
)) <= BITS_PER_WORD
4376 && GET_MODE_PRECISION (GET_MODE (x
)) <= HOST_BITS_PER_WIDE_INT
4377 && GET_MODE_PRECISION (mode
) > GET_MODE_PRECISION (GET_MODE (x
)))
4379 nonzero
&= cached_nonzero_bits (x
, GET_MODE (x
),
4380 known_x
, known_mode
, known_ret
);
4381 nonzero
|= GET_MODE_MASK (mode
) & ~GET_MODE_MASK (GET_MODE (x
));
4385 /* Please keep nonzero_bits_binary_arith_p above in sync with
4386 the code in the switch below. */
4387 code
= GET_CODE (x
);
4391 #if defined(POINTERS_EXTEND_UNSIGNED)
4392 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4393 all the bits above ptr_mode are known to be zero. */
4394 /* As we do not know which address space the pointer is referring to,
4395 we can do this only if the target does not support different pointer
4396 or address modes depending on the address space. */
4397 if (target_default_pointer_address_modes_p ()
4398 && POINTERS_EXTEND_UNSIGNED
&& GET_MODE (x
) == Pmode
4400 && !targetm
.have_ptr_extend ())
4401 nonzero
&= GET_MODE_MASK (ptr_mode
);
4404 /* Include declared information about alignment of pointers. */
4405 /* ??? We don't properly preserve REG_POINTER changes across
4406 pointer-to-integer casts, so we can't trust it except for
4407 things that we know must be pointers. See execute/960116-1.c. */
4408 if ((x
== stack_pointer_rtx
4409 || x
== frame_pointer_rtx
4410 || x
== arg_pointer_rtx
)
4411 && REGNO_POINTER_ALIGN (REGNO (x
)))
4413 unsigned HOST_WIDE_INT alignment
4414 = REGNO_POINTER_ALIGN (REGNO (x
)) / BITS_PER_UNIT
;
4416 #ifdef PUSH_ROUNDING
4417 /* If PUSH_ROUNDING is defined, it is possible for the
4418 stack to be momentarily aligned only to that amount,
4419 so we pick the least alignment. */
4420 if (x
== stack_pointer_rtx
&& PUSH_ARGS
)
4421 alignment
= MIN ((unsigned HOST_WIDE_INT
) PUSH_ROUNDING (1),
4425 nonzero
&= ~(alignment
- 1);
4429 unsigned HOST_WIDE_INT nonzero_for_hook
= nonzero
;
4430 rtx new_rtx
= rtl_hooks
.reg_nonzero_bits (x
, mode
, known_x
,
4431 known_mode
, known_ret
,
4435 nonzero_for_hook
&= cached_nonzero_bits (new_rtx
, mode
, known_x
,
4436 known_mode
, known_ret
);
4438 return nonzero_for_hook
;
4442 /* If X is negative in MODE, sign-extend the value. */
4443 if (SHORT_IMMEDIATES_SIGN_EXTEND
&& INTVAL (x
) > 0
4444 && mode_width
< BITS_PER_WORD
4445 && (UINTVAL (x
) & (HOST_WIDE_INT_1U
<< (mode_width
- 1)))
4447 return UINTVAL (x
) | (HOST_WIDE_INT_M1U
<< mode_width
);
4452 /* In many, if not most, RISC machines, reading a byte from memory
4453 zeros the rest of the register. Noticing that fact saves a lot
4454 of extra zero-extends. */
4455 if (load_extend_op (GET_MODE (x
)) == ZERO_EXTEND
)
4456 nonzero
&= GET_MODE_MASK (GET_MODE (x
));
4460 case UNEQ
: case LTGT
:
4461 case GT
: case GTU
: case UNGT
:
4462 case LT
: case LTU
: case UNLT
:
4463 case GE
: case GEU
: case UNGE
:
4464 case LE
: case LEU
: case UNLE
:
4465 case UNORDERED
: case ORDERED
:
4466 /* If this produces an integer result, we know which bits are set.
4467 Code here used to clear bits outside the mode of X, but that is
4469 /* Mind that MODE is the mode the caller wants to look at this
4470 operation in, and not the actual operation mode. We can wind
4471 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4472 that describes the results of a vector compare. */
4473 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_INT
4474 && mode_width
<= HOST_BITS_PER_WIDE_INT
)
4475 nonzero
= STORE_FLAG_VALUE
;
4480 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4481 and num_sign_bit_copies. */
4482 if (num_sign_bit_copies (XEXP (x
, 0), GET_MODE (x
))
4483 == GET_MODE_PRECISION (GET_MODE (x
)))
4487 if (GET_MODE_PRECISION (GET_MODE (x
)) < mode_width
)
4488 nonzero
|= (GET_MODE_MASK (mode
) & ~GET_MODE_MASK (GET_MODE (x
)));
4493 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4494 and num_sign_bit_copies. */
4495 if (num_sign_bit_copies (XEXP (x
, 0), GET_MODE (x
))
4496 == GET_MODE_PRECISION (GET_MODE (x
)))
4502 nonzero
&= (cached_nonzero_bits (XEXP (x
, 0), mode
,
4503 known_x
, known_mode
, known_ret
)
4504 & GET_MODE_MASK (mode
));
4508 nonzero
&= cached_nonzero_bits (XEXP (x
, 0), mode
,
4509 known_x
, known_mode
, known_ret
);
4510 if (GET_MODE (XEXP (x
, 0)) != VOIDmode
)
4511 nonzero
&= GET_MODE_MASK (GET_MODE (XEXP (x
, 0)));
4515 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4516 Otherwise, show all the bits in the outer mode but not the inner
4518 inner_nz
= cached_nonzero_bits (XEXP (x
, 0), mode
,
4519 known_x
, known_mode
, known_ret
);
4520 if (GET_MODE (XEXP (x
, 0)) != VOIDmode
)
4522 inner_nz
&= GET_MODE_MASK (GET_MODE (XEXP (x
, 0)));
4523 if (val_signbit_known_set_p (GET_MODE (XEXP (x
, 0)), inner_nz
))
4524 inner_nz
|= (GET_MODE_MASK (mode
)
4525 & ~GET_MODE_MASK (GET_MODE (XEXP (x
, 0))));
4528 nonzero
&= inner_nz
;
4532 nonzero
&= cached_nonzero_bits (XEXP (x
, 0), mode
,
4533 known_x
, known_mode
, known_ret
)
4534 & cached_nonzero_bits (XEXP (x
, 1), mode
,
4535 known_x
, known_mode
, known_ret
);
4539 case UMIN
: case UMAX
: case SMIN
: case SMAX
:
4541 unsigned HOST_WIDE_INT nonzero0
4542 = cached_nonzero_bits (XEXP (x
, 0), mode
,
4543 known_x
, known_mode
, known_ret
);
4545 /* Don't call nonzero_bits for the second time if it cannot change
4547 if ((nonzero
& nonzero0
) != nonzero
)
4549 | cached_nonzero_bits (XEXP (x
, 1), mode
,
4550 known_x
, known_mode
, known_ret
);
4554 case PLUS
: case MINUS
:
4556 case DIV
: case UDIV
:
4557 case MOD
: case UMOD
:
4558 /* We can apply the rules of arithmetic to compute the number of
4559 high- and low-order zero bits of these operations. We start by
4560 computing the width (position of the highest-order nonzero bit)
4561 and the number of low-order zero bits for each value. */
4563 unsigned HOST_WIDE_INT nz0
4564 = cached_nonzero_bits (XEXP (x
, 0), mode
,
4565 known_x
, known_mode
, known_ret
);
4566 unsigned HOST_WIDE_INT nz1
4567 = cached_nonzero_bits (XEXP (x
, 1), mode
,
4568 known_x
, known_mode
, known_ret
);
4569 int sign_index
= GET_MODE_PRECISION (GET_MODE (x
)) - 1;
4570 int width0
= floor_log2 (nz0
) + 1;
4571 int width1
= floor_log2 (nz1
) + 1;
4572 int low0
= ctz_or_zero (nz0
);
4573 int low1
= ctz_or_zero (nz1
);
4574 unsigned HOST_WIDE_INT op0_maybe_minusp
4575 = nz0
& (HOST_WIDE_INT_1U
<< sign_index
);
4576 unsigned HOST_WIDE_INT op1_maybe_minusp
4577 = nz1
& (HOST_WIDE_INT_1U
<< sign_index
);
4578 unsigned int result_width
= mode_width
;
4584 result_width
= MAX (width0
, width1
) + 1;
4585 result_low
= MIN (low0
, low1
);
4588 result_low
= MIN (low0
, low1
);
4591 result_width
= width0
+ width1
;
4592 result_low
= low0
+ low1
;
4597 if (!op0_maybe_minusp
&& !op1_maybe_minusp
)
4598 result_width
= width0
;
4603 result_width
= width0
;
4608 if (!op0_maybe_minusp
&& !op1_maybe_minusp
)
4609 result_width
= MIN (width0
, width1
);
4610 result_low
= MIN (low0
, low1
);
4615 result_width
= MIN (width0
, width1
);
4616 result_low
= MIN (low0
, low1
);
4622 if (result_width
< mode_width
)
4623 nonzero
&= (HOST_WIDE_INT_1U
<< result_width
) - 1;
4626 nonzero
&= ~((HOST_WIDE_INT_1U
<< result_low
) - 1);
4631 if (CONST_INT_P (XEXP (x
, 1))
4632 && INTVAL (XEXP (x
, 1)) < HOST_BITS_PER_WIDE_INT
)
4633 nonzero
&= (HOST_WIDE_INT_1U
<< INTVAL (XEXP (x
, 1))) - 1;
4637 /* If this is a SUBREG formed for a promoted variable that has
4638 been zero-extended, we know that at least the high-order bits
4639 are zero, though others might be too. */
4640 if (SUBREG_PROMOTED_VAR_P (x
) && SUBREG_PROMOTED_UNSIGNED_P (x
))
4641 nonzero
= GET_MODE_MASK (GET_MODE (x
))
4642 & cached_nonzero_bits (SUBREG_REG (x
), GET_MODE (x
),
4643 known_x
, known_mode
, known_ret
);
4645 /* If the inner mode is a single word for both the host and target
4646 machines, we can compute this from which bits of the inner
4647 object might be nonzero. */
4648 inner_mode
= GET_MODE (SUBREG_REG (x
));
4649 if (GET_MODE_PRECISION (inner_mode
) <= BITS_PER_WORD
4650 && GET_MODE_PRECISION (inner_mode
) <= HOST_BITS_PER_WIDE_INT
)
4652 nonzero
&= cached_nonzero_bits (SUBREG_REG (x
), mode
,
4653 known_x
, known_mode
, known_ret
);
4655 /* On many CISC machines, accessing an object in a wider mode
4656 causes the high-order bits to become undefined. So they are
4657 not known to be zero. */
4659 if ((!WORD_REGISTER_OPERATIONS
4660 /* If this is a typical RISC machine, we only have to worry
4661 about the way loads are extended. */
4662 || ((extend_op
= load_extend_op (inner_mode
)) == SIGN_EXTEND
4663 ? val_signbit_known_set_p (inner_mode
, nonzero
)
4664 : extend_op
!= ZERO_EXTEND
)
4665 || (!MEM_P (SUBREG_REG (x
)) && !REG_P (SUBREG_REG (x
))))
4666 && GET_MODE_PRECISION (GET_MODE (x
))
4667 > GET_MODE_PRECISION (inner_mode
))
4669 |= (GET_MODE_MASK (GET_MODE (x
)) & ~GET_MODE_MASK (inner_mode
));
4677 /* The nonzero bits are in two classes: any bits within MODE
4678 that aren't in GET_MODE (x) are always significant. The rest of the
4679 nonzero bits are those that are significant in the operand of
4680 the shift when shifted the appropriate number of bits. This
4681 shows that high-order bits are cleared by the right shift and
4682 low-order bits by left shifts. */
4683 if (CONST_INT_P (XEXP (x
, 1))
4684 && INTVAL (XEXP (x
, 1)) >= 0
4685 && INTVAL (XEXP (x
, 1)) < HOST_BITS_PER_WIDE_INT
4686 && INTVAL (XEXP (x
, 1)) < GET_MODE_PRECISION (GET_MODE (x
)))
4688 machine_mode inner_mode
= GET_MODE (x
);
4689 unsigned int width
= GET_MODE_PRECISION (inner_mode
);
4690 int count
= INTVAL (XEXP (x
, 1));
4691 unsigned HOST_WIDE_INT mode_mask
= GET_MODE_MASK (inner_mode
);
4692 unsigned HOST_WIDE_INT op_nonzero
4693 = cached_nonzero_bits (XEXP (x
, 0), mode
,
4694 known_x
, known_mode
, known_ret
);
4695 unsigned HOST_WIDE_INT inner
= op_nonzero
& mode_mask
;
4696 unsigned HOST_WIDE_INT outer
= 0;
4698 if (mode_width
> width
)
4699 outer
= (op_nonzero
& nonzero
& ~mode_mask
);
4701 if (code
== LSHIFTRT
)
4703 else if (code
== ASHIFTRT
)
4707 /* If the sign bit may have been nonzero before the shift, we
4708 need to mark all the places it could have been copied to
4709 by the shift as possibly nonzero. */
4710 if (inner
& (HOST_WIDE_INT_1U
<< (width
- 1 - count
)))
4711 inner
|= ((HOST_WIDE_INT_1U
<< count
) - 1)
4714 else if (code
== ASHIFT
)
4717 inner
= ((inner
<< (count
% width
)
4718 | (inner
>> (width
- (count
% width
)))) & mode_mask
);
4720 nonzero
&= (outer
| inner
);
4726 /* This is at most the number of bits in the mode. */
4727 nonzero
= ((unsigned HOST_WIDE_INT
) 2 << (floor_log2 (mode_width
))) - 1;
4731 /* If CLZ has a known value at zero, then the nonzero bits are
4732 that value, plus the number of bits in the mode minus one. */
4733 if (CLZ_DEFINED_VALUE_AT_ZERO (mode
, nonzero
))
4735 |= (HOST_WIDE_INT_1U
<< (floor_log2 (mode_width
))) - 1;
4741 /* If CTZ has a known value at zero, then the nonzero bits are
4742 that value, plus the number of bits in the mode minus one. */
4743 if (CTZ_DEFINED_VALUE_AT_ZERO (mode
, nonzero
))
4745 |= (HOST_WIDE_INT_1U
<< (floor_log2 (mode_width
))) - 1;
4751 /* This is at most the number of bits in the mode minus 1. */
4752 nonzero
= (HOST_WIDE_INT_1U
<< (floor_log2 (mode_width
))) - 1;
4761 unsigned HOST_WIDE_INT nonzero_true
4762 = cached_nonzero_bits (XEXP (x
, 1), mode
,
4763 known_x
, known_mode
, known_ret
);
4765 /* Don't call nonzero_bits for the second time if it cannot change
4767 if ((nonzero
& nonzero_true
) != nonzero
)
4768 nonzero
&= nonzero_true
4769 | cached_nonzero_bits (XEXP (x
, 2), mode
,
4770 known_x
, known_mode
, known_ret
);
4781 /* See the macro definition above. */
4782 #undef cached_num_sign_bit_copies
4785 /* Return true if num_sign_bit_copies1 might recurse into both operands
4789 num_sign_bit_copies_binary_arith_p (const_rtx x
)
4791 if (!ARITHMETIC_P (x
))
4793 switch (GET_CODE (x
))
4811 /* The function cached_num_sign_bit_copies is a wrapper around
4812 num_sign_bit_copies1. It avoids exponential behavior in
4813 num_sign_bit_copies1 when X has identical subexpressions on the
4814 first or the second level. */
4817 cached_num_sign_bit_copies (const_rtx x
, machine_mode mode
, const_rtx known_x
,
4818 machine_mode known_mode
,
4819 unsigned int known_ret
)
4821 if (x
== known_x
&& mode
== known_mode
)
4824 /* Try to find identical subexpressions. If found call
4825 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4826 the precomputed value for the subexpression as KNOWN_RET. */
4828 if (num_sign_bit_copies_binary_arith_p (x
))
4830 rtx x0
= XEXP (x
, 0);
4831 rtx x1
= XEXP (x
, 1);
4833 /* Check the first level. */
4836 num_sign_bit_copies1 (x
, mode
, x0
, mode
,
4837 cached_num_sign_bit_copies (x0
, mode
, known_x
,
4841 /* Check the second level. */
4842 if (num_sign_bit_copies_binary_arith_p (x0
)
4843 && (x1
== XEXP (x0
, 0) || x1
== XEXP (x0
, 1)))
4845 num_sign_bit_copies1 (x
, mode
, x1
, mode
,
4846 cached_num_sign_bit_copies (x1
, mode
, known_x
,
4850 if (num_sign_bit_copies_binary_arith_p (x1
)
4851 && (x0
== XEXP (x1
, 0) || x0
== XEXP (x1
, 1)))
4853 num_sign_bit_copies1 (x
, mode
, x0
, mode
,
4854 cached_num_sign_bit_copies (x0
, mode
, known_x
,
4859 return num_sign_bit_copies1 (x
, mode
, known_x
, known_mode
, known_ret
);
/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE; if MODE is
   VOIDmode, X will be used in its own mode.  The returned value will always
   be between 1 and the number of bits in MODE.  */

static unsigned int
num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x,
                      machine_mode known_mode,
                      unsigned int known_ret)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode inner_mode;
  int num0, num1, result;
  unsigned HOST_WIDE_INT nonzero;

  /* If we weren't given a mode, use the mode of X.  If the mode is still
     VOIDmode, we don't know anything.  Likewise if one of the modes is
     floating-point.  */

  if (mode == VOIDmode)
    mode = GET_MODE (x);

  gcc_checking_assert (mode != BLKmode);

  if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return 1;

  /* For a smaller mode, just ignore the high bits.  */
  unsigned int bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
    {
      num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
                                         known_x, known_mode, known_ret);
      return
        MAX (1, num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
    }

  if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
    {
      /* If this machine does not do all register operations on the entire
         register and MODE is wider than the mode of X, we can say nothing
         at all about the high-order bits.  */
      if (!WORD_REGISTER_OPERATIONS)
        return 1;

      /* Likewise on machines that do, if the mode of the object is smaller
         than a word and loads of that size don't sign extend, we can say
         nothing about the high order bits.  */
      if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
          && load_extend_op (GET_MODE (x)) != SIGN_EXTEND)
        return 1;
    }
  /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
     the code in the switch below.  */
  switch (code)
    {
    case REG:

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* If pointers extend signed and this is a pointer in Pmode, say that
         all the bits above ptr_mode are known to be sign bit copies.  */
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
          && mode == Pmode && REG_POINTER (x)
          && !targetm.have_ptr_extend ())
        return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
#endif

      {
        unsigned int copies_for_hook = 1, copies = 1;
        rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
                                                         known_mode, known_ret,
                                                         &copies_for_hook);

        if (new_rtx)
          copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
                                               known_mode, known_ret);

        if (copies > 1 || copies_for_hook > 1)
          return MAX (copies, copies_for_hook);

        /* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
      }
      break;
    case MEM:
      /* Some RISC machines sign-extend all loads of smaller than a word.  */
      if (load_extend_op (GET_MODE (x)) == SIGN_EXTEND)
        return MAX (1, ((int) bitwidth
                        - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
      break;
    case CONST_INT:
      /* If the constant is negative, take its 1's complement and remask.
         Then see how many zero bits we have.  */
      nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
          && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
        nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
    case SUBREG:
      /* If this is a SUBREG for a promoted object that is sign-extended
         and we are looking at it in a wider mode, we know that at least the
         high-order bits are known to be sign bit copies.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
        {
          num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
                                             known_x, known_mode, known_ret);
          return MAX ((int) bitwidth
                      - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
                      num0);
        }

      /* For a smaller object, just ignore the high bits.  */
      inner_mode = GET_MODE (SUBREG_REG (x));
      if (bitwidth <= GET_MODE_PRECISION (inner_mode))
        {
          num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
                                             known_x, known_mode, known_ret);
          return
            MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode) - bitwidth));
        }

      /* For paradoxical SUBREGs on machines where all register operations
         affect the entire register, just look inside.  Note that we are
         passing MODE to the recursive call, so the number of sign bit copies
         will remain relative to that mode, not the inner mode.  */

      /* This works only if loads sign extend.  Otherwise, if we get a
         reload for the inner part, it may be loaded from the stack, and
         then we lose all sign bit copies that existed before the store
         to the stack.  */

      if (WORD_REGISTER_OPERATIONS
          && load_extend_op (inner_mode) == SIGN_EXTEND
          && paradoxical_subreg_p (x)
          && (MEM_P (SUBREG_REG (x)) || REG_P (SUBREG_REG (x))))
        return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
                                           known_x, known_mode, known_ret);
      break;
    case SIGN_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1)))
        return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
      break;

    case SIGN_EXTEND:
      return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
              + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
                                            known_x, known_mode, known_ret));
    case TRUNCATE:
      /* For a smaller object, just ignore the high bits.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
                                         known_x, known_mode, known_ret);
      return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
                                    - bitwidth)));

    case NOT:
      return cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
    case ROTATE:  case ROTATERT:
      /* If we are rotating left by a number of bits less than the number
         of sign bit copies, we can just subtract that amount from the
         number.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < (int) bitwidth)
        {
          num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                             known_x, known_mode, known_ret);
          return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
                                 : (int) bitwidth - INTVAL (XEXP (x, 1))));
        }
      break;
    case NEG:
      /* In general, this subtracts one sign bit copy.  But if the value
         is known to be positive, the number of sign bit copies is the
         same as that of the input.  Finally, if the input has just one bit
         that might be nonzero, all the bits are copies of the sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
        return num0 > 1 ? num0 - 1 : 1;

      nonzero = nonzero_bits (XEXP (x, 0), mode);
      if (nonzero == 1)
        return bitwidth;

      if (num0 > 1
          && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
        num0--;

      return num0;
    case IOR:   case AND:   case XOR:
    case SMIN:  case SMAX:  case UMIN:  case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
         MIN and MAX operations always return one of the operands.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);

      /* If num1 is clearing some of the top bits then regardless of
         the other term, we are guaranteed to have at least that many
         high-order zero bits.  */
      if (code == AND
          && num1 > 1
          && bitwidth <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (XEXP (x, 1))
          && (UINTVAL (XEXP (x, 1))
              & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
        return num1;

      /* Similarly for IOR when setting high-order bits.  */
      if (code == IOR
          && num1 > 1
          && bitwidth <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (XEXP (x, 1))
          && (UINTVAL (XEXP (x, 1))
              & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
        return num1;

      return MIN (num0, num1);
    case PLUS:  case MINUS:
      /* For addition and subtraction, we can have a 1-bit carry.  However,
         if we are subtracting 1 from a positive number, there will not
         be such a carry.  Furthermore, if the positive number is known to
         be 0 or 1, we know the result is either -1 or 0.  */

      if (code == PLUS && XEXP (x, 1) == constm1_rtx
          && bitwidth <= HOST_BITS_PER_WIDE_INT)
        {
          nonzero = nonzero_bits (XEXP (x, 0), mode);
          if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
            return (nonzero == 1 || nonzero == 0 ? bitwidth
                    : bitwidth - floor_log2 (nonzero) - 1);
        }

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);
      result = MAX (1, MIN (num0, num1) - 1);

      return result;
    case MULT:
      /* The number of bits of the product is the sum of the number of
         bits of both terms.  However, unless one of the terms is known
         to be positive, we must allow for an additional bit since negating
         a negative number can remove one sign bit copy.  */

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);

      result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
      if (result > 0
          && (bitwidth > HOST_BITS_PER_WIDE_INT
              || (((nonzero_bits (XEXP (x, 0), mode)
                    & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
                  && ((nonzero_bits (XEXP (x, 1), mode)
                       & (HOST_WIDE_INT_1U << (bitwidth - 1)))
                      != 0))))
        result--;

      return MAX (1, result);
    case UDIV:
      /* The result must be <= the first operand.  If the first operand
         has the high bit set, we know nothing about the number of sign
         bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
        return 1;
      else if ((nonzero_bits (XEXP (x, 0), mode)
                & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
        return 1;
      else
        return cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                           known_x, known_mode, known_ret);
    case UMOD:
      /* The result must be <= the second operand.  If the second operand
         has (or just might have) the high bit set, we know nothing about
         the number of sign bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
        return 1;
      else if ((nonzero_bits (XEXP (x, 1), mode)
                & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
        return 1;
      else
        return cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                           known_x, known_mode, known_ret);
    case DIV:
      /* Similar to unsigned division, except that we have to worry about
         the case where the divisor is negative, in which case we have
         to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                           known_x, known_mode, known_ret);
      if (result > 1
          && (bitwidth > HOST_BITS_PER_WIDE_INT
              || (nonzero_bits (XEXP (x, 1), mode)
                  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
        result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                           known_x, known_mode, known_ret);
      if (result > 1
          && (bitwidth > HOST_BITS_PER_WIDE_INT
              || (nonzero_bits (XEXP (x, 1), mode)
                  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
        result--;

      return result;
    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
         sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0
          && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
        num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;
    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
          || INTVAL (XEXP (x, 1)) < 0
          || INTVAL (XEXP (x, 1)) >= (int) bitwidth
          || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
        return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));
    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
                                         known_x, known_mode, known_ret);
      return MIN (num0, num1);
    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
         Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
          && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
        nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
         ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}
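
/* Worked examples (plain C on 32-bit host integers, not GCC internals) for
   three of the rules above.  toy_const_copies32 mirrors the CONST_INT case
   and the final nonzero_bits fallback: 0xff has floor_log2 == 7, giving
   32 - 7 - 1 == 24 copies, and the negative constant 0xffffff00 is
   complemented to 0xff first, giving the same 24.  toy_plus_copies mirrors
   PLUS/MINUS: two operands that each fit in 16 signed bits (17 copies)
   produce a sum with MIN (17, 17) - 1 == 16 copies.  toy_ashiftrt_copies
   mirrors ASHIFTRT: an arithmetic right shift by 3 turns 8 known copies
   into MIN (32, 8 + 3) == 11.  */

static inline unsigned int
toy_const_copies32 (unsigned int val)
{
  unsigned int nonzero = val;
  unsigned int copies = 32;

  if (nonzero & 0x80000000u)
    nonzero = ~nonzero;         /* Take the 1's complement and recount.  */
  while (nonzero)               /* copies = 32 - floor_log2 (nonzero) - 1.  */
    {
      nonzero >>= 1;
      copies--;
    }
  return copies;
}

static inline unsigned int
toy_plus_copies (unsigned int num0, unsigned int num1)
{
  unsigned int m = num0 < num1 ? num0 : num1;
  return m > 1 ? m - 1 : 1;     /* Allow for the 1-bit carry.  */
}

static inline unsigned int
toy_ashiftrt_copies (unsigned int num0, unsigned int shift)
{
  unsigned int n = num0 + shift;
  return n < 32 ? n : 32;       /* The count never exceeds the bit width.  */
}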
/* Calculate the rtx_cost of a single instruction.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
insn_rtx_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.  We
     can't use single_set since we only have the pattern.  We also
     consider PARALLELs of a normal set and a single comparison.  In
     that case we use the cost of the non-comparison SET operation,
     which is most-likely to be the real cost of this operation.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      rtx comparison = NULL_RTX;

      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx x = XVECEXP (pat, 0, i);
          if (GET_CODE (x) == SET)
            {
              if (GET_CODE (SET_SRC (x)) == COMPARE)
                {
                  if (comparison)
                    return 0;
                  comparison = x;
                }
              else
                {
                  if (set)
                    return 0;
                  set = x;
                }
            }
        }

      if (!set && comparison)
        set = comparison;

      if (!set)
        return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
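
/* Usage sketch (hypothetical helper, not an existing GCC function): compare
   the pattern costs of two candidate insns, treating "no known cost"
   (a zero return) as very expensive.  */

static inline bool
toy_cheaper_insn_p (rtx_insn *a, rtx_insn *b, bool speed)
{
  int cost_a = insn_rtx_cost (PATTERN (a), speed);
  int cost_b = insn_rtx_cost (PATTERN (b), speed);

  if (cost_a == 0)
    cost_a = INT_MAX;
  if (cost_b == 0)
    cost_b = INT_MAX;
  return cost_a < cost_b;
}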
/* Returns estimate on cost of computing SEQ.  */

unsigned
seq_cost (const rtx_insn *seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
        cost += set_rtx_cost (set, speed);
      else
        cost++;
    }

  return cost;
}
/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonizing it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */

rtx
canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
                        rtx_insn **earliest,
                        rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx_insn *prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;

  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
          || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
         && op1 == CONST0_RTX (GET_MODE (op0))
         && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

      /* If comparison with cc0, import actual comparison from compare
         insn.  */
      if (op0 == cc0_rtx)
        {
          if ((prev = prev_nonnote_insn (prev)) == 0
              || !NONJUMP_INSN_P (prev)
              || (set = single_set (prev)) == 0
              || SET_DEST (set) != cc0_rtx)
            return 0;

          op0 = SET_SRC (set);
          op1 = CONST0_RTX (GET_MODE (op0));
          if (earliest)
            *earliest = prev;
        }
      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
        {
          op1 = XEXP (op0, 1);
          op0 = XEXP (op0, 0);
          continue;
        }
      else if (!REG_P (op0))
        break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
         stop if it isn't a single set or if it has a REG_INC note because
         we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
          || !NONJUMP_INSN_P (prev)
          || FIND_REG_INC_NOTE (prev, NULL_RTX)
          /* In cfglayout mode, there do not have to be labels at the
             beginning of a block, or jumps at the end, so the previous
             conditions would not stop us when we reach bb boundary.  */
          || BLOCK_FOR_INSN (prev) != bb)
        break;

      set = set_of (op0, prev);

      if (set
          && (GET_CODE (set) != SET
              || !rtx_equal_p (SET_DEST (set), op0)))
        break;
      /* If this is setting OP0, get what it sets it to if it looks like
         a comparison.  */
      if (set)
        {
          machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
          REAL_VALUE_TYPE fsfv;
#endif

          /* ??? We may not combine comparisons done in a CCmode with
             comparisons not done in a CCmode.  This is to aid targets
             like Alpha that have an IEEE compliant EQ instruction, and
             a non-IEEE compliant BEQ instruction.  The use of CCmode is
             actually artificial, simply to prevent the combination, but
             should not affect other platforms.

             However, we must allow VOIDmode comparisons to match either
             CCmode or non-CCmode comparison, because some ports have
             modeless comparisons inside branch patterns.

             ??? This mode check should perhaps look more like the mode check
             in simplify_comparison in combine.  */
          if (((GET_MODE_CLASS (mode) == MODE_CC)
               != (GET_MODE_CLASS (inner_mode) == MODE_CC))
              && mode != VOIDmode
              && inner_mode != VOIDmode)
            break;
          if (GET_CODE (SET_SRC (set)) == COMPARE
              || (((code == NE
                    || (code == LT
                        && val_signbit_known_set_p (inner_mode,
                                                    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
                    || (code == LT
                        && SCALAR_FLOAT_MODE_P (inner_mode)
                        && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
                            REAL_VALUE_NEGATIVE (fsfv)))
#endif
                    ))
                  && COMPARISON_P (SET_SRC (set))))
            x = SET_SRC (set);
          else if (((code == EQ
                     || (code == GE
                         && val_signbit_known_set_p (inner_mode,
                                                     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
                     || (code == GE
                         && SCALAR_FLOAT_MODE_P (inner_mode)
                         && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
                             REAL_VALUE_NEGATIVE (fsfv)))
#endif
                     ))
                   && COMPARISON_P (SET_SRC (set)))
            {
              reverse_code = 1;
              x = SET_SRC (set);
            }
          else if ((code == EQ || code == NE)
                   && GET_CODE (SET_SRC (set)) == XOR)
            /* Handle sequences like:

               (set op0 (xor X Y))
               ...(eq|ne op0 (const_int 0))...

               in which case:

               (eq op0 (const_int 0)) reduces to (eq X Y)
               (ne op0 (const_int 0)) reduces to (ne X Y)

               This is the form used by MIPS16, for example.  */
            x = SET_SRC (set);
          else
            break;
        }
      else if (reg_set_p (op0, prev))
        /* If this sets OP0, but not directly, we have to give up.  */
        break;

      if (x)
        {
          /* If the caller is expecting the condition to be valid at INSN,
             make sure X doesn't change before INSN.  */
          if (valid_at_insn_p)
            if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
              break;
          if (COMPARISON_P (x))
            code = GET_CODE (x);
          if (reverse_code)
            {
              code = reversed_comparison_code (x, prev);
              if (code == UNKNOWN)
                return 0;
              reverse_code = 0;
            }

          op0 = XEXP (x, 0), op1 = XEXP (x, 1);
          if (earliest)
            *earliest = prev;
        }
    }
  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;
  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
      && CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
        = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
        {
        case LE:
          if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
            code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
          break;

        /* When cross-compiling, const_val might be sign-extended from
           BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
        case GE:
          if ((const_val & max_val)
              != (HOST_WIDE_INT_1U
                  << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
            code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
          break;

        case LEU:
          if (uconst_val < max_val)
            code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
          break;

        case GEU:
          if (uconst_val != 0)
            code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
          break;

        default:
          break;
        }
    }

  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
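
/* Worked example (plain C on host integers standing in for rtxes): rule (4)
   above rewrites "x <= c" as "x < c + 1" only when c + 1 cannot wrap around
   in the operand's mode.  For an unsigned 8-bit operand, "x <= 254" becomes
   "x < 255", while "x <= 255" is left alone because 255 is already the
   mode's maximum value.  */

static inline void
toy_canonicalize_leu8 (int *code_is_ltu, unsigned int *op1)
{
  const unsigned int max_val = 0xff;    /* GET_MODE_MASK of an 8-bit mode.  */

  if (*op1 < max_val)
    {
      *op1 += 1;
      *code_is_ltu = 1;                 /* LEU became LTU.  */
    }
}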
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
               int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
                                 allow_cc_mode, valid_at_insn_p);
}
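
/* Usage sketch (hypothetical caller): ask for the branch condition of a
   conditional jump in a form that is guaranteed to still be valid at the
   jump itself, without tracking the earliest defining insn.  */

static inline rtx
toy_condition_at_jump (rtx_insn *jump)
{
  return get_condition (jump, NULL, /*allow_cc_mode=*/0,
                        /*valid_at_insn_p=*/1);
}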
/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  machine_mode mode, in_mode;

  for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
       in_mode = GET_MODE_WIDER_MODE (mode))
    for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
         mode = GET_MODE_WIDER_MODE (mode))
      {
        machine_mode i;

        /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
           extends to the next widest mode.  */
        gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
                    || GET_MODE_WIDER_MODE (mode) == in_mode);

        /* We are in in_mode.  Count how many bits outside of mode
           have to be copies of the sign-bit.  */
        for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
          {
            machine_mode wider = GET_MODE_WIDER_MODE (i);

            if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
                /* We can only check sign-bit copies starting from the
                   top-bit.  In order to be able to check the bits we
                   have already seen we pretend that subsequent bits
                   have to be sign-bit copies too.  */
                || num_sign_bit_copies_in_rep [in_mode][mode])
              num_sign_bit_copies_in_rep [in_mode][mode]
                += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
          }
      }
}
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
          >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}
/* Return true if RTX code CODE has a single sequence of zero or more
   "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
   entry in that case.  */

static bool
setup_reg_subrtx_bounds (unsigned int code)
{
  const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
  unsigned int i = 0;
  for (; format[i] != 'e'; ++i)
    {
      if (!format[i])
        /* No subrtxes.  Leave start and count as 0.  */
        return true;
      if (format[i] == 'E' || format[i] == 'V')
        return false;
    }

  /* Record the sequence of 'e's.  */
  rtx_all_subrtx_bounds[code].start = i;
  do
    ++i;
  while (format[i] == 'e');
  rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
  /* rtl-iter.h relies on this.  */
  gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);

  for (; format[i]; ++i)
    if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
      return false;

  return true;
}
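
/* Usage sketch (hypothetical self-check): PLUS has the format string "ee",
   so after initialization its bounds should cover exactly operands 0 and 1,
   letting the subrtx iterators visit XEXP (x, 0) and XEXP (x, 1) without
   consulting the format string again.  */

static inline void
toy_check_plus_bounds (void)
{
  gcc_checking_assert (rtx_all_subrtx_bounds[(int) PLUS].start == 0
                       && rtx_all_subrtx_bounds[(int) PLUS].count == 2);
}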
/* Initialize rtx_all_subrtx_bounds.  */
void
init_rtlanal (void)
{
  unsigned int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      if (!setup_reg_subrtx_bounds (i))
        rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
      if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
        rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
    }

  init_num_sign_bit_copies_in_rep ();
}
/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}
/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */

int
low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
        return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
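
/* Worked example (plain C, not GCC internals): exact_log2 (m + 1) is what
   recognizes masks of low-order bits.  0xff + 1 == 0x100 is a power of two,
   so the field is 8 bits wide; 0xf0 + 1 == 0xf1 is not, so -1 is returned.  */

static inline int
toy_low_bitmask_len (unsigned int m)
{
  unsigned int v = m + 1;
  int len = 0;

  if (v == 0 || (v & (v - 1)) != 0)     /* m + 1 must be a power of two.  */
    return -1;
  while (v > 1)
    {
      v >>= 1;
      len++;
    }
  return len;
}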
/* Return the mode of MEM's address.  */

machine_mode
get_address_mode (rtx mem)
{
  machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return mode;
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}
/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.

   TODO: This function needs to be rewritten to work on any size
   integer.  */

void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
        {
          /* In this case the CONST_INT holds both target words.
             Extract the bits from it into two word-sized pieces.
             Sign extend each half to HOST_WIDE_INT.  */
          unsigned HOST_WIDE_INT low, high;
          unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
          unsigned bits_per_word = BITS_PER_WORD;

          /* Set sign_bit to the most significant bit of a word.  */
          sign_bit = 1;
          sign_bit <<= bits_per_word - 1;

          /* Set mask so that all bits of the word are set.  We could
             have used 1 << BITS_PER_WORD instead of basing the
             calculation on sign_bit.  However, on machines where
             HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
             compiler warning, even though the code would never be
             executed.  */
          mask = sign_bit << 1;
          mask--;

          /* Set sign_extend as any remaining bits.  */
          sign_extend = ~mask;

          /* Pick the lower word and sign-extend it.  */
          low = INTVAL (value);
          low &= mask;
          if (low & sign_bit)
            low |= sign_extend;

          /* Pick the higher word, shifted to the least significant
             bits, and sign-extend it.  */
          high = INTVAL (value);
          high >>= bits_per_word - 1;
          high >>= 1;
          high &= mask;
          if (high & sign_bit)
            high |= sign_extend;

          /* Store the words in the target machine order.  */
          if (WORDS_BIG_ENDIAN)
            {
              *first = GEN_INT (high);
              *second = GEN_INT (low);
            }
          else
            {
              *first = GEN_INT (low);
              *second = GEN_INT (high);
            }
        }
      else
        {
          /* The rule for using CONST_INT for a wider mode
             is that we regard the value as signed.
             So sign-extend it.  */
          rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
          if (WORDS_BIG_ENDIAN)
            {
              *first = high;
              *second = value;
            }
          else
            {
              *first = value;
              *second = high;
            }
        }
    }
  else if (GET_CODE (value) == CONST_WIDE_INT)
    {
      /* All of this is scary code and needs to be converted to
         properly work with any size integer.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
      if (WORDS_BIG_ENDIAN)
        {
          *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
          *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
        }
      else
        {
          *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
          *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
        }
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
        {
          *first = const0_rtx;
          *second = value;
        }
      else
        {
          *first = value;
          *second = const0_rtx;
        }
    }
  else if (GET_MODE (value) == VOIDmode
           /* This is the old way we did CONST_DOUBLE integers.  */
           || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
         So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
        {
          *first = GEN_INT (CONST_DOUBLE_HIGH (value));
          *second = GEN_INT (CONST_DOUBLE_LOW (value));
        }
      else
        {
          *first = GEN_INT (CONST_DOUBLE_LOW (value));
          *second = GEN_INT (CONST_DOUBLE_HIGH (value));
        }
    }
  else
    {
      long l[2];

      /* Note, this converts the REAL_VALUE_TYPE to the target's
         format, splits up the floating point double and outputs
         exactly 32 bits of it into each of l[0] and l[1] --
         not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);

      /* If 32 bits is an entire word for the target, but not for the host,
         then sign-extend on the host so that the number will look the same
         way on the host that it would on the target.  See for instance
         simplify_unary_operation.  The #if is needed to avoid compiler
         warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
        {
          if (l[0] & ((long) 1 << 31))
            l[0] |= ((unsigned long) (-1) << 32);
          if (l[1] & ((long) 1 << 31))
            l[1] |= ((unsigned long) (-1) << 32);
        }
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}
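
/* Worked example (plain C, host-side arithmetic only, hypothetical helper):
   splitting the 64-bit constant 0x180000000 for a 32-bit-word target gives
   a low word of 0x80000000 (which sign-extends to -2147483648 on the host)
   and a high word of 1; a big-endian target stores the high word first.  */

static inline void
toy_split_di (long long value, long *first, long *second,
              int words_big_endian)
{
  long low = (long) (int) (value & 0xffffffff);   /* Sign-extend low word.  */
  long high = (long) (int) ((value >> 32) & 0xffffffff);

  *first = words_big_endian ? high : low;
  *second = words_big_endian ? low : high;
}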
/* Return true if X is a sign_extract or zero_extract from the least
   significant bit.  */

static bool
lsb_bitfield_op_p (rtx x)
{
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
    {
      machine_mode mode = GET_MODE (XEXP (x, 0));
      HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));

      return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
    }
  return false;
}
/* Strip outer address "mutations" from LOC and return a pointer to the
   inner value.  If OUTER_CODE is nonnull, store the code of the innermost
   stripped expression there.

   "Mutations" either convert between modes or apply some kind of
   extension, truncation or alignment.  */

rtx *
strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
  for (;;)
    {
      enum rtx_code code = GET_CODE (*loc);
      if (GET_RTX_CLASS (code) == RTX_UNARY)
        /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
           used to convert between pointer sizes.  */
        loc = &XEXP (*loc, 0);
      else if (lsb_bitfield_op_p (*loc))
        /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
           acts as a combined truncation and extension.  */
        loc = &XEXP (*loc, 0);
      else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
        /* (and ... (const_int -X)) is used to align to X bytes.  */
        loc = &XEXP (*loc, 0);
      else if (code == SUBREG
               && !OBJECT_P (SUBREG_REG (*loc))
               && subreg_lowpart_p (*loc))
        /* (subreg (operator ...) ...) inside and is used for mode
           conversion too.  */
        loc = &SUBREG_REG (*loc);
      else
        {
          if (outer_code)
            *outer_code = code;
          return loc;
        }
    }
}
/* Return true if CODE applies some kind of scale.  The scaled value is
   the first operand and the scale is the second.  */

static bool
binary_scale_code_p (enum rtx_code code)
{
  return (code == MULT
          || code == ASHIFT
          /* Needed by ARM targets.  */
          || code == ASHIFTRT
          || code == LSHIFTRT
          || code == ROTATE
          || code == ROTATERT);
}
/* If *INNER can be interpreted as a base, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_base_term (rtx *inner)
{
  if (GET_CODE (*inner) == LO_SUM)
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}

/* If *INNER can be interpreted as an index, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_index_term (rtx *inner)
{
  /* At present, only constant scales are allowed.  */
  if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}
/* Set the segment part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->segment);
  info->segment = loc;
  info->segment_term = inner;
}

/* Set the base part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_base (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->base);
  info->base = loc;
  info->base_term = inner;
}

/* Set the index part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_index (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->index);
  info->index = loc;
  info->index_term = inner;
}

/* Set the displacement part of address INFO to LOC, given that INNER
   is the constant term.  */

static void
set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->disp);
  info->disp = loc;
  info->disp_term = inner;
}
/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
   rest of INFO accordingly.  */

static void
decompose_incdec_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  /* These addresses are only valid when the size of the addressed
     value is known.  */
  gcc_checking_assert (info->mode != VOIDmode);
}

/* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
   of INFO accordingly.  */

static void
decompose_automod_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  rtx plus = XEXP (*info->inner, 1);
  gcc_assert (GET_CODE (plus) == PLUS);

  info->base_term2 = &XEXP (plus, 0);
  gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));

  rtx *step = &XEXP (plus, 1);
  rtx *inner_step = strip_address_mutations (step);
  if (CONSTANT_P (*inner_step))
    set_address_disp (info, step, inner_step);
  else
    set_address_index (info, step, inner_step);
}
/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}
/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, machine_mode mode, addr_space_t as,
          enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
         or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
        return base_p ? 1 : -1;
    }
  return 0;
}
/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
                                        ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
        set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
        set_address_segment (info, loc, inner);
      else
        {
          /* The only other possibilities are a base or an index.  */
          rtx *base_term = get_base_term (inner);
          rtx *index_term = get_index_term (inner);
          gcc_assert (base_term || index_term);
          if (!base_term)
            set_address_index (info, loc, index_term);
          else if (!index_term)
            set_address_base (info, loc, base_term);
          else
            {
              gcc_assert (base_term == index_term);
              ops[out] = loc;
              inner_ops[out] = base_term;
              ++out;
            }
        }
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
         the base.  If we were confident that another term was the base
         or index, treat the remaining operand as the other kind.  */
      if (!info->base)
        set_address_base (info, ops[0], inner_ops[0]);
      else
        set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
                    GET_CODE (*ops[1]))
          >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
                       GET_CODE (*ops[0])))
        {
          set_address_base (info, ops[0], inner_ops[0]);
          set_address_index (info, ops[1], inner_ops[1]);
        }
      else
        {
          set_address_base (info, ops[1], inner_ops[1]);
          set_address_index (info, ops[0], inner_ops[0]);
        }
    }
  else
    gcc_assert (out == 0);
}
/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
                   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}
/* Describe address operand LOC in INFO.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}

/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
                     MEM_ADDR_SPACE (x), MEM);
}
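
/* Usage sketch (hypothetical caller): given a MEM rtx, decompose its address
   and look at the pieces that were identified.  Which of base, index, disp
   and segment end up nonnull depends entirely on the address shape.  */

static inline bool
toy_mem_has_constant_disp (rtx mem)
{
  struct address_info info;

  decompose_mem_address (&info, mem);
  return info.disp != NULL && CONST_INT_P (*info.disp);
}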
/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
                     info->addr_outer_code);
}
/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}
/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}
/* Return true if RTL X contains a SYMBOL_REF.  */

bool
contains_symbol_ref_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter))
      return true;

  return false;
}

/* Return true if RTL X contains a SYMBOL_REF or LABEL_REF.  */

bool
contains_symbolic_reference_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
      return true;

  return false;
}
6414 tls_referenced_p (const_rtx x
)
6416 if (!targetm
.have_tls
)
6419 subrtx_iterator::array_type array
;
6420 FOR_EACH_SUBRTX (iter
, array
, x
, ALL
)
6421 if (GET_CODE (*iter
) == SYMBOL_REF
&& SYMBOL_REF_TLS_MODEL (*iter
) != 0)