/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "recog.h"
#include "addresses.h"
#include "rtl-iter.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, machine_mode,
                                                   const_rtx, machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, machine_mode,
                                             const_rtx, machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, machine_mode, const_rtx,
                                                machine_mode, unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, machine_mode, const_rtx,
                                          machine_mode, unsigned int);

rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  value_type *base,
                                                  size_t i, value_type x)
{
  if (base == array.stack)
    {
      if (i < LOCAL_ELEMS)
        {
          base[i] = x;
          return base;
        }
      gcc_checking_assert (i == LOCAL_ELEMS);
      /* A previous iteration might also have moved from the stack to the
         heap, in which case the heap array will already be big enough.  */
      if (vec_safe_length (array.heap) <= i)
        vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }
  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}
/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    value_type *base,
                                                    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
         we're likely to want most.  It also allows for the SEQUENCE
         code below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
        if (format[i] == 'e')
          {
            value_type subx = T::get_value (x->u.fld[i].rt_rtx);
            if (__builtin_expect (end < LOCAL_ELEMS, true))
              base[end++] = subx;
            else
              base = add_single_to_queue (array, base, end++, subx);
          }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
        {
          value_type subx = T::get_value (x->u.fld[i].rt_rtx);
          if (__builtin_expect (end < LOCAL_ELEMS, true))
            base[end++] = subx;
          else
            base = add_single_to_queue (array, base, end++, subx);
        }
      else if (format[i] == 'E')
        {
          unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
          rtx *vec = x->u.fld[i].rt_rtvec->elem;
          if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
            for (unsigned int j = 0; j < length; j++)
              base[end++] = T::get_value (vec[j]);
          else
            for (unsigned int j = 0; j < length; j++)
              base = add_single_to_queue (array, base, end++,
                                          T::get_value (vec[j]));
          if (code == SEQUENCE && end == length)
            /* If the subrtxes of the sequence fill the entire array then
               we know that no other parts of a containing insn are queued.
               The caller is therefore iterating over the sequence as a
               PATTERN (...), so we also want the patterns of the
               subinstructions.  */
            for (unsigned int j = 0; j < length; j++)
              {
                typename T::rtx_type x = T::get_rtx (base[j]);
                if (INSN_P (x))
                  base[j] = T::get_value (PATTERN (x));
              }
        }
  return end - orig_end;
}
template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;
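
/* Illustrative sketch (an editorial addition, not part of the original
   file): the iterator templates instantiated above are normally driven
   through the FOR_EACH_SUBRTX* macros from rtl-iter.h.  A hypothetical
   walk that counts the REGs reachable from X could be written as:

     static int
     count_regs_in (const_rtx x)
     {
       int count = 0;
       subrtx_iterator::array_type array;
       FOR_EACH_SUBRTX (iter, array, x, ALL)
         if (REG_P (*iter))
           count++;
       return count;
     }

   ARRAY supplies the worklist storage; add_single_to_queue and
   add_subrtxes_to_queue spill it from the local buffer to the heap once
   more than LOCAL_ELEMS sub-rtxes are pending.  */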
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
/* Compute an approximation for the offset between the register
   FROM and TO for the current function, as it was at the start
   of the routine.  */

static HOST_WIDE_INT
get_initial_register_offset (int from, int to)
{
  static const struct elim_table_t
  {
    const int from;
    const int to;
  } table[] = ELIMINABLE_REGS;
  HOST_WIDE_INT offset1, offset2;
  unsigned int i, j;

  if (to == from)
    return 0;

  /* It is not safe to call INITIAL_ELIMINATION_OFFSET
     before the reload pass.  We need to give at least
     an estimation for the resulting frame size.  */
  if (! reload_completed)
    {
      offset1 = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset1 = - offset1;
#endif
      if (to == STACK_POINTER_REGNUM)
        return offset1;
      else if (from == STACK_POINTER_REGNUM)
        return - offset1;
      else
        return 0;
    }

  for (i = 0; i < ARRAY_SIZE (table); i++)
      if (table[i].from == from)
        {
          if (table[i].to == to)
            {
              INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                          offset1);
              return offset1;
            }
          for (j = 0; j < ARRAY_SIZE (table); j++)
            {
              if (table[j].to == to
                  && table[j].from == table[i].to)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return offset1 + offset2;
                }
              if (table[j].from == to
                  && table[j].to == table[i].to)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return offset1 - offset2;
                }
            }
        }
      else if (table[i].to == from)
        {
          if (table[i].from == to)
            {
              INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                          offset1);
              return - offset1;
            }
          for (j = 0; j < ARRAY_SIZE (table); j++)
            {
              if (table[j].to == to
                  && table[j].from == table[i].from)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return - offset1 + offset2;
                }
              if (table[j].from == to
                  && table[j].to == table[i].from)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return - offset1 - offset2;
                }
            }
        }

  /* If the requested register combination was not found,
     try a different more simple combination.  */
  if (from == ARG_POINTER_REGNUM)
    return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
  else if (to == ARG_POINTER_REGNUM)
    return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
  else if (from == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
  else if (to == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
  else
    return 0;
}
/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
                         ? tree_to_shwi (DECL_SIZE_UNIT (decl))
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }
      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        {
#ifdef RED_ZONE_SIZE
          HOST_WIDE_INT red_zone_size = RED_ZONE_SIZE;
#else
          HOST_WIDE_INT red_zone_size = 0;
#endif
          HOST_WIDE_INT stack_boundary = PREFERRED_STACK_BOUNDARY
                                         / BITS_PER_UNIT;
          HOST_WIDE_INT low_bound, high_bound;

          if (size == 0)
            size = GET_MODE_SIZE (mode);

          if (x == frame_pointer_rtx)
            {
              if (FRAME_GROWS_DOWNWARD)
                {
                  high_bound = STARTING_FRAME_OFFSET;
                  low_bound  = high_bound - get_frame_size ();
                }
              else
                {
                  low_bound  = STARTING_FRAME_OFFSET;
                  high_bound = low_bound + get_frame_size ();
                }
            }
          else if (x == hard_frame_pointer_rtx)
            {
              HOST_WIDE_INT sp_offset
                = get_initial_register_offset (STACK_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);
              HOST_WIDE_INT ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = sp_offset - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = sp_offset + red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else if (x == stack_pointer_rtx)
            {
              HOST_WIDE_INT ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               STACK_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else
            {
              /* We assume that accesses are safe to at least the
                 next stack boundary.
                 Examples are varargs and __builtin_return_address.  */
#if ARGS_GROW_DOWNWARD
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + stack_boundary;
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - crtl->args.size - stack_boundary;
#else
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - stack_boundary;
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + crtl->args.size + stack_boundary;
#endif
            }

          if (offset >= low_bound && offset <= high_bound - size)
            return 0;
          return 1;
        }
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the case above, it can cause a trap.  */
  return 1;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
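
/* Usage sketch (illustrative, not part of the original file): callers
   normally apply rtx_addr_can_trap_p to the address of a MEM:

     rtx addr = XEXP (mem, 0);
     if (!rtx_addr_can_trap_p (addr))
       hoist_the_load (mem);   // hypothetical caller

   Frame- and stack-pointer-relative addresses within the bounds computed
   above are reported as safe, while e.g. a weak SYMBOL_REF is not.  */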
/* Return true if X contains a MEM subrtx.  */

bool
contains_mem_rtx_p (rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (MEM_P (*iter))
      return true;

  return false;
}
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the case above, might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
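
/* Example (illustrative, not part of the original file): for

     (const (plus (symbol_ref "x") (const_int 8)))

   get_integer_term returns 8 and get_related_value returns the
   (symbol_ref "x") operand, which lets cse.c relate this constant to
   other constants of the form x+N.  */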
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
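
/* Usage sketch (illustrative, not part of the original file):

     rtx base, offset;
     split_const (x, &base, &offset);
     // For (const (plus (symbol_ref "x") (const_int 8))), BASE is the
     // SYMBOL_REF and OFFSET is (const_int 8); for anything else BASE
     // is X itself and OFFSET is const0_rtx.
*/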
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == label_ref_label (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

      /* These are kept unique for a given value.  */
    CASE_CONST_ANY:
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}
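
/* Example (illustrative, not part of the original file): for
   IN = (set (reg 101) (plus (reg 100) (const_int 1))),
   reg_mentioned_p returns nonzero for both (reg 100) and (reg 101).
   Use reg_referenced_p or reg_set_p below when the use/set distinction
   matters.  */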
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
                    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
                   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Return true if REG is set or clobbered inside INSN.  */

int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
        if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
          return true;

      return false;
    }

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return true;

  /* There are no REG_INC notes for SP autoinc.  */
  if (reg == stack_pointer_rtx && INSN_P (insn))
    {
      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
        {
          rtx mem = *iter;
          if (mem
              && MEM_P (mem)
              && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
            {
              if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
                return true;
              iter.skip_subrtxes ();
            }
        }
    }

  return set_of (reg, insn) != NULL_RTX;
}
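
/* Usage sketch (illustrative, not part of the original file):

     if (!reg_set_p (reg, insn))
       ... // INSN neither sets nor clobbers REG

   Note that reg_set_p is conservative for calls: any call-clobbered
   hard register is treated as set.  For an arbitrary rtx rather than a
   single register, use modified_in_p below.  */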
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
/* Add all hard registers in X to *PSET.  */
static void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
        add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}
/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *)data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}
/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
        IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}
/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SET whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In common case
                 only single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach set first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL;
              break;

            default:
              return NULL;
            }
        }
    }
  return set;
}
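
/* Usage sketch (illustrative, not part of the original file):
   single_set_2 is normally reached through the single_set wrapper in
   rtl.h:

     rtx set = single_set (insn);
     if (set && REG_P (SET_DEST (set)))
       record_equiv (SET_DEST (set), SET_SRC (set));   // hypothetical caller

   A pattern such as (parallel [(set (reg 100) ...) (clobber (reg cc))])
   still counts as a single set because the CLOBBER is ignored.  */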
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      int c0 = INTVAL (XVECEXP (par, 0, 0));
      HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
          return 0;
      return
        simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                               offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
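
/* Example (illustrative, not part of the original file):
   (set (reg:SI 100) (reg:SI 100)) is a no-op, as is a copy between two
   rtx_equal_p non-volatile MEMs; (set (reg:SI 100) (reg:SI 101)) is
   not.  */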
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const rtx_insn *insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

bool
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return false;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
           || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
               && x_regno == ARG_POINTER_REGNUM)
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return true;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return true;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return false;
      loc = &SET_SRC (x);
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return true;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return true;
        }
    }
  return false;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
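
/* Usage sketch (illustrative, not part of the original file): a typical
   note_stores callback records the stored-to registers, much like
   record_hard_reg_sets above:

     static void
     note_dest (rtx dest, const_rtx pat ATTRIBUTE_UNUSED, void *data)
     {
       if (REG_P (dest))
         SET_REGNO_REG_SET ((regset) data, REGNO (dest));  // hypothetical use
     }

     ...
     note_stores (PATTERN (insn), note_dest, (void *) live);
*/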
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const rtx_insn *insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}
/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
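
/* Usage sketch (illustrative, not part of the original file): to ask
   whether an insn's result is known to equal some expression, look for
   a REG_EQUAL note:

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note)
       ... // XEXP (note, 0) is the equivalent expression

   Passing a nonzero DATUM instead requires pointer equality with the
   note's datum.  */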
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const rtx_insn *insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}
/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}
/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));

  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
    case REG_TM:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
                                       datum, REG_NOTES (insn));
}
/* Add a register note like NOTE to INSN.  */

void
add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
{
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
  else
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
}
/* Duplicate NOTE and return the copy.  */

rtx
duplicate_reg_note (rtx note)
{
  reg_note kind = REG_NOTE_KIND (note);

  if (GET_CODE (note) == INT_LIST)
    return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
  else if (GET_CODE (note) == EXPR_LIST)
    return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
  else
    return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
}
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx_insn *insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (insn);
      break;
    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
   Return true if any note has been removed.  */

bool
remove_reg_equal_equiv_notes (rtx_insn *insn)
{
  rtx *loc;
  bool ret = false;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        {
          *loc = XEXP (*loc, 1);
          ret = true;
        }
      else
        loc = &XEXP (*loc, 1);
    }
  return ret;
}
/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  if (!df)
    return;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx_insn *insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

bool
in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return true;

  return false;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
{
  rtx_expr_list *temp = *listp;
  rtx_expr_list *prev = NULL;

  while (temp)
    {
      if (node == temp->element ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
{
  rtx_insn_list *temp = *listp;
  rtx_insn_list *prev = NULL;

  while (temp)
    {
      if (node == temp->insn ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state, and thus no
   instructions or register uses should be moved or combined across them.
   This includes only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case CALL:
    case MEM:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_insn_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_insn_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Nonzero if X contains any volatile memory references,
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_refs_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_refs_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Similar to above, except that it also rejects register pre- and post-
   incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case VAR_LOCATION:
      return 0;

    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
         when some combination can't be done.  If we see one, don't think
         that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case SET:
    case CALL:
    case POST_INC:
    case POST_DEC:
    case PRE_INC:
    case PRE_DEC:
    case POST_MODIFY:
    case PRE_MODIFY:
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (side_effects_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (side_effects_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);
  switch (code)
    {
      /* Handle these cases quickly.  */
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST:
    case PC:
    case CC0:
    case REG:
    case SCRATCH:
      return 0;

    case UNSPEC:
      return targetm.unspec_may_trap_p (x, flags);

    case UNSPEC_VOLATILE:
    case ASM_INPUT:
    case TRAP_IF:
      return 1;

    case ASM_OPERANDS:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
	  && MEM_VOLATILE_P (x)
	  && XEXP (x, 0) == stack_pointer_rtx)
	return 1;
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
	     reference; moving it out of context such as when moving code
	     when optimizing, might cause its address to become invalid.  */
	  code_changed
	  || !MEM_NOTRAP_P (x))
	{
	  HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
	  return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
					GET_MODE (x), code_changed);
	}
      return 0;

      /* Division by a non-constant might trap.  */
    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      if (HONOR_SNANS (x))
	return 1;
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
	return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
	return 1;
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
	 certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
	break;
      /* ??? There is no machine independent way to check for tests that trap
	 when COMPARE is used, though many targets do make this distinction.
	 For instance, sparc uses CCFPE for compares which generate exceptions
	 and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (x))
	return 1;
      /* But often the compare has some CC mode, so check operand
	 modes as well.  */
      if (HONOR_NANS (XEXP (x, 0))
	  || HONOR_NANS (XEXP (x, 1)))
	return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (x))
	return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (XEXP (x, 0))
	  || HONOR_SNANS (XEXP (x, 1)))
	return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
	return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
	return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (may_trap_p_1 (XEXP (x, i), flags))
	    return 1;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (may_trap_p_1 (XVECEXP (x, i, j), flags))
	      return 1;
	}
    }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}
/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
	if (s->d == 1)
	  return *s->ip;
	else
	  return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      if ((reg:SI) == 1)
	(set (reg:SI) (mem:SI (%fp - 7)))
      else
	(set (reg:QI) (mem:QI (%fp - 7)))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}
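
/* Editorial example (a sketch, not from the GCC sources): the practical
   difference between the two predicates for a pass doing speculative code
   motion.  The rtxes built here are hypothetical.

     rtx addr = gen_rtx_REG (Pmode, 42);
     rtx mem = gen_rtx_MEM (SImode, addr);

     may_trap_p (mem);           // true: unknown address, may trap in place
     may_trap_or_fault_p (mem);  // also true, and stays true even for MEMs
				 // whose MEM_NOTRAP_P would let may_trap_p
				 // say false once the context changes

   Code that hoists or sinks a MEM out of its guarding test should consult
   may_trap_or_fault_p, since a reference that cannot trap where it stands
   may still fault after being moved.  */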
/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

int
inequality_comparisons_p (const_rtx x)
{
  const char *fmt;
  int len, i;
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case REG:
    case SCRATCH:
    case PC:
    case CC0:
    CASE_CONST_ANY:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      return 0;

    case LT:
    case LTU:
    case GT:
    case GTU:
    case LE:
    case LEU:
    case GE:
    case GEU:
      return 1;

    default:
      break;
    }

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
	{
	  if (inequality_comparisons_p (XEXP (x, i)))
	    return 1;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (inequality_comparisons_p (XVECEXP (x, i, j)))
	      return 1;
	}
    }

  return 0;
}
/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.

   ALL_REGS is true if we want to replace all REGs equal to FROM, not just
   those pointer-equal ones.  */

rtx
replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
{
  int i, j;
  const char *fmt;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (all_regs
      && REG_P (x)
      && REG_P (from)
      && REGNO (x) == REGNO (from))
    {
      gcc_assert (GET_MODE (x) == GET_MODE (from));
      return to;
    }
  else if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_subreg (GET_MODE (x), new_rtx,
			       GET_MODE (SUBREG_REG (x)),
			       SUBREG_BYTE (x));
	  gcc_assert (x);
	}
      else
	SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					new_rtx, GET_MODE (XEXP (x, 0)));
	  gcc_assert (x);
	}
      else
	XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
					   from, to, all_regs);
    }

  return x;
}
/* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
   the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */

void
replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
{
  /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
  rtx x = *loc;
  if (JUMP_TABLE_DATA_P (x))
    {
      x = PATTERN (x);
      rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
      int len = GET_NUM_ELEM (vec);
      for (int i = 0; i < len; ++i)
	{
	  rtx ref = RTVEC_ELT (vec, i);
	  if (XEXP (ref, 0) == old_label)
	    {
	      XEXP (ref, 0) = new_label;
	      if (update_label_nuses)
		{
		  ++LABEL_NUSES (new_label);
		  --LABEL_NUSES (old_label);
		}
	    }
	}
      return;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by the iterator because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
    JUMP_LABEL (x) = new_label;

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
    {
      rtx *loc = *iter;
      if (rtx x = *loc)
	{
	  if (GET_CODE (x) == SYMBOL_REF
	      && CONSTANT_POOL_ADDRESS_P (x))
	    {
	      rtx c = get_pool_constant (x);
	      if (rtx_referenced_p (old_label, c))
		{
		  /* Create a copy of constant C; replace the label inside
		     but do not update LABEL_NUSES because uses in constant pool
		     are not counted.  */
		  rtx new_c = copy_rtx (c);
		  replace_label (&new_c, old_label, new_label, false);

		  /* Add the new constant NEW_C to constant pool and replace
		     the old reference to constant by new reference.  */
		  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
		  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
		}
	    }

	  if ((GET_CODE (x) == LABEL_REF
	       || GET_CODE (x) == INSN_LIST)
	      && XEXP (x, 0) == old_label)
	    {
	      XEXP (x, 0) = new_label;
	      if (update_label_nuses)
		{
		  ++LABEL_NUSES (new_label);
		  --LABEL_NUSES (old_label);
		}
	    }
	}
    }
}
/* Likewise, but apply the replacement to INSN as a whole.  */

void
replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
		       rtx_insn *new_label, bool update_label_nuses)
{
  rtx insn_as_rtx = insn;
  replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
  gcc_checking_assert (insn_as_rtx == insn);
}
/* Return true if X is referenced in BODY.  */

bool
rtx_referenced_p (const_rtx x, const_rtx body)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, body, ALL)
    if (const_rtx y = *iter)
      {
	/* Check if a label_ref Y refers to label X.  */
	if (GET_CODE (y) == LABEL_REF
	    && LABEL_P (x)
	    && label_ref_label (y) == x)
	  return true;

	if (rtx_equal_p (x, y))
	  return true;

	/* If Y is a reference to pool constant traverse the constant.  */
	if (GET_CODE (y) == SYMBOL_REF
	    && CONSTANT_POOL_ADDRESS_P (y))
	  iter.substitute (get_pool_constant (y));
      }
  return false;
}
/* If INSN is a tablejump return true and store the label (before jump table) to
   *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
	     rtx_jump_table_data **tablep)
{
  if (!JUMP_P (insn))
    return false;

  rtx target = JUMP_LABEL (insn);
  if (target == NULL_RTX || ANY_RETURN_P (target))
    return false;

  rtx_insn *label = as_a <rtx_insn *> (target);
  rtx_insn *table = next_insn (label);
  if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
    return false;

  if (labelp)
    *labelp = label;
  if (tablep)
    *tablep = as_a <rtx_jump_table_data *> (table);
  return true;
}
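
/* Editorial usage sketch (not from the GCC sources): a typical caller
   wants only the dispatch table and passes NULL for the label.

     rtx_jump_table_data *table;
     if (tablejump_p (insn, NULL, &table))
       {
	 rtvec labels = table->get_labels ();
	 int n = GET_NUM_ELEM (labels);
	 // ... walk the N label refs in LABELS ...
       }

   INSN here stands for whatever rtx_insn * the caller is inspecting.  */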
/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
		&& CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
	      || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
	  && computed_jump_p_1 (XEXP (x, i)))
	return 1;

      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (computed_jump_p_1 (XVECEXP (x, i, j)))
	    return 1;
    }

  return 0;
}
/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const rtx_insn *insn)
{
  int i;
  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
	return 0;

      if (GET_CODE (pat) == PARALLEL)
	{
	  int len = XVECLEN (pat, 0);
	  int has_use_labelref = 0;

	  for (i = len - 1; i >= 0; i--)
	    if (GET_CODE (XVECEXP (pat, 0, i)) == USE
		&& (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
		    == LABEL_REF))
	      {
		has_use_labelref = 1;
		break;
	      }

	  if (! has_use_labelref)
	    for (i = len - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (pat, 0, i)) == SET
		  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
		  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
		return 1;
	}
      else if (GET_CODE (pat) == SET
	       && SET_DEST (pat) == pc_rtx
	       && computed_jump_p_1 (SET_SRC (pat)))
	return 1;
    }
  return 0;
}
/* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
   the equivalent add insn and pass the result to FN, using DATA as the
   final argument.  */

static int
for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
{
  rtx x = XEXP (mem, 0);
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
	int size = GET_MODE_SIZE (GET_MODE (mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (size, GET_MODE (r1));
	return fn (mem, x, r1, r1, c, data);
      }

    case PRE_DEC:
    case POST_DEC:
      {
	int size = GET_MODE_SIZE (GET_MODE (mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (-size, GET_MODE (r1));
	return fn (mem, x, r1, r1, c, data);
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
	rtx r1 = XEXP (x, 0);
	rtx add = XEXP (x, 1);
	return fn (mem, x, r1, add, NULL, data);
      }

    default:
      gcc_unreachable ();
    }
}
/* Traverse *LOC looking for MEMs that have autoinc addresses.
   For each such autoinc operation found, call FN, passing it
   the innermost enclosing MEM, the operation itself, the RTX modified
   by the operation, two RTXs (the second may be NULL) that, once
   added, represent the value to be held by the modified RTX
   afterwards, and DATA.  FN is to return 0 to continue the
   traversal or any other value to have it returned to the caller of
   for_each_inc_dec.  */

int
for_each_inc_dec (rtx x,
		  for_each_inc_dec_fn fn,
		  void *data)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx mem = *iter;
      if (mem
	  && MEM_P (mem)
	  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
	{
	  int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
	  if (res != 0)
	    return res;
	  iter.skip_subrtxes ();
	}
    }
  return 0;
}
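
/* Editorial example (a sketch, not from the GCC sources): a minimal
   callback matching the for_each_inc_dec_fn protocol above.  DEST is the
   register being modified and its new value is SRC plus SRCOFF, where
   SRCOFF may be NULL for {PRE,POST}_MODIFY addresses (SRC then already
   denotes the full addend expression).

     static int
     count_autoinc (rtx, rtx, rtx, rtx, rtx, void *data)
     {
       ++*(int *) data;
       return 0;	// zero: keep traversing
     }

     int n = 0;
     for_each_inc_dec (PATTERN (insn), count_autoinc, &n);

   INSN stands for some rtx_insn * of interest; after the call N holds the
   number of autoinc operations found in its pattern.  */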
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if ((tem = regno_use_in (regno, XEXP (x, i))))
	    return tem;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
	    return tem;
    }

  return NULL_RTX;
}
/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The more
   positive the value, the stronger the preference for being the first
   operand.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always become the second operand.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -10;
  if (code == CONST_WIDE_INT)
    return -9;
  if (code == CONST_DOUBLE)
    return -8;
  if (code == CONST_FIXED)
    return -8;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
	return -7;
      if (code == CONST_WIDE_INT)
	return -6;
      if (code == CONST_DOUBLE)
	return -5;
      if (code == CONST_FIXED)
	return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
	return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
	 of objects.  Prefer pointer objects over non pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
	  || (MEM_P (op) && MEM_POINTER (op)))
	return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
	 This helps to make things linear.  In particular,
	 (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
	 operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
	 is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
	return 1;
      /* FALLTHRU */

    default:
      return 0;
    }
}
/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

bool
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
	  < commutative_operand_precedence (y));
}
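
/* Editorial worked example (not from the GCC sources).  For a commutative
   (plus (const_int 4) (reg:SI 100)), the CONST_INT gets precedence -10 and
   the plain REG -2, so the predicate returns true and a canonicalizer can
   simply do

     if (swap_commutative_operands_p (op0, op1))
       std::swap (op0, op1);

   yielding (plus (reg:SI 100) (const_int 4)): constants always end up as
   the second operand.  */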
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */
int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
	return 1;
    default:
      break;
    }
  return 0;
}
/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */
int
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return 0;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
	    return 1;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (in, i) - 1; j >= 0; j--)
	  if (loc == &XVECEXP (in, i, j)
	      || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
	    return 1;
    }
  return 0;
}
/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

unsigned int
subreg_lsb_1 (machine_mode outer_mode,
	      machine_mode inner_mode,
	      unsigned int subreg_byte)
{
  unsigned int bitpos;
  unsigned int byte;
  unsigned int word;

  /* A paradoxical subreg begins at bit position 0.  */
  if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
    return 0;

  if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    /* If the subreg crosses a word boundary ensure that
       it also begins and ends on a word boundary.  */
    gcc_assert (!((subreg_byte % UNITS_PER_WORD
		   + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
		  && (subreg_byte % UNITS_PER_WORD
		      || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));

  if (WORDS_BIG_ENDIAN)
    word = (GET_MODE_SIZE (inner_mode)
	    - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
  else
    word = subreg_byte / UNITS_PER_WORD;
  bitpos = word * BITS_PER_WORD;

  if (BYTES_BIG_ENDIAN)
    byte = (GET_MODE_SIZE (inner_mode)
	    - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
  else
    byte = subreg_byte % UNITS_PER_WORD;
  bitpos += byte * BITS_PER_UNIT;

  return bitpos;
}
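
/* Editorial worked example (not from the GCC sources), assuming a target
   with 4-byte words.  Take (subreg:SI (reg:DI) 4).  Little-endian
   (WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN == 0): word = 4/4 = 1 and
   byte = 4%4 = 0, so the subreg starts at bit 32, the high half.
   Big-endian: word = (8 - (4+4))/4 = 0 and byte = (8 - (4+4))%4 = 0,
   so the same SUBREG_BYTE names bit position 0, the low-order half.
   The function thus converts memory-style byte offsets into
   endian-independent bit positions.  */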
/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

unsigned int
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
		       SUBREG_BYTE (x));
}
/* Return the subreg byte offset for a subreg whose outer value has
   OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
   there are LSB_SHIFT *bits* between the lsb of the outer value and the
   lsb of the inner value.  This is the inverse of the calculation
   performed by subreg_lsb_1 (which converts byte offsets to bit shifts).  */

unsigned int
subreg_size_offset_from_lsb (unsigned int outer_bytes,
			     unsigned int inner_bytes,
			     unsigned int lsb_shift)
{
  /* A paradoxical subreg begins at bit position 0.  */
  if (outer_bytes > inner_bytes)
    {
      gcc_checking_assert (lsb_shift == 0);
      return 0;
    }

  gcc_assert (lsb_shift % BITS_PER_UNIT == 0);
  unsigned int lower_bytes = lsb_shift / BITS_PER_UNIT;
  unsigned int upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
  if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
    return upper_bytes;
  else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
    return lower_bytes;
  else
    {
      unsigned int lower_word_part = lower_bytes & -UNITS_PER_WORD;
      unsigned int upper_word_part = upper_bytes & -UNITS_PER_WORD;
      if (WORDS_BIG_ENDIAN)
	return upper_word_part + (lower_bytes - lower_word_part);
      else
	return lower_word_part + (upper_bytes - upper_word_part);
    }
}
/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.

   Rather than considering one particular inner register (and thus one
   particular "outer" register) in isolation, this function really uses
   XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
   function does not check whether adding INFO->offset to XREGNO gives
   a valid hard register; even if INFO->offset + XREGNO is out of range,
   there might be another register of the same type that is in range.
   Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
   register, since that can depend on things like whether the final
   register number is even or odd.  Callers that want to check whether
   this particular subreg can be replaced by a simple (reg ...) should
   use simplify_subreg_regno.  */

void
subreg_get_info (unsigned int xregno, machine_mode xmode,
		 unsigned int offset, machine_mode ymode,
		 struct subreg_info *info)
{
  unsigned int nregs_xmode, nregs_ymode;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  unsigned int xsize = GET_MODE_SIZE (xmode);
  unsigned int ysize = GET_MODE_SIZE (ymode);
  bool rknown = false;

  /* If the register representation of a non-scalar mode has holes in it,
     we expect the scalar units to be concatenated together, with the holes
     distributed evenly among the scalar units.  Each scalar unit must occupy
     at least one register.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      unsigned int nunits = GET_MODE_NUNITS (xmode);
      machine_mode xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
		  == (nunits
		      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs[xregno][xmode]
		  == hard_regno_nregs[xregno][xmode_unit] * nunits);

      /* You can only ask for a SUBREG of a value with holes in the middle
	 if you don't cross the holes.  (Such a SUBREG should be done by
	 picking a different register class, or doing it in memory if
	 necessary.)  An example of a value with holes is XCmode on 32-bit
	 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
	 3 for each part, but in memory it's two 128-bit parts.
	 Padding is assumed to be at the end (not necessarily the 'high part')
	 of each unit.  */
      if ((offset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
	  && (offset / GET_MODE_SIZE (xmode_unit)
	      != ((offset + ysize - 1) / GET_MODE_SIZE (xmode_unit))))
	{
	  info->representable_p = false;
	  rknown = true;
	}
    }
  else
    nregs_xmode = hard_regno_nregs[xregno][xmode];

  nregs_ymode = hard_regno_nregs[xregno][ymode];

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown && offset == 0 && ysize > xsize)
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
	 actual hard registers than the original register, we must
	 return a negative offset so that we find the proper highpart
	 of the register.

	 We assume that the ordering of registers within a multi-register
	 value has a consistent endianness: if bytes and register words
	 have different endianness, the hard registers that make up a
	 multi-register value must be at least word-sized.  */
      if (REG_WORDS_BIG_ENDIAN)
	info->offset = (int) nregs_xmode - (int) nregs_ymode;
      else
	info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && (xsize % nregs_xmode) == 0
      && (ysize % nregs_ymode) == 0)
    {
      int regsize_xmode = xsize / nregs_xmode;
      int regsize_ymode = ysize / nregs_ymode;
      if (!rknown
	  && ((nregs_ymode > 1 && regsize_xmode > regsize_ymode)
	      || (nregs_xmode > 1 && regsize_ymode > regsize_xmode)))
	{
	  info->representable_p = false;
	  info->nregs = CEIL (ysize, regsize_xmode);
	  info->offset = offset / regsize_xmode;
	  rknown = true;
	}
      /* It's not valid to extract a subreg of mode YMODE at OFFSET that
	 would go outside of XMODE.  */
      if (!rknown && ysize + offset > xsize)
	{
	  info->representable_p = false;
	  info->nregs = nregs_ymode;
	  info->offset = offset / regsize_xmode;
	  rknown = true;
	}
      /* Quick exit for the simple and common case of extracting whole
	 subregisters from a multiregister value.  */
      /* ??? It would be better to integrate this into the code below,
	 if we can generalize the concept enough and figure out how
	 odd-sized modes can coexist with the other weird cases we support.  */
      if (!rknown
	  && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
	  && regsize_xmode == regsize_ymode
	  && (offset % regsize_ymode) == 0)
	{
	  info->representable_p = true;
	  info->nregs = nregs_ymode;
	  info->offset = offset / regsize_ymode;
	  gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
	  return;
	}
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
    {
      info->representable_p = true;
      rknown = true;

      if (offset == 0 || nregs_xmode == nregs_ymode)
	{
	  info->offset = 0;
	  info->nregs = nregs_ymode;
	  return;
	}
    }

  /* Set NUM_BLOCKS to the number of independently-representable YMODE
     values there are in (reg:XMODE XREGNO).  We can view the register
     as consisting of this number of independent "blocks", where each
     block occupies NREGS_YMODE registers and contains exactly one
     representable YMODE value.  */
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);
  unsigned int num_blocks = nregs_xmode / nregs_ymode;

  /* Calculate the number of bytes in each block.  This must always
     be exact, otherwise we don't know how to verify the constraint.
     These conditions may be relaxed but subreg_regno_offset would
     need to be redesigned.  */
  gcc_assert ((xsize % num_blocks) == 0);
  unsigned int bytes_per_block = xsize / num_blocks;

  /* Get the number of the first block that contains the subreg and the byte
     offset of the subreg from the start of that block.  */
  unsigned int block_number = offset / bytes_per_block;
  unsigned int subblock_offset = offset % bytes_per_block;

  if (!rknown)
    {
      /* Only the lowpart of each block is representable.  */
      info->representable_p
	= (subblock_offset
	   == subreg_size_lowpart_offset (ysize, bytes_per_block));
      rknown = true;
    }

  /* We assume that the ordering of registers within a multi-register
     value has a consistent endianness: if bytes and register words
     have different endianness, the hard registers that make up a
     multi-register value must be at least word-sized.  */
  if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
    /* The block number we calculated above followed memory endianness.
       Convert it to register endianness by counting back from the end.
       (Note that, because of the assumption above, each block must be
       at least word-sized.)  */
    info->offset = (num_blocks - block_number - 1) * nregs_ymode;
  else
    info->offset = block_number * nregs_ymode;
  info->nregs = nregs_ymode;
}
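
/* Editorial worked example (not from the GCC sources).  Let XMODE be a
   16-byte TImode held in four 4-byte registers and YMODE an 8-byte DImode
   held in two, on a little-endian target.  OFFSET 8 hits the "whole
   subregisters" quick exit: representable, INFO->offset == 2,
   INFO->nregs == 2.  OFFSET 2, by contrast, is not a multiple of the
   per-register size, so control reaches the block analysis:
   NUM_BLOCKS == 2, BYTES_PER_BLOCK == 8, BLOCK_NUMBER == 0 and
   SUBBLOCK_OFFSET == 2, which is not the lowpart of its block, so
   INFO->representable_p comes back false.  */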
/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */
unsigned int
subreg_regno_offset (unsigned int xregno, machine_mode xmode,
		     unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}
/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */
bool
subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
			       unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}
/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
		       unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

#ifdef CANNOT_CHANGE_MODE_CLASS
  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
      /* We can use mode change in LRA for some transformations.  */
      && ! lra_in_progress)
    return -1;
#endif

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && xregno == FRAME_POINTER_REGNUM)
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM
      /* We should convert hard stack register in LRA if it is
	 possible.  */
      && ! lra_in_progress)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how complex FP arguments are passed
     on IA-64 and should be fixed.  See PR target/49226.  */
  if (!HARD_REGNO_MODE_OK (yregno, ymode)
      && HARD_REGNO_MODE_OK (xregno, xmode))
    return -1;

  return (int) yregno;
}
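
/* Editorial usage sketch (not from the GCC sources): rewriting
   (subreg:YMODE (reg:XMODE XREGNO) OFFSET) as a plain register when the
   answer is nonnegative.

     int yregno = simplify_subreg_regno (xregno, xmode, offset, ymode);
     if (yregno >= 0)
       x = gen_rtx_REG (ymode, yregno);

   XREGNO, XMODE, OFFSET and YMODE stand for the subreg's fields.  */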
/* Return the final regno that a subreg expression refers to.  */
unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
				     GET_MODE (subreg),
				     SUBREG_BYTE (x),
				     GET_MODE (x));
  return ret;
}
/* Return the number of registers that a subreg expression refers
   to.  */
unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}
/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */

unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
		   &info);
  return info.nregs;
}
struct parms_set_data
{
  HARD_REG_SET regs;
  int nregs;
};

/* Helper function for noticing stores to parameter registers.  */
static void
parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct parms_set_data *const d = (struct parms_set_data *) data;
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
      && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
    {
      CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
      d->nregs--;
    }
}
/* Look backward for first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */
rtx_insn *
find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
{
  struct parms_set_data parm;
  rtx p;
  rtx_insn *before, *first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
	&& REG_P (XEXP (XEXP (p, 0), 0))
	&& !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
      {
	gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

	/* We only care about registers which can hold function
	   arguments.  */
	if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
	  continue;

	SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
	parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
	 another.  Stop in that case.  */
      if (CALL_P (before))
	break;

      /* Our caller needs to either ensure that we will find all sets
	 (in case code has not been optimized yet), or take care
	 for possible labels in a way by setting boundary to the preceding
	 CODE_LABEL.  */
      if (LABEL_P (before))
	{
	  gcc_assert (before == boundary);
	  break;
	}

      if (INSN_P (before))
	{
	  int nregs_old = parm.nregs;
	  note_stores (PATTERN (before), parms_set, &parm);
	  /* If we found something that did not set a parameter reg,
	     we're done.  Do not keep going, as that might result
	     in hoisting an insn before the setting of a pseudo
	     that is used by the hoisted insn.  */
	  if (nregs_old != parm.nregs)
	    first_set = before;
	  else
	    break;
	}
    }
  return first_set;
}
/* Return true if we should avoid inserting code between INSN and preceding
   call instruction.  */

bool
keep_with_call_p (const rtx_insn *insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
	  && fixed_regs[REGNO (SET_DEST (set))]
	  && general_operand (SET_SRC (set), VOIDmode))
	return true;
      if (REG_P (SET_SRC (set))
	  && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
	  && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	return true;
      /* There may be a stack pop just after the call and before the store
	 of the return register.  Search for the actual store when deciding
	 if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
	{
	  /* This CONST_CAST is okay because next_nonnote_insn just
	     returns its argument and we assign it to a const_rtx
	     variable.  */
	  const rtx_insn *i2
	    = next_nonnote_insn (const_cast<rtx_insn *> (insn));
	  if (i2 && keep_with_call_p (i2))
	    return true;
	}
    }
  return false;
}
/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);
  rtx_jump_table_data *table;

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &table))
    {
      rtvec vec = table->get_labels ();
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
	if (XEXP (RTVEC_ELT (vec, i), 0) == label)
	  return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}
/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   X appears as operand OPNO in an expression with code OUTER_CODE.
   SPEED specifies whether costs optimized for speed or size should
   be returned.  */

int
rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
	  int opno, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;
  int factor;

  if (x == 0)
    return 0;

  if (GET_MODE (x) != VOIDmode)
    mode = GET_MODE (x);

  /* A size N times larger than UNITS_PER_WORD likely needs N times as
     many insns, taking N times as long.  */
  factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
  if (factor == 0)
    factor = 1;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      /* Multiplication has time-complexity O(N*N), where N is the
	 number of units (translated from digits) when using
	 schoolbook long multiplication.  */
      total = factor * factor * COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      /* Similarly, complexity for schoolbook long division.  */
      total = factor * factor * COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    case SET:
      /* A SET doesn't have a mode, so let's look at the SET_DEST to get
	 the mode for the factor.  */
      mode = GET_MODE (SET_DEST (x));
      factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
      if (factor == 0)
	factor = 1;
      /* FALLTHRU */
    default:
      total = factor * COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
	 the mode, the more expensive it is.  */
      if (! MODES_TIEABLE_P (mode, GET_MODE (SUBREG_REG (x))))
	return COSTS_N_INSNS (2 + factor);
      break;

    case TRUNCATE:
      /* If we can tie these modes, make this cheap.  */
      if (MODES_TIEABLE_P (mode, GET_MODE (XEXP (x, 0))))
	{
	  total = 0;
	  break;
	}
      /* FALLTHRU */

    default:
      if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
	return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), mode, code, i, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);

  return total;
}
/* Fill in the structure C with information about both speed and size rtx
   costs for X, which is operand OPNO in an expression with code OUTER.  */

void
get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
		   struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, mode, outer, opno, true);
  c->size = rtx_cost (x, mode, outer, opno, false);
}
/* Return cost of address expression X.
   Expect that X is a properly formed address reference.

   The SPEED parameter specifies whether costs optimized for speed or size
   should be returned.  */

int
address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for cost of various unusual addresses, such as operands
     of push instruction.  It is not worthwhile to complicate writing
     of the target hook by such cases.  */

  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, mode, as, speed);
}
/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
{
  return rtx_cost (x, Pmode, MEM, 0, speed);
}
unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, machine_mode mode)
{
  return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
}

unsigned int
num_sign_bit_copies (const_rtx x, machine_mode mode)
{
  return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
}

/* Return true if nonzero_bits1 might recurse into both operands
   of X.  */

static inline bool
nonzero_bits_binary_arith_p (const_rtx x)
{
  if (!ARITHMETIC_P (x))
    return false;
  switch (GET_CODE (x))
    {
    case AND:
    case XOR:
    case IOR:
    case UMIN:
    case UMAX:
    case SMIN:
    case SMAX:
    case PLUS:
    case MINUS:
    case MULT:
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      return true;
    default:
      return false;
    }
}

/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, machine_mode mode, const_rtx known_x,
		     machine_mode known_mode,
		     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (nonzero_bits_binary_arith_p (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));

      /* Check the second level.  */
      if (nonzero_bits_binary_arith_p (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return nonzero_bits1 (x, mode, x1, mode,
			      cached_nonzero_bits (x1, mode, known_x,
						   known_mode, known_ret));

      if (nonzero_bits_binary_arith_p (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}
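
/* Editorial note (not from the GCC sources) on why this caching matters.
   For a chain like

     (plus (plus r r) (plus r r))

   a naive recursion would evaluate each shared operand once per parent,
   doubling the work at every level, so N nested levels cost O(2^N).  By
   spotting that the two operands coincide (or that one operand reappears
   one level down) and passing the precomputed KNOWN_RET, the walk visits
   each distinct subexpression only a constant number of times.  */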
/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior

/* Given an expression, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
   an arithmetic operation, we can do better.  */

static unsigned HOST_WIDE_INT
nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
	       machine_mode known_mode,
	       unsigned HOST_WIDE_INT known_ret)
{
  unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
  unsigned HOST_WIDE_INT inner_nz;
  enum rtx_code code;
  machine_mode inner_mode;
  unsigned int mode_width = GET_MODE_PRECISION (mode);

  /* For floating-point and vector values, assume all bits are needed.  */
  if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return nonzero;

  /* If X is wider than MODE, use its mode instead.  */
  if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
    {
      mode = GET_MODE (x);
      nonzero = GET_MODE_MASK (mode);
      mode_width = GET_MODE_PRECISION (mode);
    }

  if (mode_width > HOST_BITS_PER_WIDE_INT)
    /* Our only callers in this case look for single bit values.  So
       just return the mode mask.  Those tests will then be false.  */
    return nonzero;

  /* If MODE is wider than X, but both are a single word for both the host
     and target machines, we can compute this from which bits of the
     object might be nonzero in its own mode, taking into account the fact
     that on many CISC machines, accessing an object in a wider mode
     causes the high-order bits to become undefined.  So they are
     not known to be zero.  */

  if (!WORD_REGISTER_OPERATIONS
      && GET_MODE (x) != VOIDmode
      && GET_MODE (x) != mode
      && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
      && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
      && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
    {
      nonzero &= cached_nonzero_bits (x, GET_MODE (x),
				      known_x, known_mode, known_ret);
      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
      return nonzero;
    }

  /* Please keep nonzero_bits_binary_arith_p above in sync with
     the code in the switch below.  */
  code = GET_CODE (x);
  switch (code)
    {
    case REG:
#if defined(POINTERS_EXTEND_UNSIGNED)
      /* If pointers extend unsigned and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be zero.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
	  && REG_POINTER (x)
	  && !targetm.have_ptr_extend ())
	nonzero &= GET_MODE_MASK (ptr_mode);
#endif

      /* Include declared information about alignment of pointers.  */
      /* ??? We don't properly preserve REG_POINTER changes across
	 pointer-to-integer casts, so we can't trust it except for
	 things that we know must be pointers.  See execute/960116-1.c.  */
      if ((x == stack_pointer_rtx
	   || x == frame_pointer_rtx
	   || x == arg_pointer_rtx)
	  && REGNO_POINTER_ALIGN (REGNO (x)))
	{
	  unsigned HOST_WIDE_INT alignment
	    = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;

#ifdef PUSH_ROUNDING
	  /* If PUSH_ROUNDING is defined, it is possible for the
	     stack to be momentarily aligned only to that amount,
	     so we pick the least alignment.  */
	  if (x == stack_pointer_rtx && PUSH_ARGS)
	    alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
			     alignment);
#endif

	  nonzero &= ~(alignment - 1);
	}

      {
	unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
	rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
						  known_mode, known_ret,
						  &nonzero_for_hook);

	if (new_rtx)
	  nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
						   known_mode, known_ret);

	return nonzero_for_hook;
      }

    case CONST_INT:
      /* If X is negative in MODE, sign-extend the value.  */
      if (SHORT_IMMEDIATES_SIGN_EXTEND && INTVAL (x) > 0
	  && mode_width < BITS_PER_WORD
	  && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1)))
	     != 0)
	return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);

      return UINTVAL (x);

    case MEM:
      /* In many, if not most, RISC machines, reading a byte from memory
	 zeros the rest of the register.  Noticing that fact saves a lot
	 of extra zero-extends.  */
      if (load_extend_op (GET_MODE (x)) == ZERO_EXTEND)
	nonzero &= GET_MODE_MASK (GET_MODE (x));
      break;

    case EQ:  case NE:
    case UNEQ:  case LTGT:
    case GT:  case GTU:  case UNGT:
    case LT:  case LTU:  case UNLT:
    case GE:  case GEU:  case UNGE:
    case LE:  case LEU:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If this produces an integer result, we know which bits are set.
	 Code here used to clear bits outside the mode of X, but that is
	 now done above.  */
      /* Mind that MODE is the mode the caller wants to look at this
	 operation in, and not the actual operation mode.  We can wind
	 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
	 that describes the results of a vector compare.  */
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
	  && mode_width <= HOST_BITS_PER_WIDE_INT)
	nonzero = STORE_FLAG_VALUE;
      break;

    case NEG:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
	 and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
	  == GET_MODE_PRECISION (GET_MODE (x)))
	nonzero = 1;
#endif

      if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
	nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
      break;

    case ABS:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
	 and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
	  == GET_MODE_PRECISION (GET_MODE (x)))
	nonzero = 1;
#endif
      break;

    case TRUNCATE:
      nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
				       known_x, known_mode, known_ret)
		  & GET_MODE_MASK (mode));
      break;

    case ZERO_EXTEND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
	nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
      break;

    case SIGN_EXTEND:
      /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
	 Otherwise, show all the bits in the outer mode but not the inner
	 may be nonzero.  */
      inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
	{
	  inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
	  if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
	    inner_nz |= (GET_MODE_MASK (mode)
			 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
	}

      nonzero &= inner_nz;
      break;

    case AND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret)
		 & cached_nonzero_bits (XEXP (x, 1), mode,
					known_x, known_mode, known_ret);
      break;

    case XOR:   case IOR:
    case UMIN:  case UMAX:  case SMIN:  case SMAX:
      {
	unsigned HOST_WIDE_INT nonzero0
	  = cached_nonzero_bits (XEXP (x, 0), mode,
				 known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero0) != nonzero)
	  nonzero &= nonzero0
		     | cached_nonzero_bits (XEXP (x, 1), mode,
					    known_x, known_mode, known_ret);
      }
      break;

    case PLUS:  case MINUS:
    case MULT:
    case DIV:   case UDIV:
    case MOD:   case UMOD:
      /* We can apply the rules of arithmetic to compute the number of
	 high- and low-order zero bits of these operations.  We start by
	 computing the width (position of the highest-order nonzero bit)
	 and the number of low-order zero bits for each value.  */
      {
	unsigned HOST_WIDE_INT nz0
	  = cached_nonzero_bits (XEXP (x, 0), mode,
				 known_x, known_mode, known_ret);
	unsigned HOST_WIDE_INT nz1
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);
	int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
	int width0 = floor_log2 (nz0) + 1;
	int width1 = floor_log2 (nz1) + 1;
	int low0 = ctz_or_zero (nz0);
	int low1 = ctz_or_zero (nz1);
	unsigned HOST_WIDE_INT op0_maybe_minusp
	  = nz0 & (HOST_WIDE_INT_1U << sign_index);
	unsigned HOST_WIDE_INT op1_maybe_minusp
	  = nz1 & (HOST_WIDE_INT_1U << sign_index);
	unsigned int result_width = mode_width;
	int result_low = 0;

	switch (code)
	  {
	  case PLUS:
	    result_width = MAX (width0, width1) + 1;
	    result_low = MIN (low0, low1);
	    break;
	  case MINUS:
	    result_low = MIN (low0, low1);
	    break;
	  case MULT:
	    result_width = width0 + width1;
	    result_low = low0 + low1;
	    break;
	  case DIV:
	    if (width1 == 0)
	      break;
	    if (!op0_maybe_minusp && !op1_maybe_minusp)
	      result_width = width0;
	    break;
	  case UDIV:
	    if (width1 == 0)
	      break;
	    result_width = width0;
	    break;
	  case MOD:
	    if (width1 == 0)
	      break;
	    if (!op0_maybe_minusp && !op1_maybe_minusp)
	      result_width = MIN (width0, width1);
	    result_low = MIN (low0, low1);
	    break;
	  case UMOD:
	    if (width1 == 0)
	      break;
	    result_width = MIN (width0, width1);
	    result_low = MIN (low0, low1);
	    break;
	  default:
	    gcc_unreachable ();
	  }

	if (result_width < mode_width)
	  nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;

	if (result_low > 0)
	  nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
      }
      break;

    case ZERO_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
	nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
      break;

    case SUBREG:
      /* If this is a SUBREG formed for a promoted variable that has
	 been zero-extended, we know that at least the high-order bits
	 are zero, though others might be too.  */
      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
	nonzero = GET_MODE_MASK (GET_MODE (x))
		  & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
					 known_x, known_mode, known_ret);

      /* If the inner mode is a single word for both the host and target
	 machines, we can compute this from which bits of the inner
	 object might be nonzero.  */
      inner_mode = GET_MODE (SUBREG_REG (x));
      if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT)
	{
	  nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
					  known_x, known_mode, known_ret);

	  /* On many CISC machines, accessing an object in a wider mode
	     causes the high-order bits to become undefined.  So they are
	     not known to be zero.  */
	  rtx_code extend_op;
	  if ((!WORD_REGISTER_OPERATIONS
	       /* If this is a typical RISC machine, we only have to worry
		  about the way loads are extended.  */
	       || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
		   ? val_signbit_known_set_p (inner_mode, nonzero)
		   : extend_op != ZERO_EXTEND)
	       || (!MEM_P (SUBREG_REG (x)) && !REG_P (SUBREG_REG (x))))
	      && GET_MODE_PRECISION (GET_MODE (x))
		  > GET_MODE_PRECISION (inner_mode))
	    nonzero
	      |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
	}
      break;

    case ASHIFTRT:
    case LSHIFTRT:
    case ASHIFT:
    case ROTATE:
      /* The nonzero bits are in two classes: any bits within MODE
	 that aren't in GET_MODE (x) are always significant.  The rest of the
	 nonzero bits are those that are significant in the operand of
	 the shift when shifted the appropriate number of bits.  This
	 shows that high-order bits are cleared by the right shift and
	 low-order bits by left shifts.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
	{
	  machine_mode inner_mode = GET_MODE (x);
	  unsigned int width = GET_MODE_PRECISION (inner_mode);
	  int count = INTVAL (XEXP (x, 1));
	  unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
	  unsigned HOST_WIDE_INT op_nonzero
	    = cached_nonzero_bits (XEXP (x, 0), mode,
				   known_x, known_mode, known_ret);
	  unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
	  unsigned HOST_WIDE_INT outer = 0;

	  if (mode_width > width)
	    outer = (op_nonzero & nonzero & ~mode_mask);

	  if (code == LSHIFTRT)
	    inner >>= count;
	  else if (code == ASHIFTRT)
	    {
	      inner >>= count;

	      /* If the sign bit may have been nonzero before the shift, we
		 need to mark all the places it could have been copied to
		 by the shift as possibly nonzero.  */
	      if (inner & (HOST_WIDE_INT_1U << (width - 1 - count)))
		inner |= ((HOST_WIDE_INT_1U << count) - 1)
			 << (width - count);
	    }
	  else if (code == ASHIFT)
	    inner <<= count;
	  else
	    inner = ((inner << (count % width)
		      | (inner >> (width - (count % width)))) & mode_mask);

	  nonzero &= (outer | inner);
	}
      break;

    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CLRSB:
      /* This is at most the number of bits in the mode minus 1.  */
      nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
	unsigned HOST_WIDE_INT nonzero_true
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero_true) != nonzero)
	  nonzero &= nonzero_true
		     | cached_nonzero_bits (XEXP (x, 2), mode,
					    known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}
/* See the macro definition above.  */
#undef cached_num_sign_bit_copies


/* Return true if num_sign_bit_copies1 might recurse into both operands
   of X.  */

static inline bool
num_sign_bit_copies_binary_arith_p (const_rtx x)
{
  if (!ARITHMETIC_P (x))
    return false;
  switch (GET_CODE (x))
    {
    case IOR:
    case AND:
    case XOR:
    case SMIN:
    case SMAX:
    case UMIN:
    case UMAX:
    case PLUS:
    case MINUS:
    case MULT:
      return true;
    default:
      return false;
    }
}
/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, machine_mode mode, const_rtx known_x,
			    machine_mode known_mode,
			    unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (num_sign_bit_copies_binary_arith_p (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));

      /* Check the second level.  */
      if (num_sign_bit_copies_binary_arith_p (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x1, mode,
				cached_num_sign_bit_copies (x1, mode, known_x,
							    known_mode,
							    known_ret));

      if (num_sign_bit_copies_binary_arith_p (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}
4860 /* Return the number of bits at the high-order end of X that are known to
4861 be equal to the sign bit. X will be used in mode MODE; if MODE is
4862 VOIDmode, X will be used in its own mode. The returned value will always
4863 be between 1 and the number of bits in MODE. */
4866 num_sign_bit_copies1 (const_rtx x
, machine_mode mode
, const_rtx known_x
,
4867 machine_mode known_mode
,
4868 unsigned int known_ret
)
4870 enum rtx_code code
= GET_CODE (x
);
4871 machine_mode inner_mode
;
4872 int num0
, num1
, result
;
4873 unsigned HOST_WIDE_INT nonzero
;
4875 /* If we weren't given a mode, use the mode of X. If the mode is still
4876 VOIDmode, we don't know anything. Likewise if one of the modes is
4879 if (mode
== VOIDmode
)
4880 mode
= GET_MODE (x
);
4882 gcc_checking_assert (mode
!= BLKmode
);
4884 if (mode
== VOIDmode
|| FLOAT_MODE_P (mode
) || FLOAT_MODE_P (GET_MODE (x
))
4885 || VECTOR_MODE_P (GET_MODE (x
)) || VECTOR_MODE_P (mode
))
  /* For a smaller mode, just ignore the high bits.  */
  unsigned int bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
    {
      num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
					 known_x, known_mode, known_ret);
      return
	MAX (1, num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
    }

  if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
    {
      /* If this machine does not do all register operations on the entire
	 register and MODE is wider than the mode of X, we can say nothing
	 at all about the high-order bits.  */
      if (!WORD_REGISTER_OPERATIONS)
	return 1;

      /* Likewise on machines that do, if the mode of the object is smaller
	 than a word and loads of that size don't sign extend, we can say
	 nothing about the high order bits.  */
      if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
	  && load_extend_op (GET_MODE (x)) != SIGN_EXTEND)
	return 1;
    }

  /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
     the code in the switch below.  */
  switch (code)
    {
    case REG:

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* If pointers extend signed and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be sign bit copies.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
	  && mode == Pmode && REG_POINTER (x)
	  && !targetm.have_ptr_extend ())
	return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
#endif

      {
	unsigned int copies_for_hook = 1, copies = 1;
	rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
							 known_mode, known_ret,
							 &copies_for_hook);

	if (new_rtx)
	  copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
					       known_mode, known_ret);

	if (copies > 1 || copies_for_hook > 1)
	  return MAX (copies, copies_for_hook);

	/* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
      }
      break;
    case MEM:
      /* Some RISC machines sign-extend all loads of smaller than a word.  */
      if (load_extend_op (GET_MODE (x)) == SIGN_EXTEND)
	return MAX (1, ((int) bitwidth
			- (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
      break;
    case CONST_INT:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
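
      /* Worked example: in an 8-bit mode, the constant -16 masks to 0xf0.
	 The sign bit is set, so we complement to 0x0f; floor_log2 (0x0f)
	 is 3, giving 8 - 3 - 1 = 4, matching the four leading one bits
	 of 11110000.  */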
    case SUBREG:
      /* If this is a SUBREG for a promoted object that is sign-extended
	 and we are looking at it in a wider mode, we know that at least the
	 high-order bits are known to be sign bit copies.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					     known_x, known_mode, known_ret);
	  return MAX ((int) bitwidth
		      - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
		      num0);
	}

      /* For a smaller object, just ignore the high bits.  */
      inner_mode = GET_MODE (SUBREG_REG (x));
      if (bitwidth <= GET_MODE_PRECISION (inner_mode))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
					     known_x, known_mode, known_ret);
	  return
	    MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode) - bitwidth));
	}

      /* For paradoxical SUBREGs on machines where all register operations
	 affect the entire register, just look inside.  Note that we are
	 passing MODE to the recursive call, so the number of sign bit copies
	 will remain relative to that mode, not the inner mode.  */

      /* This works only if loads sign extend.  Otherwise, if we get a
	 reload for the inner part, it may be loaded from the stack, and
	 then we lose all sign bit copies that existed before the store
	 to the stack.  */

      if (WORD_REGISTER_OPERATIONS
	  && load_extend_op (inner_mode) == SIGN_EXTEND
	  && paradoxical_subreg_p (x)
	  && (MEM_P (SUBREG_REG (x)) || REG_P (SUBREG_REG (x))))
	return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					   known_x, known_mode, known_ret);
      break;
    case SIGN_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1)))
	return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
      break;

    case SIGN_EXTEND:
      return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
	      + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
					    known_x, known_mode, known_ret));
    case TRUNCATE:
      /* For a smaller object, just ignore the high bits.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
					 known_x, known_mode, known_ret);
      return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
				    - bitwidth)));

    case NOT:
      return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
    case ROTATE:	case ROTATERT:
      /* If we are rotating left by a number of bits less than the number
	 of sign bit copies, we can just subtract that amount from the
	 number.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < (int) bitwidth)
	{
	  num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					     known_x, known_mode, known_ret);
	  return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
				 : (int) bitwidth - INTVAL (XEXP (x, 1))));
	}
      break;
    case NEG:
      /* In general, this subtracts one sign bit copy.  But if the value
	 is known to be positive, the number of sign bit copies is the
	 same as that of the input.  Finally, if the input has just one bit
	 that might be nonzero, all the bits are copies of the sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return num0 > 1 ? num0 - 1 : 1;

      nonzero = nonzero_bits (XEXP (x, 0), mode);
      if (nonzero == 1)
	return bitwidth;

      if (num0 > 1
	  && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
	num0--;

      return num0;
    case IOR:   case AND:   case XOR:
    case SMIN:  case SMAX:  case UMIN:  case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
	 MIN and MAX operations always return one of the operands.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      /* If num1 is clearing some of the top bits then regardless of
	 the other term, we are guaranteed to have at least that many
	 high-order zero bits.  */
      if (code == AND
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
	return num1;

      /* Similarly for IOR when setting high-order bits.  */
      if (code == IOR
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return num1;

      return MIN (num0, num1);
    case PLUS:  case MINUS:
      /* For addition and subtraction, we can have a 1-bit carry.  However,
	 if we are subtracting 1 from a positive number, there will not
	 be such a carry.  Furthermore, if the positive number is known to
	 be 0 or 1, we know the result is either -1 or 0.  */

      if (code == PLUS && XEXP (x, 1) == constm1_rtx
	  && bitwidth <= HOST_BITS_PER_WIDE_INT)
	{
	  nonzero = nonzero_bits (XEXP (x, 0), mode);
	  if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
	    return (nonzero == 1 || nonzero == 0 ? bitwidth
		    : bitwidth - floor_log2 (nonzero) - 1);
	}

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      result = MAX (1, MIN (num0, num1) - 1);

      return result;
    case MULT:
      /* The number of bits of the product is the sum of the number of
	 bits of both terms.  However, unless one of the terms is known
	 to be positive, we must allow for an additional bit since negating
	 a negative number can remove one sign bit copy.  */

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
      if (result > 0
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (((nonzero_bits (XEXP (x, 0), mode)
		    & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
		  && ((nonzero_bits (XEXP (x, 1), mode)
		       & (HOST_WIDE_INT_1U << (bitwidth - 1)))
		      != 0))))
	result--;

      return MAX (1, result);
    case UDIV:
      /* The result must be <= the first operand.  If the first operand
	 has the high bit set, we know nothing about the number of sign
	 bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 0), mode)
		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);

    case UMOD:
      /* The result must be <= the second operand.  If the second operand
	 has (or just might have) the high bit set, we know nothing about
	 the number of sign bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 1), mode)
		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
    case DIV:
      /* Similar to unsigned division, except that we have to worry about
	 the case where the divisor is negative, in which case we have
	 to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
	result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
	result--;

      return result;
    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
	 sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;
    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
	  || INTVAL (XEXP (x, 1)) < 0
	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
	  || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
	return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));
    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
					 known_x, known_mode, known_ret);
      return MIN (num0, num1);
    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}
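
/* A minimal standalone sketch of the fallback rule above, kept out of
   the build; demo_floor_log2 is a stand-in for GCC's floor_log2 and is
   not part of the original sources.  */
#if 0
#include <stdio.h>

/* Position of the highest set bit, or -1 for zero.  */
static int
demo_floor_log2 (unsigned long long x)
{
  int pos = -1;
  while (x)
    {
      x >>= 1;
      pos++;
    }
  return pos;
}

int
main (void)
{
  unsigned int bitwidth = 8;
  unsigned long long nonzero = 0x0f;	/* High four bits known zero.  */
  unsigned int copies = (nonzero == 0
			 ? bitwidth
			 : bitwidth - demo_floor_log2 (nonzero) - 1);
  printf ("%u sign bit copies\n", copies);	/* Prints 4.  */
  return 0;
}
#endif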
/* Calculate the rtx_cost of a single instruction.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
insn_rtx_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.
     We can't use single_set since we only have the pattern.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      if (set)
		return 0;
	      set = x;
	    }
	}
      if (!set)
	return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
/* Return an estimate of the cost of computing SEQ.  */

unsigned
seq_cost (const rtx_insn *seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
	cost += set_rtx_cost (set, speed);
      else
	cost++;
    }

  return cost;
}
/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonicalizing
   it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */

rtx
canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
			rtx_insn **earliest,
			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx_insn *prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;
  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c does.  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	 && op1 == CONST0_RTX (GET_MODE (op0))
	 && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || !NONJUMP_INSN_P (prev)
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}
      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (!REG_P (op0))
	break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
	  || !NONJUMP_INSN_P (prev)
	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
	  /* In cfglayout mode, there do not have to be labels at the
	     beginning of a block, or jumps at the end, so the previous
	     conditions would not stop us when we reach bb boundary.  */
	  || BLOCK_FOR_INSN (prev) != bb)
	break;

      set = set_of (op0, prev);

      if (set
	  && (GET_CODE (set) != SET
	      || !rtx_equal_p (SET_DEST (set), op0)))
	break;
      /* If this is setting OP0, get what it sets it to if it looks
	 relevant.  */
      if (set)
	{
	  machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* ??? We may not combine comparisons done in a CCmode with
	     comparisons not done in a CCmode.  This is to aid targets
	     like Alpha that have an IEEE compliant EQ instruction, and
	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
	     actually artificial, simply to prevent the combination, but
	     should not affect other platforms.

	     However, we must allow VOIDmode comparisons to match either
	     CCmode or non-CCmode comparison, because some ports have
	     modeless comparisons inside branch patterns.

	     ??? This mode check should perhaps look more like the mode check
	     in simplify_comparison in combine.  */
	  if (((GET_MODE_CLASS (mode) == MODE_CC)
	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
	      && mode != VOIDmode
	      && inner_mode != VOIDmode)
	    break;
	  if (GET_CODE (SET_SRC (set)) == COMPARE
	      || (((code == NE
		    || (code == LT
			&& val_signbit_known_set_p (inner_mode,
						    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		    || (code == LT
			&& SCALAR_FLOAT_MODE_P (inner_mode)
			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			    REAL_VALUE_NEGATIVE (fsfv)))
#endif
		    ))
		  && COMPARISON_P (SET_SRC (set))))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && val_signbit_known_set_p (inner_mode,
						     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     ))
		   && COMPARISON_P (SET_SRC (set)))
	    {
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else if ((code == EQ || code == NE)
		   && GET_CODE (SET_SRC (set)) == XOR)
	    /* Handle sequences like:

	       (set op0 (xor X Y))
	       ...(eq|ne op0 (const_int 0))...

	       in which case:

	       (eq op0 (const_int 0)) reduces to (eq X Y)
	       (ne op0 (const_int 0)) reduces to (ne X Y)

	       This is the form used by MIPS16, for example.  */
	    x = SET_SRC (set);
	}
      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;

      if (x)
	{
	  /* If the caller is expecting the condition to be valid at INSN,
	     make sure X doesn't change before INSN.  */
	  if (valid_at_insn_p)
	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
	      break;
	  if (COMPARISON_P (x))
	    code = GET_CODE (x);
	  if (reverse_code)
	    {
	      code = reversed_comparison_code (x, prev);
	      if (code == UNKNOWN)
		return 0;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }
  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;
  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
      && CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
	{
	case LE:
	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
	    code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
	case GE:
	  if ((const_val & max_val)
	      != (HOST_WIDE_INT_1U
		  << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
	    code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
	  break;

	case LEU:
	  if (uconst_val < max_val)
	    code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
	  break;

	default:
	  break;
	}
    }

  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
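
/* For example, rule (4) rewrites (le (reg X) (const_int 4)) as
   (lt (reg X) (const_int 5)) and (geu (reg X) (const_int 4)) as
   (gtu (reg X) (const_int 3)), provided the adjusted constant does not
   wrap around in the mode of the first operand.  */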
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
	       int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
				 allow_cc_mode, valid_at_insn_p);
}
/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  machine_mode mode, in_mode;

  for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
       in_mode = GET_MODE_WIDER_MODE (mode))
    for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
	 mode = GET_MODE_WIDER_MODE (mode))
      {
	machine_mode i;

	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
	   extends to the next widest mode.  */
	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
		    || GET_MODE_WIDER_MODE (mode) == in_mode);

	/* We are in in_mode.  Count how many bits outside of mode
	   have to be copies of the sign-bit.  */
	for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
	  {
	    machine_mode wider = GET_MODE_WIDER_MODE (i);

	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
		/* We can only check sign-bit copies starting from the
		   top-bit.  In order to be able to check the bits we
		   have already seen we pretend that subsequent bits
		   have to be sign-bit copies too.  */
		|| num_sign_bit_copies_in_rep [in_mode][mode])
	      num_sign_bit_copies_in_rep [in_mode][mode]
		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
	  }
      }
}
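
/* For example, on a target whose TARGET_MODE_REP_EXTENDED hook returns
   SIGN_EXTEND at each step from QImode up to SImode, the loops above set
   num_sign_bit_copies_in_rep[SImode][QImode] to 32 - 8 = 24: every bit
   above the QImode value must be a copy of its sign bit.  */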
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}
/* Return true if RTX code CODE has a single sequence of zero or more
   "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
   entry in that case.  */

static bool
setup_reg_subrtx_bounds (unsigned int code)
{
  const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
  unsigned int i = 0;
  for (; format[i] != 'e'; ++i)
    {
      if (!format[i])
	/* No subrtxes.  Leave start and count as 0.  */
	return true;
      if (format[i] == 'E' || format[i] == 'V')
	return false;
    }

  /* Record the sequence of 'e's.  */
  rtx_all_subrtx_bounds[code].start = i;
  do
    ++i;
  while (format[i] == 'e');
  rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
  /* rtl-iter.h relies on this.  */
  gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);

  for (; format[i]; ++i)
    if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
      return false;

  return true;
}
/* Initialize rtx_all_subrtx_bounds.  */
void
init_rtlanal (void)
{
  unsigned int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      if (!setup_reg_subrtx_bounds (i))
	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
      if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
    }

  init_num_sign_bit_copies_in_rep ();
}
/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}
/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */

int
low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
	return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
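
/* For example, M == 0x3f yields exact_log2 (0x40) == 6, a six-bit field,
   while M == 0x50 yields exact_log2 (0x51) == -1 because the mask is not
   a contiguous run of low-order bits.  */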
/* Return the mode of MEM's address.  */

machine_mode
get_address_mode (rtx mem)
{
  machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return mode;
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}
/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.

   TODO: This function needs to be rewritten to work on any size
   integer.  */

void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
	{
	  /* In this case the CONST_INT holds both target words.
	     Extract the bits from it into two word-sized pieces.
	     Sign extend each half to HOST_WIDE_INT.  */
	  unsigned HOST_WIDE_INT low, high;
	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
	  unsigned bits_per_word = BITS_PER_WORD;

	  /* Set sign_bit to the most significant bit of a word.  */
	  sign_bit = 1;
	  sign_bit <<= bits_per_word - 1;

	  /* Set mask so that all bits of the word are set.  We could
	     have used 1 << BITS_PER_WORD instead of basing the
	     calculation on sign_bit.  However, on machines where
	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
	     compiler warning, even though the code would never be
	     executed.  */
	  mask = sign_bit << 1;
	  mask--;

	  /* Set sign_extend as any remaining bits.  */
	  sign_extend = ~mask;

	  /* Pick the lower word and sign-extend it.  */
	  low = INTVAL (value);
	  low &= mask;
	  if (low & sign_bit)
	    low |= sign_extend;

	  /* Pick the higher word, shifted to the least significant
	     bits, and sign-extend it.  */
	  high = INTVAL (value);
	  high >>= bits_per_word - 1;
	  high >>= 1;
	  high &= mask;
	  if (high & sign_bit)
	    high |= sign_extend;

	  /* Store the words in the target machine order.  */
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = GEN_INT (high);
	      *second = GEN_INT (low);
	    }
	  else
	    {
	      *first = GEN_INT (low);
	      *second = GEN_INT (high);
	    }
	}
      else
	{
	  /* The rule for using CONST_INT for a wider mode
	     is that we regard the value as signed.
	     So sign-extend it.  */
	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = high;
	      *second = value;
	    }
	  else
	    {
	      *first = value;
	      *second = high;
	    }
	}
    }
  else if (GET_CODE (value) == CONST_WIDE_INT)
    {
      /* All of this is scary code and needs to be converted to
	 properly work with any size integer.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	}
      else
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	}
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
	{
	  *first = const0_rtx;
	  *second = value;
	}
      else
	{
	  *first = value;
	  *second = const0_rtx;
	}
    }
  else if (GET_MODE (value) == VOIDmode
	   /* This is the old way we did CONST_DOUBLE integers.  */
	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
	 So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
	}
      else
	{
	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
	}
    }
  else
    {
      long l[2];

      /* Note, this converts the REAL_VALUE_TYPE to the target's
	 format, splits up the floating point double and outputs
	 exactly 32 bits of it into each of l[0] and l[1] --
	 not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);

      /* If 32 bits is an entire word for the target, but not for the host,
	 then sign-extend on the host so that the number will look the same
	 way on the host that it would on the target.  See for instance
	 simplify_unary_operation.  The #if is needed to avoid compiler
	 warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
	{
	  if (l[0] & ((long) 1 << 31))
	    l[0] |= ((unsigned long) (-1) << 32);
	  if (l[1] & ((long) 1 << 31))
	    l[1] |= ((unsigned long) (-1) << 32);
	}
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}
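
/* A minimal standalone sketch (kept out of the build) of the CONST_INT
   splitting above, assuming a 64-bit HOST_WIDE_INT, a 32-bit target word
   and little-endian word order; demo_split_double is not part of the
   original sources.  */
#if 0
#include <stdio.h>
#include <stdint.h>

static void
demo_split_double (int64_t value, int64_t *first, int64_t *second)
{
  unsigned int bits_per_word = 32;
  uint64_t sign_bit = (uint64_t) 1 << (bits_per_word - 1);
  uint64_t mask = (sign_bit << 1) - 1;

  /* Low word, sign-extended to 64 bits.  */
  int64_t low = value & mask;
  if (low & sign_bit)
    low |= ~mask;

  /* High word, shifted down in two steps (as above, to avoid shifting
     by the full word size) and sign-extended.  */
  int64_t high = (value >> (bits_per_word - 1)) >> 1;
  high &= mask;
  if (high & sign_bit)
    high |= ~mask;

  *first = low;		/* Little-endian word order.  */
  *second = high;
}

int
main (void)
{
  int64_t first, second;
  demo_split_double (0x123456789aLL, &first, &second);
  printf ("%llx %llx\n", (unsigned long long) first,
	  (unsigned long long) second);	/* Prints "3456789a 12".  */
  return 0;
}
#endif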
/* Return true if X is a sign_extract or zero_extract from the least
   significant bit.  */

static bool
lsb_bitfield_op_p (rtx x)
{
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
    {
      machine_mode mode = GET_MODE (XEXP (x, 0));
      HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));

      return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
    }
  return false;
}
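
/* For example, with !BITS_BIG_ENDIAN, (zero_extract (reg X) (const_int 8)
   (const_int 0)) extracts the low byte and satisfies this predicate; with
   BITS_BIG_ENDIAN the position would instead have to be
   GET_MODE_PRECISION (mode) - 8.  */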
/* Strip outer address "mutations" from LOC and return a pointer to the
   inner value.  If OUTER_CODE is nonnull, store the code of the innermost
   stripped expression there.

   "Mutations" either convert between modes or apply some kind of
   extension, truncation or alignment.  */

rtx *
strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
  for (;;)
    {
      enum rtx_code code = GET_CODE (*loc);
      if (GET_RTX_CLASS (code) == RTX_UNARY)
	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
	   used to convert between pointer sizes.  */
	loc = &XEXP (*loc, 0);
      else if (lsb_bitfield_op_p (*loc))
	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
	   acts as a combined truncation and extension.  */
	loc = &XEXP (*loc, 0);
      else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
	/* (and ... (const_int -X)) is used to align to X bytes.  */
	loc = &XEXP (*loc, 0);
      else if (code == SUBREG
	       && !OBJECT_P (SUBREG_REG (*loc))
	       && subreg_lowpart_p (*loc))
	/* (subreg (operator ...) ...) inside an address is used for mode
	   conversion too.  */
	loc = &SUBREG_REG (*loc);
      else
	{
	  if (outer_code)
	    *outer_code = code;
	  return loc;
	}
    }
}
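
/* For example, given (and (subreg:SI (plus:DI R1 R2) 0) (const_int -4))
   on a little-endian target, the loop strips the alignment AND and the
   lowpart SUBREG and returns a pointer to the inner (plus:DI R1 R2).  */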
/* Return true if CODE applies some kind of scale.  The scaled value is
   the first operand and the scale is the second.  */

static bool
binary_scale_code_p (enum rtx_code code)
{
  return (code == MULT
	  || code == ASHIFT
	  /* Needed by ARM targets.  */
	  || code == ASHIFTRT
	  || code == LSHIFTRT
	  || code == ROTATE
	  || code == ROTATERT);
}
/* If *INNER can be interpreted as a base, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_base_term (rtx *inner)
{
  if (GET_CODE (*inner) == LO_SUM)
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}
/* If *INNER can be interpreted as an index, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_index_term (rtx *inner)
{
  /* At present, only constant scales are allowed.  */
  if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}
/* Set the segment part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->segment);
  info->segment = loc;
  info->segment_term = inner;
}

/* Set the base part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_base (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->base);
  info->base = loc;
  info->base_term = inner;
}

/* Set the index part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_index (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->index);
  info->index = loc;
  info->index_term = inner;
}

/* Set the displacement part of address INFO to LOC, given that INNER
   is the constant term.  */

static void
set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->disp);
  info->disp = loc;
  info->disp_term = inner;
}
/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
   rest of INFO accordingly.  */

static void
decompose_incdec_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  /* These addresses are only valid when the size of the addressed
     value is known.  */
  gcc_checking_assert (info->mode != VOIDmode);
}
/* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
   of INFO accordingly.  */

static void
decompose_automod_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  rtx plus = XEXP (*info->inner, 1);
  gcc_assert (GET_CODE (plus) == PLUS);

  info->base_term2 = &XEXP (plus, 0);
  gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));

  rtx *step = &XEXP (plus, 1);
  rtx *inner_step = strip_address_mutations (step);
  if (CONSTANT_P (*inner_step))
    set_address_disp (info, step, inner_step);
  else
    set_address_index (info, step, inner_step);
}
/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}
/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, machine_mode mode, addr_space_t as,
	  enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
	 or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
	return base_p ? 1 : -1;
    }
  return 0;
}
/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
					ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
	set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
	set_address_segment (info, loc, inner);
      else
	{
	  /* The only other possibilities are a base or an index.  */
	  rtx *base_term = get_base_term (inner);
	  rtx *index_term = get_index_term (inner);
	  gcc_assert (base_term || index_term);
	  if (!base_term)
	    set_address_index (info, loc, index_term);
	  else if (!index_term)
	    set_address_base (info, loc, base_term);
	  else
	    {
	      gcc_assert (base_term == index_term);
	      ops[out] = loc;
	      inner_ops[out] = base_term;
	      ++out;
	    }
	}
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
	 the base.  If we were confident that another term was the base
	 or index, treat the remaining operand as the other kind.  */
      if (!info->base)
	set_address_base (info, ops[0], inner_ops[0]);
      else
	set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
		    GET_CODE (*ops[1]))
	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
		       GET_CODE (*ops[0])))
	{
	  set_address_base (info, ops[0], inner_ops[0]);
	  set_address_index (info, ops[1], inner_ops[1]);
	}
      else
	{
	  set_address_base (info, ops[1], inner_ops[1]);
	  set_address_index (info, ops[0], inner_ops[0]);
	}
    }
  else
    gcc_assert (out == 0);
}
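
/* For example, an address (plus (mult (reg I) (const_int 4)) (reg B))
   decomposes with *INFO->INDEX == (mult (reg I) (const_int 4)),
   *INFO->INDEX_TERM == (reg I) and *INFO->BASE == (reg B): the MULT can
   only be an index term, which forces the other operand to be the
   base.  */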
/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
		   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}
/* Describe address operand LOC in INFO.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}
/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
		     MEM_ADDR_SPACE (x), MEM);
}
/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
		     info->addr_outer_code);
}
/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}
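
/* For example, (mult (reg I) (const_int 4)) and
   (ashift (reg I) (const_int 2)) both give a scale of 4, while a bare
   (reg I) index gives 1.  */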
/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

static enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}
/* Return true if RTL X contains a SYMBOL_REF.  */

bool
contains_symbol_ref_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter))
      return true;

  return false;
}
/* Return true if RTL X contains a SYMBOL_REF or LABEL_REF.  */

bool
contains_symbolic_reference_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
      return true;

  return false;
}
6394 tls_referenced_p (const_rtx x
)
6396 if (!targetm
.have_tls
)
6399 subrtx_iterator::array_type array
;
6400 FOR_EACH_SUBRTX (iter
, array
, x
, ALL
)
6401 if (GET_CODE (*iter
) == SYMBOL_REF
&& SYMBOL_REF_TLS_MODEL (*iter
) != 0)