/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "rtl.h"
#include "insn-config.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "addresses.h"
#include "hard-reg-set.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, scalar_int_mode,
                                                   const_rtx, machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, scalar_int_mode,
                                             const_rtx, machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, scalar_int_mode,
                                                const_rtx, machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, scalar_int_mode,
                                          const_rtx, machine_mode,
                                          unsigned int);
rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep [MAX_MODE_INT + 1][MAX_MODE_INT + 1];
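
/* As an illustrative sketch (not a statement about any particular
   target): on a machine that keeps SImode values sign-extended in
   DImode registers, TARGET_MODE_REP_EXTENDED (SImode, DImode) would
   return SIGN_EXTEND, and truncating DImode to SImode is a pure mode
   switch only when the high-order DImode bits are already copies of
   the SImode sign bit; that required count is what this table caches.  */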
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  value_type *base,
                                                  size_t i, value_type x)
{
  if (base == array.stack)
    {
      if (i < LOCAL_ELEMS)
        {
          base[i] = x;
          return base;
        }
      gcc_checking_assert (i == LOCAL_ELEMS);
      /* A previous iteration might also have moved from the stack to the
         heap, in which case the heap array will already be big enough.  */
      if (vec_safe_length (array.heap) <= i)
        vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }
  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}
/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    value_type *base,
                                                    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
         we're likely to want most.  It also allows for the SEQUENCE
         hack below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
        if (format[i] == 'e')
          {
            value_type subx = T::get_value (x->u.fld[i].rt_rtx);
            if (__builtin_expect (end < LOCAL_ELEMS, true))
              base[end++] = subx;
            else
              base = add_single_to_queue (array, base, end++, subx);
          }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
        {
          value_type subx = T::get_value (x->u.fld[i].rt_rtx);
          if (__builtin_expect (end < LOCAL_ELEMS, true))
            base[end++] = subx;
          else
            base = add_single_to_queue (array, base, end++, subx);
        }
      else if (format[i] == 'E')
        {
          unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
          rtx *vec = x->u.fld[i].rt_rtvec->elem;
          if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
            for (unsigned int j = 0; j < length; j++)
              base[end++] = T::get_value (vec[j]);
          else
            for (unsigned int j = 0; j < length; j++)
              base = add_single_to_queue (array, base, end++,
                                          T::get_value (vec[j]));
          if (code == SEQUENCE && end == length)
            /* If the subrtxes of the sequence fill the entire array then
               we know that no other parts of a containing insn are queued.
               The caller is therefore iterating over the sequence as a
               PATTERN (...), so we also want the patterns of the
               subinstructions.  */
            for (unsigned int j = 0; j < length; j++)
              {
                typename T::rtx_type x = T::get_rtx (base[j]);
                if (INSN_P (x))
                  base[j] = T::get_value (PATTERN (x));
              }
        }
  return end - orig_end;
}
template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}
template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;
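
/* A typical use of these iterators, as in contains_mem_rtx_p below,
   looks roughly like:

     subrtx_iterator::array_type array;
     FOR_EACH_SUBRTX (iter, array, x, ALL)
       if (MEM_P (*iter))
         return true;

   The array supplies the worklist storage: the first LOCAL_ELEMS
   entries live on the stack, and add_single_to_queue spills to the
   heap vector only for unusually deep expressions.  */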
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return false;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
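
/* For example, both predicates return nonzero for a volatile MEM and
   zero for a MEM_READONLY_P reference through a stable address:
   rtx_unstable_p asks whether the value can change within a single
   execution, rtx_varies_p whether it can differ between executions.  */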
/* Compute an approximation for the offset between the register
   FROM and TO for the current function, as it was at the start
   of the function.  */

static poly_int64
get_initial_register_offset (int from, int to)
{
  static const struct elim_table_t
    {
      const int from;
      const int to;
    } table[] = ELIMINABLE_REGS;
  poly_int64 offset1, offset2;
  unsigned int i, j;

  if (to == from)
    return 0;

  /* It is not safe to call INITIAL_ELIMINATION_OFFSET
     before the reload pass.  We need to give at least
     an estimation for the resulting frame size.  */
  if (! reload_completed)
    {
      offset1 = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset1 = - offset1;
#endif
      if (to == STACK_POINTER_REGNUM)
        return offset1;
      else if (from == STACK_POINTER_REGNUM)
        return - offset1;
      else
        return 0;
    }

  for (i = 0; i < ARRAY_SIZE (table); i++)
    if (table[i].from == from)
      {
        if (table[i].to == to)
          {
            INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                        offset1);
            return offset1;
          }
        for (j = 0; j < ARRAY_SIZE (table); j++)
          {
            if (table[j].to == to
                && table[j].from == table[i].to)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return offset1 + offset2;
              }
            if (table[j].from == to
                && table[j].to == table[i].to)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return offset1 - offset2;
              }
          }
      }
    else if (table[i].to == from)
      {
        if (table[i].from == to)
          {
            INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                        offset1);
            return - offset1;
          }
        for (j = 0; j < ARRAY_SIZE (table); j++)
          {
            if (table[j].to == to
                && table[j].from == table[i].from)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return - offset1 + offset2;
              }
            if (table[j].from == to
                && table[j].to == table[i].from)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return - offset1 - offset2;
              }
          }
      }

  /* If the requested register combination was not found,
     try a different more simple combination.  */
  if (from == ARG_POINTER_REGNUM)
    return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
  else if (to == ARG_POINTER_REGNUM)
    return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
  else if (from == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
  else if (to == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
  else
    return 0;
}
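
/* A worked example with hypothetical numbers: if ELIMINABLE_REGS
   contains (ARG_POINTER_REGNUM -> FRAME_POINTER_REGNUM) with an
   INITIAL_ELIMINATION_OFFSET of 16 and (FRAME_POINTER_REGNUM ->
   STACK_POINTER_REGNUM) with an offset of 32, then the offset from
   the arg pointer to the stack pointer is found by chaining the two
   table entries: 16 + 32 = 48.  */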
/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, poly_int64 offset, poly_int64 size,
                       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);
  gcc_checking_assert (mode == BLKmode || known_size_p (size));
  poly_int64 const_x1;

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && mode != BLKmode)
    {
      poly_int64 actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (!multiple_p (actual_offset, GET_MODE_SIZE (mode)))
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x) && !SYMBOL_REF_FUNCTION_P (x))
        {
          tree decl;
          poly_int64 decl_size;

          if (maybe_lt (offset, 0))
            return 1;
          if (!known_size_p (size))
            return maybe_ne (offset, 0);

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            {
              if (!poly_int_tree_p (DECL_SIZE_UNIT (decl), &decl_size))
                decl_size = -1;
            }
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (!known_size_p (decl_size) || known_eq (decl_size, 0)
                  ? maybe_ne (offset, 0)
                  : maybe_gt (offset + size, decl_size));
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        {
#ifdef RED_ZONE_SIZE
          poly_int64 red_zone_size = RED_ZONE_SIZE;
#else
          poly_int64 red_zone_size = 0;
#endif
          poly_int64 stack_boundary = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
          poly_int64 low_bound, high_bound;

          if (!known_size_p (size))
            return 1;

          if (x == frame_pointer_rtx)
            {
              if (FRAME_GROWS_DOWNWARD)
                {
                  high_bound = targetm.starting_frame_offset ();
                  low_bound  = high_bound - get_frame_size ();
                }
              else
                {
                  low_bound  = targetm.starting_frame_offset ();
                  high_bound = low_bound + get_frame_size ();
                }
            }
          else if (x == hard_frame_pointer_rtx)
            {
              poly_int64 sp_offset
                = get_initial_register_offset (STACK_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);
              poly_int64 ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = sp_offset - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = sp_offset + red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else if (x == stack_pointer_rtx)
            {
              poly_int64 ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               STACK_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else
            {
              /* We assume that accesses are safe to at least the
                 next stack boundary.
                 Examples are varargs and __builtin_return_address.  */
#if ARGS_GROW_DOWNWARD
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + stack_boundary;
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - crtl->args.size - stack_boundary;
#else
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - stack_boundary;
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + crtl->args.size + stack_boundary;
#endif
            }

          if (known_ge (offset, low_bound)
              && known_le (offset, high_bound - size))
            return 0;
          return 1;
        }
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a const unspec without offset.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && GET_CODE (XEXP (x, 1)) == CONST
          && GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
          && known_eq (offset, 0))
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (poly_int_rtx_p (XEXP (x, 1), &const_x1)
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + const_x1,
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, -1, BLKmode, false);
}
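
/* For example, (mem (symbol_ref "x")) can trap when "x" is a weak
   symbol (its address may be null), whereas a frame-pointer-relative
   slot whose offset falls inside the [low_bound, high_bound - size]
   range computed above is considered safe.  */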
/* Return true if X contains a MEM subrtx.  */

bool
contains_mem_rtx_p (rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (MEM_P (*iter))
      return true;

  return false;
}
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
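
/* For example, (const (plus (symbol_ref "x") (const_int 4))) has
   integer term 4 and (const (minus (symbol_ref "x") (const_int 4)))
   has integer term -4; get_related_value below returns the
   (symbol_ref "x") part of either expression.  */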
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
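
/* For example, splitting (const (plus (symbol_ref "x") (const_int 8)))
   stores (symbol_ref "x") in *BASE_OUT and (const_int 8) in
   *OFFSET_OUT; any rtx without such a shape is stored unchanged with
   an offset of const0_rtx.  */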
/* Express integer value X as some value Y plus a polynomial offset,
   where Y is either const0_rtx, X or something within X (as opposed
   to a new rtx).  Return the Y and store the offset in *OFFSET_OUT.  */

rtx
strip_offset (rtx x, poly_int64_pod *offset_out)
{
  rtx base = const0_rtx;
  rtx test = x;
  if (GET_CODE (test) == CONST)
    test = XEXP (test, 0);
  if (GET_CODE (test) == PLUS)
    {
      base = XEXP (test, 0);
      test = XEXP (test, 1);
    }
  if (poly_int_rtx_p (test, offset_out))
    return base;
  *offset_out = 0;
  return x;
}
/* Return the argument size in REG_ARGS_SIZE note X.  */

poly_int64
get_args_size (const_rtx x)
{
  gcc_checking_assert (REG_NOTE_KIND (x) == REG_ARGS_SIZE);
  return rtx_to_poly_int64 (XEXP (x, 0));
}
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == label_ref_label (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

    CASE_CONST_ANY:
      /* These are kept unique for a given value.  */
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
                    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && !read_modify_subreg_p (SET_DEST (body)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
                   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Return true if REG is set or clobbered inside INSN.  */

int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
        if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
          return true;

      return false;
    }

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return true;

  /* There are no REG_INC notes for SP autoinc.  */
  if (reg == stack_pointer_rtx && INSN_P (insn))
    {
      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
        {
          rtx mem = *iter;
          if (mem
              && MEM_P (mem)
              && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
            {
              if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
                return true;
              iter.skip_subrtxes ();
            }
        }
    }

  return set_of (reg, insn) != NULL_RTX;
}
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
/* Return true if X is a SUBREG and if storing a value to X would
   preserve some of its SUBREG_REG.  For example, on a normal 32-bit
   target, using a SUBREG to store to one half of a DImode REG would
   preserve the other half.  */

bool
read_modify_subreg_p (const_rtx x)
{
  if (GET_CODE (x) != SUBREG)
    return false;
  poly_uint64 isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
  poly_uint64 osize = GET_MODE_SIZE (GET_MODE (x));
  poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
  /* The inner and outer modes of a subreg must be ordered, so that we
     can tell whether they're paradoxical or partial.  */
  gcc_checking_assert (ordered_p (isize, osize));
  return (maybe_gt (isize, osize) && maybe_gt (isize, regsize));
}
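
/* A worked example, assuming a target whose natural register size is
   4 bytes: for (subreg:SI (reg:DI R) 0), ISIZE is 8, OSIZE is 4 and
   REGSIZE is 4, so a store covers only half of R and the result is
   true; for (subreg:QI (reg:SI R) 0), ISIZE (4) does not exceed
   REGSIZE (4), the whole register is effectively rewritten, and the
   result is false.  */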
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
/* Add all hard registers in X to *PSET.  */
void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
        add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}
/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *)data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}
/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
        IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}
/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In common case
                 only single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach set first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL_RTX;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL_RTX;
              break;

            default:
              return NULL_RTX;
            }
        }
    }
  return set;
}
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (maybe_ne (SUBREG_BYTE (src), SUBREG_BYTE (dst)))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      poly_int64 c0 = rtx_to_poly_int64 (XVECEXP (par, 0, 0));
      poly_int64 offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (maybe_ne (rtx_to_poly_int64 (XVECEXP (par, 0, i)), c0 + i))
          return 0;
      return
        REG_CAN_CHANGE_MODE_P (REGNO (dst), GET_MODE (src0), GET_MODE (dst))
        && simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                                  offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
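
/* For example, (set (reg:SI 1) (reg:SI 1)) is a no-op move, and so is
   a VEC_SELECT of consecutive lanes stored back into the hard register
   that already holds those lanes, as checked above.  */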
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const rtx_insn *insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
1680 appears either explicitly or implicitly in X
1681 other than being stored into.
1683 References contained within the substructure at LOC do not count.
1684 LOC may be zero, meaning don't ignore anything. */
1687 refers_to_regno_p (unsigned int regno
, unsigned int endregno
, const_rtx x
,
1691 unsigned int x_regno
;
1696 /* The contents of a REG_NONNEG note is always zero, so we must come here
1697 upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
1701 code
= GET_CODE (x
);
1706 x_regno
= REGNO (x
);
1708 /* If we modifying the stack, frame, or argument pointer, it will
1709 clobber a virtual register. In fact, we could be more precise,
1710 but it isn't worth it. */
1711 if ((x_regno
== STACK_POINTER_REGNUM
1712 || (FRAME_POINTER_REGNUM
!= ARG_POINTER_REGNUM
1713 && x_regno
== ARG_POINTER_REGNUM
)
1714 || x_regno
== FRAME_POINTER_REGNUM
)
1715 && regno
>= FIRST_VIRTUAL_REGISTER
&& regno
<= LAST_VIRTUAL_REGISTER
)
1718 return endregno
> x_regno
&& regno
< END_REGNO (x
);
1721 /* If this is a SUBREG of a hard reg, we can see exactly which
1722 registers are being modified. Otherwise, handle normally. */
1723 if (REG_P (SUBREG_REG (x
))
1724 && REGNO (SUBREG_REG (x
)) < FIRST_PSEUDO_REGISTER
)
1726 unsigned int inner_regno
= subreg_regno (x
);
1727 unsigned int inner_endregno
1728 = inner_regno
+ (inner_regno
< FIRST_PSEUDO_REGISTER
1729 ? subreg_nregs (x
) : 1);
1731 return endregno
> inner_regno
&& regno
< inner_endregno
;
1737 if (&SET_DEST (x
) != loc
1738 /* Note setting a SUBREG counts as referring to the REG it is in for
1739 a pseudo but not for hard registers since we can
1740 treat each word individually. */
1741 && ((GET_CODE (SET_DEST (x
)) == SUBREG
1742 && loc
!= &SUBREG_REG (SET_DEST (x
))
1743 && REG_P (SUBREG_REG (SET_DEST (x
)))
1744 && REGNO (SUBREG_REG (SET_DEST (x
))) >= FIRST_PSEUDO_REGISTER
1745 && refers_to_regno_p (regno
, endregno
,
1746 SUBREG_REG (SET_DEST (x
)), loc
))
1747 || (!REG_P (SET_DEST (x
))
1748 && refers_to_regno_p (regno
, endregno
, SET_DEST (x
), loc
))))
1751 if (code
== CLOBBER
|| loc
== &SET_SRC (x
))
1760 /* X does not match, so try its subexpressions. */
1762 fmt
= GET_RTX_FORMAT (code
);
1763 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
1765 if (fmt
[i
] == 'e' && loc
!= &XEXP (x
, i
))
1773 if (refers_to_regno_p (regno
, endregno
, XEXP (x
, i
), loc
))
1776 else if (fmt
[i
] == 'E')
1779 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
1780 if (loc
!= &XVECEXP (x
, i
, j
)
1781 && refers_to_regno_p (regno
, endregno
, XVECEXP (x
, i
, j
), loc
))
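
/* The range is half-open: for example, a query with REGNO 0 and
   ENDREGNO 2 (a pair occupying hard registers 0 and 1) matches a
   reference to (reg:SI 1), since [0, 2) and [1, 2) overlap.  */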
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const rtx_insn *insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register, is a
   complete rather than read-modify-write destination, and contains
   register TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG && !read_modify_subreg_p (dest))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}

/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET || GET_CODE (pattern) == CLOBBER)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const rtx_insn *insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}
/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}
/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));
  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
    case REG_TM:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
                                       datum, REG_NOTES (insn));
}
/* Add a REG_ARGS_SIZE note to INSN with value VALUE.  */

void
add_args_size_note (rtx_insn *insn, poly_int64 value)
{
  gcc_checking_assert (!find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX));
  add_reg_note (insn, REG_ARGS_SIZE, gen_int_mode (value, Pmode));
}
/* Add a register note like NOTE to INSN.  */

void
add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
{
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
  else
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
}
/* Duplicate NOTE and return the copy.  */
rtx
duplicate_reg_note (rtx note)
{
  reg_note kind = REG_NOTE_KIND (note);

  if (GET_CODE (note) == INT_LIST)
    return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
  else if (GET_CODE (note) == EXPR_LIST)
    return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
  else
    return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
}
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx_insn *insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (insn);
      break;
    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
   Return true if any note has been removed.  */

bool
remove_reg_equal_equiv_notes (rtx_insn *insn)
{
  rtx *loc;
  bool ret = false;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        {
          *loc = XEXP (*loc, 1);
          ret = true;
        }
      else
        loc = &XEXP (*loc, 1);
    }
  return ret;
}
/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  if (!df)
    return;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx_insn *insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

bool
in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return true;

  return false;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
{
  rtx_expr_list *temp = *listp;
  rtx_expr_list *prev = NULL;

  while (temp)
    {
      if (node == temp->element ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
{
  rtx_insn_list *temp = *listp;
  rtx_insn_list *prev = NULL;

  while (temp)
    {
      if (node == temp->insn ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state, and thus no
   instructions or register uses should be moved or combined across them.
   This includes only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case CALL:
    case MEM:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_insn_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_insn_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Nonzero if X contains any volatile memory references,
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    if (volatile_refs_p (XEXP (x, i)))
	      return 1;
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;

	    for (j = 0; j < XVECLEN (x, i); j++)
	      if (volatile_refs_p (XVECEXP (x, i, j)))
		return 1;
	  }
      }
  }
  return 0;
}
/* Similar to above, except that it also rejects register pre- and
   post-increments.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case PC:
    case REG:
    case SCRATCH:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case VAR_LOCATION:
      return 0;

    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
	 when some combination can't be done.  If we see one, don't think
	 that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case SET:
    case CALL:
    case POST_INC:
    case POST_DEC:
    case PRE_INC:
    case PRE_DEC:
    case POST_MODIFY:
    case PRE_MODIFY:
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    if (side_effects_p (XEXP (x, i)))
	      return 1;
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;

	    for (j = 0; j < XVECLEN (x, i); j++)
	      if (side_effects_p (XVECEXP (x, i, j)))
		return 1;
	  }
      }
  }
  return 0;
}
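
/* For illustration (a sketch, not part of the original sources): the
   three predicates above form a hierarchy.  For a register
   post-increment wrapped by a hypothetical caller as

       rtx reg = gen_rtx_REG (SImode, 1);	/* hypothetical hard reg  */
       rtx inc = gen_rtx_POST_INC (SImode, reg);

   volatile_insn_p (inc) and volatile_refs_p (inc) are both 0 -- no
   volatile asm, UNSPEC_VOLATILE or volatile MEM is involved -- while
   side_effects_p (inc) is 1, because the auto-increment modifies the
   register as a side effect.  */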
/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero value means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);
  switch (code)
    {
      /* Handle these cases quickly.  */
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST:
    case PC:
    case REG:
    case SCRATCH:
      return 0;

    case UNSPEC:
      return targetm.unspec_may_trap_p (x, flags);

    case UNSPEC_VOLATILE:
    case ASM_INPUT:
    case TRAP_IF:
      return 1;

    case ASM_OPERANDS:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
	  && MEM_VOLATILE_P (x)
	  && XEXP (x, 0) == stack_pointer_rtx)
	return 1;
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
	     reference; moving it out of context such as when moving code
	     when optimizing, might cause its address to become invalid.  */
	  code_changed
	  || !MEM_NOTRAP_P (x))
	{
	  poly_int64 size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : -1;
	  return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
					GET_MODE (x), code_changed);
	}
      return 0;

    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      /* Division by a non-constant might trap.  */
      if (HONOR_SNANS (x))
	return 1;
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
	return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
	return 1;
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
	 certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
	break;
      /* ??? There is no machine independent way to check for tests that trap
	 when COMPARE is used, though many targets do make this distinction.
	 For instance, sparc uses CCFPE for compares which generate exceptions
	 and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (x))
	return 1;
      /* But often the compare has some CC mode, so check operand
	 modes as well.  */
      if (HONOR_NANS (XEXP (x, 0))
	  || HONOR_NANS (XEXP (x, 1)))
	return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (x))
	return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (XEXP (x, 0))
	  || HONOR_SNANS (XEXP (x, 1)))
	return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
	return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
	return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (may_trap_p_1 (XEXP (x, i), flags))
	    return 1;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (may_trap_p_1 (XVECEXP (x, i, j), flags))
	      return 1;
	}
    }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}
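
/* For illustration (a sketch, not part of the original sources):
   division by a register is conservatively assumed to trap, while
   division by a nonzero constant is not, per the DIV case above:

       rtx r0 = gen_rtx_REG (SImode, 0);	/* hypothetical regs  */
       rtx r1 = gen_rtx_REG (SImode, 1);
       rtx d1 = gen_rtx_DIV (SImode, r0, r1);
       rtx d2 = gen_rtx_DIV (SImode, r0, GEN_INT (16));

   may_trap_p (d1) is nonzero because R1 might be zero;
   may_trap_p (d2) is zero because the divisor is a nonzero constant.  */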
/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
	if (s->d == 1)
	  return *s->ip;
	else
	  return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      if ((reg:SI) == 1)
	(set (reg:SI) (mem:SI (%fp - 7)))
      else
	(set (reg:QI) (mem:QI (%fp - 7)))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}
/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

int
inequality_comparisons_p (const_rtx x)
{
  const char *fmt;
  int len, i;
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case REG:
    case SCRATCH:
    case PC:
    CASE_CONST_ANY:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      return 0;

    case LT:
    case LTU:
    case GT:
    case GTU:
    case LE:
    case LEU:
    case GE:
    case GEU:
      return 1;

    default:
      break;
    }

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
	{
	  if (inequality_comparisons_p (XEXP (x, i)))
	    return 1;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (inequality_comparisons_p (XVECEXP (x, i, j)))
	      return 1;
	}
    }

  return 0;
}
/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.

   ALL_REGS is true if we want to replace all REGs equal to FROM, not just
   those pointer-equal ones.  */

rtx
replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
{
  int i, j;
  const char *fmt;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (all_regs
      && REG_P (x)
      && REG_P (from)
      && REGNO (x) == REGNO (from))
    {
      gcc_assert (GET_MODE (x) == GET_MODE (from));
      return to;
    }
  else if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_subreg (GET_MODE (x), new_rtx,
			       GET_MODE (SUBREG_REG (x)),
			       SUBREG_BYTE (x));
	  gcc_assert (x);
	}
      else
	SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					new_rtx, GET_MODE (XEXP (x, 0)));
	  gcc_assert (x);
	}
      else
	XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
					   from, to, all_regs);
    }

  return x;
}
/* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
   the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */

void
replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
{
  /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
  rtx x = *loc;
  if (JUMP_TABLE_DATA_P (x))
    {
      x = PATTERN (x);
      rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
      int len = GET_NUM_ELEM (vec);
      for (int i = 0; i < len; ++i)
	{
	  rtx ref = RTVEC_ELT (vec, i);
	  if (XEXP (ref, 0) == old_label)
	    {
	      XEXP (ref, 0) = new_label;
	      if (update_label_nuses)
		{
		  ++LABEL_NUSES (new_label);
		  --LABEL_NUSES (old_label);
		}
	    }
	}
      return;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by the iterator because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
    JUMP_LABEL (x) = new_label;

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
    {
      rtx *loc = *iter;
      if (rtx x = *loc)
	{
	  if (GET_CODE (x) == SYMBOL_REF
	      && CONSTANT_POOL_ADDRESS_P (x))
	    {
	      rtx c = get_pool_constant (x);
	      if (rtx_referenced_p (old_label, c))
		{
		  /* Create a copy of constant C; replace the label inside
		     but do not update LABEL_NUSES because uses in constant pool
		     are not counted.  */
		  rtx new_c = copy_rtx (c);
		  replace_label (&new_c, old_label, new_label, false);

		  /* Add the new constant NEW_C to constant pool and replace
		     the old reference to constant by new reference.  */
		  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
		  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
		}
	    }

	  if ((GET_CODE (x) == LABEL_REF
	       || GET_CODE (x) == INSN_LIST)
	      && XEXP (x, 0) == old_label)
	    {
	      XEXP (x, 0) = new_label;
	      if (update_label_nuses)
		{
		  ++LABEL_NUSES (new_label);
		  --LABEL_NUSES (old_label);
		}
	    }
	}
    }
}
void
replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
		       rtx_insn *new_label, bool update_label_nuses)
{
  rtx insn_as_rtx = insn;
  replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
  gcc_checking_assert (insn_as_rtx == insn);
}
/* Return true if X is referenced in BODY.  */

bool
rtx_referenced_p (const_rtx x, const_rtx body)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, body, ALL)
    if (const_rtx y = *iter)
      {
	/* Check if a label_ref Y refers to label X.  */
	if (GET_CODE (y) == LABEL_REF
	    && LABEL_P (x)
	    && label_ref_label (y) == x)
	  return true;

	if (rtx_equal_p (x, y))
	  return true;

	/* If Y is a reference to pool constant traverse the constant.  */
	if (GET_CODE (y) == SYMBOL_REF
	    && CONSTANT_POOL_ADDRESS_P (y))
	  iter.substitute (get_pool_constant (y));
      }
  return false;
}
/* If INSN is a tablejump return true and store the label (before jump table)
   to *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
	     rtx_jump_table_data **tablep)
{
  if (!JUMP_P (insn))
    return false;

  rtx target = JUMP_LABEL (insn);
  if (target == NULL_RTX || ANY_RETURN_P (target))
    return false;

  rtx_insn *label = as_a<rtx_insn *> (target);
  rtx_insn *table = next_insn (label);
  if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
    return false;

  if (labelp)
    *labelp = label;
  if (tablep)
    *tablep = as_a<rtx_jump_table_data *> (table);
  return true;
}
/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
		&& CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
	      || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
	  && computed_jump_p_1 (XEXP (x, i)))
	return 1;

      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (computed_jump_p_1 (XVECEXP (x, i, j)))
	    return 1;
    }

  return 0;
}
/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const rtx_insn *insn)
{
  int i;
  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
	return 0;

      if (GET_CODE (pat) == PARALLEL)
	{
	  int len = XVECLEN (pat, 0);
	  int has_use_labelref = 0;

	  for (i = len - 1; i >= 0; i--)
	    if (GET_CODE (XVECEXP (pat, 0, i)) == USE
		&& (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
		    == LABEL_REF))
	      {
		has_use_labelref = 1;
		break;
	      }

	  if (! has_use_labelref)
	    for (i = len - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (pat, 0, i)) == SET
		  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
		  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
		return 1;
	}
      else if (GET_CODE (pat) == SET
	       && SET_DEST (pat) == pc_rtx
	       && computed_jump_p_1 (SET_SRC (pat)))
	return 1;
    }
  return 0;
}
/* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
   the equivalent add insn and pass the result to FN, using DATA as the
   final argument.  */

static int
for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
{
  rtx x = XEXP (mem, 0);
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
	poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (size, GET_MODE (r1));
	return fn (mem, x, r1, r1, c, data);
      }

    case PRE_DEC:
    case POST_DEC:
      {
	poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (-size, GET_MODE (r1));
	return fn (mem, x, r1, r1, c, data);
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
	rtx r1 = XEXP (x, 0);
	rtx add = XEXP (x, 1);
	return fn (mem, x, r1, add, NULL, data);
      }

    default:
      gcc_unreachable ();
    }
}
/* Traverse *LOC looking for MEMs that have autoinc addresses.
   For each such autoinc operation found, call FN, passing it
   the innermost enclosing MEM, the operation itself, the RTX modified
   by the operation, two RTXs (the second may be NULL) that, once
   added, represent the value to be held by the modified RTX
   afterwards, and DATA.  FN is to return 0 to continue the
   traversal or any other value to have it returned to the caller of
   for_each_inc_dec.  */

int
for_each_inc_dec (rtx x,
		  for_each_inc_dec_fn fn,
		  void *data)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx mem = *iter;
      if (mem
	  && MEM_P (mem)
	  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
	{
	  int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
	  if (res != 0)
	    return res;
	  iter.skip_subrtxes ();
	}
    }
  return 0;
}
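
/* For illustration (a sketch, not part of the original sources): a
   minimal callback matching the for_each_inc_dec_fn typedef in rtl.h
   that simply counts the autoinc operations in a pattern:

       static int
       count_inc_dec (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
		      rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
		      rtx srcoff ATTRIBUTE_UNUSED, void *data)
       {
	 ++*(int *) data;
	 return 0;	/* Zero continues the traversal.  */
       }

       int n = 0;
       for_each_inc_dec (PATTERN (insn), count_inc_dec, &n);

   DEST/SRC/SRCOFF are the decomposed "dest = src + srcoff" form built
   by for_each_inc_dec_find_inc_dec above; COUNT_INC_DEC is a
   hypothetical name.  */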
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if ((tem = regno_use_in (regno, XEXP (x, i))))
	    return tem;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
	    return tem;
    }

  return NULL_RTX;
}
/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The more
   positive the value, the stronger the preference for being the first
   operand.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always become the second operand.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -10;
  if (code == CONST_WIDE_INT)
    return -9;
  if (code == CONST_POLY_INT)
    return -8;
  if (code == CONST_DOUBLE)
    return -8;
  if (code == CONST_FIXED)
    return -8;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
	return -7;
      if (code == CONST_WIDE_INT)
	return -6;
      if (code == CONST_POLY_INT)
	return -5;
      if (code == CONST_DOUBLE)
	return -5;
      if (code == CONST_FIXED)
	return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
	return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
	 of objects.  Prefer pointer objects over non pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
	  || (MEM_P (op) && MEM_POINTER (op)))
	return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
	 This helps to make things linear.  In particular,
	 (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
	 operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
	 is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
	return 1;
      /* FALLTHRU */

    default:
      return 0;
    }
}
/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

bool
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
	  < commutative_operand_precedence (y));
}
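
/* For illustration (a sketch, not part of the original sources): a
   typical caller canonicalizes a commutative pair before building the
   expression,

       if (swap_commutative_operands_p (op0, op1))
	 std::swap (op0, op1);

   so that (plus (reg) (const_int 4)) is produced rather than
   (plus (const_int 4) (reg)): constants have the lowest precedence
   above and therefore always end up second.  */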
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */

int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
	return 1;
      break;
    default:
      break;
    }
  return 0;
}
/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */

int
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return 0;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
	    return 1;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (in, i) - 1; j >= 0; j--)
	  if (loc == &XVECEXP (in, i, j)
	      || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
	    return 1;
    }
  return 0;
}
/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

poly_uint64
subreg_lsb_1 (machine_mode outer_mode,
	      machine_mode inner_mode,
	      poly_uint64 subreg_byte)
{
  poly_uint64 subreg_end, trailing_bytes, byte_pos;

  /* A paradoxical subreg begins at bit position 0.  */
  if (paradoxical_subreg_p (outer_mode, inner_mode))
    return 0;

  subreg_end = subreg_byte + GET_MODE_SIZE (outer_mode);
  trailing_bytes = GET_MODE_SIZE (inner_mode) - subreg_end;
  if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
    byte_pos = trailing_bytes;
  else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
    byte_pos = subreg_byte;
  else
    {
      /* When bytes and words have opposite endianness, we must be able
	 to split offsets into words and bytes at compile time.  */
      poly_uint64 leading_word_part
	= force_align_down (subreg_byte, UNITS_PER_WORD);
      poly_uint64 trailing_word_part
	= force_align_down (trailing_bytes, UNITS_PER_WORD);
      /* If the subreg crosses a word boundary ensure that
	 it also begins and ends on a word boundary.  */
      gcc_assert (known_le (subreg_end - leading_word_part,
			    (unsigned int) UNITS_PER_WORD)
		  || (known_eq (leading_word_part, subreg_byte)
		      && known_eq (trailing_word_part, trailing_bytes)));
      if (WORDS_BIG_ENDIAN)
	byte_pos = trailing_word_part + (subreg_byte - leading_word_part);
      else
	byte_pos = leading_word_part + (trailing_bytes - trailing_word_part);
    }

  return byte_pos * BITS_PER_UNIT;
}
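
/* For illustration (a worked example, not part of the original
   sources): on a 64-bit little-endian target,
   (subreg:SI (reg:DI R) 4) selects the high half of R, and
   subreg_lsb_1 (SImode, DImode, 4) returns 4 * BITS_PER_UNIT = 32.
   On a fully big-endian target the same byte offset selects the low
   half: subreg_end = 4 + 4 = 8, trailing_bytes = 8 - 8 = 0, so the
   result is 0.  */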
/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

poly_uint64
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
		       SUBREG_BYTE (x));
}
/* Return the subreg byte offset for a subreg whose outer value has
   OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
   there are LSB_SHIFT *bits* between the lsb of the outer value and the
   lsb of the inner value.  This is the inverse of the calculation
   performed by subreg_lsb_1 (which converts byte offsets to bit shifts).  */

poly_uint64
subreg_size_offset_from_lsb (poly_uint64 outer_bytes, poly_uint64 inner_bytes,
			     poly_uint64 lsb_shift)
{
  /* A paradoxical subreg begins at bit position 0.  */
  gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
  if (maybe_gt (outer_bytes, inner_bytes))
    {
      gcc_checking_assert (known_eq (lsb_shift, 0U));
      return 0;
    }

  poly_uint64 lower_bytes = exact_div (lsb_shift, BITS_PER_UNIT);
  poly_uint64 upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
  if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
    return upper_bytes;
  else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
    return lower_bytes;
  else
    {
      /* When bytes and words have opposite endianness, we must be able
	 to split offsets into words and bytes at compile time.  */
      poly_uint64 lower_word_part = force_align_down (lower_bytes,
						      UNITS_PER_WORD);
      poly_uint64 upper_word_part = force_align_down (upper_bytes,
						      UNITS_PER_WORD);
      if (WORDS_BIG_ENDIAN)
	return upper_word_part + (lower_bytes - lower_word_part);
      else
	return lower_word_part + (upper_bytes - upper_word_part);
    }
}
/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.

   Rather than considering one particular inner register (and thus one
   particular "outer" register) in isolation, this function really uses
   XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
   function does not check whether adding INFO->offset to XREGNO gives
   a valid hard register; even if INFO->offset + XREGNO is out of range,
   there might be another register of the same type that is in range.
   Likewise it doesn't check whether targetm.hard_regno_mode_ok accepts
   the new register, since that can depend on things like whether the final
   register number is even or odd.  Callers that want to check whether
   this particular subreg can be replaced by a simple (reg ...) should
   use simplify_subreg_regno.  */

void
subreg_get_info (unsigned int xregno, machine_mode xmode,
		 poly_uint64 offset, machine_mode ymode,
		 struct subreg_info *info)
{
  unsigned int nregs_xmode, nregs_ymode;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  poly_uint64 xsize = GET_MODE_SIZE (xmode);
  poly_uint64 ysize = GET_MODE_SIZE (ymode);

  bool rknown = false;

  /* If the register representation of a non-scalar mode has holes in it,
     we expect the scalar units to be concatenated together, with the holes
     distributed evenly among the scalar units.  Each scalar unit must occupy
     at least one register.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      /* As a consequence, we must be dealing with a constant number of
	 scalars, and thus a constant offset and number of units.  */
      HOST_WIDE_INT coffset = offset.to_constant ();
      HOST_WIDE_INT cysize = ysize.to_constant ();
      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      unsigned int nunits = GET_MODE_NUNITS (xmode).to_constant ();
      scalar_mode xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
		  == (nunits
		      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs (xregno, xmode)
		  == hard_regno_nregs (xregno, xmode_unit) * nunits);

      /* You can only ask for a SUBREG of a value with holes in the middle
	 if you don't cross the holes.  (Such a SUBREG should be done by
	 picking a different register class, or doing it in memory if
	 necessary.)  An example of a value with holes is XCmode on 32-bit
	 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
	 3 for each part, but in memory it's two 128-bit parts.
	 Padding is assumed to be at the end (not necessarily the 'high part')
	 of each unit.  */
      if ((coffset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
	  && (coffset / GET_MODE_SIZE (xmode_unit)
	      != ((coffset + cysize - 1) / GET_MODE_SIZE (xmode_unit))))
	{
	  info->representable_p = false;
	  rknown = true;
	}
    }
  else
    nregs_xmode = hard_regno_nregs (xregno, xmode);

  nregs_ymode = hard_regno_nregs (xregno, ymode);

  /* Subreg sizes must be ordered, so that we can tell whether they are
     partial, paradoxical or complete.  */
  gcc_checking_assert (ordered_p (xsize, ysize));

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown && known_eq (offset, 0U) && maybe_gt (ysize, xsize))
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
	 actual hard registers than the original register, we must
	 return a negative offset so that we find the proper highpart
	 of the register.

	 We assume that the ordering of registers within a multi-register
	 value has a consistent endianness: if bytes and register words
	 have different endianness, the hard registers that make up a
	 multi-register value must be at least word-sized.  */
      if (REG_WORDS_BIG_ENDIAN)
	info->offset = (int) nregs_xmode - (int) nregs_ymode;
      else
	info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  poly_uint64 regsize_xmode, regsize_ymode;
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && multiple_p (xsize, nregs_xmode, &regsize_xmode)
      && multiple_p (ysize, nregs_ymode, &regsize_ymode))
    {
      if (!rknown
	  && ((nregs_ymode > 1 && maybe_gt (regsize_xmode, regsize_ymode))
	      || (nregs_xmode > 1 && maybe_gt (regsize_ymode, regsize_xmode))))
	{
	  info->representable_p = false;
	  if (!can_div_away_from_zero_p (ysize, regsize_xmode, &info->nregs)
	      || !can_div_trunc_p (offset, regsize_xmode, &info->offset))
	    /* Checked by validate_subreg.  We must know at compile time
	       which inner registers are being accessed.  */
	    gcc_unreachable ();
	  return;
	}
      /* It's not valid to extract a subreg of mode YMODE at OFFSET that
	 would go outside of XMODE.  */
      if (!rknown && maybe_gt (ysize + offset, xsize))
	{
	  info->representable_p = false;
	  info->nregs = nregs_ymode;
	  if (!can_div_trunc_p (offset, regsize_xmode, &info->offset))
	    /* Checked by validate_subreg.  We must know at compile time
	       which inner registers are being accessed.  */
	    gcc_unreachable ();
	  return;
	}
      /* Quick exit for the simple and common case of extracting whole
	 subregisters from a multiregister value.  */
      /* ??? It would be better to integrate this into the code below,
	 if we can generalize the concept enough and figure out how
	 odd-sized modes can coexist with the other weird cases we support.  */
      HOST_WIDE_INT count;
      if (!rknown
	  && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
	  && known_eq (regsize_xmode, regsize_ymode)
	  && constant_multiple_p (offset, regsize_ymode, &count))
	{
	  info->representable_p = true;
	  info->nregs = nregs_ymode;
	  info->offset = count;
	  gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
	  return;
	}
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && known_eq (offset, subreg_lowpart_offset (ymode, xmode)))
    {
      info->representable_p = true;
      rknown = true;

      if (known_eq (offset, 0U) || nregs_xmode == nregs_ymode)
	{
	  info->offset = 0;
	  info->nregs = nregs_ymode;
	  return;
	}
    }

  /* Set NUM_BLOCKS to the number of independently-representable YMODE
     values there are in (reg:XMODE XREGNO).  We can view the register
     as consisting of this number of independent "blocks", where each
     block occupies NREGS_YMODE registers and contains exactly one
     representable YMODE value.  */
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);
  unsigned int num_blocks = nregs_xmode / nregs_ymode;

  /* Calculate the number of bytes in each block.  This must always
     be exact, otherwise we don't know how to verify the constraint.
     These conditions may be relaxed but subreg_regno_offset would
     need to be redesigned.  */
  poly_uint64 bytes_per_block = exact_div (xsize, num_blocks);

  /* Get the number of the first block that contains the subreg and the byte
     offset of the subreg from the start of that block.  */
  unsigned int block_number;
  poly_uint64 subblock_offset;
  if (!can_div_trunc_p (offset, bytes_per_block, &block_number,
			&subblock_offset))
    /* Checked by validate_subreg.  We must know at compile time which
       inner registers are being accessed.  */
    gcc_unreachable ();

  if (!rknown)
    {
      /* Only the lowpart of each block is representable.  */
      info->representable_p
	= known_eq (subblock_offset,
		    subreg_size_lowpart_offset (ysize, bytes_per_block));
      rknown = true;
    }

  /* We assume that the ordering of registers within a multi-register
     value has a consistent endianness: if bytes and register words
     have different endianness, the hard registers that make up a
     multi-register value must be at least word-sized.  */
  if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
    /* The block number we calculated above followed memory endianness.
       Convert it to register endianness by counting back from the end.
       (Note that, because of the assumption above, each block must be
       at least word-sized.)  */
    info->offset = (num_blocks - block_number - 1) * nregs_ymode;
  else
    info->offset = block_number * nregs_ymode;
  info->nregs = nregs_ymode;
}
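
/* For illustration (a sketch, not part of the original sources): on a
   hypothetical 32-bit target where DImode occupies two hard registers,

       struct subreg_info info;
       subreg_get_info (regno, DImode, 4, SImode, &info);

   takes the "quick exit" path above and fills in
   info.representable_p = true, info.nregs = 1 and info.offset = 1:
   byte offset 4 names the second of the two SImode blocks (assuming
   memory and register word endianness agree, as that path requires).  */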
/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */

unsigned int
subreg_regno_offset (unsigned int xregno, machine_mode xmode,
		     poly_uint64 offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}
/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */

bool
subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
			       poly_uint64 offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}
/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
		       poly_uint64 offset, machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && !REG_CAN_CHANGE_MODE_P (xregno, xmode, ymode)
      /* We can use mode change in LRA for some transformations.  */
      && ! lra_in_progress)
    return -1;

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && xregno == FRAME_POINTER_REGNUM)
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM
      /* We should convert hard stack register in LRA if it is
	 necessary.  */
      && ! lra_in_progress)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how complex FP arguments are passed
     on IA-64 and should be fixed.  See PR target/49226.  */
  if (!targetm.hard_regno_mode_ok (yregno, ymode)
      && targetm.hard_regno_mode_ok (xregno, xmode))
    return -1;

  return (int) yregno;
}
/* Return the final regno that a subreg expression refers to.  */

unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
				     GET_MODE (subreg),
				     SUBREG_BYTE (x),
				     GET_MODE (x));
  return ret;
}
/* Return the number of registers that a subreg expression refers
   to.  */

unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}
/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */

unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
		   &info);
  return info.nregs;
}
struct parms_set_data
{
  int nregs;
  HARD_REG_SET regs;
};

/* Helper function for noticing stores to parameter registers.  */
static void
parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct parms_set_data *const d = (struct parms_set_data *) data;
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
      && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
    {
      CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
      d->nregs--;
    }
}
/* Look backward for first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */
rtx_insn *
find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
{
  struct parms_set_data parm;
  rtx p;
  rtx_insn *before, *first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
	&& REG_P (XEXP (XEXP (p, 0), 0))
	&& !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
      {
	gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

	/* We only care about registers which can hold function
	   arguments.  */
	if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
	  continue;

	SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
	parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
	 another.  Stop in that case.  */
      if (CALL_P (before))
	break;

      /* Our caller must either ensure that we will find all sets
	 (in case code has not been optimized yet), or take care
	 of possible labels by setting boundary to the preceding
	 CODE_LABEL.  */
      if (LABEL_P (before))
	{
	  gcc_assert (before == boundary);
	  break;
	}

      if (INSN_P (before))
	{
	  int nregs_old = parm.nregs;
	  note_stores (PATTERN (before), parms_set, &parm);
	  /* If we found something that did not set a parameter reg,
	     we're done.  Do not keep going, as that might result
	     in hoisting an insn before the setting of a pseudo
	     that is used by the hoisted insn.  */
	  if (nregs_old != parm.nregs)
	    first_set = before;
	  else
	    break;
	}
    }
  return first_set;
}
/* Return true if we should avoid inserting code between INSN and preceding
   call instruction.  */

bool
keep_with_call_p (const rtx_insn *insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
	  && fixed_regs[REGNO (SET_DEST (set))]
	  && general_operand (SET_SRC (set), VOIDmode))
	return true;
      if (REG_P (SET_SRC (set))
	  && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
	  && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	return true;
      /* There may be a stack pop just after the call and before the store
	 of the return register.  Search for the actual store when deciding
	 if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
	{
	  /* This CONST_CAST is okay because next_nonnote_insn just
	     returns its argument and we assign it to a const_rtx
	     variable.  */
	  const rtx_insn *i2
	    = next_nonnote_insn (const_cast<rtx_insn *> (insn));
	  if (i2 && keep_with_call_p (i2))
	    return true;
	}
    }
  return false;
}
/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);
  rtx_jump_table_data *table;

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &table))
    {
      rtvec vec = table->get_labels ();
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
	if (XEXP (RTVEC_ELT (vec, i), 0) == label)
	  return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}
/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   X appears as operand OPNO in an expression with code OUTER_CODE.
   SPEED specifies whether costs optimized for speed or size should
   be returned.  */

int
rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
	  int opno, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;
  int factor;

  if (x == 0)
    return 0;

  if (GET_MODE (x) != VOIDmode)
    mode = GET_MODE (x);

  /* A size N times larger than UNITS_PER_WORD likely needs N times as
     many insns, taking N times as long.  */
  factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
  if (factor == 0)
    factor = 1;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      /* Multiplication has time-complexity O(N*N), where N is the
	 number of units (translated from digits) when using
	 schoolbook long multiplication.  */
      total = factor * factor * COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      /* Similarly, complexity for schoolbook long division.  */
      total = factor * factor * COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    case SET:
      /* A SET doesn't have a mode, so let's look at the SET_DEST to get
	 the mode for the factor.  */
      mode = GET_MODE (SET_DEST (x));
      factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
      if (factor == 0)
	factor = 1;
      /* FALLTHRU */
    default:
      total = factor * COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
	 the mode, the more expensive it is.  */
      if (!targetm.modes_tieable_p (mode, GET_MODE (SUBREG_REG (x))))
	return COSTS_N_INSNS (2 + factor);
      break;

    case TRUNCATE:
      if (targetm.modes_tieable_p (mode, GET_MODE (XEXP (x, 0))))
	{
	  total = 0;
	  break;
	}
      /* FALLTHRU */
    default:
      if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
	return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), mode, code, i, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);

  return total;
}
/* Fill in the structure C with information about both speed and size rtx
   costs for X, which is operand OPNO in an expression with code OUTER.  */

void
get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
		   struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, mode, outer, opno, true);
  c->size = rtx_cost (x, mode, outer, opno, false);
}
4292 Expect that X is properly formed address reference.
4294 SPEED parameter specify whether costs optimized for speed or size should
4298 address_cost (rtx x
, machine_mode mode
, addr_space_t as
, bool speed
)
4300 /* We may be asked for cost of various unusual addresses, such as operands
4301 of push instruction. It is not worthwhile to complicate writing
4302 of the target hook by such cases. */
4304 if (!memory_address_addr_space_p (mode
, x
, as
))
4307 return targetm
.address_cost (x
, mode
, as
, speed
);
/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
{
  return rtx_cost (x, Pmode, MEM, 0, speed);
}
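
/* For illustration (a sketch, not part of the original sources): a pass
   choosing between two candidate addressing forms might compare

       int c1 = address_cost (addr1, SImode, ADDR_SPACE_GENERIC, true);
       int c2 = address_cost (addr2, SImode, ADDR_SPACE_GENERIC, true);

   where ADDR1/ADDR2 are hypothetical candidate addresses; an address
   that is not valid for the mode is penalized with the fixed cost 1000
   before the target hook is ever consulted.  */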
unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, machine_mode mode)
{
  if (mode == VOIDmode)
    mode = GET_MODE (x);
  scalar_int_mode int_mode;
  if (!is_a <scalar_int_mode> (mode, &int_mode))
    return GET_MODE_MASK (mode);
  return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
}
unsigned int
num_sign_bit_copies (const_rtx x, machine_mode mode)
{
  if (mode == VOIDmode)
    mode = GET_MODE (x);
  scalar_int_mode int_mode;
  if (!is_a <scalar_int_mode> (mode, &int_mode))
    return 1;
  return cached_num_sign_bit_copies (x, int_mode, NULL_RTX, VOIDmode, 0);
}
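
/* For illustration (a sketch, not part of the original sources): for a
   QImode register zero-extended into SImode,

       rtx reg = gen_rtx_REG (QImode, 0);	/* hypothetical reg  */
       rtx ext = gen_rtx_ZERO_EXTEND (SImode, reg);

   nonzero_bits (ext, SImode) is at most 0xff, since the ZERO_EXTEND
   case below masks with the inner mode's mask; consequently
   num_sign_bit_copies (ext, SImode) is at least 24, as the top 24 bits
   are known zero and thus copies of the (zero) sign bit.  */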
/* Return true if nonzero_bits1 might recurse into both operands
   of X.  */

static inline bool
nonzero_bits_binary_arith_p (const_rtx x)
{
  if (!ARITHMETIC_P (x))
    return false;
  switch (GET_CODE (x))
    {
    case AND:
    case XOR:
    case IOR:
    case UMIN:
    case UMAX:
    case SMIN:
    case SMAX:
    case PLUS:
    case MINUS:
    case MULT:
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      return true;
    default:
      return false;
    }
}
/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, scalar_int_mode mode, const_rtx known_x,
		     machine_mode known_mode,
		     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (nonzero_bits_binary_arith_p (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));

      /* Check the second level.  */
      if (nonzero_bits_binary_arith_p (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return nonzero_bits1 (x, mode, x1, mode,
			      cached_nonzero_bits (x1, mode, known_x,
						   known_mode, known_ret));

      if (nonzero_bits_binary_arith_p (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}
/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4422 /* Given an expression, X, compute which bits in X can be nonzero.
4423 We don't care about bits outside of those defined in MODE.
4425 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4426 an arithmetic operation, we can do better. */
4428 static unsigned HOST_WIDE_INT
4429 nonzero_bits1 (const_rtx x
, scalar_int_mode mode
, const_rtx known_x
,
4430 machine_mode known_mode
,
4431 unsigned HOST_WIDE_INT known_ret
)
4433 unsigned HOST_WIDE_INT nonzero
= GET_MODE_MASK (mode
);
4434 unsigned HOST_WIDE_INT inner_nz
;
4435 enum rtx_code code
= GET_CODE (x
);
4436 machine_mode inner_mode
;
4437 unsigned int inner_width
;
4438 scalar_int_mode xmode
;
4440 unsigned int mode_width
= GET_MODE_PRECISION (mode
);
4442 if (CONST_INT_P (x
))
4444 if (SHORT_IMMEDIATES_SIGN_EXTEND
4446 && mode_width
< BITS_PER_WORD
4447 && (UINTVAL (x
) & (HOST_WIDE_INT_1U
<< (mode_width
- 1))) != 0)
4448 return UINTVAL (x
) | (HOST_WIDE_INT_M1U
<< mode_width
);
4453 if (!is_a
<scalar_int_mode
> (GET_MODE (x
), &xmode
))
4455 unsigned int xmode_width
= GET_MODE_PRECISION (xmode
);
4457 /* If X is wider than MODE, use its mode instead. */
4458 if (xmode_width
> mode_width
)
4461 nonzero
= GET_MODE_MASK (mode
);
4462 mode_width
= xmode_width
;
4465 if (mode_width
> HOST_BITS_PER_WIDE_INT
)
4466 /* Our only callers in this case look for single bit values. So
4467 just return the mode mask. Those tests will then be false. */
4470 /* If MODE is wider than X, but both are a single word for both the host
4471 and target machines, we can compute this from which bits of the object
4472 might be nonzero in its own mode, taking into account the fact that, on
4473 CISC machines, accessing an object in a wider mode generally causes the
4474 high-order bits to become undefined, so they are not known to be zero.
4475 We extend this reasoning to RISC machines for rotate operations since the
4476 semantics of the operations in the larger mode is not well defined. */
4477 if (mode_width
> xmode_width
4478 && xmode_width
<= BITS_PER_WORD
4479 && xmode_width
<= HOST_BITS_PER_WIDE_INT
4480 && (!WORD_REGISTER_OPERATIONS
|| code
== ROTATE
|| code
== ROTATERT
))
4482 nonzero
&= cached_nonzero_bits (x
, xmode
,
4483 known_x
, known_mode
, known_ret
);
4484 nonzero
|= GET_MODE_MASK (mode
) & ~GET_MODE_MASK (xmode
);
4488 /* Please keep nonzero_bits_binary_arith_p above in sync with
4489 the code in the switch below. */
4493 #if defined(POINTERS_EXTEND_UNSIGNED)
4494 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4495 all the bits above ptr_mode are known to be zero. */
4496 /* As we do not know which address space the pointer is referring to,
4497 we can do this only if the target does not support different pointer
4498 or address modes depending on the address space. */
4499 if (target_default_pointer_address_modes_p ()
4500 && POINTERS_EXTEND_UNSIGNED
4503 && !targetm
.have_ptr_extend ())
4504 nonzero
&= GET_MODE_MASK (ptr_mode
);
4507 /* Include declared information about alignment of pointers. */
4508 /* ??? We don't properly preserve REG_POINTER changes across
4509 pointer-to-integer casts, so we can't trust it except for
4510 things that we know must be pointers. See execute/960116-1.c. */
4511 if ((x
== stack_pointer_rtx
4512 || x
== frame_pointer_rtx
4513 || x
== arg_pointer_rtx
)
4514 && REGNO_POINTER_ALIGN (REGNO (x
)))
4516 unsigned HOST_WIDE_INT alignment
4517 = REGNO_POINTER_ALIGN (REGNO (x
)) / BITS_PER_UNIT
;
4519 #ifdef PUSH_ROUNDING
4520 /* If PUSH_ROUNDING is defined, it is possible for the
4521 stack to be momentarily aligned only to that amount,
4522 so we pick the least alignment. */
4523 if (x
== stack_pointer_rtx
&& PUSH_ARGS
)
4525 poly_uint64 rounded_1
= PUSH_ROUNDING (poly_int64 (1));
4526 alignment
= MIN (known_alignment (rounded_1
), alignment
);
4530 nonzero
&= ~(alignment
- 1);
4534 unsigned HOST_WIDE_INT nonzero_for_hook
= nonzero
;
4535 rtx new_rtx
= rtl_hooks
.reg_nonzero_bits (x
, xmode
, mode
,
4539 nonzero_for_hook
&= cached_nonzero_bits (new_rtx
, mode
, known_x
,
4540 known_mode
, known_ret
);
4542 return nonzero_for_hook
;
4546 /* In many, if not most, RISC machines, reading a byte from memory
4547 zeros the rest of the register. Noticing that fact saves a lot
4548 of extra zero-extends. */
4549 if (load_extend_op (xmode
) == ZERO_EXTEND
)
4550 nonzero
&= GET_MODE_MASK (xmode
);
4554 case UNEQ
: case LTGT
:
4555 case GT
: case GTU
: case UNGT
:
4556 case LT
: case LTU
: case UNLT
:
4557 case GE
: case GEU
: case UNGE
:
4558 case LE
: case LEU
: case UNLE
:
4559 case UNORDERED
: case ORDERED
:
4560 /* If this produces an integer result, we know which bits are set.
4561 Code here used to clear bits outside the mode of X, but that is
4563 /* Mind that MODE is the mode the caller wants to look at this
4564 operation in, and not the actual operation mode. We can wind
4565 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4566 that describes the results of a vector compare. */
4567 if (GET_MODE_CLASS (xmode
) == MODE_INT
4568 && mode_width
<= HOST_BITS_PER_WIDE_INT
)
4569 nonzero
= STORE_FLAG_VALUE
;
4574 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4575 and num_sign_bit_copies. */
4576 if (num_sign_bit_copies (XEXP (x
, 0), xmode
) == xmode_width
)
4580 if (xmode_width
< mode_width
)
4581 nonzero
|= (GET_MODE_MASK (mode
) & ~GET_MODE_MASK (xmode
));
4586 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4587 and num_sign_bit_copies. */
4588 if (num_sign_bit_copies (XEXP (x
, 0), xmode
) == xmode_width
)
4594 nonzero
&= (cached_nonzero_bits (XEXP (x
, 0), mode
,
4595 known_x
, known_mode
, known_ret
)
4596 & GET_MODE_MASK (mode
));
4600 nonzero
&= cached_nonzero_bits (XEXP (x
, 0), mode
,
4601 known_x
, known_mode
, known_ret
);
4602 if (GET_MODE (XEXP (x
, 0)) != VOIDmode
)
4603 nonzero
&= GET_MODE_MASK (GET_MODE (XEXP (x
, 0)));
4607 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4608 Otherwise, show all the bits in the outer mode but not the inner
4610 inner_nz
= cached_nonzero_bits (XEXP (x
, 0), mode
,
4611 known_x
, known_mode
, known_ret
);
4612 if (GET_MODE (XEXP (x
, 0)) != VOIDmode
)
4614 inner_nz
&= GET_MODE_MASK (GET_MODE (XEXP (x
, 0)));
4615 if (val_signbit_known_set_p (GET_MODE (XEXP (x
, 0)), inner_nz
))
4616 inner_nz
|= (GET_MODE_MASK (mode
)
4617 & ~GET_MODE_MASK (GET_MODE (XEXP (x
, 0))));
4620 nonzero
&= inner_nz
;
4624 nonzero
&= cached_nonzero_bits (XEXP (x
, 0), mode
,
4625 known_x
, known_mode
, known_ret
)
4626 & cached_nonzero_bits (XEXP (x
, 1), mode
,
4627 known_x
, known_mode
, known_ret
);
4631 case UMIN
: case UMAX
: case SMIN
: case SMAX
:
4633 unsigned HOST_WIDE_INT nonzero0
4634 = cached_nonzero_bits (XEXP (x
, 0), mode
,
4635 known_x
, known_mode
, known_ret
);
4637 /* Don't call nonzero_bits for the second time if it cannot change
4639 if ((nonzero
& nonzero0
) != nonzero
)
4641 | cached_nonzero_bits (XEXP (x
, 1), mode
,
4642 known_x
, known_mode
, known_ret
);
4646 case PLUS
: case MINUS
:
4648 case DIV
: case UDIV
:
4649 case MOD
: case UMOD
:
4650 /* We can apply the rules of arithmetic to compute the number of
4651 high- and low-order zero bits of these operations. We start by
4652 computing the width (position of the highest-order nonzero bit)
4653 and the number of low-order zero bits for each value. */
4655 unsigned HOST_WIDE_INT nz0
4656 = cached_nonzero_bits (XEXP (x
, 0), mode
,
4657 known_x
, known_mode
, known_ret
);
4658 unsigned HOST_WIDE_INT nz1
4659 = cached_nonzero_bits (XEXP (x
, 1), mode
,
4660 known_x
, known_mode
, known_ret
);
4661 int sign_index
= xmode_width
- 1;
4662 int width0
= floor_log2 (nz0
) + 1;
4663 int width1
= floor_log2 (nz1
) + 1;
4664 int low0
= ctz_or_zero (nz0
);
4665 int low1
= ctz_or_zero (nz1
);
4666 unsigned HOST_WIDE_INT op0_maybe_minusp
4667 = nz0
& (HOST_WIDE_INT_1U
<< sign_index
);
4668 unsigned HOST_WIDE_INT op1_maybe_minusp
4669 = nz1
& (HOST_WIDE_INT_1U
<< sign_index
);
4670 unsigned int result_width
= mode_width
;
4676 result_width
= MAX (width0
, width1
) + 1;
4677 result_low
= MIN (low0
, low1
);
4680 result_low
= MIN (low0
, low1
);
4683 result_width
= width0
+ width1
;
4684 result_low
= low0
+ low1
;
4689 if (!op0_maybe_minusp
&& !op1_maybe_minusp
)
4690 result_width
= width0
;
4695 result_width
= width0
;
4700 if (!op0_maybe_minusp
&& !op1_maybe_minusp
)
4701 result_width
= MIN (width0
, width1
);
4702 result_low
= MIN (low0
, low1
);
4707 result_width
= MIN (width0
, width1
);
4708 result_low
= MIN (low0
, low1
);
4714 if (result_width
< mode_width
)
4715 nonzero
&= (HOST_WIDE_INT_1U
<< result_width
) - 1;
4718 nonzero
&= ~((HOST_WIDE_INT_1U
<< result_low
) - 1);
4723 if (CONST_INT_P (XEXP (x
, 1))
4724 && INTVAL (XEXP (x
, 1)) < HOST_BITS_PER_WIDE_INT
)
4725 nonzero
&= (HOST_WIDE_INT_1U
<< INTVAL (XEXP (x
, 1))) - 1;
4729 /* If this is a SUBREG formed for a promoted variable that has
4730 been zero-extended, we know that at least the high-order bits
4731 are zero, though others might be too. */
4732 if (SUBREG_PROMOTED_VAR_P (x
) && SUBREG_PROMOTED_UNSIGNED_P (x
))
4733 nonzero
= GET_MODE_MASK (xmode
)
4734 & cached_nonzero_bits (SUBREG_REG (x
), xmode
,
4735 known_x
, known_mode
, known_ret
);
4737 /* If the inner mode is a single word for both the host and target
4738 machines, we can compute this from which bits of the inner
4739 object might be nonzero. */
4740 inner_mode
= GET_MODE (SUBREG_REG (x
));
4741 if (GET_MODE_PRECISION (inner_mode
).is_constant (&inner_width
)
4742 && inner_width
<= BITS_PER_WORD
4743 && inner_width
<= HOST_BITS_PER_WIDE_INT
)
4745 nonzero
&= cached_nonzero_bits (SUBREG_REG (x
), mode
,
4746 known_x
, known_mode
, known_ret
);
4748 /* On many CISC machines, accessing an object in a wider mode
4749 causes the high-order bits to become undefined. So they are
4750 not known to be zero. */
4752 if ((!WORD_REGISTER_OPERATIONS
4753 /* If this is a typical RISC machine, we only have to worry
4754 about the way loads are extended. */
4755 || ((extend_op
= load_extend_op (inner_mode
)) == SIGN_EXTEND
4756 ? val_signbit_known_set_p (inner_mode
, nonzero
)
4757 : extend_op
!= ZERO_EXTEND
)
4758 || (!MEM_P (SUBREG_REG (x
)) && !REG_P (SUBREG_REG (x
))))
4759 && xmode_width
> inner_width
)
4761 |= (GET_MODE_MASK (GET_MODE (x
)) & ~GET_MODE_MASK (inner_mode
));
4770 /* The nonzero bits are in two classes: any bits within MODE
4771 that aren't in xmode are always significant. The rest of the
4772 nonzero bits are those that are significant in the operand of
4773 the shift when shifted the appropriate number of bits. This
4774 shows that high-order bits are cleared by the right shift and
4775 low-order bits by left shifts. */
4776 if (CONST_INT_P (XEXP (x
, 1))
4777 && INTVAL (XEXP (x
, 1)) >= 0
4778 && INTVAL (XEXP (x
, 1)) < HOST_BITS_PER_WIDE_INT
4779 && INTVAL (XEXP (x
, 1)) < xmode_width
)
4781 int count
= INTVAL (XEXP (x
, 1));
4782 unsigned HOST_WIDE_INT mode_mask
= GET_MODE_MASK (xmode
);
4783 unsigned HOST_WIDE_INT op_nonzero
4784 = cached_nonzero_bits (XEXP (x
, 0), mode
,
4785 known_x
, known_mode
, known_ret
);
4786 unsigned HOST_WIDE_INT inner
= op_nonzero
& mode_mask
;
4787 unsigned HOST_WIDE_INT outer
= 0;
4789 if (mode_width
> xmode_width
)
4790 outer
= (op_nonzero
& nonzero
& ~mode_mask
);
4805 /* If the sign bit may have been nonzero before the shift, we
4806 need to mark all the places it could have been copied to
4807 by the shift as possibly nonzero. */
4808 if (inner
& (HOST_WIDE_INT_1U
<< (xmode_width
- 1 - count
)))
4809 inner
|= (((HOST_WIDE_INT_1U
<< count
) - 1)
4810 << (xmode_width
- count
));
4814 inner
= (inner
<< (count
% xmode_width
)
4815 | (inner
>> (xmode_width
- (count
% xmode_width
))))
4820 inner
= (inner
>> (count
% xmode_width
)
4821 | (inner
<< (xmode_width
- (count
% xmode_width
))))
4829 nonzero
&= (outer
| inner
);
4835 /* This is at most the number of bits in the mode. */
4836 nonzero
= ((unsigned HOST_WIDE_INT
) 2 << (floor_log2 (mode_width
))) - 1;
4840 /* If CLZ has a known value at zero, then the nonzero bits are
4841 that value, plus the number of bits in the mode minus one. */
4842 if (CLZ_DEFINED_VALUE_AT_ZERO (mode
, nonzero
))
4844 |= (HOST_WIDE_INT_1U
<< (floor_log2 (mode_width
))) - 1;
4850 /* If CTZ has a known value at zero, then the nonzero bits are
4851 that value, plus the number of bits in the mode minus one. */
4852 if (CTZ_DEFINED_VALUE_AT_ZERO (mode
, nonzero
))
4854 |= (HOST_WIDE_INT_1U
<< (floor_log2 (mode_width
))) - 1;

    case CLRSB:
      /* This is at most the number of bits in the mode minus 1.  */
      nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
	unsigned HOST_WIDE_INT nonzero_true
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero_true) != nonzero)
	  nonzero &= nonzero_true
		     | cached_nonzero_bits (XEXP (x, 2), mode,
					    known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}

/* See the macro definition above.  */
#undef cached_num_sign_bit_copies

/* Return true if num_sign_bit_copies1 might recurse into both operands
   of X.  */

static inline bool
num_sign_bit_copies_binary_arith_p (const_rtx x)
{
  if (!ARITHMETIC_P (x))
    return false;
  switch (GET_CODE (x))
    {
    case IOR:
    case AND:
    case XOR:
    case SMIN:
    case SMAX:
    case UMIN:
    case UMAX:
    case PLUS:
    case MINUS:
    case MULT:
      return true;
    default:
      return false;
    }
}

/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, scalar_int_mode mode,
			    const_rtx known_x, machine_mode known_mode,
			    unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (num_sign_bit_copies_binary_arith_p (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));

      /* Check the second level.  */
      if (num_sign_bit_copies_binary_arith_p (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x1, mode,
				cached_num_sign_bit_copies (x1, mode, known_x,
							    known_mode,
							    known_ret));

      if (num_sign_bit_copies_binary_arith_p (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}
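
/* For example, for (plus X X) both operands are the same rtx, so the
   first-level check above computes the value for X once and passes it to
   num_sign_bit_copies1 as KNOWN_RET; without this, a tree such as
   (plus (plus X X) (plus X X)) would be walked a number of times that
   grows exponentially with its depth.  */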

/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE.  The returned
   value will always be between 1 and the number of bits in MODE.  */

static unsigned int
num_sign_bit_copies1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
		      machine_mode known_mode,
		      unsigned int known_ret)
{
  enum rtx_code code = GET_CODE (x);
  unsigned int bitwidth = GET_MODE_PRECISION (mode);
  int num0, num1, result;
  unsigned HOST_WIDE_INT nonzero;

  if (CONST_INT_P (x))
    {
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
    }
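
  /* For example, (const_int -4) in SImode gives nonzero = 0xfffffffc,
     remasked to 3 because the sign bit is set; floor_log2 (3) is 1, so
     the result is 32 - 1 - 1 = 30: bits 31 down to 2 are all copies of
     the sign bit.  */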

  scalar_int_mode xmode, inner_mode;
  if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
    return 1;

  unsigned int xmode_width = GET_MODE_PRECISION (xmode);

  /* For a smaller mode, just ignore the high bits.  */
  if (bitwidth < xmode_width)
    {
      num0 = cached_num_sign_bit_copies (x, xmode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - (int) (xmode_width - bitwidth));
    }

  if (bitwidth > xmode_width)
    {
      /* If this machine does not do all register operations on the entire
	 register and MODE is wider than the mode of X, we can say nothing
	 at all about the high-order bits.  We extend this reasoning to every
	 machine for rotate operations since the semantics of the operations
	 in the larger mode is not well defined.  */
      if (!WORD_REGISTER_OPERATIONS || code == ROTATE || code == ROTATERT)
	return 1;

      /* Likewise on machines that do, if the mode of the object is smaller
	 than a word and loads of that size don't sign extend, we can say
	 nothing about the high order bits.  */
      if (xmode_width < BITS_PER_WORD
	  && load_extend_op (xmode) != SIGN_EXTEND)
	return 1;
    }

  /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
     the code in the switch below.  */
  switch (code)
    {
    case REG:

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* If pointers extend signed and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be sign bit copies.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED && xmode == Pmode
	  && mode == Pmode && REG_POINTER (x)
	  && !targetm.have_ptr_extend ())
	return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
#endif

      {
	unsigned int copies_for_hook = 1, copies = 1;
	rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, xmode, mode,
							 &copies_for_hook);

	if (new_rtx)
	  copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
					       known_mode, known_ret);

	if (copies > 1 || copies_for_hook > 1)
	  return MAX (copies, copies_for_hook);

	/* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
      }
      break;

    case MEM:
      /* Some RISC machines sign-extend all loads of smaller than a word.  */
      if (load_extend_op (xmode) == SIGN_EXTEND)
	return MAX (1, ((int) bitwidth - (int) xmode_width + 1));
      break;

    case SUBREG:
      /* If this is a SUBREG for a promoted object that is sign-extended
	 and we are looking at it in a wider mode, we know that at least the
	 high-order bits are known to be sign bit copies.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					     known_x, known_mode, known_ret);
	  return MAX ((int) bitwidth - (int) xmode_width + 1, num0);
	}

      if (is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)), &inner_mode))
	{
	  /* For a smaller object, just ignore the high bits.  */
	  if (bitwidth <= GET_MODE_PRECISION (inner_mode))
	    {
	      num0 = cached_num_sign_bit_copies (SUBREG_REG (x), inner_mode,
						 known_x, known_mode,
						 known_ret);
	      return MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode)
					   - bitwidth));
	    }

	  /* For paradoxical SUBREGs on machines where all register operations
	     affect the entire register, just look inside.  Note that we are
	     passing MODE to the recursive call, so the number of sign bit
	     copies will remain relative to that mode, not the inner mode.  */

	  /* This works only if loads sign extend.  Otherwise, if we get a
	     reload for the inner part, it may be loaded from the stack, and
	     then we lose all sign bit copies that existed before the store
	     to the stack.  */
	  if (WORD_REGISTER_OPERATIONS
	      && load_extend_op (inner_mode) == SIGN_EXTEND
	      && paradoxical_subreg_p (x)
	      && MEM_P (SUBREG_REG (x)))
	    return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					       known_x, known_mode, known_ret);
	}
      break;

    case SIGN_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1)))
	return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
      break;

    case SIGN_EXTEND:
      if (is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return (bitwidth - GET_MODE_PRECISION (inner_mode)
		+ cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
					      known_x, known_mode, known_ret));
      break;

    case TRUNCATE:
      /* For a smaller object, just ignore the high bits.  */
      inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
					 known_x, known_mode, known_ret);
      return MAX (1, (num0 - (int) (GET_MODE_PRECISION (inner_mode)
				    - bitwidth)));

    case NOT:
      return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);

    case ROTATE:	case ROTATERT:
      /* If we are rotating left by a number of bits less than the number
	 of sign bit copies, we can just subtract that amount from the
	 number.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < (int) bitwidth)
	{
	  num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					     known_x, known_mode, known_ret);
	  return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
				 : (int) bitwidth - INTVAL (XEXP (x, 1))));
	}
      break;

    case NEG:
      /* In general, this subtracts one sign bit copy.  But if the value
	 is known to be positive, the number of sign bit copies is the
	 same as that of the input.  Finally, if the input has just one bit
	 that might be nonzero, all the bits are copies of the sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return num0 > 1 ? num0 - 1 : 1;

      nonzero = nonzero_bits (XEXP (x, 0), mode);
      if (nonzero == 1)
	return bitwidth;

      if (num0 > 1
	  && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
	num0--;

      return num0;

    case IOR:   case AND:   case XOR:
    case SMIN:  case SMAX:  case UMIN:  case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
	 MIN and MAX operations always return one of the operands.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      /* If num1 is clearing some of the top bits then regardless of
	 the other term, we are guaranteed to have at least that many
	 high-order zero bits.  */
      if (code == AND
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
	return num1;

      /* Similarly for IOR when setting high-order bits.  */
      if (code == IOR
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return num1;

      return MIN (num0, num1);

    case PLUS:  case MINUS:
      /* For addition and subtraction, we can have a 1-bit carry.  However,
	 if we are subtracting 1 from a positive number, there will not
	 be such a carry.  Furthermore, if the positive number is known to
	 be 0 or 1, we know the result is either -1 or 0.  */

      if (code == PLUS && XEXP (x, 1) == constm1_rtx
	  && bitwidth <= HOST_BITS_PER_WIDE_INT)
	{
	  nonzero = nonzero_bits (XEXP (x, 0), mode);
	  if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
	    return (nonzero == 1 || nonzero == 0 ? bitwidth
		    : bitwidth - floor_log2 (nonzero) - 1);
	}

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      result = MAX (1, MIN (num0, num1) - 1);

      return result;

    case MULT:
      /* The number of bits of the product is the sum of the number of
	 bits of both terms.  However, unless one of the terms is known
	 to be positive, we must allow for an additional bit since negating
	 a negative number can remove one sign bit copy.  */

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
      if (result > 0
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (((nonzero_bits (XEXP (x, 0), mode)
		    & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
		  && ((nonzero_bits (XEXP (x, 1), mode)
		       & (HOST_WIDE_INT_1U << (bitwidth - 1)))
		      != 0))))
	result--;

      return MAX (1, result);

    case UDIV:
      /* The result must be <= the first operand.  If the first operand
	 has the high bit set, we know nothing about the number of sign
	 bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 0), mode)
		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);

    case UMOD:
      /* The result must be <= the second operand.  If the second operand
	 has (or just might have) the high bit set, we know nothing about
	 the number of sign bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 1), mode)
		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);

    case DIV:
      /* Similar to unsigned division, except that we have to worry about
	 the case where the divisor is negative, in which case we have
	 to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
	result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
	result--;

      return result;

    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
	 sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0
	  && INTVAL (XEXP (x, 1)) < xmode_width)
	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;

    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
	  || INTVAL (XEXP (x, 1)) < 0
	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
	  || INTVAL (XEXP (x, 1)) >= xmode_width)
	return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));

    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
					 known_x, known_mode, known_ret);
      return MIN (num0, num1);

    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}
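
/* For example, (ashiftrt:SI X (const_int 8)) has at least nine sign-bit
   copies in SImode: the ASHIFTRT case adds the shift count of 8 to the
   at-least-one copy that X itself is guaranteed to have.  */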

/* Calculate the rtx_cost of a single instruction pattern.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
pattern_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.  We
     can't use single_set since we only have the pattern.  We also
     consider PARALLELs of a normal set and a single comparison.  In
     that case we use the cost of the non-comparison SET operation,
     which is most-likely to be the real cost of this operation.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      rtx comparison = NULL_RTX;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      if (GET_CODE (SET_SRC (x)) == COMPARE)
		{
		  if (comparison)
		    return 0;
		  comparison = x;
		}
	      else
		{
		  if (set)
		    return 0;
		  set = x;
		}
	    }
	}

      if (!set && comparison)
	set = comparison;

      if (!set)
	return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}

/* Calculate the cost of a single instruction.  A return value of zero
   indicates an instruction pattern without a known cost.  */

int
insn_cost (rtx_insn *insn, bool speed)
{
  if (targetm.insn_cost)
    return targetm.insn_cost (insn, speed);

  return pattern_cost (PATTERN (insn), speed);
}

/* Returns estimate on cost of computing SEQ.  */

unsigned
seq_cost (const rtx_insn *seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
	cost += set_rtx_cost (set, speed);
      else if (NONDEBUG_INSN_P (seq))
	{
	  int this_cost = insn_cost (CONST_CAST_RTX_INSN (seq), speed);
	  if (this_cost > 0)
	    cost += this_cost;
	  else
	    cost++;
	}
    }

  return cost;
}
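
/* Illustrative use (a sketch, not from this file): callers typically
   cost a candidate expansion emitted into a temporary sequence, e.g.

     start_sequence ();
     ... emit candidate insns ...
     rtx_insn *candidate = get_insns ();
     end_sequence ();
     unsigned cost = seq_cost (candidate, optimize_insn_for_speed_p ());

   and keep whichever candidate is cheaper.  */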

/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonizing it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */

rtx
canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
			rtx_insn **earliest,
			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx_insn *prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;

  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	 && op1 == CONST0_RTX (GET_MODE (op0))
	 && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || !NONJUMP_INSN_P (prev)
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}

      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (!REG_P (op0))
	break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
	  || !NONJUMP_INSN_P (prev)
	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
	  /* In cfglayout mode, there do not have to be labels at the
	     beginning of a block, or jumps at the end, so the previous
	     conditions would not stop us when we reach bb boundary.  */
	  || BLOCK_FOR_INSN (prev) != bb)
	break;

      set = set_of (op0, prev);

      if (set
	  && (GET_CODE (set) != SET
	      || !rtx_equal_p (SET_DEST (set), op0)))
	break;

      /* If this is setting OP0, get what it sets it to if it looks
	 relevant.  */
      if (set)
	{
	  machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* ??? We may not combine comparisons done in a CCmode with
	     comparisons not done in a CCmode.  This is to aid targets
	     like Alpha that have an IEEE compliant EQ instruction, and
	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
	     actually artificial, simply to prevent the combination, but
	     should not affect other platforms.

	     However, we must allow VOIDmode comparisons to match either
	     CCmode or non-CCmode comparison, because some ports have
	     modeless comparisons inside branch patterns.

	     ??? This mode check should perhaps look more like the mode check
	     in simplify_comparison in combine.  */
	  if (((GET_MODE_CLASS (mode) == MODE_CC)
	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
	      && mode != VOIDmode
	      && inner_mode != VOIDmode)
	    break;
	  if (GET_CODE (SET_SRC (set)) == COMPARE
	      || (((code == NE
		    || (code == LT
			&& val_signbit_known_set_p (inner_mode,
						    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		    || (code == LT
			&& SCALAR_FLOAT_MODE_P (inner_mode)
			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			    REAL_VALUE_NEGATIVE (fsfv)))
#endif
		    ))
		  && COMPARISON_P (SET_SRC (set))))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && val_signbit_known_set_p (inner_mode,
						     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     ))
		   && COMPARISON_P (SET_SRC (set)))
	    {
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else if ((code == EQ || code == NE)
		   && GET_CODE (SET_SRC (set)) == XOR)
	    /* Handle sequences like:

	       (set op0 (xor X Y))
	       ...(eq|ne op0 (const_int 0))...

	       in which case:

	       (eq op0 (const_int 0)) reduces to (eq X Y)
	       (ne op0 (const_int 0)) reduces to (ne X Y)

	       This is the form used by MIPS16, for example.  */
	    x = SET_SRC (set);
	  else
	    break;
	}

      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;

      if (x)
	{
	  /* If the caller is expecting the condition to be valid at INSN,
	     make sure X doesn't change before INSN.  */
	  if (valid_at_insn_p)
	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
	      break;
	  if (COMPARISON_P (x))
	    code = GET_CODE (x);
	  if (reverse_code)
	    {
	      code = reversed_comparison_code (x, prev);
	      if (code == UNKNOWN)
		return 0;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }

  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  scalar_int_mode op0_mode;
  if (CONST_INT_P (op1)
      && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode)
      && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (op0_mode);

      switch (code)
	{
	case LE:
	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
	    code = LT, op1 = gen_int_mode (const_val + 1, op0_mode);
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
	case GE:
	  if ((const_val & max_val)
	      != (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (op0_mode) - 1)))
	    code = GT, op1 = gen_int_mode (const_val - 1, op0_mode);
	  break;

	case LEU:
	  if (uconst_val < max_val)
	    code = LTU, op1 = gen_int_mode (uconst_val + 1, op0_mode);
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = gen_int_mode (uconst_val - 1, op0_mode);
	  break;

	default:
	  break;
	}
    }

  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  /* We promised to return a comparison.  */
  rtx ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
  if (COMPARISON_P (ret))
    return ret;
  return 0;
}
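
/* For example, rule (4) above rewrites (le X (const_int 4)) as
   (lt X (const_int 5)) and (geu X (const_int 1)) as
   (gtu X (const_int 0)), so callers only need to handle the strict
   forms of the ordered comparisons.  */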

/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
	       int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
				 allow_cc_mode, valid_at_insn_p);
}

/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  opt_scalar_int_mode in_mode_iter;
  scalar_int_mode mode;

  FOR_EACH_MODE_IN_CLASS (in_mode_iter, MODE_INT)
    FOR_EACH_MODE_UNTIL (mode, in_mode_iter.require ())
      {
	scalar_int_mode in_mode = in_mode_iter.require ();
	scalar_int_mode i;

	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
	   extends to the next widest mode.  */
	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
		    || GET_MODE_WIDER_MODE (mode).require () == in_mode);

	/* We are in in_mode.  Count how many bits outside of mode
	   have to be copies of the sign-bit.  */
	FOR_EACH_MODE (i, mode, in_mode)
	  {
	    /* This must always exist (for the last iteration it will be
	       IN_MODE).  */
	    scalar_int_mode wider = GET_MODE_WIDER_MODE (i).require ();

	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
		/* We can only check sign-bit copies starting from the
		   top-bit.  In order to be able to check the bits we
		   have already seen we pretend that subsequent bits
		   have to be sign-bit copies too.  */
		|| num_sign_bit_copies_in_rep [in_mode][mode])
	      num_sign_bit_copies_in_rep [in_mode][mode]
		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
	  }
      }
}
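
/* For example, on a target such as MIPS64 where SImode values are kept
   sign-extended in 64-bit registers (TARGET_MODE_REP_EXTENDED returns
   SIGN_EXTEND for that pair), num_sign_bit_copies_in_rep[DImode][SImode]
   becomes 32: truncating a DImode value to SImode requires the 32 bits
   above SImode to be copies of the SImode sign bit.  */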

/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}

/* Return true if RTX code CODE has a single sequence of zero or more
   "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
   entry in that case.  */

static bool
setup_reg_subrtx_bounds (unsigned int code)
{
  const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
  unsigned int i = 0;
  for (; format[i] != 'e'; ++i)
    {
      if (!format[i])
	/* No subrtxes.  Leave start and count as 0.  */
	return true;
      if (format[i] == 'E' || format[i] == 'V')
	return false;
    }

  /* Record the sequence of 'e's.  */
  rtx_all_subrtx_bounds[code].start = i;
  do
    ++i;
  while (format[i] == 'e');
  rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
  /* rtl-iter.h relies on this.  */
  gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);

  for (; format[i]; ++i)
    if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
      return false;

  return true;
}

/* Initialize rtx_all_subrtx_bounds.  */
void
init_rtlanal (void)
{
  unsigned int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      if (!setup_reg_subrtx_bounds (i))
	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
      if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
    }

  init_num_sign_bit_copies_in_rep ();
}

/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}

/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */

int
low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (!HWI_COMPUTABLE_MODE_P (mode))
	return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
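
/* For example, in SImode the mask 0xff yields exact_log2 (0x100) = 8,
   an 8-bit low-order field, while 0xf0 yields exact_log2 (0xf1) = -1
   because the selected field does not start at bit 0.  */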

/* Return the mode of MEM's address.  */

scalar_int_mode
get_address_mode (rtx mem)
{
  machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return as_a <scalar_int_mode> (mode);
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}

/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.

   TODO: This function needs to be rewritten to work on any size
   integer.  */

void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
	{
	  /* In this case the CONST_INT holds both target words.
	     Extract the bits from it into two word-sized pieces.
	     Sign extend each half to HOST_WIDE_INT.  */
	  unsigned HOST_WIDE_INT low, high;
	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
	  unsigned bits_per_word = BITS_PER_WORD;

	  /* Set sign_bit to the most significant bit of a word.  */
	  sign_bit = 1;
	  sign_bit <<= bits_per_word - 1;

	  /* Set mask so that all bits of the word are set.  We could
	     have used 1 << BITS_PER_WORD instead of basing the
	     calculation on sign_bit.  However, on machines where
	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
	     compiler warning, even though the code would never be
	     executed.  */
	  mask = sign_bit << 1;
	  mask--;

	  /* Set sign_extend as any remaining bits.  */
	  sign_extend = ~mask;

	  /* Pick the lower word and sign-extend it.  */
	  low = INTVAL (value);
	  low &= mask;
	  if (low & sign_bit)
	    low |= sign_extend;

	  /* Pick the higher word, shifted to the least significant
	     bits, and sign-extend it.  */
	  high = INTVAL (value);
	  high >>= bits_per_word - 1;
	  high >>= 1;
	  high &= mask;
	  if (high & sign_bit)
	    high |= sign_extend;

	  /* Store the words in the target machine order.  */
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = GEN_INT (high);
	      *second = GEN_INT (low);
	    }
	  else
	    {
	      *first = GEN_INT (low);
	      *second = GEN_INT (high);
	    }
	}
      else
	{
	  /* The rule for using CONST_INT for a wider mode
	     is that we regard the value as signed.
	     So sign-extend it.  */
	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = high;
	      *second = value;
	    }
	  else
	    {
	      *first = value;
	      *second = high;
	    }
	}
    }
  else if (GET_CODE (value) == CONST_WIDE_INT)
    {
      /* All of this is scary code and needs to be converted to
	 properly work with any size integer.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	}
      else
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	}
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
	{
	  *first = const0_rtx;
	  *second = value;
	}
      else
	{
	  *first = value;
	  *second = const0_rtx;
	}
    }
  else if (GET_MODE (value) == VOIDmode
	   /* This is the old way we did CONST_DOUBLE integers.  */
	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
	 So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
	}
      else
	{
	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
	}
    }
  else
    {
      long l[2];

      /* Note, this converts the REAL_VALUE_TYPE to the target's
	 format, splits up the floating point double and outputs
	 exactly 32 bits of it into each of l[0] and l[1] --
	 not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);

      /* If 32 bits is an entire word for the target, but not for the host,
	 then sign-extend on the host so that the number will look the same
	 way on the host that it would on the target.  See for instance
	 simplify_unary_operation.  The #if is needed to avoid compiler
	 warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
	{
	  if (l[0] & ((long) 1 << 31))
	    l[0] |= ((unsigned long) (-1) << 32);
	  if (l[1] & ((long) 1 << 31))
	    l[1] |= ((unsigned long) (-1) << 32);
	}
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}
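
/* For example, with 32-bit words and a 64-bit HOST_WIDE_INT, splitting
   (const_int 0x100000002) gives a low word of 2 and a high word of 1;
   on a little-endian target *FIRST is (const_int 2) and *SECOND is
   (const_int 1), and the two are swapped when WORDS_BIG_ENDIAN.  */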

/* Return true if X is a sign_extract or zero_extract from the least
   significant bit.  */

static bool
lsb_bitfield_op_p (rtx x)
{
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
    {
      machine_mode mode = GET_MODE (XEXP (x, 0));
      HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
      poly_int64 remaining_bits = GET_MODE_PRECISION (mode) - len;

      return known_eq (pos, BITS_BIG_ENDIAN ? remaining_bits : 0);
    }
  return false;
}

/* Strip outer address "mutations" from LOC and return a pointer to the
   inner value.  If OUTER_CODE is nonnull, store the code of the innermost
   stripped expression there.

   "Mutations" either convert between modes or apply some kind of
   extension, truncation or alignment.  */

rtx *
strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
  for (;;)
    {
      enum rtx_code code = GET_CODE (*loc);
      if (GET_RTX_CLASS (code) == RTX_UNARY)
	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
	   used to convert between pointer sizes.  */
	loc = &XEXP (*loc, 0);
      else if (lsb_bitfield_op_p (*loc))
	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
	   acts as a combined truncation and extension.  */
	loc = &XEXP (*loc, 0);
      else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
	/* (and ... (const_int -X)) is used to align to X bytes.  */
	loc = &XEXP (*loc, 0);
      else if (code == SUBREG
	       && !OBJECT_P (SUBREG_REG (*loc))
	       && subreg_lowpart_p (*loc))
	/* (subreg (operator ...) ...) inside AND is used for mode
	   conversion too.  */
	loc = &SUBREG_REG (*loc);
      else
	return loc;
      if (outer_code)
	*outer_code = code;
    }
}
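
/* For example, (sign_extend:DI (reg:SI R)) strips to the inner
   (reg:SI R), and (and X (const_int -4)) strips to X, since the AND
   only aligns the address to a 4-byte boundary.  */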

/* Return true if CODE applies some kind of scale.  The scaled value is
   the first operand and the scale is the second.  */

static bool
binary_scale_code_p (enum rtx_code code)
{
  return (code == MULT
	  || code == ASHIFT
	  /* Needed by ARM targets.  */
	  || code == ASHIFTRT
	  || code == LSHIFTRT
	  || code == ROTATE
	  || code == ROTATERT);
}

/* If *INNER can be interpreted as a base, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_base_term (rtx *inner)
{
  if (GET_CODE (*inner) == LO_SUM)
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}

/* If *INNER can be interpreted as an index, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_index_term (rtx *inner)
{
  /* At present, only constant scales are allowed.  */
  if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}

/* Set the segment part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->segment);
  info->segment = loc;
  info->segment_term = inner;
}

/* Set the base part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_base (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->base);
  info->base = loc;
  info->base_term = inner;
}

/* Set the index part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_index (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->index);
  info->index = loc;
  info->index_term = inner;
}

/* Set the displacement part of address INFO to LOC, given that INNER
   is the constant term.  */

static void
set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->disp);
  info->disp = loc;
  info->disp_term = inner;
}

/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
   rest of INFO accordingly.  */

static void
decompose_incdec_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  /* These addresses are only valid when the size of the addressed
     value is known.  */
  gcc_checking_assert (info->mode != VOIDmode);
}

/* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
   of INFO accordingly.  */

static void
decompose_automod_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  rtx plus = XEXP (*info->inner, 1);
  gcc_assert (GET_CODE (plus) == PLUS);

  info->base_term2 = &XEXP (plus, 0);
  gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));

  rtx *step = &XEXP (plus, 1);
  rtx *inner_step = strip_address_mutations (step);
  if (CONSTANT_P (*inner_step))
    set_address_disp (info, step, inner_step);
  else
    set_address_index (info, step, inner_step);
}

/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}
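
/* For example, (plus (plus A B) C) stores pointers to A, B and C in
   order and returns PTR + 3; the gcc_assert above aborts if the tree
   has more leaf operands than [PTR, END) can hold.  */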

/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, machine_mode mode, addr_space_t as,
	  enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
	 or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
	return base_p ? 1 : -1;
    }
  return 0;
}

/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
					ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
	set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
	set_address_segment (info, loc, inner);
      else
	{
	  /* The only other possibilities are a base or an index.  */
	  rtx *base_term = get_base_term (inner);
	  rtx *index_term = get_index_term (inner);
	  gcc_assert (base_term || index_term);
	  if (!base_term)
	    set_address_index (info, loc, index_term);
	  else if (!index_term)
	    set_address_base (info, loc, base_term);
	  else
	    {
	      gcc_assert (base_term == index_term);
	      ops[out] = loc;
	      inner_ops[out] = base_term;
	      ++out;
	    }
	}
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
	 the base.  If we were confident that another term was the base
	 or index, treat the remaining operand as the other kind.  */
      if (!info->base)
	set_address_base (info, ops[0], inner_ops[0]);
      else
	set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
		    GET_CODE (*ops[1]))
	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
		       GET_CODE (*ops[0])))
	{
	  set_address_base (info, ops[0], inner_ops[0]);
	  set_address_index (info, ops[1], inner_ops[1]);
	}
      else
	{
	  set_address_base (info, ops[1], inner_ops[1]);
	  set_address_index (info, ops[0], inner_ops[0]);
	}
    }
  else
    gcc_assert (out == 0);
}
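
/* For example, the address (plus (mult (reg R1) (const_int 4))
   (plus (reg R2) (const_int 8))) is flattened into three operands:
   the constant 8 becomes the displacement, (mult (reg R1) (const_int 4))
   can only be an index, and (reg R2), which could be either, is then
   classified as the base because no base has been seen yet.  */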

/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
		   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}

/* Describe address operand LOC in INFO.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}

/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
		     MEM_ADDR_SPACE (x), MEM);
}

/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
		     info->addr_outer_code);
}

/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}
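
/* For example, (mult (reg R) (const_int 4)) and (ashift (reg R)
   (const_int 2)) both give a scale of 4, while a bare index term
   gives a scale of 1.  */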

/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}

/* Return true if RTL X contains a SYMBOL_REF.  */

bool
contains_symbol_ref_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter))
      return true;

  return false;
}

/* Return true if RTL X contains a SYMBOL_REF or LABEL_REF.  */

bool
contains_symbolic_reference_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
      return true;

  return false;
}

/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (const_rtx x)
{
  if (!targetm.have_tls)
    return false;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)