/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "recog.h"
#include "addresses.h"
#include "rtl-iter.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, scalar_int_mode,
                                                   const_rtx, machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, scalar_int_mode,
                                             const_rtx, machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, scalar_int_mode,
                                                const_rtx, machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, scalar_int_mode,
                                          const_rtx, machine_mode,
                                          unsigned int);

rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep [MAX_MODE_INT + 1][MAX_MODE_INT + 1];
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  value_type *base,
                                                  size_t i, value_type x)
{
  if (base == array.stack)
    {
      if (i < LOCAL_ELEMS)
        {
          base[i] = x;
          return base;
        }
      gcc_checking_assert (i == LOCAL_ELEMS);
      /* A previous iteration might also have moved from the stack to the
         heap, in which case the heap array will already be big enough.  */
      if (vec_safe_length (array.heap) <= i)
        vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }
  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}
/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    value_type *base,
                                                    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
         we're likely to want most.  It also allows for the SEQUENCE
         code below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
        if (format[i] == 'e')
          {
            value_type subx = T::get_value (x->u.fld[i].rt_rtx);
            if (__builtin_expect (end < LOCAL_ELEMS, true))
              base[end++] = subx;
            else
              base = add_single_to_queue (array, base, end++, subx);
          }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
        {
          value_type subx = T::get_value (x->u.fld[i].rt_rtx);
          if (__builtin_expect (end < LOCAL_ELEMS, true))
            base[end++] = subx;
          else
            base = add_single_to_queue (array, base, end++, subx);
        }
      else if (format[i] == 'E')
        {
          unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
          rtx *vec = x->u.fld[i].rt_rtvec->elem;
          if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
            for (unsigned int j = 0; j < length; j++)
              base[end++] = T::get_value (vec[j]);
          else
            for (unsigned int j = 0; j < length; j++)
              base = add_single_to_queue (array, base, end++,
                                          T::get_value (vec[j]));
          if (code == SEQUENCE && end == length)
            /* If the subrtxes of the sequence fill the entire array then
               we know that no other parts of a containing insn are queued.
               The caller is therefore iterating over the sequence as a
               PATTERN (...), so we also want the patterns of the
               subinstructions.  */
            for (unsigned int j = 0; j < length; j++)
              {
                typename T::rtx_type x = T::get_rtx (base[j]);
                if (INSN_P (x))
                  base[j] = T::get_value (PATTERN (x));
              }
        }
  return end - orig_end;
}

template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;
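
/* Illustrative sketch (not part of the original file): the iterator
   classes instantiated above are normally used through the
   FOR_EACH_SUBRTX macros from rtl-iter.h; contains_mem_rtx_p further
   below is a real in-file example.  A hypothetical helper that counts
   MEM subrtxes would look like:

     static int
     count_mem_subrtxes (const_rtx x)
     {
       int n = 0;
       subrtx_iterator::array_type array;
       FOR_EACH_SUBRTX (iter, array, x, ALL)
         if (MEM_P (*iter))
           n++;
       return n;
     }

   `count_mem_subrtxes' is an assumed name used only for illustration.  */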
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
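
/* Illustrative note (not part of the original file): under these rules a
   volatile memory reference such as

     (mem/v:SI (reg/f:SI fp))

   is unstable (reading it twice may yield different values), whereas a
   read-only MEM addressed off the frame pointer is considered stable
   within the function.  */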
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

int
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
/* Compute an approximation for the offset between the register
   FROM and TO for the current function, as it was at the start
   of the routine.  */

static HOST_WIDE_INT
get_initial_register_offset (int from, int to)
{
  static const struct elim_table_t
    {
      const int from;
      const int to;
    } table[] = ELIMINABLE_REGS;
  HOST_WIDE_INT offset1, offset2;
  unsigned int i, j;

  if (to == from)
    return 0;

  /* It is not safe to call INITIAL_ELIMINATION_OFFSET
     before the reload pass.  We need to give at least
     an estimation for the resulting frame size.  */
  if (! reload_completed)
    {
      offset1 = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset1 = - offset1;
#endif
      if (to == STACK_POINTER_REGNUM)
        return offset1;
      else if (from == STACK_POINTER_REGNUM)
        return - offset1;
      else
        return 0;
    }

  for (i = 0; i < ARRAY_SIZE (table); i++)
      if (table[i].from == from)
        {
          if (table[i].to == to)
            {
              INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                          offset1);
              return offset1;
            }
          for (j = 0; j < ARRAY_SIZE (table); j++)
            {
              if (table[j].to == to
                  && table[j].from == table[i].to)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return offset1 + offset2;
                }
              if (table[j].from == to
                  && table[j].to == table[i].to)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return offset1 - offset2;
                }
            }
        }
      else if (table[i].to == from)
        {
          if (table[i].from == to)
            {
              INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                          offset1);
              return - offset1;
            }
          for (j = 0; j < ARRAY_SIZE (table); j++)
            {
              if (table[j].to == to
                  && table[j].from == table[i].from)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return - offset1 + offset2;
                }
              if (table[j].from == to
                  && table[j].to == table[i].from)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return - offset1 - offset2;
                }
            }
        }

  /* If the requested register combination was not found,
     try a different more simple combination.  */
  if (from == ARG_POINTER_REGNUM)
    return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
  else if (to == ARG_POINTER_REGNUM)
    return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
  else if (from == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
  else if (to == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
  else
    return 0;
}
/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x) && !SYMBOL_REF_FUNCTION_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
                         ? tree_to_shwi (DECL_SIZE_UNIT (decl))
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        {
#ifdef RED_ZONE_SIZE
          HOST_WIDE_INT red_zone_size = RED_ZONE_SIZE;
#else
          HOST_WIDE_INT red_zone_size = 0;
#endif
          HOST_WIDE_INT stack_boundary = PREFERRED_STACK_BOUNDARY
                                         / BITS_PER_UNIT;
          HOST_WIDE_INT low_bound, high_bound;

          if (size == 0)
            size = GET_MODE_SIZE (mode);

          if (x == frame_pointer_rtx)
            {
              if (FRAME_GROWS_DOWNWARD)
                {
                  high_bound = STARTING_FRAME_OFFSET;
                  low_bound  = high_bound - get_frame_size ();
                }
              else
                {
                  low_bound  = STARTING_FRAME_OFFSET;
                  high_bound = low_bound + get_frame_size ();
                }
            }
          else if (x == hard_frame_pointer_rtx)
            {
              HOST_WIDE_INT sp_offset
                = get_initial_register_offset (STACK_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);
              HOST_WIDE_INT ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = sp_offset - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = sp_offset + red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else if (x == stack_pointer_rtx)
            {
              HOST_WIDE_INT ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               STACK_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else
            {
              /* We assume that accesses are safe to at least the
                 next stack boundary.
                 Examples are varargs and __builtin_return_address.  */
#if ARGS_GROW_DOWNWARD
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + stack_boundary;
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - crtl->args.size - stack_boundary;
#else
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - stack_boundary;
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + crtl->args.size + stack_boundary;
#endif
            }

          if (offset >= low_bound && offset <= high_bound - size)
            return 0;
          return 1;
        }
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a const unspec without offset.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && GET_CODE (XEXP (x, 1)) == CONST
          && GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
          && offset == 0)
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the case above, it can cause a trap.  */
  return 1;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
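
/* Illustrative sketch (not part of the original file): a pass that wants
   to hoist a load typically guards the motion with this predicate, e.g.

     if (MEM_P (x) && !rtx_addr_can_trap_p (XEXP (x, 0)))
       ... the load of X can be moved without introducing a trap ...

   Passing size 0 and VOIDmode, as the wrapper above does, makes the
   check independent of the access size.  */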
/* Return true if X contains a MEM subrtx.  */

bool
contains_mem_rtx_p (rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (MEM_P (*iter))
      return true;

  return false;
}
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the case above, might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

int
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
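
/* Illustrative sketch (not part of the original file): given
   X = (const (plus (symbol_ref "foo") (const_int 8))),

     rtx base, offset;
     split_const (x, &base, &offset);

   leaves BASE = (symbol_ref "foo") and OFFSET = (const_int 8); for any
   other X, BASE is X itself and OFFSET is const0_rtx.  */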
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == label_ref_label (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

      /* These are kept unique for a given value.  */
    CASE_CONST_ANY:
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
                    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
                   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Return true if REG is set or clobbered inside INSN.  */

bool
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
        if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
          return true;

      return false;
    }

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return true;

  /* There are no REG_INC notes for SP autoinc.  */
  if (reg == stack_pointer_rtx && INSN_P (insn))
    {
      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
        {
          rtx mem = *iter;
          if (mem
              && MEM_P (mem)
              && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
            {
              if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
                return true;
              iter.skip_subrtxes ();
            }
        }
    }

  return set_of (reg, insn) != NULL_RTX;
}
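
/* Illustrative sketch (not part of the original file): a typical caller
   checks whether moving an insn across OTHER would change the value of a
   register it uses, e.g.

     if (reg_set_p (reg, other))
       ... OTHER may overwrite REG, so the motion is unsafe ...

   Note that for CALL_P insns this conservatively includes registers in
   regs_invalidated_by_call.  */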
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
/* Return true if X is a SUBREG and if storing a value to X would
   preserve some of its SUBREG_REG.  For example, on a normal 32-bit
   target, using a SUBREG to store to one half of a DImode REG would
   preserve the other half.  */

bool
read_modify_subreg_p (const_rtx x)
{
  unsigned int isize, osize;
  if (GET_CODE (x) != SUBREG)
    return false;
  isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
  osize = GET_MODE_SIZE (GET_MODE (x));
  return isize > osize
         && isize > REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
}
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
/* Add all hard registers in X to *PSET.  */
static void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
        add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}
/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *)data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}
/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
        IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}
/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In common case
                 only single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach set first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL_RTX;
              break;

            default:
              return NULL_RTX;
            }
        }
    }
  return set;
}
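
/* Illustrative sketch (not part of the original file): most callers go
   through the single_set wrapper (an inline in rtl.h that dispatches to
   single_set_2 for PARALLEL patterns), e.g.

     rtx set = single_set (insn);
     if (set)
       ... analyze SET_SRC (set) and SET_DEST (set) ...

   which yields the one live SET of INSN, ignoring CLOBBERs, USEs and
   dead parallel SETs as described above.  */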
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      int c0 = INTVAL (XVECEXP (par, 0, 0));
      HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
          return 0;
      return
        simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                               offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
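
/* Illustrative note (not part of the original file): the simplest case
   recognized above is a register copied to itself,

     (set (reg:SI 100) (reg:SI 100))

   but equal MEMs without side effects and ZERO_EXTRACTs that write a
   value back over itself are treated as no-ops as well.  */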
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const rtx_insn *insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

bool
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return false;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
           || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
               && x_regno == ARG_POINTER_REGNUM)
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return true;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return true;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return false;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return true;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return true;
        }
    }
  return false;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
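
/* Illustrative sketch (not part of the original file): a callback for
   note_stores receives each stored-to destination, e.g.

     static void
     count_stores (rtx dest, const_rtx set ATTRIBUTE_UNUSED, void *data)
     {
       if (REG_P (dest))
         (*(int *) data)++;
     }

     int n = 0;
     note_stores (PATTERN (insn), count_stores, &n);

   `count_stores' is an assumed name used only for illustration; compare
   record_hard_reg_sets above for a real in-file callback.  */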
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const rtx_insn *insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register, and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}
/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
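
/* Illustrative sketch (not part of the original file): a REG_EQUAL note
   attached to an insn can be fetched and inspected with

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note)
       ... XEXP (note, 0) is the equivalent expression ...

   Passing NULL_RTX as DATUM matches any note of the given kind.  */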
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const rtx_insn *insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}
/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}
/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));
  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
    case REG_TM:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
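
/* Illustrative sketch (not part of the original file): recording that the
   value computed by INSN is known to equal some expression EQ:

     add_reg_note (insn, REG_EQUAL, eq);

   prepends a REG_EQUAL note to REG_NOTES (insn) via alloc_reg_note.  */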
/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
                                       datum, REG_NOTES (insn));
}
/* Add a register note like NOTE to INSN.  */

void
add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
{
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
  else
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
}
/* Duplicate NOTE and return the copy.  */

rtx
duplicate_reg_note (rtx note)
{
  reg_note kind = REG_NOTE_KIND (note);

  if (GET_CODE (note) == INT_LIST)
    return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
  else if (GET_CODE (note) == EXPR_LIST)
    return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
  else
    return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
}
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx_insn *insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (insn);
      break;
    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
   Return true if any note has been removed.  */

bool
remove_reg_equal_equiv_notes (rtx_insn *insn)
{
  rtx *loc;
  bool ret = false;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        {
          *loc = XEXP (*loc, 1);
          ret = true;
        }
      else
        loc = &XEXP (*loc, 1);
    }
  return ret;
}
/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  if (!df)
    return;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx_insn *insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

int
in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return 1;

  return 0;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
{
  rtx_expr_list *temp = *listp;
  rtx_expr_list *prev = NULL;

  while (temp)
    {
      if (node == temp->element ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
{
  rtx_insn_list *temp = *listp;
  rtx_insn_list *prev = NULL;

  while (temp)
    {
      if (node == temp->insn ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state instructions, and thus no
   instructions or register uses should be moved or combined across them.
   This includes only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case CALL:
    case MEM:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_insn_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_insn_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Nonzero if X contains any volatile memory references
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_refs_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_refs_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Similar to above, except that it also rejects register pre- and post-
   incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case VAR_LOCATION:
      return 0;

    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
         when some combination can't be done.  If we see one, don't think
         that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
    case CALL:
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (side_effects_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (side_effects_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero value means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);
  switch (code)
    {
      /* Handle these cases quickly.  */
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST:
    case PC:
    case CC0:
    case REG:
    case SCRATCH:
      return 0;

    case UNSPEC:
      return targetm.unspec_may_trap_p (x, flags);

    case UNSPEC_VOLATILE:
    case ASM_INPUT:
    case TRAP_IF:
      return 1;

    case ASM_OPERANDS:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
	  && MEM_VOLATILE_P (x)
	  && XEXP (x, 0) == stack_pointer_rtx)
	return 1;
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
	     reference; moving it out of context such as when moving code
	     when optimizing, might cause its address to become invalid.  */
	  code_changed
	  || !MEM_NOTRAP_P (x))
	{
	  HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
	  return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
					GET_MODE (x), code_changed);
	}
      return 0;

      /* Division by a non-constant might trap.  */
    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      if (HONOR_SNANS (x))
	return 1;
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
	return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
	return 1;
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
	 certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
	break;
      /* ??? There is no machine independent way to check for tests that trap
	 when COMPARE is used, though many targets do make this distinction.
	 For instance, sparc uses CCFPE for compares which generate exceptions
	 and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (x))
	return 1;
      /* But often the compare has some CC mode, so check operand
	 modes as well.  */
      if (HONOR_NANS (XEXP (x, 0))
	  || HONOR_NANS (XEXP (x, 1)))
	return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (x))
	return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (XEXP (x, 0))
	  || HONOR_SNANS (XEXP (x, 1)))
	return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
	return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
	return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (may_trap_p_1 (XEXP (x, i), flags))
	    return 1;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (may_trap_p_1 (XVECEXP (x, i, j), flags))
	      return 1;
	}
    }
  return 0;
}

/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}

/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
	if (s->d == 1)
	  return *s->ip;
	else
	  return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      ((set (reg:SI) (mem:SI (%fp - 7)))
       ...
       (set (reg:QI) (mem:QI (%fp - 7))))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}

/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

int
inequality_comparisons_p (const_rtx x)
{
  const char *fmt;
  int len, i;
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case REG:
    case SCRATCH:
    case PC:
    case CC0:
    CASE_CONST_ANY:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      return 0;

    case LT:
    case LTU:
    case GT:
    case GTU:
    case LE:
    case LEU:
    case GE:
    case GEU:
      return 1;

    default:
      break;
    }

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
	{
	  if (inequality_comparisons_p (XEXP (x, i)))
	    return 1;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (inequality_comparisons_p (XVECEXP (x, i, j)))
	      return 1;
	}
    }

  return 0;
}

/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.

   ALL_REGS is true if we want to replace all REGs equal to FROM, not just
   those pointer-equal ones.  */

rtx
replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
{
  int i, j;
  const char *fmt;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (all_regs
      && REG_P (x)
      && REG_P (from)
      && REGNO (x) == REGNO (from))
    {
      gcc_assert (GET_MODE (x) == GET_MODE (from));
      return to;
    }
  else if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_subreg (GET_MODE (x), new_rtx,
			       GET_MODE (SUBREG_REG (x)),
			       SUBREG_BYTE (x));
	  gcc_assert (x);
	}
      else
	SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					new_rtx, GET_MODE (XEXP (x, 0)));
	  gcc_assert (x);
	}
      else
	XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
					   from, to, all_regs);
    }

  return x;
}

/* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
   the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */

void
replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
{
  /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
  rtx x = *loc;
  if (JUMP_TABLE_DATA_P (x))
    {
      x = PATTERN (x);
      rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
      int len = GET_NUM_ELEM (vec);
      for (int i = 0; i < len; ++i)
	{
	  rtx ref = RTVEC_ELT (vec, i);
	  if (XEXP (ref, 0) == old_label)
	    {
	      XEXP (ref, 0) = new_label;
	      if (update_label_nuses)
		{
		  ++LABEL_NUSES (new_label);
		  --LABEL_NUSES (old_label);
		}
	    }
	}
      return;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by the iterator because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
    JUMP_LABEL (x) = new_label;

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
    {
      rtx *loc = *iter;
      if (rtx x = *loc)
	{
	  if (GET_CODE (x) == SYMBOL_REF
	      && CONSTANT_POOL_ADDRESS_P (x))
	    {
	      rtx c = get_pool_constant (x);
	      if (rtx_referenced_p (old_label, c))
		{
		  /* Create a copy of constant C; replace the label inside
		     but do not update LABEL_NUSES because uses in constant pool
		     are not counted.  */
		  rtx new_c = copy_rtx (c);
		  replace_label (&new_c, old_label, new_label, false);

		  /* Add the new constant NEW_C to constant pool and replace
		     the old reference to constant by new reference.  */
		  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
		  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
		}
	    }

	  if ((GET_CODE (x) == LABEL_REF
	       || GET_CODE (x) == INSN_LIST)
	      && XEXP (x, 0) == old_label)
	    {
	      XEXP (x, 0) = new_label;
	      if (update_label_nuses)
		{
		  ++LABEL_NUSES (new_label);
		  --LABEL_NUSES (old_label);
		}
	    }
	}
    }
}

void
replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
		       rtx_insn *new_label, bool update_label_nuses)
{
  rtx insn_as_rtx = insn;
  replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
  gcc_checking_assert (insn_as_rtx == insn);
}

/* Return true if X is referenced in BODY.  */

bool
rtx_referenced_p (const_rtx x, const_rtx body)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, body, ALL)
    if (const_rtx y = *iter)
      {
	/* Check if a label_ref Y refers to label X.  */
	if (GET_CODE (y) == LABEL_REF
	    && LABEL_P (x)
	    && label_ref_label (y) == x)
	  return true;

	if (rtx_equal_p (x, y))
	  return true;

	/* If Y is a reference to pool constant traverse the constant.  */
	if (GET_CODE (y) == SYMBOL_REF
	    && CONSTANT_POOL_ADDRESS_P (y))
	  iter.substitute (get_pool_constant (y));
      }
  return false;
}

/* If INSN is a tablejump return true and store the label (before jump table)
   to *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
	     rtx_jump_table_data **tablep)
{
  if (!JUMP_P (insn))
    return false;

  rtx target = JUMP_LABEL (insn);
  if (target == NULL_RTX || ANY_RETURN_P (target))
    return false;

  rtx_insn *label = as_a <rtx_insn *> (target);
  rtx_insn *table = next_insn (label);
  if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
    return false;

  if (labelp)
    *labelp = label;
  if (tablep)
    *tablep = as_a <rtx_jump_table_data *> (table);
  return true;
}

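/* For reference, the insn stream shape that tablejump_p recognizes is
   roughly the following (schematic only; the jump's address calculation
   varies by target):

       (jump_insn (set (pc) (mem (plus (reg) (label_ref L)))))
       (code_label L)
       (jump_table_data (addr_vec [(label_ref L1) (label_ref L2) ...]))

   i.e. JUMP_LABEL of the jump points at the code_label that immediately
   precedes the jump_table_data insn.  */
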
/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
		&& CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
	      || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
	  && computed_jump_p_1 (XEXP (x, i)))
	return 1;

      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (computed_jump_p_1 (XVECEXP (x, i, j)))
	    return 1;
    }

  return 0;
}

/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const rtx_insn *insn)
{
  int i;
  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
	return 0;

      if (GET_CODE (pat) == PARALLEL)
	{
	  int len = XVECLEN (pat, 0);
	  int has_use_labelref = 0;

	  for (i = len - 1; i >= 0; i--)
	    if (GET_CODE (XVECEXP (pat, 0, i)) == USE
		&& (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
		    == LABEL_REF))
	      {
		has_use_labelref = 1;
		break;
	      }

	  if (! has_use_labelref)
	    for (i = len - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (pat, 0, i)) == SET
		  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
		  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
		return 1;
	}
      else if (GET_CODE (pat) == SET
	       && SET_DEST (pat) == pc_rtx
	       && computed_jump_p_1 (SET_SRC (pat)))
	return 1;
    }
  return 0;
}

/* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
   the equivalent add insn and pass the result to FN, using DATA as the
   final argument.  */

static int
for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
{
  rtx x = XEXP (mem, 0);
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
	int size = GET_MODE_SIZE (GET_MODE (mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (size, GET_MODE (r1));
	return fn (mem, x, r1, r1, c, data);
      }

    case PRE_DEC:
    case POST_DEC:
      {
	int size = GET_MODE_SIZE (GET_MODE (mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (-size, GET_MODE (r1));
	return fn (mem, x, r1, r1, c, data);
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
	rtx r1 = XEXP (x, 0);
	rtx add = XEXP (x, 1);
	return fn (mem, x, r1, add, NULL, data);
      }

    default:
      gcc_unreachable ();
    }
}

/* Traverse X looking for MEMs that have autoinc addresses.
   For each such autoinc operation found, call FN, passing it
   the innermost enclosing MEM, the operation itself, the RTX modified
   by the operation, two RTXs (the second may be NULL) that, once
   added, represent the value to be held by the modified RTX
   afterwards, and DATA.  FN is to return 0 to continue the
   traversal or any other value to have it returned to the caller of
   for_each_inc_dec.  */

int
for_each_inc_dec (rtx x,
		  for_each_inc_dec_fn fn,
		  void *data)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx mem = *iter;
      if (mem
	  && MEM_P (mem)
	  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
	{
	  int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
	  if (res != 0)
	    return res;
	  iter.skip_subrtxes ();
	}
    }
  return 0;
}

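/* A minimal usage sketch for for_each_inc_dec; the callback name and the
   counting logic are illustrative only, not part of this file:

       static int
       count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
		      rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
		      rtx srcoff ATTRIBUTE_UNUSED, void *data)
       {
	 ++*(int *) data;	(count one autoinc operation)
	 return 0;		(zero means: keep traversing)
       }

       int n = 0;
       for_each_inc_dec (PATTERN (insn), count_autoinc, &n);  */
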
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if ((tem = regno_use_in (regno, XEXP (x, i))))
	    return tem;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
	    return tem;
    }

  return NULL_RTX;
}

/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The more
   positive the value, the stronger the preference for being the first
   operand.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always become the second operand.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -10;
  if (code == CONST_WIDE_INT)
    return -9;
  if (code == CONST_DOUBLE)
    return -8;
  if (code == CONST_FIXED)
    return -8;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
	return -7;
      if (code == CONST_WIDE_INT)
	return -6;
      if (code == CONST_DOUBLE)
	return -5;
      if (code == CONST_FIXED)
	return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
	return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
	 of objects.  Prefer pointer objects over non-pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
	  || (MEM_P (op) && MEM_POINTER (op)))
	return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
	 This helps to make things linear.  In particular,
	 (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
	 operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
	 is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
	return 1;
      /* FALLTHRU */

    default:
      return 0;
    }
}

/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

bool
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
	  < commutative_operand_precedence (y));
}

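/* For example, swap_commutative_operands_p ((const_int 3), (reg)) is
   true, because constants have the lowest precedence; callers such as
   simplify_gen_binary then swap the operands so that the canonical form
   (plus (reg) (const_int 3)) is produced rather than
   (plus (const_int 3) (reg)).  (Illustrative; the swapping itself is
   done by the callers, not by this predicate.)  */
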
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */

int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
	return 1;
      break;
    default:
      break;
    }
  return 0;
}

/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */

int
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return 0;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
	    return 1;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (in, i) - 1; j >= 0; j--)
	  if (loc == &XVECEXP (in, i, j)
	      || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
	    return 1;
    }
  return 0;
}

/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

unsigned int
subreg_lsb_1 (machine_mode outer_mode,
	      machine_mode inner_mode,
	      unsigned int subreg_byte)
{
  unsigned int bitpos;
  unsigned int byte;
  unsigned int word;

  /* A paradoxical subreg begins at bit position 0.  */
  if (paradoxical_subreg_p (outer_mode, inner_mode))
    return 0;

  if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    /* If the subreg crosses a word boundary ensure that
       it also begins and ends on a word boundary.  */
    gcc_assert (!((subreg_byte % UNITS_PER_WORD
		   + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
		  && (subreg_byte % UNITS_PER_WORD
		      || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));

  if (WORDS_BIG_ENDIAN)
    word = (GET_MODE_SIZE (inner_mode)
	    - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
  else
    word = subreg_byte / UNITS_PER_WORD;
  bitpos = word * BITS_PER_WORD;

  if (BYTES_BIG_ENDIAN)
    byte = (GET_MODE_SIZE (inner_mode)
	    - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
  else
    byte = subreg_byte % UNITS_PER_WORD;
  bitpos += byte * BITS_PER_UNIT;

  return bitpos;
}

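/* Worked example (assuming 8-bit units and 4-byte words): for
   (subreg:SI (reg:DI ...) 4) on a little-endian target, word = 1 and
   byte = 0, so the subreg begins at bit 32; on a target where both
   bytes and words are big-endian, the same SUBREG_BYTE selects the
   low-order half, giving bit 0.  (Illustrative numbers only.)  */
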
/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

unsigned int
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
		       SUBREG_BYTE (x));
}

/* Return the subreg byte offset for a subreg whose outer value has
   OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
   there are LSB_SHIFT *bits* between the lsb of the outer value and the
   lsb of the inner value.  This is the inverse of the calculation
   performed by subreg_lsb_1 (which converts byte offsets to bit shifts).  */

unsigned int
subreg_size_offset_from_lsb (unsigned int outer_bytes,
			     unsigned int inner_bytes,
			     unsigned int lsb_shift)
{
  /* A paradoxical subreg begins at bit position 0.  */
  if (outer_bytes > inner_bytes)
    {
      gcc_checking_assert (lsb_shift == 0);
      return 0;
    }

  gcc_assert (lsb_shift % BITS_PER_UNIT == 0);
  unsigned int lower_bytes = lsb_shift / BITS_PER_UNIT;
  unsigned int upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
  if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
    return upper_bytes;
  else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
    return lower_bytes;
  else
    {
      unsigned int lower_word_part = lower_bytes & -UNITS_PER_WORD;
      unsigned int upper_word_part = upper_bytes & -UNITS_PER_WORD;
      if (WORDS_BIG_ENDIAN)
	return upper_word_part + (lower_bytes - lower_word_part);
      else
	return lower_word_part + (upper_bytes - upper_word_part);
    }
}

/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.

   Rather than considering one particular inner register (and thus one
   particular "outer" register) in isolation, this function really uses
   XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
   function does not check whether adding INFO->offset to XREGNO gives
   a valid hard register; even if INFO->offset + XREGNO is out of range,
   there might be another register of the same type that is in range.
   Likewise it doesn't check whether targetm.hard_regno_mode_ok accepts
   the new register, since that can depend on things like whether the final
   register number is even or odd.  Callers that want to check whether
   this particular subreg can be replaced by a simple (reg ...) should
   use simplify_subreg_regno.  */

void
subreg_get_info (unsigned int xregno, machine_mode xmode,
		 unsigned int offset, machine_mode ymode,
		 struct subreg_info *info)
{
  unsigned int nregs_xmode, nregs_ymode;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  unsigned int xsize = GET_MODE_SIZE (xmode);
  unsigned int ysize = GET_MODE_SIZE (ymode);
  bool rknown = false;

  /* If the register representation of a non-scalar mode has holes in it,
     we expect the scalar units to be concatenated together, with the holes
     distributed evenly among the scalar units.  Each scalar unit must occupy
     at least one register.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      unsigned int nunits = GET_MODE_NUNITS (xmode);
      scalar_mode xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
		  == (nunits
		      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs (xregno, xmode)
		  == hard_regno_nregs (xregno, xmode_unit) * nunits);

      /* You can only ask for a SUBREG of a value with holes in the middle
	 if you don't cross the holes.  (Such a SUBREG should be done by
	 picking a different register class, or doing it in memory if
	 necessary.)  An example of a value with holes is XCmode on 32-bit
	 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
	 3 for each part, but in memory it's two 128-bit parts.
	 Padding is assumed to be at the end (not necessarily the 'high part')
	 of each unit.  */
      if ((offset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
	  && (offset / GET_MODE_SIZE (xmode_unit)
	      != ((offset + ysize - 1) / GET_MODE_SIZE (xmode_unit))))
	{
	  info->representable_p = false;
	  rknown = true;
	}
    }
  else
    nregs_xmode = hard_regno_nregs (xregno, xmode);

  nregs_ymode = hard_regno_nregs (xregno, ymode);

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown && offset == 0 && ysize > xsize)
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
	 actual hard registers than the original register, we must
	 return a negative offset so that we find the proper highpart
	 of the register.

	 We assume that the ordering of registers within a multi-register
	 value has a consistent endianness: if bytes and register words
	 have different endianness, the hard registers that make up a
	 multi-register value must be at least word-sized.  */
      if (REG_WORDS_BIG_ENDIAN)
	info->offset = (int) nregs_xmode - (int) nregs_ymode;
      else
	info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && (xsize % nregs_xmode) == 0
      && (ysize % nregs_ymode) == 0)
    {
      int regsize_xmode = xsize / nregs_xmode;
      int regsize_ymode = ysize / nregs_ymode;
      if (!rknown
	  && ((nregs_ymode > 1 && regsize_xmode > regsize_ymode)
	      || (nregs_xmode > 1 && regsize_ymode > regsize_xmode)))
	{
	  info->representable_p = false;
	  info->nregs = CEIL (ysize, regsize_xmode);
	  info->offset = offset / regsize_xmode;
	  rknown = true;
	}
      /* It's not valid to extract a subreg of mode YMODE at OFFSET that
	 would go outside of XMODE.  */
      if (!rknown && ysize + offset > xsize)
	{
	  info->representable_p = false;
	  info->nregs = nregs_ymode;
	  info->offset = offset / regsize_xmode;
	  rknown = true;
	}
      /* Quick exit for the simple and common case of extracting whole
	 subregisters from a multiregister value.  */
      /* ??? It would be better to integrate this into the code below,
	 if we can generalize the concept enough and figure out how
	 odd-sized modes can coexist with the other weird cases we support.  */
      if (!rknown
	  && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
	  && regsize_xmode == regsize_ymode
	  && (offset % regsize_ymode) == 0)
	{
	  info->representable_p = true;
	  info->nregs = nregs_ymode;
	  info->offset = offset / regsize_ymode;
	  gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
	  return;
	}
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
    {
      info->representable_p = true;
      rknown = true;

      if (offset == 0 || nregs_xmode == nregs_ymode)
	{
	  info->offset = 0;
	  info->nregs = nregs_ymode;
	  return;
	}
    }

  /* Set NUM_BLOCKS to the number of independently-representable YMODE
     values there are in (reg:XMODE XREGNO).  We can view the register
     as consisting of this number of independent "blocks", where each
     block occupies NREGS_YMODE registers and contains exactly one
     representable YMODE value.  */
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);
  unsigned int num_blocks = nregs_xmode / nregs_ymode;

  /* Calculate the number of bytes in each block.  This must always
     be exact, otherwise we don't know how to verify the constraint.
     These conditions may be relaxed but subreg_regno_offset would
     need to be redesigned.  */
  gcc_assert ((xsize % num_blocks) == 0);
  unsigned int bytes_per_block = xsize / num_blocks;

  /* Get the number of the first block that contains the subreg and the byte
     offset of the subreg from the start of that block.  */
  unsigned int block_number = offset / bytes_per_block;
  unsigned int subblock_offset = offset % bytes_per_block;

  if (!rknown)
    {
      /* Only the lowpart of each block is representable.  */
      info->representable_p
	= (subblock_offset
	   == subreg_size_lowpart_offset (ysize, bytes_per_block));
      rknown = true;
    }

  /* We assume that the ordering of registers within a multi-register
     value has a consistent endianness: if bytes and register words
     have different endianness, the hard registers that make up a
     multi-register value must be at least word-sized.  */
  if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
    /* The block number we calculated above followed memory endianness.
       Convert it to register endianness by counting back from the end.
       (Note that, because of the assumption above, each block must be
       at least word-sized.)  */
    info->offset = (num_blocks - block_number - 1) * nregs_ymode;
  else
    info->offset = block_number * nregs_ymode;
  info->nregs = nregs_ymode;
}

/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */

unsigned int
subreg_regno_offset (unsigned int xregno, machine_mode xmode,
		     unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}

/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */

bool
subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
			       unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}

/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
		       unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && !REG_CAN_CHANGE_MODE_P (xregno, xmode, ymode)
      /* We can use mode change in LRA for some transformations.  */
      && ! lra_in_progress)
    return -1;

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && xregno == FRAME_POINTER_REGNUM)
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM
      /* We should convert hard stack register in LRA if it is
	 necessary.  */
      && ! lra_in_progress)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how complex FP arguments are passed
     on IA-64 and should be fixed.  See PR target/49226.  */
  if (!targetm.hard_regno_mode_ok (yregno, ymode)
      && targetm.hard_regno_mode_ok (xregno, xmode))
    return -1;

  return (int) yregno;
}

/* Return the final regno that a subreg expression refers to.  */

unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
				     GET_MODE (subreg),
				     SUBREG_BYTE (x),
				     GET_MODE (x));
  return ret;
}

/* Return the number of registers that a subreg expression refers
   to.  */

unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}

/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */

unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
		   &info);
  return info.nregs;
}

struct parms_set_data
{
  int nregs;
  HARD_REG_SET regs;
};

/* Helper function for noticing stores to parameter registers.  */
static void
parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct parms_set_data *const d = (struct parms_set_data *) data;
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
      && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
    {
      CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
      d->nregs--;
    }
}

/* Look backward for first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */
rtx_insn *
find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
{
  struct parms_set_data parm;
  rtx p;
  rtx_insn *before, *first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
	&& REG_P (XEXP (XEXP (p, 0), 0))
	&& !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
      {
	gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

	/* We only care about registers which can hold function
	   arguments.  */
	if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
	  continue;

	SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
	parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
	 another.  Stop in that case.  */
      if (CALL_P (before))
	break;

      /* Our caller needs to either ensure that we will find all sets
	 (in case code has not been optimized yet), or take care of
	 possible labels by setting boundary to the preceding
	 CODE_LABEL.  */
      if (LABEL_P (before))
	{
	  gcc_assert (before == boundary);
	  break;
	}

      if (INSN_P (before))
	{
	  int nregs_old = parm.nregs;
	  note_stores (PATTERN (before), parms_set, &parm);
	  /* If we found something that did not set a parameter reg,
	     we're done.  Do not keep going, as that might result
	     in hoisting an insn before the setting of a pseudo
	     that is used by the hoisted insn.  */
	  if (nregs_old != parm.nregs)
	    first_set = before;
	  else
	    break;
	}
    }
  return first_set;
}

/* Return true if we should avoid inserting code between INSN and preceding
   call instruction.  */

bool
keep_with_call_p (const rtx_insn *insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
	  && fixed_regs[REGNO (SET_DEST (set))]
	  && general_operand (SET_SRC (set), VOIDmode))
	return true;
      if (REG_P (SET_SRC (set))
	  && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
	  && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	return true;
      /* There may be a stack pop just after the call and before the store
	 of the return register.  Search for the actual store when deciding
	 if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
	{
	  /* This CONST_CAST is okay because next_nonnote_insn just
	     returns its argument and we assign it to a const_rtx
	     variable.  */
	  const rtx_insn *i2
	    = next_nonnote_insn (const_cast<rtx_insn *> (insn));
	  if (i2 && keep_with_call_p (i2))
	    return true;
	}
    }
  return false;
}

/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);
  rtx_jump_table_data *table;

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &table))
    {
      rtvec vec = table->get_labels ();
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
	if (XEXP (RTVEC_ELT (vec, i), 0) == label)
	  return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}

/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   X appears as operand OPNO in an expression with code OUTER_CODE.
   SPEED specifies whether costs optimized for speed or size should
   be returned.  */

int
rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
	  int opno, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;
  int factor;

  if (x == 0)
    return 0;

  if (GET_MODE (x) != VOIDmode)
    mode = GET_MODE (x);

  /* A size N times larger than UNITS_PER_WORD likely needs N times as
     many insns, taking N times as long.  */
  factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
  if (factor == 0)
    factor = 1;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      /* Multiplication has time-complexity O(N*N), where N is the
	 number of units (translated from digits) when using
	 schoolbook long multiplication.  */
      total = factor * factor * COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      /* Similarly, complexity for schoolbook long division.  */
      total = factor * factor * COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    case SET:
      /* A SET doesn't have a mode, so let's look at the SET_DEST to get
	 the mode for the factor.  */
      mode = GET_MODE (SET_DEST (x));
      factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
      if (factor == 0)
	factor = 1;
      /* FALLTHRU */
    default:
      total = factor * COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
	 the mode, the more expensive it is.  */
      if (!targetm.modes_tieable_p (mode, GET_MODE (SUBREG_REG (x))))
	return COSTS_N_INSNS (2 + factor);
      break;

    case TRUNCATE:
      if (targetm.modes_tieable_p (mode, GET_MODE (XEXP (x, 0))))
	{
	  total = 0;
	  break;
	}
      /* FALLTHRU */
    default:
      if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
	return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), mode, code, i, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);

  return total;
}

/* Fill in the structure C with information about both speed and size rtx
   costs for X, which is operand OPNO in an expression with code OUTER.  */

void
get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
		   struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, mode, outer, opno, true);
  c->size = rtx_cost (x, mode, outer, opno, false);
}

/* Return cost of address expression X.
   Expect that X is a properly formed address reference.

   The SPEED parameter specifies whether costs optimized for speed
   or size should be returned.  */

int
address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for cost of various unusual addresses, such as operands
     of push instruction.  It is not worthwhile to complicate writing
     of the target hook by such cases.  */

  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, mode, as, speed);
}

/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
{
  return rtx_cost (x, Pmode, MEM, 0, speed);
}

unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, machine_mode mode)
{
  if (mode == VOIDmode)
    mode = GET_MODE (x);
  scalar_int_mode int_mode;
  if (!is_a <scalar_int_mode> (mode, &int_mode))
    return GET_MODE_MASK (mode);
  return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
}

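/* Two small examples (illustrative): nonzero_bits of
   (zero_extend:SI (reg:QI ...)) in SImode is 0xff, since only the low
   eight bits can be set; nonzero_bits of (and:SI (reg:SI ...)
   (const_int 6)) is 6, since the AND mask bounds the result.  */
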
unsigned int
num_sign_bit_copies (const_rtx x, machine_mode mode)
{
  if (mode == VOIDmode)
    mode = GET_MODE (x);
  scalar_int_mode int_mode;
  if (!is_a <scalar_int_mode> (mode, &int_mode))
    return 1;
  return cached_num_sign_bit_copies (x, int_mode, NULL_RTX, VOIDmode, 0);
}

/* Return true if nonzero_bits1 might recurse into both operands
   of X.  */

static inline bool
nonzero_bits_binary_arith_p (const_rtx x)
{
  if (!ARITHMETIC_P (x))
    return false;
  switch (GET_CODE (x))
    {
    case AND:
    case XOR:
    case IOR:
    case UMIN:
    case UMAX:
    case SMIN:
    case SMAX:
    case PLUS:
    case MINUS:
    case MULT:
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      return true;
    default:
      return false;
    }
}

/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, scalar_int_mode mode, const_rtx known_x,
		     machine_mode known_mode,
		     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (nonzero_bits_binary_arith_p (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));

      /* Check the second level.  */
      if (nonzero_bits_binary_arith_p (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return nonzero_bits1 (x, mode, x1, mode,
			      cached_nonzero_bits (x1, mode, known_x,
						   known_mode, known_ret));

      if (nonzero_bits_binary_arith_p (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}

/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior

/* Given an expression, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
   an arithmetic operation, we can do better.  */

static unsigned HOST_WIDE_INT
nonzero_bits1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
	       machine_mode known_mode,
	       unsigned HOST_WIDE_INT known_ret)
{
  unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
  unsigned HOST_WIDE_INT inner_nz;
  enum rtx_code code;
  machine_mode inner_mode;
  scalar_int_mode xmode;

  unsigned int mode_width = GET_MODE_PRECISION (mode);

  if (CONST_INT_P (x))
    {
      if (SHORT_IMMEDIATES_SIGN_EXTEND
	  && INTVAL (x) > 0
	  && mode_width < BITS_PER_WORD
	  && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1))) != 0)
	return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);

      return UINTVAL (x);
    }

  if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
    return nonzero;
  unsigned int xmode_width = GET_MODE_PRECISION (xmode);

  /* If X is wider than MODE, use its mode instead.  */
  if (xmode_width > mode_width)
    {
      mode = xmode;
      nonzero = GET_MODE_MASK (mode);
      mode_width = xmode_width;
    }

  if (mode_width > HOST_BITS_PER_WIDE_INT)
    /* Our only callers in this case look for single bit values.  So
       just return the mode mask.  Those tests will then be false.  */
    return nonzero;

  /* If MODE is wider than X, but both are a single word for both the host
     and target machines, we can compute this from which bits of the
     object might be nonzero in its own mode, taking into account the fact
     that on many CISC machines, accessing an object in a wider mode
     causes the high-order bits to become undefined.  So they are
     not known to be zero.  */

  if (!WORD_REGISTER_OPERATIONS
      && mode_width > xmode_width
      && xmode_width <= BITS_PER_WORD
      && xmode_width <= HOST_BITS_PER_WIDE_INT)
    {
      nonzero &= cached_nonzero_bits (x, xmode,
				      known_x, known_mode, known_ret);
      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode);
      return nonzero;
    }

  /* Please keep nonzero_bits_binary_arith_p above in sync with
     the code in the switch below.  */
  code = GET_CODE (x);
  switch (code)
    {
    case REG:
#if defined(POINTERS_EXTEND_UNSIGNED)
      /* If pointers extend unsigned and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be zero.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED
	  && xmode == Pmode
	  && REG_POINTER (x)
	  && !targetm.have_ptr_extend ())
	nonzero &= GET_MODE_MASK (ptr_mode);
#endif

      /* Include declared information about alignment of pointers.  */
      /* ??? We don't properly preserve REG_POINTER changes across
	 pointer-to-integer casts, so we can't trust it except for
	 things that we know must be pointers.  See execute/960116-1.c.  */
      if ((x == stack_pointer_rtx
	   || x == frame_pointer_rtx
	   || x == arg_pointer_rtx)
	  && REGNO_POINTER_ALIGN (REGNO (x)))
	{
	  unsigned HOST_WIDE_INT alignment
	    = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;

#ifdef PUSH_ROUNDING
	  /* If PUSH_ROUNDING is defined, it is possible for the
	     stack to be momentarily aligned only to that amount,
	     so we pick the least alignment.  */
	  if (x == stack_pointer_rtx && PUSH_ARGS)
	    alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
			     alignment);
#endif

	  nonzero &= ~(alignment - 1);
	}

      {
	unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
	rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, xmode, mode,
						  &nonzero_for_hook);

	if (new_rtx)
	  nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
						   known_mode, known_ret);

	return nonzero_for_hook;
      }

    case MEM:
      /* In many, if not most, RISC machines, reading a byte from memory
	 zeros the rest of the register.  Noticing that fact saves a lot
	 of extra zero-extends.  */
      if (load_extend_op (xmode) == ZERO_EXTEND)
	nonzero &= GET_MODE_MASK (xmode);
      break;

    case EQ:  case NE:
    case UNEQ:  case LTGT:
    case GT:  case GTU:  case UNGT:
    case LT:  case LTU:  case UNLT:
    case GE:  case GEU:  case UNGE:
    case LE:  case LEU:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If this produces an integer result, we know which bits are set.
	 Code here used to clear bits outside the mode of X, but that is
	 now done above.  */
      /* Mind that MODE is the mode the caller wants to look at this
	 operation in, and not the actual operation mode.  We can wind
	 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
	 that describes the results of a vector compare.  */
      if (GET_MODE_CLASS (xmode) == MODE_INT
	  && mode_width <= HOST_BITS_PER_WIDE_INT)
	nonzero = STORE_FLAG_VALUE;
      break;

    case NEG:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
	 and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
	nonzero = 1;
#endif

      if (xmode_width < mode_width)
	nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode));
      break;

    case ABS:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
	 and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
	nonzero = 1;
#endif
      break;

    case TRUNCATE:
      nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
				       known_x, known_mode, known_ret)
		  & GET_MODE_MASK (mode));
      break;

    case ZERO_EXTEND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
	nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
      break;

    case SIGN_EXTEND:
      /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
	 Otherwise, show all the bits in the outer mode but not the inner
	 may be nonzero.  */
      inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
	{
	  inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
	  if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
	    inner_nz |= (GET_MODE_MASK (mode)
			 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
	}

      nonzero &= inner_nz;
      break;

    case AND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret)
		 & cached_nonzero_bits (XEXP (x, 1), mode,
					known_x, known_mode, known_ret);
      break;

    case XOR:   case IOR:
    case UMIN:  case UMAX:  case SMIN:  case SMAX:
      {
	unsigned HOST_WIDE_INT nonzero0
	  = cached_nonzero_bits (XEXP (x, 0), mode,
				 known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero0) != nonzero)
	  nonzero &= nonzero0
		     | cached_nonzero_bits (XEXP (x, 1), mode,
					    known_x, known_mode, known_ret);
      }
      break;

    case PLUS:  case MINUS:
    case MULT:
    case DIV:   case UDIV:
    case MOD:   case UMOD:
      /* We can apply the rules of arithmetic to compute the number of
	 high- and low-order zero bits of these operations.  We start by
	 computing the width (position of the highest-order nonzero bit)
	 and the number of low-order zero bits for each value.  */
      {
	unsigned HOST_WIDE_INT nz0
	  = cached_nonzero_bits (XEXP (x, 0), mode,
				 known_x, known_mode, known_ret);
	unsigned HOST_WIDE_INT nz1
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);
	int sign_index = xmode_width - 1;
	int width0 = floor_log2 (nz0) + 1;
	int width1 = floor_log2 (nz1) + 1;
	int low0 = ctz_or_zero (nz0);
	int low1 = ctz_or_zero (nz1);
	unsigned HOST_WIDE_INT op0_maybe_minusp
	  = nz0 & (HOST_WIDE_INT_1U << sign_index);
	unsigned HOST_WIDE_INT op1_maybe_minusp
	  = nz1 & (HOST_WIDE_INT_1U << sign_index);
	unsigned int result_width = mode_width;
	int result_low = 0;

	switch (code)
	  {
	  case PLUS:
	    result_width = MAX (width0, width1) + 1;
	    result_low = MIN (low0, low1);
	    break;
	  case MINUS:
	    result_low = MIN (low0, low1);
	    break;
	  case MULT:
	    result_width = width0 + width1;
	    result_low = low0 + low1;
	    break;
	  case DIV:
	    if (width1 == 0)
	      break;
	    if (!op0_maybe_minusp && !op1_maybe_minusp)
	      result_width = width0;
	    break;
	  case UDIV:
	    if (width1 == 0)
	      break;
	    result_width = width0;
	    break;
	  case MOD:
	    if (width1 == 0)
	      break;
	    if (!op0_maybe_minusp && !op1_maybe_minusp)
	      result_width = MIN (width0, width1);
	    result_low = MIN (low0, low1);
	    break;
	  case UMOD:
	    if (width1 == 0)
	      break;
	    result_width = MIN (width0, width1);
	    result_low = MIN (low0, low1);
	    break;
	  default:
	    gcc_unreachable ();
	  }

	if (result_width < mode_width)
	  nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;

	if (result_low > 0)
	  nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
      }
      break;

    case ZERO_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
	nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
      break;

    case SUBREG:
      /* If this is a SUBREG formed for a promoted variable that has
	 been zero-extended, we know that at least the high-order bits
	 are zero, though others might be too.  */
      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
	nonzero = GET_MODE_MASK (xmode)
		  & cached_nonzero_bits (SUBREG_REG (x), xmode,
					 known_x, known_mode, known_ret);

      /* If the inner mode is a single word for both the host and target
	 machines, we can compute this from which bits of the inner
	 object might be nonzero.  */
      inner_mode = GET_MODE (SUBREG_REG (x));
      if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT)
	{
	  nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
					  known_x, known_mode, known_ret);

	  /* On many CISC machines, accessing an object in a wider mode
	     causes the high-order bits to become undefined.  So they are
	     not known to be zero.  */
	  rtx_code extend_op;
	  if ((!WORD_REGISTER_OPERATIONS
	       /* If this is a typical RISC machine, we only have to worry
		  about the way loads are extended.  */
	       || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
		   ? val_signbit_known_set_p (inner_mode, nonzero)
		   : extend_op != ZERO_EXTEND)
	       || (!MEM_P (SUBREG_REG (x)) && !REG_P (SUBREG_REG (x))))
	      && xmode_width > GET_MODE_PRECISION (inner_mode))
	    nonzero |= (GET_MODE_MASK (xmode) & ~GET_MODE_MASK (inner_mode));
	}
      break;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* The nonzero bits are in two classes: any bits within MODE
	 that aren't in xmode are always significant.  The rest of the
	 nonzero bits are those that are significant in the operand of
	 the shift when shifted the appropriate number of bits.  This
	 shows that high-order bits are cleared by the right shift and
	 low-order bits by left shifts.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (XEXP (x, 1)) < xmode_width)
	{
	  int count = INTVAL (XEXP (x, 1));
	  unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (xmode);
	  unsigned HOST_WIDE_INT op_nonzero
	    = cached_nonzero_bits (XEXP (x, 0), mode,
				   known_x, known_mode, known_ret);
	  unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
	  unsigned HOST_WIDE_INT outer = 0;

	  if (mode_width > xmode_width)
	    outer = (op_nonzero & nonzero & ~mode_mask);

	  if (code == LSHIFTRT)
	    inner >>= count;
	  else if (code == ASHIFTRT)
	    {
	      inner >>= count;

	      /* If the sign bit may have been nonzero before the shift, we
		 need to mark all the places it could have been copied to
		 by the shift as possibly nonzero.  */
	      if (inner & (HOST_WIDE_INT_1U << (xmode_width - 1 - count)))
		inner |= (((HOST_WIDE_INT_1U << count) - 1)
			  << (xmode_width - count));
	    }
	  else if (code == ASHIFT)
	    inner <<= count;
	  else
	    inner = ((inner << (count % xmode_width)
		      | (inner >> (xmode_width - (count % xmode_width))))
		     & mode_mask);

	  nonzero &= (outer | inner);
	}
      break;

    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CLRSB:
      /* This is at most the number of bits in the mode minus 1.  */
      nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
	unsigned HOST_WIDE_INT nonzero_true
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero_true) != nonzero)
	  nonzero &= nonzero_true
		     | cached_nonzero_bits (XEXP (x, 2), mode,
					    known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}

4803 #undef cached_num_sign_bit_copies
4806 /* Return true if num_sign_bit_copies1 might recurse into both operands
4810 num_sign_bit_copies_binary_arith_p (const_rtx x
)
4812 if (!ARITHMETIC_P (x
))
4814 switch (GET_CODE (x
))
/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, scalar_int_mode mode,
			    const_rtx known_x, machine_mode known_mode,
			    unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (num_sign_bit_copies_binary_arith_p (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));

      /* Check the second level.  */
      if (num_sign_bit_copies_binary_arith_p (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x1, mode,
				cached_num_sign_bit_copies (x1, mode, known_x,
							    known_mode,
							    known_ret));

      if (num_sign_bit_copies_binary_arith_p (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}

4883 /* Return the number of bits at the high-order end of X that are known to
4884 be equal to the sign bit. X will be used in mode MODE. The returned
4885 value will always be between 1 and the number of bits in MODE. */
4888 num_sign_bit_copies1 (const_rtx x
, scalar_int_mode mode
, const_rtx known_x
,
4889 machine_mode known_mode
,
4890 unsigned int known_ret
)
4892 enum rtx_code code
= GET_CODE (x
);
4893 unsigned int bitwidth
= GET_MODE_PRECISION (mode
);
4894 int num0
, num1
, result
;
4895 unsigned HOST_WIDE_INT nonzero
;
4897 if (CONST_INT_P (x
))
4899 /* If the constant is negative, take its 1's complement and remask.
4900 Then see how many zero bits we have. */
4901 nonzero
= UINTVAL (x
) & GET_MODE_MASK (mode
);
4902 if (bitwidth
<= HOST_BITS_PER_WIDE_INT
4903 && (nonzero
& (HOST_WIDE_INT_1U
<< (bitwidth
- 1))) != 0)
4904 nonzero
= (~nonzero
) & GET_MODE_MASK (mode
);
4906 return (nonzero
== 0 ? bitwidth
: bitwidth
- floor_log2 (nonzero
) - 1);
4909 scalar_int_mode xmode
, inner_mode
;
4910 if (!is_a
<scalar_int_mode
> (GET_MODE (x
), &xmode
))
4913 unsigned int xmode_width
= GET_MODE_PRECISION (xmode
);
  /* For a smaller mode, just ignore the high bits.  */
  if (bitwidth < xmode_width)
    {
      num0 = cached_num_sign_bit_copies (x, xmode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - (int) (xmode_width - bitwidth));
    }
  if (bitwidth > xmode_width)
    {
      /* If this machine does not do all register operations on the entire
	 register and MODE is wider than the mode of X, we can say nothing
	 at all about the high-order bits.  */
      if (!WORD_REGISTER_OPERATIONS)
	return 1;

      /* Likewise on machines that do, if the mode of the object is smaller
	 than a word and loads of that size don't sign extend, we can say
	 nothing about the high order bits.  */
      if (xmode_width < BITS_PER_WORD
	  && load_extend_op (xmode) != SIGN_EXTEND)
	return 1;
    }
  /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
     the code in the switch below.  */
  switch (code)
    {
    case REG:

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* If pointers extend signed and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be sign bit copies.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED && xmode == Pmode
	  && mode == Pmode && REG_POINTER (x)
	  && !targetm.have_ptr_extend ())
	return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
#endif

      {
	unsigned int copies_for_hook = 1, copies = 1;
	rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, xmode, mode,
							 &copies_for_hook);

	if (new_rtx)
	  copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
					       known_mode, known_ret);

	if (copies > 1 || copies_for_hook > 1)
	  return MAX (copies, copies_for_hook);

	/* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
      }
      break;
    case MEM:
      /* Some RISC machines sign-extend all loads of smaller than a word.  */
      if (load_extend_op (xmode) == SIGN_EXTEND)
	return MAX (1, ((int) bitwidth - (int) xmode_width + 1));
      break;
    case SUBREG:
      /* If this is a SUBREG for a promoted object that is sign-extended
	 and we are looking at it in a wider mode, we know that at least the
	 high-order bits are known to be sign bit copies.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					     known_x, known_mode, known_ret);
	  return MAX ((int) bitwidth - (int) xmode_width + 1, num0);
	}

      if (is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)), &inner_mode))
	{
	  /* For a smaller object, just ignore the high bits.  */
	  if (bitwidth <= GET_MODE_PRECISION (inner_mode))
	    {
	      num0 = cached_num_sign_bit_copies (SUBREG_REG (x), inner_mode,
						 known_x, known_mode,
						 known_ret);
	      return MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode)
					   - bitwidth));
	    }

	  /* For paradoxical SUBREGs on machines where all register operations
	     affect the entire register, just look inside.  Note that we are
	     passing MODE to the recursive call, so the number of sign bit
	     copies will remain relative to that mode, not the inner mode.  */

	  /* This works only if loads sign extend.  Otherwise, if we get a
	     reload for the inner part, it may be loaded from the stack, and
	     then we lose all sign bit copies that existed before the store
	     to the stack.  */
	  if (WORD_REGISTER_OPERATIONS
	      && load_extend_op (inner_mode) == SIGN_EXTEND
	      && paradoxical_subreg_p (x)
	      && (MEM_P (SUBREG_REG (x)) || REG_P (SUBREG_REG (x))))
	    return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					       known_x, known_mode, known_ret);
	}
      break;
    case SIGN_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1)))
	return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
      break;
    case SIGN_EXTEND:
      if (is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return (bitwidth - GET_MODE_PRECISION (inner_mode)
		+ cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
					      known_x, known_mode, known_ret));
      break;
    case TRUNCATE:
      /* For a smaller object, just ignore the high bits.  */
      inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
					 known_x, known_mode, known_ret);
      return MAX (1, (num0 - (int) (GET_MODE_PRECISION (inner_mode)
				    - bitwidth)));
    case NOT:
      return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
    case ROTATE:	case ROTATERT:
      /* If we are rotating left by a number of bits less than the number
	 of sign bit copies, we can just subtract that amount from the
	 number.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < (int) bitwidth)
	{
	  num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					     known_x, known_mode, known_ret);
	  return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
				 : (int) bitwidth - INTVAL (XEXP (x, 1))));
	}
      break;
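      /* Illustrative example (not part of the original source): if the
	 operand of (rotate X (const_int 2)) has 10 sign-bit copies, the
	 result still has at least 10 - 2 = 8; a ROTATERT by N behaves like
	 a ROTATE by bitwidth - N.  */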
    case NEG:
      /* In general, this subtracts one sign bit copy.  But if the value
	 is known to be positive, the number of sign bit copies is the
	 same as that of the input.  Finally, if the input has just one bit
	 that might be nonzero, all the bits are copies of the sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return num0 > 1 ? num0 - 1 : 1;

      nonzero = nonzero_bits (XEXP (x, 0), mode);
      if (nonzero == 1)
	return bitwidth;

      if (num0 > 1
	  && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
	num0--;

      return num0;
    case IOR:   case AND:   case XOR:
    case SMIN:  case SMAX:  case UMIN:  case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
	 MIN and MAX operations always return one of the operands.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      /* If num1 is clearing some of the top bits then regardless of
	 the other term, we are guaranteed to have at least that many
	 high-order zero bits.  */
      if (code == AND
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
	return num1;

      /* Similarly for IOR when setting high-order bits.  */
      if (code == IOR
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return num1;

      return MIN (num0, num1);
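      /* Illustrative example (not part of the original source): in a 32-bit
	 mode, (and X (const_int 255)) clears the top 24 bits regardless of
	 X, so NUM1 = 32 - floor_log2 (255) - 1 = 24 copies of the (zero)
	 sign bit are guaranteed.  */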
    case PLUS:  case MINUS:
      /* For addition and subtraction, we can have a 1-bit carry.  However,
	 if we are subtracting 1 from a positive number, there will not
	 be such a carry.  Furthermore, if the positive number is known to
	 be 0 or 1, we know the result is either -1 or 0.  */

      if (code == PLUS && XEXP (x, 1) == constm1_rtx
	  && bitwidth <= HOST_BITS_PER_WIDE_INT)
	{
	  nonzero = nonzero_bits (XEXP (x, 0), mode);
	  if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
	    return (nonzero == 1 || nonzero == 0 ? bitwidth
		    : bitwidth - floor_log2 (nonzero) - 1);
	}

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      result = MAX (1, MIN (num0, num1) - 1);

      return result;
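      /* Illustrative example (not part of the original source): if both
	 operands of a PLUS have at least 12 sign-bit copies, the sum still
	 has at least 12 - 1 = 11, since the carry can disturb at most one
	 of those copies.  */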
    case MULT:
      /* The number of bits of the product is the sum of the number of
	 bits of both terms.  However, unless one of the terms is known
	 to be positive, we must allow for an additional bit since negating
	 a negative number can remove one sign bit copy.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
      if (result > 0
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (((nonzero_bits (XEXP (x, 0), mode)
		    & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
		  && ((nonzero_bits (XEXP (x, 1), mode)
		       & (HOST_WIDE_INT_1U << (bitwidth - 1)))
		      != 0))))
	result--;

      return MAX (1, result);
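      /* Illustrative example (not part of the original source): multiplying
	 two 32-bit values with 20 and 16 sign-bit copies (values that fit
	 in 13 and 17 significant bits) gives
	 32 - (32 - 20) - (32 - 16) = 4 copies, minus one more if both
	 operands might be negative.  */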
    case UDIV:
      /* The result must be <= the first operand.  If the first operand
	 has the high bit set, we know nothing about the number of sign
	 bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 0), mode)
		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
    case UMOD:
      /* The result must be <= the second operand.  If the second operand
	 has (or just might have) the high bit set, we know nothing about
	 the number of sign bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 1), mode)
		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
    case DIV:
      /* Similar to unsigned division, except that we have to worry about
	 the case where the divisor is negative, in which case we have
	 to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
	result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
	result--;

      return result;
    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
	 sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0
	  && INTVAL (XEXP (x, 1)) < xmode_width)
	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;
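      /* Illustrative example (not part of the original source): if X in
	 (ashiftrt X (const_int 3)) has 5 sign-bit copies, the shift drags
	 the sign bit right and the result has 5 + 3 = 8, capped at the
	 mode width.  */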
    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
	  || INTVAL (XEXP (x, 1)) < 0
	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
	  || INTVAL (XEXP (x, 1)) >= xmode_width)
	return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));
    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
					 known_x, known_mode, known_ret);
      return MIN (num0, num1);
    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
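      /* Illustrative example (not part of the original source): on a target
	 with STORE_FLAG_VALUE == 1, a comparison result is always 0 or 1,
	 so bitwidth - floor_log2 (1) - 1 = bitwidth - 1 high-order bits are
	 copies of the (zero) sign bit.  */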
    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}
/* Calculate the rtx_cost of a single instruction.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
insn_rtx_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.  We
     can't use single_set since we only have the pattern.  We also
     consider PARALLELs of a normal set and a single comparison.  In
     that case we use the cost of the non-comparison SET operation,
     which is most-likely to be the real cost of this operation.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      rtx comparison = NULL_RTX;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      if (GET_CODE (SET_SRC (x)) == COMPARE)
		{
		  if (comparison)
		    return 0;
		  comparison = x;
		}
	      else
		{
		  if (set)
		    return 0;
		  set = x;
		}
	    }
	}

      if (!set && comparison)
	set = comparison;

      if (!set)
	return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
/* Returns estimate on cost of computing SEQ.  */

unsigned
seq_cost (const rtx_insn *seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
	cost += set_rtx_cost (set, speed);
      else
	cost++;
    }

  return cost;
}
/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonicalizing
   it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */
rtx
canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
			rtx_insn **earliest,
			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx_insn *prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;

  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	 && op1 == CONST0_RTX (GET_MODE (op0))
	 && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || !NONJUMP_INSN_P (prev)
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}
      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (!REG_P (op0))
	break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
	  || !NONJUMP_INSN_P (prev)
	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
	  /* In cfglayout mode, there do not have to be labels at the
	     beginning of a block, or jumps at the end, so the previous
	     conditions would not stop us when we reach bb boundary.  */
	  || BLOCK_FOR_INSN (prev) != bb)
	break;

      set = set_of (op0, prev);

      if (set
	  && (GET_CODE (set) != SET
	      || !rtx_equal_p (SET_DEST (set), op0)))
	break;
      /* If this is setting OP0, get what it sets it to if it looks
	 like a COMPARE or a comparison operation.  */
      if (set)
	{
	  machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* ??? We may not combine comparisons done in a CCmode with
	     comparisons not done in a CCmode.  This is to aid targets
	     like Alpha that have an IEEE compliant EQ instruction, and
	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
	     actually artificial, simply to prevent the combination, but
	     should not affect other platforms.

	     However, we must allow VOIDmode comparisons to match either
	     CCmode or non-CCmode comparison, because some ports have
	     modeless comparisons inside branch patterns.

	     ??? This mode check should perhaps look more like the mode check
	     in simplify_comparison in combine.  */
	  if (((GET_MODE_CLASS (mode) == MODE_CC)
	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
	      && mode != VOIDmode
	      && inner_mode != VOIDmode)
	    break;
	  if (GET_CODE (SET_SRC (set)) == COMPARE
	      || (((code == NE
		    || (code == LT
			&& val_signbit_known_set_p (inner_mode,
						    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		    || (code == LT
			&& SCALAR_FLOAT_MODE_P (inner_mode)
			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			    REAL_VALUE_NEGATIVE (fsfv)))
#endif
		    ))
		  && COMPARISON_P (SET_SRC (set))))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && val_signbit_known_set_p (inner_mode,
						     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     ))
		   && COMPARISON_P (SET_SRC (set)))
	    {
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else if ((code == EQ || code == NE)
		   && GET_CODE (SET_SRC (set)) == XOR)
	    /* Handle sequences like:

	       (set op0 (xor X Y))
	       ...(eq|ne op0 (const_int 0))...

	       in which case:

	       (eq op0 (const_int 0)) reduces to (eq X Y)
	       (ne op0 (const_int 0)) reduces to (ne X Y)

	       This is the form used by MIPS16, for example.  */
	    x = SET_SRC (set);
	  else
	    break;
	}
      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;

      if (x)
	{
	  /* If the caller is expecting the condition to be valid at INSN,
	     make sure X doesn't change before INSN.  */
	  if (valid_at_insn_p)
	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
	      break;
	  if (COMPARISON_P (x))
	    code = GET_CODE (x);
	  if (reverse_code)
	    {
	      code = reversed_comparison_code (x, prev);
	      if (code == UNKNOWN)
		return 0;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }
  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;
  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  scalar_int_mode op0_mode;
  if (CONST_INT_P (op1)
      && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode)
      && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (op0_mode);

      switch (code)
	{
	case LE:
	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
	    code = LT, op1 = gen_int_mode (const_val + 1, op0_mode);
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
	case GE:
	  if ((const_val & max_val)
	      != (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (op0_mode) - 1)))
	    code = GT, op1 = gen_int_mode (const_val - 1, op0_mode);
	  break;

	case LEU:
	  if (uconst_val < max_val)
	    code = LTU, op1 = gen_int_mode (uconst_val + 1, op0_mode);
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = gen_int_mode (uconst_val - 1, op0_mode);
	  break;

	default:
	  break;
	}
    }
  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
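/* Illustrative example of rule (4) above (not part of the original source):
   (le (reg:SI 60) (const_int 4)) is returned as
   (lt (reg:SI 60) (const_int 5)), and (geu x (const_int 1)) becomes
   (gtu x (const_int 0)).  */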
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */
rtx
get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
	       int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
				 allow_cc_mode, valid_at_insn_p);
}
/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */
static void
init_num_sign_bit_copies_in_rep (void)
{
  opt_scalar_int_mode in_mode_iter;
  scalar_int_mode mode;

  FOR_EACH_MODE_IN_CLASS (in_mode_iter, MODE_INT)
    FOR_EACH_MODE_UNTIL (mode, in_mode_iter.require ())
      {
	scalar_int_mode in_mode = in_mode_iter.require ();
	scalar_int_mode i;

	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
	   extends to the next widest mode.  */
	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
		    || GET_MODE_WIDER_MODE (mode).require () == in_mode);

	/* We are in in_mode.  Count how many bits outside of mode
	   have to be copies of the sign-bit.  */
	FOR_EACH_MODE (i, mode, in_mode)
	  {
	    /* This must always exist (for the last iteration it will be
	       IN_MODE).  */
	    scalar_int_mode wider = GET_MODE_WIDER_MODE (i).require ();

	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
		/* We can only check sign-bit copies starting from the
		   top-bit.  In order to be able to check the bits we
		   have already seen we pretend that subsequent bits
		   have to be sign-bit copies too.  */
		|| num_sign_bit_copies_in_rep [in_mode][mode])
	      num_sign_bit_copies_in_rep [in_mode][mode]
		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
	  }
      }
}
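/* Illustrative example (not part of the original source): on a target whose
   mode_rep_extended hook returns SIGN_EXTEND for QImode-in-HImode and
   HImode-in-SImode, num_sign_bit_copies_in_rep[SImode][QImode] accumulates
   (16 - 8) + (32 - 16) = 24: every SImode bit above the QImode value must
   copy the sign bit.  */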
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}
/* Return true if RTX code CODE has a single sequence of zero or more
   "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
   entry in that case.  */

static bool
setup_reg_subrtx_bounds (unsigned int code)
{
  const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
  unsigned int i = 0;
  for (; format[i] != 'e'; ++i)
    {
      if (!format[i])
	/* No subrtxes.  Leave start and count as 0.  */
	return true;
      if (format[i] == 'E' || format[i] == 'V')
	return false;
    }

  /* Record the sequence of 'e's.  */
  rtx_all_subrtx_bounds[code].start = i;
  do
    ++i;
  while (format[i] == 'e');
  rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
  /* rtl-iter.h relies on this.  */
  gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);

  for (; format[i]; ++i)
    if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
      return false;

  return true;
}
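/* Illustrative example (not part of the original source): PLUS has format
   "ee", so its bounds become start = 0, count = 2; NEG ("e") gets
   start = 0, count = 1; codes with 'E' vector operands are rejected and
   later get count = UCHAR_MAX below.  */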
/* Initialize rtx_all_subrtx_bounds.  */
void
init_rtlanal (void)
{
  unsigned int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      if (!setup_reg_subrtx_bounds (i))
	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
      if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
    }

  init_num_sign_bit_copies_in_rep ();
}
/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}
/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */

int
low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
	return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
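/* Illustrative example (not part of the original source): m = 0x3f selects
   the low 6 bits, and exact_log2 (0x3f + 1) = 6; m = 0x35 is not a
   low-order field, and exact_log2 (0x36) = -1.  */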
/* Return the mode of MEM's address.  */

scalar_int_mode
get_address_mode (rtx mem)
{
  machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return as_a <scalar_int_mode> (mode);
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}
/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.

   TODO: This function needs to be rewritten to work on any size
   integer.  */

void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
	{
	  /* In this case the CONST_INT holds both target words.
	     Extract the bits from it into two word-sized pieces.
	     Sign extend each half to HOST_WIDE_INT.  */
	  unsigned HOST_WIDE_INT low, high;
	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
	  unsigned bits_per_word = BITS_PER_WORD;

	  /* Set sign_bit to the most significant bit of a word.  */
	  sign_bit = 1;
	  sign_bit <<= bits_per_word - 1;

	  /* Set mask so that all bits of the word are set.  We could
	     have used 1 << BITS_PER_WORD instead of basing the
	     calculation on sign_bit.  However, on machines where
	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
	     compiler warning, even though the code would never be
	     executed.  */
	  mask = sign_bit << 1;
	  mask--;

	  /* Set sign_extend as any remaining bits.  */
	  sign_extend = ~mask;

	  /* Pick the lower word and sign-extend it.  */
	  low = INTVAL (value);
	  low &= mask;
	  if (low & sign_bit)
	    low |= sign_extend;

	  /* Pick the higher word, shifted to the least significant
	     bits, and sign-extend it.  */
	  high = INTVAL (value);
	  high >>= bits_per_word - 1;
	  high >>= 1;
	  high &= mask;
	  if (high & sign_bit)
	    high |= sign_extend;

	  /* Store the words in the target machine order.  */
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = GEN_INT (high);
	      *second = GEN_INT (low);
	    }
	  else
	    {
	      *first = GEN_INT (low);
	      *second = GEN_INT (high);
	    }
	}
      else
	{
	  /* The rule for using CONST_INT for a wider mode
	     is that we regard the value as signed.
	     So sign-extend it.  */
	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = high;
	      *second = value;
	    }
	  else
	    {
	      *first = value;
	      *second = high;
	    }
	}
    }
  else if (GET_CODE (value) == CONST_WIDE_INT)
    {
      /* All of this is scary code and needs to be converted to
	 properly work with any size integer.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	}
      else
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	}
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
	{
	  *first = const0_rtx;
	  *second = value;
	}
      else
	{
	  *first = value;
	  *second = const0_rtx;
	}
    }
  else if (GET_MODE (value) == VOIDmode
	   /* This is the old way we did CONST_DOUBLE integers.  */
	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
	 So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
	}
      else
	{
	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
	}
    }
  else
    {
      long l[2];

      /* Note, this converts the REAL_VALUE_TYPE to the target's
	 format, splits up the floating point double and outputs
	 exactly 32 bits of it into each of l[0] and l[1] --
	 not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);

      /* If 32 bits is an entire word for the target, but not for the host,
	 then sign-extend on the host so that the number will look the same
	 way on the host that it would on the target.  See for instance
	 simplify_unary_operation.  The #if is needed to avoid compiler
	 warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
	{
	  if (l[0] & ((long) 1 << 31))
	    l[0] |= ((unsigned long) (-1) << 32);
	  if (l[1] & ((long) 1 << 31))
	    l[1] |= ((unsigned long) (-1) << 32);
	}
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}
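/* Illustrative example (not part of the original source): on a 32-bit
   little-endian target with a 64-bit HOST_WIDE_INT, splitting
   (const_int 0x123456789) stores GEN_INT (0x23456789) in *FIRST and
   GEN_INT (0x1) in *SECOND; a big-endian target gets the two words in the
   opposite order.  */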
/* Return true if X is a sign_extract or zero_extract from the least
   significant bit.  */

static bool
lsb_bitfield_op_p (rtx x)
{
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
    {
      machine_mode mode = GET_MODE (XEXP (x, 0));
      HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));

      return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
    }
  return false;
}
/* Strip outer address "mutations" from LOC and return a pointer to the
   inner value.  If OUTER_CODE is nonnull, store the code of the innermost
   stripped expression there.

   "Mutations" either convert between modes or apply some kind of
   extension, truncation or alignment.  */

rtx *
strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
  for (;;)
    {
      enum rtx_code code = GET_CODE (*loc);
      if (GET_RTX_CLASS (code) == RTX_UNARY)
	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
	   used to convert between pointer sizes.  */
	loc = &XEXP (*loc, 0);
      else if (lsb_bitfield_op_p (*loc))
	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
	   acts as a combined truncation and extension.  */
	loc = &XEXP (*loc, 0);
      else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
	/* (and ... (const_int -X)) is used to align to X bytes.  */
	loc = &XEXP (*loc, 0);
      else if (code == SUBREG
	       && !OBJECT_P (SUBREG_REG (*loc))
	       && subreg_lowpart_p (*loc))
	/* (subreg (operator ...) ...) inside and is used for mode
	   conversion too.  */
	loc = &SUBREG_REG (*loc);
      else
	{
	  if (outer_code)
	    *outer_code = code;
	  return loc;
	}
    }
}
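/* Illustrative example (not part of the original source): for
   (and:SI (subreg:SI (plus:DI R1 R2) 0) (const_int -4)), the loop above
   strips the alignment AND and the lowpart SUBREG and returns a pointer
   to the inner PLUS.  */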
/* Return true if CODE applies some kind of scale.  The scaled value is
   the first operand and the scale is the second.  */

static bool
binary_scale_code_p (enum rtx_code code)
{
  return (code == MULT
	  || code == ASHIFT
	  /* Needed by ARM targets.  */
	  || code == ASHIFTRT
	  || code == LSHIFTRT
	  || code == ROTATE
	  || code == ROTATERT);
}
/* If *INNER can be interpreted as a base, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_base_term (rtx *inner)
{
  if (GET_CODE (*inner) == LO_SUM)
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}
/* If *INNER can be interpreted as an index, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_index_term (rtx *inner)
{
  /* At present, only constant scales are allowed.  */
  if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}
/* Set the segment part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->segment);
  info->segment = loc;
  info->segment_term = inner;
}

/* Set the base part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_base (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->base);
  info->base = loc;
  info->base_term = inner;
}

/* Set the index part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_index (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->index);
  info->index = loc;
  info->index_term = inner;
}

/* Set the displacement part of address INFO to LOC, given that INNER
   is the constant term.  */

static void
set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->disp);
  info->disp = loc;
  info->disp_term = inner;
}
/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
   rest of INFO accordingly.  */

static void
decompose_incdec_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  /* These addresses are only valid when the size of the addressed
     value is known.  */
  gcc_checking_assert (info->mode != VOIDmode);
}
/* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
   of INFO accordingly.  */

static void
decompose_automod_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  rtx plus = XEXP (*info->inner, 1);
  gcc_assert (GET_CODE (plus) == PLUS);

  info->base_term2 = &XEXP (plus, 0);
  gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));

  rtx *step = &XEXP (plus, 1);
  rtx *inner_step = strip_address_mutations (step);
  if (CONSTANT_P (*inner_step))
    set_address_disp (info, step, inner_step);
  else
    set_address_index (info, step, inner_step);
}
/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}
/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, machine_mode mode, addr_space_t as,
	  enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
	 or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
	return base_p ? 1 : -1;
    }
  return 0;
}
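/* Illustrative example (not part of the original source): in (plus R1 R2)
   where R1 has REG_POINTER set, baseness returns 2 for R1 and 0 for a
   plain pseudo R2, so the decomposition below picks R1 as the base and R2
   as the index.  */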
/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
					ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
	set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
	set_address_segment (info, loc, inner);
      else
	{
	  /* The only other possibilities are a base or an index.  */
	  rtx *base_term = get_base_term (inner);
	  rtx *index_term = get_index_term (inner);
	  gcc_assert (base_term || index_term);
	  if (!base_term)
	    set_address_index (info, loc, index_term);
	  else if (!index_term)
	    set_address_base (info, loc, base_term);
	  else
	    {
	      gcc_assert (base_term == index_term);
	      ops[out] = loc;
	      inner_ops[out] = base_term;
	      ++out;
	    }
	}
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
	 the base.  If we were confident that another term was the base
	 or index, treat the remaining operand as the other kind.  */
      if (!info->base)
	set_address_base (info, ops[0], inner_ops[0]);
      else
	set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
		    GET_CODE (*ops[1]))
	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
		       GET_CODE (*ops[0])))
	{
	  set_address_base (info, ops[0], inner_ops[0]);
	  set_address_index (info, ops[1], inner_ops[1]);
	}
      else
	{
	  set_address_base (info, ops[1], inner_ops[1]);
	  set_address_index (info, ops[0], inner_ops[0]);
	}
    }
  else
    gcc_assert (out == 0);
}
/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
		   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}
/* Describe address operand LOC in INFO.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}
/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
		     MEM_ADDR_SPACE (x), MEM);
}
/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
		     info->addr_outer_code);
}
/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}
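/* Illustrative example (not part of the original source): for an index of
   (mult (reg:SI 60) (const_int 4)) the scale is 4, for
   (ashift (reg:SI 60) (const_int 2)) it is 1 << 2 = 4, and for a bare
   register it is 1.  */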
/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}
/* Return true if RTL X contains a SYMBOL_REF.  */

bool
contains_symbol_ref_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter))
      return true;

  return false;
}
/* Return true if RTL X contains a SYMBOL_REF or LABEL_REF.  */

bool
contains_symbolic_reference_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
      return true;

  return false;
}
/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (const_rtx x)
{
  if (!targetm.have_tls)
    return false;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
      return true;
  return false;
}