/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "insn-config.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "addresses.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, scalar_int_mode,
                                                   const_rtx, machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, scalar_int_mode,
                                             const_rtx, machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, scalar_int_mode,
                                                const_rtx, machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, scalar_int_mode,
                                          const_rtx, machine_mode,
                                          unsigned int);

rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
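
/* Illustrative example (not part of the original source): on a target
   where TARGET_MODE_REP_EXTENDED (QImode, SImode) is SIGN_EXTEND, a
   truncation from SImode to QImode can be treated as a plain mode
   switch only if the high-order SImode bits already agree with the
   QImode sign bit; num_sign_bit_copies_in_rep[SImode][QImode] records
   how many such sign-bit copies are required.  */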
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  value_type *base,
                                                  size_t i, value_type x)
{
  if (base == array.stack)
    {
      if (i < LOCAL_ELEMS)
        {
          base[i] = x;
          return base;
        }
      gcc_checking_assert (i == LOCAL_ELEMS);
      /* A previous iteration might also have moved from the stack to the
         heap, in which case the heap array will already be big enough.  */
      if (vec_safe_length (array.heap) <= i)
        vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }
  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}
/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    value_type *base,
                                                    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
         we're likely to want most.  It also allows for the SEQUENCE
         hack below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
        if (format[i] == 'e')
          {
            value_type subx = T::get_value (x->u.fld[i].rt_rtx);
            if (__builtin_expect (end < LOCAL_ELEMS, true))
              base[end++] = subx;
            else
              base = add_single_to_queue (array, base, end++, subx);
          }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
        {
          value_type subx = T::get_value (x->u.fld[i].rt_rtx);
          if (__builtin_expect (end < LOCAL_ELEMS, true))
            base[end++] = subx;
          else
            base = add_single_to_queue (array, base, end++, subx);
        }
      else if (format[i] == 'E')
        {
          unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
          rtx *vec = x->u.fld[i].rt_rtvec->elem;
          if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
            for (unsigned int j = 0; j < length; j++)
              base[end++] = T::get_value (vec[j]);
          else
            for (unsigned int j = 0; j < length; j++)
              base = add_single_to_queue (array, base, end++,
                                          T::get_value (vec[j]));
          if (code == SEQUENCE && end == length)
            /* If the subrtxes of the sequence fill the entire array then
               we know that no other parts of a containing insn are queued.
               The caller is therefore iterating over the sequence as a
               PATTERN (...), so we also want the patterns of the
               subinstructions.  */
            for (unsigned int j = 0; j < length; j++)
              {
                typename T::rtx_type x = T::get_rtx (base[j]);
                if (INSN_P (x))
                  base[j] = T::get_value (PATTERN (x));
              }
        }
  return end - orig_end;
}

template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;
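
/* Illustrative use of these iterators (a sketch mirroring how later
   functions in this file, such as contains_mem_rtx_p, use them):

     subrtx_iterator::array_type array;
     FOR_EACH_SUBRTX (iter, array, x, ALL)
       if (MEM_P (*iter))
         return true;

   ARRAY supplies the stack/heap worklist storage managed above, and
   ALL selects the rtx_all_subrtx_bounds table.  */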
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

int
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
/* Compute an approximation for the offset between the register
   FROM and TO for the current function, as it was at the start
   of the routine.  */

static poly_int64
get_initial_register_offset (int from, int to)
{
  static const struct elim_table_t
  {
    const int from;
    const int to;
  } table[] = ELIMINABLE_REGS;
  poly_int64 offset1, offset2;
  unsigned int i, j;

  if (to == from)
    return 0;

  /* It is not safe to call INITIAL_ELIMINATION_OFFSET
     before the reload pass.  We need to give at least
     an estimation for the resulting frame size.  */
  if (! reload_completed)
    {
      offset1 = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset1 = - offset1;
#endif
      if (to == STACK_POINTER_REGNUM)
        return offset1;
      else if (from == STACK_POINTER_REGNUM)
        return - offset1;
      else
        return 0;
    }

  for (i = 0; i < ARRAY_SIZE (table); i++)
    if (table[i].from == from)
      {
        if (table[i].to == to)
          {
            INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                        offset1);
            return offset1;
          }
        for (j = 0; j < ARRAY_SIZE (table); j++)
          {
            if (table[j].to == to
                && table[j].from == table[i].to)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return offset1 + offset2;
              }
            if (table[j].from == to
                && table[j].to == table[i].to)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return offset1 - offset2;
              }
          }
      }
    else if (table[i].to == from)
      {
        if (table[i].from == to)
          {
            INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                        offset1);
            return - offset1;
          }
        for (j = 0; j < ARRAY_SIZE (table); j++)
          {
            if (table[j].to == to
                && table[j].from == table[i].from)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return - offset1 + offset2;
              }
            if (table[j].from == to
                && table[j].to == table[i].from)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return - offset1 - offset2;
              }
          }
      }

  /* If the requested register combination was not found,
     try a different more simple combination.  */
  if (from == ARG_POINTER_REGNUM)
    return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
  else if (to == ARG_POINTER_REGNUM)
    return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
  else if (from == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
  else if (to == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
  else
    return 0;
}
/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, poly_int64 offset, poly_int64 size,
                       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);
  gcc_checking_assert (mode == BLKmode || known_size_p (size));

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && mode != BLKmode)
    {
      poly_int64 actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (!multiple_p (actual_offset, GET_MODE_SIZE (mode)))
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x) && !SYMBOL_REF_FUNCTION_P (x))
        {
          tree decl;
          poly_int64 decl_size;

          if (maybe_lt (offset, 0))
            return 1;
          if (!known_size_p (size))
            return maybe_ne (offset, 0);

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            {
              if (!poly_int_tree_p (DECL_SIZE_UNIT (decl), &decl_size))
                decl_size = -1;
            }
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (!known_size_p (decl_size) || known_eq (decl_size, 0)
                  ? maybe_ne (offset, 0)
                  : maybe_gt (offset + size, decl_size));
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        {
#ifdef RED_ZONE_SIZE
          poly_int64 red_zone_size = RED_ZONE_SIZE;
#else
          poly_int64 red_zone_size = 0;
#endif
          poly_int64 stack_boundary = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
          poly_int64 low_bound, high_bound;

          if (!known_size_p (size))
            return 1;

          if (x == frame_pointer_rtx)
            {
              if (FRAME_GROWS_DOWNWARD)
                {
                  high_bound = targetm.starting_frame_offset ();
                  low_bound  = high_bound - get_frame_size ();
                }
              else
                {
                  low_bound  = targetm.starting_frame_offset ();
                  high_bound = low_bound + get_frame_size ();
                }
            }
          else if (x == hard_frame_pointer_rtx)
            {
              poly_int64 sp_offset
                = get_initial_register_offset (STACK_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);
              poly_int64 ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = sp_offset - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = sp_offset + red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else if (x == stack_pointer_rtx)
            {
              poly_int64 ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               STACK_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else
            {
              /* We assume that accesses are safe to at least the
                 next stack boundary.
                 Examples are varargs and __builtin_return_address.  */
#if ARGS_GROW_DOWNWARD
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + stack_boundary;
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - crtl->args.size - stack_boundary;
#else
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - stack_boundary;
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + crtl->args.size + stack_boundary;
#endif
            }

          if (known_ge (offset, low_bound)
              && known_le (offset, high_bound - size))
            return 0;
          return 1;
        }
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a const unspec without offset.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && GET_CODE (XEXP (x, 1)) == CONST
          && GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
          && known_eq (offset, 0))
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0),
                                     offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, -1, BLKmode, false);
}
/* Return true if X contains a MEM subrtx.  */

bool
contains_mem_rtx_p (rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (MEM_P (*iter))
      return true;

  return false;
}
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

int
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
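
/* For example (illustrative): given
   (const (plus (symbol_ref "x") (const_int 8))) this returns 8, and
   given (const (minus (symbol_ref "x") (const_int 8))) it returns -8;
   anything without an obvious integer term yields 0.  */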
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
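
/* For example (illustrative): splitting
   (const (plus (symbol_ref "buf") (const_int 4))) stores
   (symbol_ref "buf") in *BASE_OUT and (const_int 4) in *OFFSET_OUT;
   any other rtx comes back unchanged with a zero offset.  */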
/* Express integer value X as some value Y plus a polynomial offset,
   where Y is either const0_rtx, X or something within X (as opposed
   to a new rtx).  Return the Y and store the offset in *OFFSET_OUT.  */

rtx
strip_offset (rtx x, poly_int64_pod *offset_out)
{
  rtx base = const0_rtx;
  rtx test = x;
  if (GET_CODE (test) == CONST)
    test = XEXP (test, 0);
  if (GET_CODE (test) == PLUS)
    {
      base = XEXP (test, 0);
      test = XEXP (test, 1);
    }
  if (poly_int_rtx_p (test, offset_out))
    return base;
  *offset_out = 0;
  return x;
}
/* Return the argument size in REG_ARGS_SIZE note X.  */

poly_int64
get_args_size (const_rtx x)
{
  gcc_checking_assert (REG_NOTE_KIND (x) == REG_ARGS_SIZE);
  return rtx_to_poly_int64 (XEXP (x, 0));
}
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == label_ref_label (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

      /* These are kept unique for a given value.  */
    CASE_CONST_ANY:
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
                    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && !read_modify_subreg_p (SET_DEST (body)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
                   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Return true if REG is set or clobbered inside INSN.  */

int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
        if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
          return true;

      return false;
    }

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return true;

  /* There are no REG_INC notes for SP autoinc.  */
  if (reg == stack_pointer_rtx && INSN_P (insn))
    {
      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
        {
          rtx mem = *iter;
          if (mem
              && MEM_P (mem)
              && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
            {
              if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
                return true;
              iter.skip_subrtxes ();
            }
        }
    }

  return set_of (reg, insn) != NULL_RTX;
}
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
/* Return true if X is a SUBREG and if storing a value to X would
   preserve some of its SUBREG_REG.  For example, on a normal 32-bit
   target, using a SUBREG to store to one half of a DImode REG would
   preserve the other half.  */

bool
read_modify_subreg_p (const_rtx x)
{
  unsigned int isize, osize;
  if (GET_CODE (x) != SUBREG)
    return false;
  isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
  osize = GET_MODE_SIZE (GET_MODE (x));
  return isize > osize
         && isize > REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
}
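
/* Illustrative example: on a 32-bit target where REGMODE_NATURAL_SIZE
   is 4, (subreg:SI (reg:DI n) 0) satisfies this predicate, since
   writing the low SImode half leaves the high half of the DImode
   register intact.  */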
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
/* Add all hard registers in X to *PSET.  */
static void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
        add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}

/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *)data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}

/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset,
                        bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
        IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}

/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In common case
                 only single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach set first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL;
              break;

            default:
              return NULL;
            }
        }
    }
  return set;
}
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (maybe_ne (SUBREG_BYTE (src), SUBREG_BYTE (dst)))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      int c0 = INTVAL (XVECEXP (par, 0, 0));
      HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
          return 0;
      return
        simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                               offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
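
/* For example (illustrative): (set (reg:SI 3) (reg:SI 3)) is a no-op,
   as is a VEC_SELECT of consecutive lanes of a hard vector register
   whose destination is exactly the hard register that already holds
   those lanes.  */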
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const rtx_insn *insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

bool
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return false;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
           || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
               && x_regno == ARG_POINTER_REGNUM)
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return true;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return true;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return false;
      loc = &SET_SRC (x);
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return true;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return true;
        }
    }
  return false;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
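
/* For example (illustrative): on a target where DImode occupies two
   consecutive hard registers, reg_overlap_mentioned_p on (reg:DI 0)
   and an IN that mentions (reg:SI 1) is nonzero, because hard
   register 1 lies inside the [0, 2) range covered by the DImode
   register.  */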
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
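
/* Typical usage (illustrative), as in find_all_hard_reg_sets above:

     note_stores (PATTERN (insn), record_hard_reg_sets, pset);

   where the callback accumulates every stored-to hard register into
   the HARD_REG_SET passed through DATA.  */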
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const rtx_insn *insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register, is a
   complete rather than read-modify-write destination, and contains
   register TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG && !read_modify_subreg_p (dest))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}
/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET || GET_CODE (pattern) == CLOBBER)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const rtx_insn *insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}
/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}
/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));
  switch (kind)
    {
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
                                       datum, REG_NOTES (insn));
}
/* Add a REG_ARGS_SIZE note to INSN with value VALUE.  */

void
add_args_size_note (rtx_insn *insn, poly_int64 value)
{
  gcc_checking_assert (!find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX));
  add_reg_note (insn, REG_ARGS_SIZE, gen_int_mode (value, Pmode));
}
/* Add a register note like NOTE to INSN.  */

void
add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
{
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
  else
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
}
/* Duplicate NOTE and return the copy.  */

rtx
duplicate_reg_note (rtx note)
{
  reg_note kind = REG_NOTE_KIND (note);

  if (GET_CODE (note) == INT_LIST)
    return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
  else if (GET_CODE (note) == EXPR_LIST)
    return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
  else
    return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
}
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx_insn *insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (insn);
      break;
    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
   Return true if any note has been removed.  */

bool
remove_reg_equal_equiv_notes (rtx_insn *insn)
{
  rtx *loc;
  bool ret = false;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        {
          *loc = XEXP (*loc, 1);
          ret = true;
        }
      else
        loc = &XEXP (*loc, 1);
    }
  return ret;
}
/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx_insn *insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

bool
in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return true;

  return false;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
{
  rtx_expr_list *temp = *listp;
  rtx_expr_list *prev = NULL;

  while (temp)
    {
      if (node == temp->element ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
{
  rtx_insn_list *temp = *listp;
  rtx_insn_list *prev = NULL;

  while (temp)
    {
      if (node == temp->insn ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state, and thus no
   instructions or register uses should be moved or combined across them.
   This includes only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case CALL:
    case MEM:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_insn_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_insn_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Nonzero if X contains any volatile memory references,
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_refs_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;

            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_refs_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Similar to above, except that it also rejects register pre- and
   post-incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case VAR_LOCATION:
      return 0;

    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
         when some combination can't be done.  If we see one, don't think
         that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case SET:
    case CALL:
    case POST_INC:
    case POST_DEC:
    case PRE_INC:
    case PRE_DEC:
    case POST_MODIFY:
    case PRE_MODIFY:
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (side_effects_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;

            for (j = 0; j < XVECLEN (x, i); j++)
              if (side_effects_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);
  switch (code)
    {
      /* Handle these cases quickly.  */
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST:
    case PC:
    case CC0:
    case REG:
    case SCRATCH:
      return 0;

    case UNSPEC:
      return targetm.unspec_may_trap_p (x, flags);

    case UNSPEC_VOLATILE:
    case ASM_INPUT:
    case TRAP_IF:
      return 1;

    case ASM_OPERANDS:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
          && MEM_VOLATILE_P (x)
          && XEXP (x, 0) == stack_pointer_rtx)
        return 1;
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
             reference; moving it out of context such as when moving code
             when optimizing, might cause its address to become invalid.  */
          code_changed
          || !MEM_NOTRAP_P (x))
        {
          poly_int64 size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : -1;
          return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
                                        GET_MODE (x), code_changed);
        }
      return 0;

      /* Division by a non-constant might trap.  */
    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      if (HONOR_SNANS (x))
        return 1;
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
        return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
        return 1;
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
         certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
        break;
      /* ??? There is no machine independent way to check for tests that trap
         when COMPARE is used, though many targets do make this distinction.
         For instance, sparc uses CCFPE for compares which generate exceptions
         and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (x))
        return 1;
      /* But often the compare has some CC mode, so check operand
         modes as well.  */
      if (HONOR_NANS (XEXP (x, 0))
          || HONOR_NANS (XEXP (x, 1)))
        return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (x))
        return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (XEXP (x, 0))
          || HONOR_SNANS (XEXP (x, 1)))
        return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
        return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
        return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (may_trap_p_1 (XEXP (x, i), flags))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;

          for (j = 0; j < XVECLEN (x, i); j++)
            if (may_trap_p_1 (XVECEXP (x, i, j), flags))
              return 1;
        }
    }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}
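/* Illustrative sketch, not part of the original file: passes commonly
   combine may_trap_p with side_effects_p to decide whether an rtx is
   safe to evaluate speculatively.  The helper name is hypothetical.  */
#if 0
static bool
example_safe_to_speculate (rtx x)
{
  return !may_trap_p (x) && !side_effects_p (x);
}
#endif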
/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
        if (s->d == 1)
          return *s->ip;
        else
          return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      ((set (reg:SI) (mem:SI (%fp - 7)))
       ...
       (set (reg:QI) (mem:QI (%fp - 7))))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}
/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

int
inequality_comparisons_p (const_rtx x)
{
  const char *fmt;
  int len, i;
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case REG:
    case SCRATCH:
    case PC:
    case CC0:
    CASE_CONST_ANY:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      return 0;

    case LT:
    case LTU:
    case GT:
    case GTU:
    case LE:
    case LEU:
    case GE:
    case GEU:
      return 1;

    default:
      break;
    }

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
        {
          if (inequality_comparisons_p (XEXP (x, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (inequality_comparisons_p (XVECEXP (x, i, j)))
              return 1;
        }
    }

  return 0;
}
/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.

   ALL_REGS is true if we want to replace all REGs equal to FROM, not just
   those pointer-equal ones.  */

rtx
replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
{
  int i, j;
  const char *fmt;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (all_regs
      && REG_P (x)
      && REG_P (from)
      && REGNO (x) == REGNO (from))
    {
      gcc_assert (GET_MODE (x) == GET_MODE (from));
      return to;
    }
  else if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);

      if (CONST_INT_P (new_rtx))
        {
          x = simplify_subreg (GET_MODE (x), new_rtx,
                               GET_MODE (SUBREG_REG (x)),
                               SUBREG_BYTE (x));
          gcc_assert (x);
        }
      else
        SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);

      if (CONST_INT_P (new_rtx))
        {
          x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
                                        new_rtx, GET_MODE (XEXP (x, 0)));
          gcc_assert (x);
        }
      else
        XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
                                           from, to, all_regs);
    }

  return x;
}
/* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
   the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */

void
replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
{
  /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
  rtx x = *loc;
  if (JUMP_TABLE_DATA_P (x))
    {
      x = PATTERN (x);
      rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
      int len = GET_NUM_ELEM (vec);
      for (int i = 0; i < len; ++i)
        {
          rtx ref = RTVEC_ELT (vec, i);
          if (XEXP (ref, 0) == old_label)
            {
              XEXP (ref, 0) = new_label;
              if (update_label_nuses)
                {
                  ++LABEL_NUSES (new_label);
                  --LABEL_NUSES (old_label);
                }
            }
        }
      return;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by the iterator because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
    JUMP_LABEL (x) = new_label;

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
    {
      rtx *loc = *iter;
      if (rtx x = *loc)
        {
          if (GET_CODE (x) == SYMBOL_REF
              && CONSTANT_POOL_ADDRESS_P (x))
            {
              rtx c = get_pool_constant (x);
              if (rtx_referenced_p (old_label, c))
                {
                  /* Create a copy of constant C; replace the label inside
                     but do not update LABEL_NUSES because uses in constant
                     pool are not counted.  */
                  rtx new_c = copy_rtx (c);
                  replace_label (&new_c, old_label, new_label, false);

                  /* Add the new constant NEW_C to constant pool and replace
                     the old reference to constant by new reference.  */
                  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
                  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
                }
            }

          if ((GET_CODE (x) == LABEL_REF
               || GET_CODE (x) == INSN_LIST)
              && XEXP (x, 0) == old_label)
            {
              XEXP (x, 0) = new_label;
              if (update_label_nuses)
                {
                  ++LABEL_NUSES (new_label);
                  --LABEL_NUSES (old_label);
                }
            }
        }
    }
}

/* Replace occurrences of OLD_LABEL in INSN with NEW_LABEL.  Also track
   the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */

void
replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
                       rtx_insn *new_label, bool update_label_nuses)
{
  rtx insn_as_rtx = insn;
  replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
  gcc_checking_assert (insn_as_rtx == insn);
}
/* Return true if X is referenced in BODY.  */

bool
rtx_referenced_p (const_rtx x, const_rtx body)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, body, ALL)
    if (const_rtx y = *iter)
      {
        /* Check if a label_ref Y refers to label X.  */
        if (GET_CODE (y) == LABEL_REF
            && LABEL_P (x)
            && label_ref_label (y) == x)
          return true;

        if (rtx_equal_p (x, y))
          return true;

        /* If Y is a reference to pool constant traverse the constant.  */
        if (GET_CODE (y) == SYMBOL_REF
            && CONSTANT_POOL_ADDRESS_P (y))
          iter.substitute (get_pool_constant (y));
      }
  return false;
}
/* If INSN is a tablejump return true and store the label (before jump table)
   to *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
             rtx_jump_table_data **tablep)
{
  if (!JUMP_P (insn))
    return false;

  rtx target = JUMP_LABEL (insn);
  if (target == NULL_RTX || ANY_RETURN_P (target))
    return false;

  rtx_insn *label = as_a <rtx_insn *> (target);
  rtx_insn *table = next_insn (label);
  if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
    return false;

  if (labelp)
    *labelp = label;
  if (tablep)
    *tablep = as_a <rtx_jump_table_data *> (table);
  return true;
}
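/* Illustrative sketch, not part of the original file: how a caller
   typically consumes tablejump_p, receiving both outputs when INSN is a
   tablejump.  The helper name is hypothetical.  */
#if 0
static int
example_count_table_entries (const rtx_insn *insn)
{
  rtx_insn *label;
  rtx_jump_table_data *table;
  if (!tablejump_p (insn, &label, &table))
    return 0;
  /* get_labels yields the rtvec of label refs in the jump table.  */
  return GET_NUM_ELEM (table->get_labels ());
}
#endif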
/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
                && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
              || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
          && computed_jump_p_1 (XEXP (x, i)))
        return 1;

      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (computed_jump_p_1 (XVECEXP (x, i, j)))
            return 1;
    }

  return 0;
}
/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const rtx_insn *insn)
{
  int i;
  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
        return 0;

      if (GET_CODE (pat) == PARALLEL)
        {
          int len = XVECLEN (pat, 0);
          int has_use_labelref = 0;

          for (i = len - 1; i >= 0; i--)
            if (GET_CODE (XVECEXP (pat, 0, i)) == USE
                && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
                    == LABEL_REF))
              {
                has_use_labelref = 1;
                break;
              }

          if (! has_use_labelref)
            for (i = len - 1; i >= 0; i--)
              if (GET_CODE (XVECEXP (pat, 0, i)) == SET
                  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
                  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
                return 1;
        }
      else if (GET_CODE (pat) == SET
               && SET_DEST (pat) == pc_rtx
               && computed_jump_p_1 (SET_SRC (pat)))
        return 1;
    }
  return 0;
}
/* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
   the equivalent add insn and pass the result to FN, using DATA as the
   final argument.  */

static int
for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
{
  rtx x = XEXP (mem, 0);
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
        int size = GET_MODE_SIZE (GET_MODE (mem));
        rtx r1 = XEXP (x, 0);
        rtx c = gen_int_mode (size, GET_MODE (r1));
        return fn (mem, x, r1, r1, c, data);
      }

    case PRE_DEC:
    case POST_DEC:
      {
        int size = GET_MODE_SIZE (GET_MODE (mem));
        rtx r1 = XEXP (x, 0);
        rtx c = gen_int_mode (-size, GET_MODE (r1));
        return fn (mem, x, r1, r1, c, data);
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
        rtx r1 = XEXP (x, 0);
        rtx add = XEXP (x, 1);
        return fn (mem, x, r1, add, NULL, data);
      }

    default:
      gcc_unreachable ();
    }
}
/* Traverse *LOC looking for MEMs that have autoinc addresses.
   For each such autoinc operation found, call FN, passing it
   the innermost enclosing MEM, the operation itself, the RTX modified
   by the operation, two RTXs (the second may be NULL) that, once
   added, represent the value to be held by the modified RTX
   afterwards, and DATA.  FN is to return 0 to continue the
   traversal or any other value to have it returned to the caller of
   for_each_inc_dec.  */

int
for_each_inc_dec (rtx x,
                  for_each_inc_dec_fn fn,
                  void *data)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx mem = *iter;
      if (mem
          && MEM_P (mem)
          && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
        {
          int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
          if (res != 0)
            return res;

          iter.skip_subrtxes ();
        }
    }
  return 0;
}
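/* Illustrative sketch, not part of the original file: a minimal
   for_each_inc_dec_fn callback.  Returning 0 continues the traversal;
   here we merely count the autoinc operations found.  */
#if 0
static int
example_count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                       rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
                       rtx srcoff ATTRIBUTE_UNUSED, void *data)
{
  ++*(int *) data;
  return 0;
}

/* Usage:
     int n = 0;
     for_each_inc_dec (PATTERN (insn), example_count_autoinc, &n);  */
#endif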
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if ((tem = regno_use_in (regno, XEXP (x, i))))
            return tem;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
            return tem;
    }

  return NULL_RTX;
}
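/* Illustrative sketch, not part of the original file: regno_use_in
   answers "is register N mentioned anywhere in this expression?", so a
   hypothetical caller can guard against clobbering a live register.  */
#if 0
static bool
example_mentions_reg (unsigned int regno, rtx pat)
{
  return regno_use_in (regno, pat) != NULL_RTX;
}
#endif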
/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The more
   positive the value, the stronger the preference for being the first
   operand.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always become the second operand.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -10;
  if (code == CONST_WIDE_INT)
    return -9;
  if (code == CONST_POLY_INT)
    return -8;
  if (code == CONST_DOUBLE)
    return -8;
  if (code == CONST_FIXED)
    return -8;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
        return -7;
      if (code == CONST_WIDE_INT)
        return -6;
      if (code == CONST_POLY_INT)
        return -5;
      if (code == CONST_DOUBLE)
        return -5;
      if (code == CONST_FIXED)
        return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
        return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
         of objects.  Prefer pointer objects over non pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
          || (MEM_P (op) && MEM_POINTER (op)))
        return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
         This helps to make things linear.  In particular,
         (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
         operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
         is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
        return 1;
      /* FALLTHRU */

    default:
      return 0;
    }
}

/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

bool
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
          < commutative_operand_precedence (y));
}
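/* Illustrative sketch, not part of the original file: the usual
   canonicalization idiom.  Constants have the lowest precedence, so
   (plus (const_int 4) (reg)) would be rewritten with the REG first.  */
#if 0
static void
example_canonicalize (rtx *op0, rtx *op1)
{
  if (swap_commutative_operands_p (*op0, *op1))
    std::swap (*op0, *op1);
}
#endif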
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */

int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
        return 1;
    default:
      break;
    }
  return 0;
}
/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */

int
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return 0;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (in, i) - 1; j >= 0; j--)
          if (loc == &XVECEXP (in, i, j)
              || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
            return 1;
    }
  return 0;
}
/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

poly_uint64
subreg_lsb_1 (machine_mode outer_mode,
              machine_mode inner_mode,
              poly_uint64 subreg_byte)
{
  poly_uint64 subreg_end, trailing_bytes, byte_pos;

  /* A paradoxical subreg begins at bit position 0.  */
  if (paradoxical_subreg_p (outer_mode, inner_mode))
    return 0;

  subreg_end = subreg_byte + GET_MODE_SIZE (outer_mode);
  trailing_bytes = GET_MODE_SIZE (inner_mode) - subreg_end;
  if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
    byte_pos = trailing_bytes;
  else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
    byte_pos = subreg_byte;
  else
    {
      /* When bytes and words have opposite endianness, we must be able
         to split offsets into words and bytes at compile time.  */
      poly_uint64 leading_word_part
        = force_align_down (subreg_byte, UNITS_PER_WORD);
      poly_uint64 trailing_word_part
        = force_align_down (trailing_bytes, UNITS_PER_WORD);
      /* If the subreg crosses a word boundary ensure that
         it also begins and ends on a word boundary.  */
      gcc_assert (known_le (subreg_end - leading_word_part,
                            (unsigned int) UNITS_PER_WORD)
                  || (known_eq (leading_word_part, subreg_byte)
                      && known_eq (trailing_word_part, trailing_bytes)));
      if (WORDS_BIG_ENDIAN)
        byte_pos = trailing_word_part + (subreg_byte - leading_word_part);
      else
        byte_pos = leading_word_part + (trailing_bytes - trailing_word_part);
    }

  return byte_pos * BITS_PER_UNIT;
}

/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

poly_uint64
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
                       SUBREG_BYTE (x));
}
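/* Worked example (illustrative, not from the original file): on a fully
   little-endian target, (subreg:SI (reg:DI R) 4) selects the high word,
   so subreg_lsb_1 (SImode, DImode, 4) yields 4 * BITS_PER_UNIT = 32.
   On a fully big-endian target the same subreg has
   trailing_bytes = 8 - (4 + 4) = 0, giving a bit offset of 0.  */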
/* Return the subreg byte offset for a subreg whose outer value has
   OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
   there are LSB_SHIFT *bits* between the lsb of the outer value and the
   lsb of the inner value.  This is the inverse of the calculation
   performed by subreg_lsb_1 (which converts byte offsets to bit shifts).  */

poly_uint64
subreg_size_offset_from_lsb (poly_uint64 outer_bytes, poly_uint64 inner_bytes,
                             poly_uint64 lsb_shift)
{
  /* A paradoxical subreg begins at bit position 0.  */
  gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
  if (maybe_gt (outer_bytes, inner_bytes))
    {
      gcc_checking_assert (known_eq (lsb_shift, 0U));
      return 0;
    }

  poly_uint64 lower_bytes = exact_div (lsb_shift, BITS_PER_UNIT);
  poly_uint64 upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
  if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
    return upper_bytes;
  else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
    return lower_bytes;
  else
    {
      /* When bytes and words have opposite endianness, we must be able
         to split offsets into words and bytes at compile time.  */
      poly_uint64 lower_word_part = force_align_down (lower_bytes,
                                                      UNITS_PER_WORD);
      poly_uint64 upper_word_part = force_align_down (upper_bytes,
                                                      UNITS_PER_WORD);
      if (WORDS_BIG_ENDIAN)
        return upper_word_part + (lower_bytes - lower_word_part);
      else
        return lower_word_part + (upper_bytes - upper_word_part);
    }
}
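/* Worked example (illustrative): inverting the little-endian case above,
   subreg_size_offset_from_lsb (4, 8, 32) computes
   lower_bytes = 32 / BITS_PER_UNIT = 4 and returns 4, recovering the
   SUBREG_BYTE of the high word.  */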
/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.

   Rather than considering one particular inner register (and thus one
   particular "outer" register) in isolation, this function really uses
   XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
   function does not check whether adding INFO->offset to XREGNO gives
   a valid hard register; even if INFO->offset + XREGNO is out of range,
   there might be another register of the same type that is in range.
   Likewise it doesn't check whether targetm.hard_regno_mode_ok accepts
   the new register, since that can depend on things like whether the final
   register number is even or odd.  Callers that want to check whether
   this particular subreg can be replaced by a simple (reg ...) should
   use simplify_subreg_regno.  */

void
subreg_get_info (unsigned int xregno, machine_mode xmode,
                 poly_uint64 offset, machine_mode ymode,
                 struct subreg_info *info)
{
  unsigned int nregs_xmode, nregs_ymode;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  unsigned int xsize = GET_MODE_SIZE (xmode);
  unsigned int ysize = GET_MODE_SIZE (ymode);
  bool rknown = false;

  /* If the register representation of a non-scalar mode has holes in it,
     we expect the scalar units to be concatenated together, with the holes
     distributed evenly among the scalar units.  Each scalar unit must occupy
     at least one register.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      /* As a consequence, we must be dealing with a constant number of
         scalars, and thus a constant offset.  */
      HOST_WIDE_INT coffset = offset.to_constant ();
      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      unsigned int nunits = GET_MODE_NUNITS (xmode);
      scalar_mode xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
                  == (nunits
                      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs (xregno, xmode)
                  == hard_regno_nregs (xregno, xmode_unit) * nunits);

      /* You can only ask for a SUBREG of a value with holes in the middle
         if you don't cross the holes.  (Such a SUBREG should be done by
         picking a different register class, or doing it in memory if
         necessary.)  An example of a value with holes is XCmode on 32-bit
         x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
         3 for each part, but in memory it's two 128-bit parts.
         Padding is assumed to be at the end (not necessarily the 'high part')
         of each unit.  */
      if ((coffset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
          && (coffset / GET_MODE_SIZE (xmode_unit)
              != ((coffset + ysize - 1) / GET_MODE_SIZE (xmode_unit))))
        {
          info->representable_p = false;
          rknown = true;
        }
    }
  else
    nregs_xmode = hard_regno_nregs (xregno, xmode);

  nregs_ymode = hard_regno_nregs (xregno, ymode);

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown && known_eq (offset, 0U) && ysize > xsize)
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
         actual hard registers than the original register, we must
         return a negative offset so that we find the proper highpart
         of the register.

         We assume that the ordering of registers within a multi-register
         value has a consistent endianness: if bytes and register words
         have different endianness, the hard registers that make up a
         multi-register value must be at least word-sized.  */
      if (REG_WORDS_BIG_ENDIAN)
        info->offset = (int) nregs_xmode - (int) nregs_ymode;
      else
        info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && (xsize % nregs_xmode) == 0
      && (ysize % nregs_ymode) == 0)
    {
      int regsize_xmode = xsize / nregs_xmode;
      int regsize_ymode = ysize / nregs_ymode;
      if (!rknown
          && ((nregs_ymode > 1 && regsize_xmode > regsize_ymode)
              || (nregs_xmode > 1 && regsize_ymode > regsize_xmode)))
        {
          info->representable_p = false;
          info->nregs = CEIL (ysize, regsize_xmode);
          if (!can_div_trunc_p (offset, regsize_xmode, &info->offset))
            /* Checked by validate_subreg.  We must know at compile time
               which inner registers are being accessed.  */
            gcc_unreachable ();
          return;
        }
      /* It's not valid to extract a subreg of mode YMODE at OFFSET that
         would go outside of XMODE.  */
      if (!rknown && maybe_gt (ysize + offset, xsize))
        {
          info->representable_p = false;
          info->nregs = nregs_ymode;
          if (!can_div_trunc_p (offset, regsize_xmode, &info->offset))
            /* Checked by validate_subreg.  We must know at compile time
               which inner registers are being accessed.  */
            gcc_unreachable ();
          return;
        }
      /* Quick exit for the simple and common case of extracting whole
         subregisters from a multiregister value.  */
      /* ??? It would be better to integrate this into the code below,
         if we can generalize the concept enough and figure out how
         odd-sized modes can coexist with the other weird cases we support.  */
      HOST_WIDE_INT count;
      if (!rknown
          && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
          && regsize_xmode == regsize_ymode
          && constant_multiple_p (offset, regsize_ymode, &count))
        {
          info->representable_p = true;
          info->nregs = nregs_ymode;
          info->offset = count;
          gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
          return;
        }
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && known_eq (offset, subreg_lowpart_offset (ymode, xmode)))
    {
      info->representable_p = true;
      rknown = true;

      if (known_eq (offset, 0U) || nregs_xmode == nregs_ymode)
        {
          info->offset = 0;
          info->nregs = nregs_ymode;
          return;
        }
    }

  /* Set NUM_BLOCKS to the number of independently-representable YMODE
     values there are in (reg:XMODE XREGNO).  We can view the register
     as consisting of this number of independent "blocks", where each
     block occupies NREGS_YMODE registers and contains exactly one
     representable YMODE value.  */
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);
  unsigned int num_blocks = nregs_xmode / nregs_ymode;

  /* Calculate the number of bytes in each block.  This must always
     be exact, otherwise we don't know how to verify the constraint.
     These conditions may be relaxed but subreg_regno_offset would
     need to be redesigned.  */
  gcc_assert ((xsize % num_blocks) == 0);
  poly_uint64 bytes_per_block = xsize / num_blocks;

  /* Get the number of the first block that contains the subreg and the byte
     offset of the subreg from the start of that block.  */
  unsigned int block_number;
  poly_uint64 subblock_offset;
  if (!can_div_trunc_p (offset, bytes_per_block, &block_number,
                        &subblock_offset))
    /* Checked by validate_subreg.  We must know at compile time which
       inner registers are being accessed.  */
    gcc_unreachable ();

  if (!rknown)
    {
      /* Only the lowpart of each block is representable.  */
      info->representable_p
        = known_eq (subblock_offset,
                    subreg_size_lowpart_offset (ysize, bytes_per_block));
      rknown = true;
    }

  /* We assume that the ordering of registers within a multi-register
     value has a consistent endianness: if bytes and register words
     have different endianness, the hard registers that make up a
     multi-register value must be at least word-sized.  */
  if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
    /* The block number we calculated above followed memory endianness.
       Convert it to register endianness by counting back from the end.
       (Note that, because of the assumption above, each block must be
       at least word-sized.)  */
    info->offset = (num_blocks - block_number - 1) * nregs_ymode;
  else
    info->offset = block_number * nregs_ymode;
  info->nregs = nregs_ymode;
}
/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */

unsigned int
subreg_regno_offset (unsigned int xregno, machine_mode xmode,
                     poly_uint64 offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}
/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */

bool
subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
                               poly_uint64 offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}
/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
                       poly_uint64 offset, machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && !REG_CAN_CHANGE_MODE_P (xregno, xmode, ymode)
      /* We can use mode change in LRA for some transformations.  */
      && ! lra_in_progress)
    return -1;

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && xregno == FRAME_POINTER_REGNUM)
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM
      /* We should convert hard stack register in LRA if it is
         necessary.  */
      && ! lra_in_progress)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how complex FP arguments are passed
     on IA-64 and should be fixed.  See PR target/49226.  */
  if (!targetm.hard_regno_mode_ok (yregno, ymode)
      && targetm.hard_regno_mode_ok (xregno, xmode))
    return -1;

  return (int) yregno;
}
/* Return the final regno that a subreg expression refers to.  */

unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
                                     GET_MODE (subreg),
                                     SUBREG_BYTE (x),
                                     GET_MODE (x));
  return ret;
}

/* Return the number of registers that a subreg expression refers
   to.  */

unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}
/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */

unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
                   &info);
  return info.nregs;
}
struct parms_set_data
{
  HARD_REG_SET regs;
  int nregs;
};

/* Helper function for noticing stores to parameter registers.  */

static void
parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct parms_set_data *const d = (struct parms_set_data *) data;
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
      && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
    {
      CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
      d->nregs--;
    }
}
/* Look backward for first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */

rtx_insn *
find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
{
  struct parms_set_data parm;
  rtx p;
  rtx_insn *before, *first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
        && REG_P (XEXP (XEXP (p, 0), 0))
        && !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
      {
        gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

        /* We only care about registers which can hold function
           arguments.  */
        if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
          continue;

        SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
        parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
         another.  Stop in that case.  */
      if (CALL_P (before))
        break;

      /* Our caller needs to either ensure that we will find all sets
         (in case code has not been optimized yet), or take care
         of possible labels by setting boundary to the preceding
         CODE_LABEL.  */
      if (LABEL_P (before))
        {
          gcc_assert (before == boundary);
          break;
        }

      if (INSN_P (before))
        {
          int nregs_old = parm.nregs;
          note_stores (PATTERN (before), parms_set, &parm);
          /* If we found something that did not set a parameter reg,
             we're done.  Do not keep going, as that might result
             in hoisting an insn before the setting of a pseudo
             that is used by the hoisted insn.  */
          if (nregs_old != parm.nregs)
            first_set = before;
          else
            break;
        }
    }
  return first_set;
}
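/* Illustrative sketch, not part of the original file: a caller that
   wants the window of insns that must stay with CALL_INSN can bound the
   backward search at the head of the call's basic block.  Hypothetical
   usage only.  */
#if 0
static rtx_insn *
example_call_window_start (rtx_insn *call_insn)
{
  rtx_insn *boundary = BB_HEAD (BLOCK_FOR_INSN (call_insn));
  return find_first_parameter_load (call_insn, boundary);
}
#endif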
/* Return true if we should avoid inserting code between INSN and preceding
   call instruction.  */

bool
keep_with_call_p (const rtx_insn *insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
          && fixed_regs[REGNO (SET_DEST (set))]
          && general_operand (SET_SRC (set), VOIDmode))
        return true;
      if (REG_P (SET_SRC (set))
          && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
          && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
        return true;
      /* There may be a stack pop just after the call and before the store
         of the return register.  Search for the actual store when deciding
         if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
        {
          /* This CONST_CAST is okay because next_nonnote_insn just
             returns its argument and we assign it to a const_rtx
             variable.  */
          const rtx_insn *i2
            = next_nonnote_insn (const_cast<rtx_insn *> (insn));
          if (i2 && keep_with_call_p (i2))
            return true;
        }
    }
  return false;
}
/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);
  rtx_jump_table_data *table;

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &table))
    {
      rtvec vec = table->get_labels ();
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
        if (XEXP (RTVEC_ELT (vec, i), 0) == label)
          return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}
/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   X appears as operand OPNO in an expression with code OUTER_CODE.
   SPEED specifies whether costs optimized for speed or size should
   be returned.  */

int
rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
          int opno, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;
  int factor;

  if (x == 0)
    return 0;

  if (GET_MODE (x) != VOIDmode)
    mode = GET_MODE (x);

  /* A size N times larger than UNITS_PER_WORD likely needs N times as
     many insns, taking N times as long.  */
  factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
  if (factor == 0)
    factor = 1;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      /* Multiplication has time-complexity O(N*N), where N is the
         number of units (translated from digits) when using
         schoolbook long multiplication.  */
      total = factor * factor * COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      /* Similarly, complexity for schoolbook long division.  */
      total = factor * factor * COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    case SET:
      /* A SET doesn't have a mode, so let's look at the SET_DEST to get
         the mode for the factor.  */
      mode = GET_MODE (SET_DEST (x));
      factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
      if (factor == 0)
        factor = 1;
      /* FALLTHRU */
    default:
      total = factor * COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
         the mode, the more expensive it is.  */
      if (!targetm.modes_tieable_p (mode, GET_MODE (SUBREG_REG (x))))
        return COSTS_N_INSNS (2 + factor);
      break;

    case TRUNCATE:
      if (targetm.modes_tieable_p (mode, GET_MODE (XEXP (x, 0))))
        {
          total = 0;
          break;
        }
      /* FALLTHRU */
    default:
      if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
        return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), mode, code, i, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);

  return total;
}
/* Fill in the structure C with information about both speed and size rtx
   costs for X, which is operand OPNO in an expression with code OUTER.  */

void
get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
                   struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, mode, outer, opno, true);
  c->size = rtx_cost (x, mode, outer, opno, false);
}
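/* Illustrative sketch, not part of the original file: comparing two
   candidate expressions on one cost axis; get_full_rtx_cost fills both
   axes when a caller wants to weigh speed and size together.  The helper
   name is hypothetical.  */
#if 0
static bool
example_cheaper_p (rtx new_x, rtx old_x, machine_mode mode, bool speed)
{
  return (rtx_cost (new_x, mode, SET, 1, speed)
          < rtx_cost (old_x, mode, SET, 1, speed));
}
#endif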
/* Return cost of address expression X.
   Expect that X is properly formed address reference.

   SPEED parameter specifies whether costs optimized for speed or size
   should be returned.  */

int
address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for cost of various unusual addresses, such as operands
     of push instruction.  It is not worthwhile to complicate writing
     of the target hook by such cases.  */

  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, mode, as, speed);
}

/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
{
  return rtx_cost (x, Pmode, MEM, 0, speed);
}
unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, machine_mode mode)
{
  if (mode == VOIDmode)
    mode = GET_MODE (x);
  scalar_int_mode int_mode;
  if (!is_a <scalar_int_mode> (mode, &int_mode))
    return GET_MODE_MASK (mode);
  return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
}

unsigned int
num_sign_bit_copies (const_rtx x, machine_mode mode)
{
  if (mode == VOIDmode)
    mode = GET_MODE (x);
  scalar_int_mode int_mode;
  if (!is_a <scalar_int_mode> (mode, &int_mode))
    return 1;
  return cached_num_sign_bit_copies (x, int_mode, NULL_RTX, VOIDmode, 0);
}
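/* Worked example (illustrative, not from the original file): for
   (zero_extend:DI (reg:QI R)), nonzero_bits (..., DImode) is at most
   0xff, and num_sign_bit_copies reports at least 64 - 8 = 56 copies of
   the (zero) sign bit in DImode.  */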
/* Return true if nonzero_bits1 might recurse into both operands
   of X.  */

static inline bool
nonzero_bits_binary_arith_p (const_rtx x)
{
  if (!ARITHMETIC_P (x))
    return false;
  switch (GET_CODE (x))
    {
    case AND:
    case XOR:
    case IOR:
    case UMIN:
    case UMAX:
    case SMIN:
    case SMAX:
    case PLUS:
    case MINUS:
    case MULT:
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      return true;
    default:
      return false;
    }
}
/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, scalar_int_mode mode, const_rtx known_x,
                     machine_mode known_mode,
                     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (nonzero_bits_binary_arith_p (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
        return nonzero_bits1 (x, mode, x0, mode,
                              cached_nonzero_bits (x0, mode, known_x,
                                                   known_mode, known_ret));

      /* Check the second level.  */
      if (nonzero_bits_binary_arith_p (x0)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return nonzero_bits1 (x, mode, x1, mode,
                              cached_nonzero_bits (x1, mode, known_x,
                                                   known_mode, known_ret));

      if (nonzero_bits_binary_arith_p (x1)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return nonzero_bits1 (x, mode, x0, mode,
                              cached_nonzero_bits (x0, mode, known_x,
                                                   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}
/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior

/* Given an expression, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
   an arithmetic operation, we can do better.  */
static unsigned HOST_WIDE_INT
nonzero_bits1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
               machine_mode known_mode,
               unsigned HOST_WIDE_INT known_ret)
{
  unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
  unsigned HOST_WIDE_INT inner_nz;
  enum rtx_code code;
  machine_mode inner_mode;
  scalar_int_mode xmode;

  unsigned int mode_width = GET_MODE_PRECISION (mode);

  if (CONST_INT_P (x))
    {
      if (SHORT_IMMEDIATES_SIGN_EXTEND
          && INTVAL (x) > 0
          && mode_width < BITS_PER_WORD
          && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1))) != 0)
        return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);

      return UINTVAL (x);
    }

  if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
    return nonzero;
  unsigned int xmode_width = GET_MODE_PRECISION (xmode);

  /* If X is wider than MODE, use its mode instead.  */
  if (xmode_width > mode_width)
    {
      mode = xmode;
      nonzero = GET_MODE_MASK (mode);
      mode_width = xmode_width;
    }

  if (mode_width > HOST_BITS_PER_WIDE_INT)
    /* Our only callers in this case look for single bit values.  So
       just return the mode mask.  Those tests will then be false.  */
    return nonzero;

  /* If MODE is wider than X, but both are a single word for both the host
     and target machines, we can compute this from which bits of the
     object might be nonzero in its own mode, taking into account the fact
     that on many CISC machines, accessing an object in a wider mode
     causes the high-order bits to become undefined.  So they are
     not known to be zero.  */

  if (!WORD_REGISTER_OPERATIONS
      && mode_width > xmode_width
      && xmode_width <= BITS_PER_WORD
      && xmode_width <= HOST_BITS_PER_WIDE_INT)
    {
      nonzero &= cached_nonzero_bits (x, xmode,
                                      known_x, known_mode, known_ret);
      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode);
      return nonzero;
    }

  /* Please keep nonzero_bits_binary_arith_p above in sync with
     the code in the switch below.  */
  code = GET_CODE (x);
  switch (code)
    {
    case REG:
#if defined(POINTERS_EXTEND_UNSIGNED)
      /* If pointers extend unsigned and this is a pointer in Pmode, say that
         all the bits above ptr_mode are known to be zero.  */
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED
          && xmode == Pmode
          && REG_POINTER (x)
          && !targetm.have_ptr_extend ())
        nonzero &= GET_MODE_MASK (ptr_mode);
#endif

      /* Include declared information about alignment of pointers.  */
      /* ??? We don't properly preserve REG_POINTER changes across
         pointer-to-integer casts, so we can't trust it except for
         things that we know must be pointers.  See execute/960116-1.c.  */
      if ((x == stack_pointer_rtx
           || x == frame_pointer_rtx
           || x == arg_pointer_rtx)
          && REGNO_POINTER_ALIGN (REGNO (x)))
        {
          unsigned HOST_WIDE_INT alignment
            = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;

#ifdef PUSH_ROUNDING
          /* If PUSH_ROUNDING is defined, it is possible for the
             stack to be momentarily aligned only to that amount,
             so we pick the least alignment.  */
          if (x == stack_pointer_rtx && PUSH_ARGS)
            alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
                             alignment);
#endif

          nonzero &= ~(alignment - 1);
        }

      {
        unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
        rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, xmode, mode,
                                                  &nonzero_for_hook);

        if (new_rtx)
          nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
                                                   known_mode, known_ret);

        return nonzero_for_hook;
      }

    case MEM:
      /* In many, if not most, RISC machines, reading a byte from memory
         zeros the rest of the register.  Noticing that fact saves a lot
         of extra zero-extends.  */
      if (load_extend_op (xmode) == ZERO_EXTEND)
        nonzero &= GET_MODE_MASK (xmode);
      break;

    case EQ:  case NE:
    case UNEQ:  case LTGT:
    case GT:  case GTU:  case UNGT:
    case LT:  case LTU:  case UNLT:
    case GE:  case GEU:  case UNGE:
    case LE:  case LEU:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If this produces an integer result, we know which bits are set.
         Code here used to clear bits outside the mode of X, but that is
         now done above.  */
      /* Mind that MODE is the mode the caller wants to look at this
         operation in, and not the actual operation mode.  We can wind
         up with (subreg:DI (gt:V4HI x y)), and we don't have anything
         that describes the results of a vector compare.  */
      if (GET_MODE_CLASS (xmode) == MODE_INT
          && mode_width <= HOST_BITS_PER_WIDE_INT)
        nonzero = STORE_FLAG_VALUE;
      break;

    case NEG:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
         and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
        nonzero = 1;
#endif

      if (xmode_width < mode_width)
        nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode));
      break;

    case ABS:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
         and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
        nonzero = 1;
#endif
      break;

    case TRUNCATE:
      nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
                                       known_x, known_mode, known_ret)
                  & GET_MODE_MASK (mode));
      break;

    case ZERO_EXTEND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
      break;

    case SIGN_EXTEND:
      /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
         Otherwise, show all the bits in the outer mode but not the inner
         may be nonzero.  */
      inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        {
          inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
          if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
            inner_nz |= (GET_MODE_MASK (mode)
                         & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
        }

      nonzero &= inner_nz;
      break;

    case AND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret)
               & cached_nonzero_bits (XEXP (x, 1), mode,
                                      known_x, known_mode, known_ret);
      break;

    case XOR:   case IOR:
    case UMIN:  case UMAX:  case SMIN:  case SMAX:
      {
        unsigned HOST_WIDE_INT nonzero0
          = cached_nonzero_bits (XEXP (x, 0), mode,
                                 known_x, known_mode, known_ret);

        /* Don't call nonzero_bits for the second time if it cannot change
           anything.  */
        if ((nonzero & nonzero0) != nonzero)
          nonzero &= nonzero0
                     | cached_nonzero_bits (XEXP (x, 1), mode,
                                            known_x, known_mode, known_ret);
      }
      break;

    case PLUS:  case MINUS:
    case MULT:
    case DIV:   case UDIV:
    case MOD:   case UMOD:
      /* We can apply the rules of arithmetic to compute the number of
         high- and low-order zero bits of these operations.  We start by
         computing the width (position of the highest-order nonzero bit)
         and the number of low-order zero bits for each value.  */
      {
        unsigned HOST_WIDE_INT nz0
          = cached_nonzero_bits (XEXP (x, 0), mode,
                                 known_x, known_mode, known_ret);
        unsigned HOST_WIDE_INT nz1
          = cached_nonzero_bits (XEXP (x, 1), mode,
                                 known_x, known_mode, known_ret);
        int sign_index = xmode_width - 1;
        int width0 = floor_log2 (nz0) + 1;
        int width1 = floor_log2 (nz1) + 1;
        int low0 = ctz_or_zero (nz0);
        int low1 = ctz_or_zero (nz1);
        unsigned HOST_WIDE_INT op0_maybe_minusp
          = nz0 & (HOST_WIDE_INT_1U << sign_index);
        unsigned HOST_WIDE_INT op1_maybe_minusp
          = nz1 & (HOST_WIDE_INT_1U << sign_index);
        unsigned int result_width = mode_width;
        int result_low = 0;

        switch (code)
          {
          case PLUS:
            result_width = MAX (width0, width1) + 1;
            result_low = MIN (low0, low1);
            break;
          case MINUS:
            result_low = MIN (low0, low1);
            break;
          case MULT:
            result_width = width0 + width1;
            result_low = low0 + low1;
            break;
          case DIV:
            if (width1 == 0)
              break;
            if (!op0_maybe_minusp && !op1_maybe_minusp)
              result_width = width0;
            break;
          case UDIV:
            if (width1 == 0)
              break;
            result_width = width0;
            break;
          case MOD:
            if (width1 == 0)
              break;
            if (!op0_maybe_minusp && !op1_maybe_minusp)
              result_width = MIN (width0, width1);
            result_low = MIN (low0, low1);
            break;
          case UMOD:
            if (width1 == 0)
              break;
            result_width = MIN (width0, width1);
            result_low = MIN (low0, low1);
            break;
          default:
            gcc_unreachable ();
          }

        if (result_width < mode_width)
          nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;

        if (result_low > 0)
          nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
      }
      break;

    case ZERO_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
        nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
      break;

    case SUBREG:
      /* If this is a SUBREG formed for a promoted variable that has
         been zero-extended, we know that at least the high-order bits
         are zero, though others might be too.  */
      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
        nonzero = GET_MODE_MASK (xmode)
                  & cached_nonzero_bits (SUBREG_REG (x), xmode,
                                         known_x, known_mode, known_ret);

      /* If the inner mode is a single word for both the host and target
         machines, we can compute this from which bits of the inner
         object might be nonzero.  */
      inner_mode = GET_MODE (SUBREG_REG (x));
      if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
          && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT)
        {
          nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
                                          known_x, known_mode, known_ret);

          /* On many CISC machines, accessing an object in a wider mode
             causes the high-order bits to become undefined.  So they are
             not known to be zero.  */
          rtx_code extend_op;
          if ((!WORD_REGISTER_OPERATIONS
               /* If this is a typical RISC machine, we only have to worry
                  about the way loads are extended.  */
               || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
                   ? val_signbit_known_set_p (inner_mode, nonzero)
                   : extend_op != ZERO_EXTEND)
               || (!MEM_P (SUBREG_REG (x)) && !REG_P (SUBREG_REG (x))))
              && xmode_width > GET_MODE_PRECISION (inner_mode))
            nonzero |= (GET_MODE_MASK (xmode) & ~GET_MODE_MASK (inner_mode));
        }
      break;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
      /* The nonzero bits are in two classes: any bits within MODE
         that aren't in xmode are always significant.  The rest of the
         nonzero bits are those that are significant in the operand of
         the shift when shifted the appropriate number of bits.  This
         shows that high-order bits are cleared by the right shift and
         low-order bits by left shifts.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
          && INTVAL (XEXP (x, 1)) < xmode_width)
        {
          int count = INTVAL (XEXP (x, 1));
          unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (xmode);
          unsigned HOST_WIDE_INT op_nonzero
            = cached_nonzero_bits (XEXP (x, 0), mode,
                                   known_x, known_mode, known_ret);
          unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
          unsigned HOST_WIDE_INT outer = 0;

          if (mode_width > xmode_width)
            outer = (op_nonzero & nonzero & ~mode_mask);

          if (code == LSHIFTRT)
            inner >>= count;
          else if (code == ASHIFTRT)
            {
              inner >>= count;

              /* If the sign bit may have been nonzero before the shift, we
                 need to mark all the places it could have been copied to
                 by the shift as possibly nonzero.  */
              if (inner & (HOST_WIDE_INT_1U << (xmode_width - 1 - count)))
                inner |= (((HOST_WIDE_INT_1U << count) - 1)
                          << (xmode_width - count));
            }
          else if (code == ASHIFT)
            inner <<= count;
          else
            inner = ((inner << (count % xmode_width)
                      | (inner >> (xmode_width - (count % xmode_width))))
                     & mode_mask);

          nonzero &= (outer | inner);
        }
      break;

    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
         that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
        nonzero
          |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      else
        nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
         that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
        nonzero
          |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      else
        nonzero = -1;
      break;

    case CLRSB:
      /* This is at most the number of bits in the mode minus 1.  */
      nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
        unsigned HOST_WIDE_INT nonzero_true
          = cached_nonzero_bits (XEXP (x, 1), mode,
                                 known_x, known_mode, known_ret);

        /* Don't call nonzero_bits for the second time if it cannot change
           anything.  */
        if ((nonzero & nonzero_true) != nonzero)
          nonzero &= nonzero_true
                     | cached_nonzero_bits (XEXP (x, 2), mode,
                                            known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}
/* See the macro definition above.  */
#undef cached_num_sign_bit_copies

/* Return true if num_sign_bit_copies1 might recurse into both operands
   of X.  */

static inline bool
num_sign_bit_copies_binary_arith_p (const_rtx x)
{
  if (!ARITHMETIC_P (x))
    return false;
  switch (GET_CODE (x))
    {
    case IOR:
    case AND:
    case XOR:
    case SMIN:
    case SMAX:
    case UMIN:
    case UMAX:
    case PLUS:
    case MINUS:
    case MULT:
      return true;
    default:
      return false;
    }
}
/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, scalar_int_mode mode,
                            const_rtx known_x, machine_mode known_mode,
                            unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (num_sign_bit_copies_binary_arith_p (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
        return
          num_sign_bit_copies1 (x, mode, x0, mode,
                                cached_num_sign_bit_copies (x0, mode, known_x,
                                                            known_mode,
                                                            known_ret));

      /* Check the second level.  */
      if (num_sign_bit_copies_binary_arith_p (x0)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return
          num_sign_bit_copies1 (x, mode, x1, mode,
                                cached_num_sign_bit_copies (x1, mode, known_x,
                                                            known_mode,
                                                            known_ret));

      if (num_sign_bit_copies_binary_arith_p (x1)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return
          num_sign_bit_copies1 (x, mode, x0, mode,
                                cached_num_sign_bit_copies (x0, mode, known_x,
                                                            known_mode,
                                                            known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}
/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE.  The returned
   value will always be between 1 and the number of bits in MODE.  */

static unsigned int
num_sign_bit_copies1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
		      machine_mode known_mode,
		      unsigned int known_ret)
{
  enum rtx_code code = GET_CODE (x);
  unsigned int bitwidth = GET_MODE_PRECISION (mode);
  int num0, num1, result;
  unsigned HOST_WIDE_INT nonzero;

  if (CONST_INT_P (x))
    {
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
    }
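  /* Illustration of the CONST_INT case above, using a hypothetical 8-bit
     mode: for x == -4 (0b11111100), nonzero == 0xFC; the sign bit is set,
     so nonzero becomes ~0xFC & 0xFF == 0x03, floor_log2 (0x03) == 1, and
     the result is 8 - 1 - 1 == 6: bits 7..2 are all copies of the sign
     bit.  */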
  scalar_int_mode xmode, inner_mode;
  if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
    return 1;

  unsigned int xmode_width = GET_MODE_PRECISION (xmode);

  /* For a smaller mode, just ignore the high bits.  */
  if (bitwidth < xmode_width)
    {
      num0 = cached_num_sign_bit_copies (x, xmode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - (int) (xmode_width - bitwidth));
    }
  if (bitwidth > xmode_width)
    {
      /* If this machine does not do all register operations on the entire
	 register and MODE is wider than the mode of X, we can say nothing
	 at all about the high-order bits.  */
      if (!WORD_REGISTER_OPERATIONS)
	return 1;

      /* Likewise on machines that do, if the mode of the object is smaller
	 than a word and loads of that size don't sign extend, we can say
	 nothing about the high order bits.  */
      if (xmode_width < BITS_PER_WORD
	  && load_extend_op (xmode) != SIGN_EXTEND)
	return 1;
    }

  /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
     the code in the switch below.  */
  switch (code)
    {
    case REG:
#if defined(POINTERS_EXTEND_UNSIGNED)
      /* If pointers extend signed and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be sign bit copies.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED && xmode == Pmode
	  && mode == Pmode && REG_POINTER (x)
	  && !targetm.have_ptr_extend ())
	return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
#endif

      {
	unsigned int copies_for_hook = 1, copies = 1;
	rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, xmode, mode,
							 &copies_for_hook);

	if (new_rtx)
	  copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
					       known_mode, known_ret);

	if (copies > 1 || copies_for_hook > 1)
	  return MAX (copies, copies_for_hook);

	/* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
      }
      break;
    case MEM:
      /* Some RISC machines sign-extend all loads of smaller than a word.  */
      if (load_extend_op (xmode) == SIGN_EXTEND)
	return MAX (1, ((int) bitwidth - (int) xmode_width + 1));
      break;
    case SUBREG:
      /* If this is a SUBREG for a promoted object that is sign-extended
	 and we are looking at it in a wider mode, we know that at least the
	 high-order bits are known to be sign bit copies.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					     known_x, known_mode, known_ret);
	  return MAX ((int) bitwidth - (int) xmode_width + 1, num0);
	}

      if (is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)), &inner_mode))
	{
	  /* For a smaller object, just ignore the high bits.  */
	  if (bitwidth <= GET_MODE_PRECISION (inner_mode))
	    {
	      num0 = cached_num_sign_bit_copies (SUBREG_REG (x), inner_mode,
						 known_x, known_mode,
						 known_ret);
	      return MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode)
					   - bitwidth));
	    }

	  /* For paradoxical SUBREGs on machines where all register operations
	     affect the entire register, just look inside.  Note that we are
	     passing MODE to the recursive call, so the number of sign bit
	     copies will remain relative to that mode, not the inner mode.  */

	  /* This works only if loads sign extend.  Otherwise, if we get a
	     reload for the inner part, it may be loaded from the stack, and
	     then we lose all sign bit copies that existed before the store
	     to the stack.  */
	  if (WORD_REGISTER_OPERATIONS
	      && load_extend_op (inner_mode) == SIGN_EXTEND
	      && paradoxical_subreg_p (x)
	      && (MEM_P (SUBREG_REG (x)) || REG_P (SUBREG_REG (x))))
	    return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					       known_x, known_mode, known_ret);
	}
      break;
    case SIGN_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1)))
	return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
      break;
    case SIGN_EXTEND:
      if (is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return (bitwidth - GET_MODE_PRECISION (inner_mode)
		+ cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
					      known_x, known_mode, known_ret));
      break;
    case TRUNCATE:
      /* For a smaller object, just ignore the high bits.  */
      inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
					 known_x, known_mode, known_ret);
      return MAX (1, (num0 - (int) (GET_MODE_PRECISION (inner_mode)
				    - bitwidth)));
    case NOT:
      return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
    case ROTATE:	case ROTATERT:
      /* If we are rotating left by a number of bits less than the number
	 of sign bit copies, we can just subtract that amount from the
	 number.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < (int) bitwidth)
	{
	  num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					     known_x, known_mode, known_ret);
	  return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
				 : (int) bitwidth - INTVAL (XEXP (x, 1))));
	}
      break;
    case NEG:
      /* In general, this subtracts one sign bit copy.  But if the value
	 is known to be positive, the number of sign bit copies is the
	 same as that of the input.  Finally, if the input has just one bit
	 that might be nonzero, all the bits are copies of the sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return num0 > 1 ? num0 - 1 : 1;

      nonzero = nonzero_bits (XEXP (x, 0), mode);
      if (nonzero == 1)
	return bitwidth;

      if (num0 > 1
	  && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
	num0--;

      return num0;
    case IOR:   case AND:   case XOR:
    case SMIN:  case SMAX:  case UMIN:  case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
	 MIN and MAX operations always return one of the operands.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      /* If num1 is clearing some of the top bits then regardless of
	 the other term, we are guaranteed to have at least that many
	 high-order zero bits.  */
      if (code == AND
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
	return num1;

      /* Similarly for IOR when setting high-order bits.  */
      if (code == IOR
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return num1;

      return MIN (num0, num1);
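      /* Illustration of the AND early return above, in a hypothetical
	 8-bit mode: for (and x (const_int 0x0f)) the constant has four
	 high-order zero bits, so num1 == 4 and its sign bit is clear;
	 the result therefore has at least four sign-bit (zero) copies
	 whatever X contains.  */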
    case PLUS:  case MINUS:
      /* For addition and subtraction, we can have a 1-bit carry.  However,
	 if we are subtracting 1 from a positive number, there will not
	 be such a carry.  Furthermore, if the positive number is known to
	 be 0 or 1, we know the result is either -1 or 0.  */

      if (code == PLUS && XEXP (x, 1) == constm1_rtx
	  && bitwidth <= HOST_BITS_PER_WIDE_INT)
	{
	  nonzero = nonzero_bits (XEXP (x, 0), mode);
	  if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
	    return (nonzero == 1 || nonzero == 0 ? bitwidth
		    : bitwidth - floor_log2 (nonzero) - 1);
	}

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      result = MAX (1, MIN (num0, num1) - 1);

      return result;
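      /* Illustration of the carry above, in a hypothetical 8-bit mode:
	 0x1F has three sign-bit copies, but 0x1F + 0x1F == 0x3E has only
	 two, so MIN (num0, num1) - 1 == 2 is tight.  */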
    case MULT:
      /* The number of bits of the product is the sum of the number of
	 bits of both terms.  However, unless one of the terms is known
	 to be positive, we must allow for an additional bit since negating
	 a negative number can remove one sign bit copy.  */

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
      if (result > 0
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (((nonzero_bits (XEXP (x, 0), mode)
		    & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
		  && ((nonzero_bits (XEXP (x, 1), mode)
		       & (HOST_WIDE_INT_1U << (bitwidth - 1)))
		      != 0))))
	result--;

      return MAX (1, result);
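      /* Illustration, in a hypothetical 8-bit mode: num0 == 6 restricts
	 one operand to [-4, 3] and num1 == 7 the other to [-2, 1], giving
	 result == 8 - 2 - 1 == 5; both operands may be negative, so one
	 more bit is subtracted, and indeed (-4) * (-2) == 8 == 0b00001000
	 has only four sign-bit copies.  */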
    case UDIV:
      /* The result must be <= the first operand.  If the first operand
	 has the high bit set, we know nothing about the number of sign
	 bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 0), mode)
		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
    case UMOD:
      /* The result must be <= the second operand.  If the second operand
	 has (or just might have) the high bit set, we know nothing about
	 the number of sign bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 1), mode)
		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
    case DIV:
      /* Similar to unsigned division, except that we have to worry about
	 the case where the divisor is negative, in which case we have
	 to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
	result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
	result--;

      return result;
    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
	 sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0
	  && INTVAL (XEXP (x, 1)) < xmode_width)
	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;
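      /* For example, if X has two sign-bit copies in a hypothetical 8-bit
	 mode, (ashiftrt x 3) has MIN (8, 2 + 3) == 5 of them: the shift
	 duplicates the sign bit three more times.  */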
    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
	  || INTVAL (XEXP (x, 1)) < 0
	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
	  || INTVAL (XEXP (x, 1)) >= xmode_width)
	return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));
    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
					 known_x, known_mode, known_ret);
      return MIN (num0, num1);
    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
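      /* For the common STORE_FLAG_VALUE == 1, nonzero stays 1 and the
	 result is bitwidth - 1: a comparison result is always 0 or 1, so
	 all but the lowest bit are copies of the (zero) sign bit.  */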
    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}
/* Calculate the rtx_cost of a single instruction pattern.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
pattern_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.  We
     can't use single_set since we only have the pattern.  We also
     consider PARALLELs of a normal set and a single comparison.  In
     that case we use the cost of the non-comparison SET operation,
     which is most-likely to be the real cost of this operation.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      rtx comparison = NULL_RTX;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      if (GET_CODE (SET_SRC (x)) == COMPARE)
		{
		  if (comparison)
		    return 0;
		  comparison = x;
		}
	      else
		{
		  if (set)
		    return 0;
		  set = x;
		}
	    }
	}

      if (!set && comparison)
	set = comparison;

      if (!set)
	return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
/* Calculate the cost of a single instruction.  A return value of zero
   indicates an instruction pattern without a known cost.  */

int
insn_cost (rtx_insn *insn, bool speed)
{
  if (targetm.insn_cost)
    return targetm.insn_cost (insn, speed);

  return pattern_cost (PATTERN (insn), speed);
}
/* Return an estimate of the cost of computing SEQ.  */

unsigned
seq_cost (const rtx_insn *seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
	cost += set_rtx_cost (set, speed);
      else if (NONDEBUG_INSN_P (seq))
	{
	  int this_cost = insn_cost (CONST_CAST_RTX_INSN (seq), speed);
	  if (this_cost > 0)
	    cost += this_cost;
	  else
	    cost++;
	}
    }

  return cost;
}
/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonicalizing
   it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */
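/* For example, rule (4) rewrites (le x (const_int 4)) as
   (lt x (const_int 5)) and (geu x (const_int 1)) as
   (gtu x (const_int 0)).  */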
rtx
canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
			rtx_insn **earliest,
			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx_insn *prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;
  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	 && op1 == CONST0_RTX (GET_MODE (op0))
	 && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || !NONJUMP_INSN_P (prev)
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}
      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (!REG_P (op0))
	break;
      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
	  || !NONJUMP_INSN_P (prev)
	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
	  /* In cfglayout mode, there do not have to be labels at the
	     beginning of a block, or jumps at the end, so the previous
	     conditions would not stop us when we reach bb boundary.  */
	  || BLOCK_FOR_INSN (prev) != bb)
	break;

      set = set_of (op0, prev);

      if (set
	  && (GET_CODE (set) != SET
	      || !rtx_equal_p (SET_DEST (set), op0)))
	break;
      /* If this is setting OP0, get what it sets it to if it looks
	 relevant.  */
      if (set)
	{
	  machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* ??? We may not combine comparisons done in a CCmode with
	     comparisons not done in a CCmode.  This is to aid targets
	     like Alpha that have an IEEE compliant EQ instruction, and
	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
	     actually artificial, simply to prevent the combination, but
	     should not affect other platforms.

	     However, we must allow VOIDmode comparisons to match either
	     CCmode or non-CCmode comparison, because some ports have
	     modeless comparisons inside branch patterns.

	     ??? This mode check should perhaps look more like the mode check
	     in simplify_comparison in combine.  */
	  if (((GET_MODE_CLASS (mode) == MODE_CC)
	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
	      && mode != VOIDmode
	      && inner_mode != VOIDmode)
	    break;
	  if (GET_CODE (SET_SRC (set)) == COMPARE
	      || (((code == NE
		    || (code == LT
			&& val_signbit_known_set_p (inner_mode,
						    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		    || (code == LT
			&& SCALAR_FLOAT_MODE_P (inner_mode)
			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			    REAL_VALUE_NEGATIVE (fsfv)))
#endif
		    ))
		  && COMPARISON_P (SET_SRC (set))))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && val_signbit_known_set_p (inner_mode,
						     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     ))
		   && COMPARISON_P (SET_SRC (set)))
	    {
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else if ((code == EQ || code == NE)
		   && GET_CODE (SET_SRC (set)) == XOR)
	    /* Handle sequences like:

	       (set op0 (xor X Y))
	       ...(eq|ne op0 (const_int 0))...

	       in which case:

	       (eq op0 (const_int 0)) reduces to (eq X Y)
	       (ne op0 (const_int 0)) reduces to (ne X Y)

	       This is the form used by MIPS16, for example.  */
	    x = SET_SRC (set);
	  else
	    break;
	}
      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;

      if (x)
	{
	  /* If the caller is expecting the condition to be valid at INSN,
	     make sure X doesn't change before INSN.  */
	  if (valid_at_insn_p)
	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
	      break;
	  if (COMPARISON_P (x))
	    code = GET_CODE (x);
	  if (reverse_code)
	    {
	      code = reversed_comparison_code (x, prev);
	      if (code == UNKNOWN)
		return 0;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }
  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;
  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  scalar_int_mode op0_mode;
  if (CONST_INT_P (op1)
      && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode)
      && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (op0_mode);

      switch (code)
	{
	case LE:
	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
	    code = LT, op1 = gen_int_mode (const_val + 1, op0_mode);
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
	case GE:
	  if ((const_val & max_val)
	      != (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (op0_mode) - 1)))
	    code = GT, op1 = gen_int_mode (const_val - 1, op0_mode);
	  break;

	case LEU:
	  if (uconst_val < max_val)
	    code = LTU, op1 = gen_int_mode (uconst_val + 1, op0_mode);
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = gen_int_mode (uconst_val - 1, op0_mode);
	  break;

	default:
	  break;
	}
    }
  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  /* We promised to return a comparison.  */
  rtx ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
  if (COMPARISON_P (ret))
    return ret;
  return 0;
}
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
	       int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
				 allow_cc_mode, valid_at_insn_p);
}
/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  opt_scalar_int_mode in_mode_iter;
  scalar_int_mode mode;

  FOR_EACH_MODE_IN_CLASS (in_mode_iter, MODE_INT)
    FOR_EACH_MODE_UNTIL (mode, in_mode_iter.require ())
      {
	scalar_int_mode in_mode = in_mode_iter.require ();
	scalar_int_mode i;

	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
	   extends to the next widest mode.  */
	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
		    || GET_MODE_WIDER_MODE (mode).require () == in_mode);

	/* We are in in_mode.  Count how many bits outside of mode
	   have to be copies of the sign-bit.  */
	FOR_EACH_MODE (i, mode, in_mode)
	  {
	    /* This must always exist (for the last iteration it will be
	       IN_MODE).  */
	    scalar_int_mode wider = GET_MODE_WIDER_MODE (i).require ();

	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
		/* We can only check sign-bit copies starting from the
		   top-bit.  In order to be able to check the bits we
		   have already seen we pretend that subsequent bits
		   have to be sign-bit copies too.  */
		|| num_sign_bit_copies_in_rep [in_mode][mode])
	      num_sign_bit_copies_in_rep [in_mode][mode]
		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
	  }
      }
}
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}
/* Return true if RTX code CODE has a single sequence of zero or more
   "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
   entry in that case.  */

static bool
setup_reg_subrtx_bounds (unsigned int code)
{
  const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
  unsigned int i = 0;
  for (; format[i] != 'e'; ++i)
    {
      if (!format[i])
	/* No subrtxes.  Leave start and count as 0.  */
	return true;
      if (format[i] == 'E' || format[i] == 'V')
	return false;
    }

  /* Record the sequence of 'e's.  */
  rtx_all_subrtx_bounds[code].start = i;
  do
    ++i;
  while (format[i] == 'e');
  rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
  /* rtl-iter.h relies on this.  */
  gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);

  for (; format[i]; ++i)
    if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
      return false;

  return true;
}
/* Initialize rtx_all_subrtx_bounds.  */
void
init_rtlanal (void)
{
  unsigned int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      if (!setup_reg_subrtx_bounds (i))
	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
      if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
    }

  init_num_sign_bit_copies_in_rep ();
}
/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}
/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */
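/* For example, m == 0x0f gives exact_log2 (0x10) == 4, the width of the
   low four-bit field, while m == 0x06 gives exact_log2 (0x07) == -1
   because the mask does not start at bit 0.  */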
int
low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (!HWI_COMPUTABLE_MODE_P (mode))
	return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
/* Return the mode of MEM's address.  */

scalar_int_mode
get_address_mode (rtx mem)
{
  machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return as_a <scalar_int_mode> (mode);
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}
/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.

   TODO: This function needs to be rewritten to work on any size
   integer.  */
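/* For example, on a host with 64-bit HOST_WIDE_INT and a target with
   32-bit words, the CONST_INT 0x100000000 splits into a low word of 0
   and a high word of 1; *FIRST gets the low word unless
   WORDS_BIG_ENDIAN.  */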
void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
	{
	  /* In this case the CONST_INT holds both target words.
	     Extract the bits from it into two word-sized pieces.
	     Sign extend each half to HOST_WIDE_INT.  */
	  unsigned HOST_WIDE_INT low, high;
	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
	  unsigned bits_per_word = BITS_PER_WORD;

	  /* Set sign_bit to the most significant bit of a word.  */
	  sign_bit = 1;
	  sign_bit <<= bits_per_word - 1;

	  /* Set mask so that all bits of the word are set.  We could
	     have used 1 << BITS_PER_WORD instead of basing the
	     calculation on sign_bit.  However, on machines where
	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
	     compiler warning, even though the code would never be
	     executed.  */
	  mask = sign_bit << 1;
	  mask--;

	  /* Set sign_extend as any remaining bits.  */
	  sign_extend = ~mask;

	  /* Pick the lower word and sign-extend it.  */
	  low = INTVAL (value);
	  low &= mask;
	  if (low & sign_bit)
	    low |= sign_extend;

	  /* Pick the higher word, shifted to the least significant
	     bits, and sign-extend it.  */
	  high = INTVAL (value);
	  high >>= bits_per_word - 1;
	  high >>= 1;
	  high &= mask;
	  if (high & sign_bit)
	    high |= sign_extend;

	  /* Store the words in the target machine order.  */
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = GEN_INT (high);
	      *second = GEN_INT (low);
	    }
	  else
	    {
	      *first = GEN_INT (low);
	      *second = GEN_INT (high);
	    }
	}
      else
	{
	  /* The rule for using CONST_INT for a wider mode
	     is that we regard the value as signed.
	     So sign-extend it.  */
	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = high;
	      *second = value;
	    }
	  else
	    {
	      *first = value;
	      *second = high;
	    }
	}
    }
  else if (GET_CODE (value) == CONST_WIDE_INT)
    {
      /* All of this is scary code and needs to be converted to
	 properly work with any size integer.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	}
      else
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	}
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
	{
	  *first = const0_rtx;
	  *second = value;
	}
      else
	{
	  *first = value;
	  *second = const0_rtx;
	}
    }
  else if (GET_MODE (value) == VOIDmode
	   /* This is the old way we did CONST_DOUBLE integers.  */
	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
	 So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
	}
      else
	{
	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
	}
    }
  else
    {
      long l[2];

      /* Note, this converts the REAL_VALUE_TYPE to the target's
	 format, splits up the floating point double and outputs
	 exactly 32 bits of it into each of l[0] and l[1] --
	 not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);

      /* If 32 bits is an entire word for the target, but not for the host,
	 then sign-extend on the host so that the number will look the same
	 way on the host that it would on the target.  See for instance
	 simplify_unary_operation.  The #if is needed to avoid compiler
	 warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
	{
	  if (l[0] & ((long) 1 << 31))
	    l[0] |= ((unsigned long) (-1) << 32);
	  if (l[1] & ((long) 1 << 31))
	    l[1] |= ((unsigned long) (-1) << 32);
	}
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}
/* Return true if X is a sign_extract or zero_extract from the least
   significant bit.  */
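/* For example, with !BITS_BIG_ENDIAN, (zero_extract:SI x (const_int 8)
   (const_int 0)) extracts the least significant byte and satisfies this
   predicate; with BITS_BIG_ENDIAN the same byte has position
   GET_MODE_PRECISION (mode) - 8 instead.  */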
static bool
lsb_bitfield_op_p (rtx x)
{
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
    {
      machine_mode mode = GET_MODE (XEXP (x, 0));
      HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));

      return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
    }
  return false;
}
/* Strip outer address "mutations" from LOC and return a pointer to the
   inner value.  If OUTER_CODE is nonnull, store the code of the innermost
   stripped expression there.

   "Mutations" either convert between modes or apply some kind of
   extension, truncation or alignment.  */
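/* For example, stripping (and (plus r1 r2) (const_int -8)), which aligns
   the sum to 8 bytes, yields a pointer to the inner (plus r1 r2), and
   stripping (sign_extend:DI (reg:SI r)) yields a pointer to the
   register.  */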
rtx *
strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
  for (;;)
    {
      enum rtx_code code = GET_CODE (*loc);
      if (GET_RTX_CLASS (code) == RTX_UNARY)
	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
	   used to convert between pointer sizes.  */
	loc = &XEXP (*loc, 0);
      else if (lsb_bitfield_op_p (*loc))
	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
	   acts as a combined truncation and extension.  */
	loc = &XEXP (*loc, 0);
      else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
	/* (and ... (const_int -X)) is used to align to X bytes.  */
	loc = &XEXP (*loc, 0);
      else if (code == SUBREG
	       && !OBJECT_P (SUBREG_REG (*loc))
	       && subreg_lowpart_p (*loc))
	/* (subreg (operator ...) ...) inside an AND is used for mode
	   conversion too.  */
	loc = &SUBREG_REG (*loc);
      else
	return loc;
      if (outer_code)
	*outer_code = code;
    }
}
/* Return true if CODE applies some kind of scale.  The scaled value is
   the first operand and the scale is the second.  */

static bool
binary_scale_code_p (enum rtx_code code)
{
  return (code == MULT
	  || code == ASHIFT
	  /* Needed by ARM targets.  */
	  || code == ASHIFTRT
	  || code == LSHIFTRT
	  || code == ROTATE
	  || code == ROTATERT);
}
/* If *INNER can be interpreted as a base, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_base_term (rtx *inner)
{
  if (GET_CODE (*inner) == LO_SUM)
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}
/* If *INNER can be interpreted as an index, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_index_term (rtx *inner)
{
  /* At present, only constant scales are allowed.  */
  if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}
/* Set the segment part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->segment);
  info->segment = loc;
  info->segment_term = inner;
}
/* Set the base part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_base (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->base);
  info->base = loc;
  info->base_term = inner;
}
/* Set the index part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_index (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->index);
  info->index = loc;
  info->index_term = inner;
}
/* Set the displacement part of address INFO to LOC, given that INNER
   is the constant term.  */

static void
set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->disp);
  info->disp = loc;
  info->disp_term = inner;
}
/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
   rest of INFO accordingly.  */

static void
decompose_incdec_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  /* These addresses are only valid when the size of the addressed
     value is known.  */
  gcc_checking_assert (info->mode != VOIDmode);
}
/* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
   of INFO accordingly.  */

static void
decompose_automod_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  rtx plus = XEXP (*info->inner, 1);
  gcc_assert (GET_CODE (plus) == PLUS);

  info->base_term2 = &XEXP (plus, 0);
  gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));

  rtx *step = &XEXP (plus, 1);
  rtx *inner_step = strip_address_mutations (step);
  if (CONSTANT_P (*inner_step))
    set_address_disp (info, step, inner_step);
  else
    set_address_index (info, step, inner_step);
}
/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}
/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, machine_mode mode, addr_space_t as,
	  enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
	 or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
	return base_p ? 1 : -1;
    }
  return 0;
}
/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
					ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
	set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
	set_address_segment (info, loc, inner);
      else
	{
	  /* The only other possibilities are a base or an index.  */
	  rtx *base_term = get_base_term (inner);
	  rtx *index_term = get_index_term (inner);
	  gcc_assert (base_term || index_term);
	  if (!base_term)
	    set_address_index (info, loc, index_term);
	  else if (!index_term)
	    set_address_base (info, loc, base_term);
	  else
	    {
	      gcc_assert (base_term == index_term);
	      ops[out] = loc;
	      inner_ops[out] = base_term;
	      ++out;
	    }
	}
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
	 the base.  If we were confident that another term was the base
	 or index, treat the remaining operand as the other kind.  */
      if (!info->base)
	set_address_base (info, ops[0], inner_ops[0]);
      else
	set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
		    GET_CODE (*ops[1]))
	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
		       GET_CODE (*ops[0])))
	{
	  set_address_base (info, ops[0], inner_ops[0]);
	  set_address_index (info, ops[1], inner_ops[1]);
	}
      else
	{
	  set_address_base (info, ops[1], inner_ops[1]);
	  set_address_index (info, ops[0], inner_ops[0]);
	}
    }
  else
    gcc_assert (out == 0);
}
/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
		   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}
/* Describe address operand LOC in INFO.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}
/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
		     MEM_ADDR_SPACE (x), MEM);
}
/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
		     info->addr_outer_code);
}
/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */
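/* For example, an index of (mult r (const_int 4)) has scale 4, as does
   (ashift r (const_int 2)); a bare register index has scale 1.  */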
HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}
/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}
/* Return true if RTL X contains a SYMBOL_REF.  */

bool
contains_symbol_ref_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter))
      return true;

  return false;
}
/* Return true if RTL X contains a SYMBOL_REF or LABEL_REF.  */

bool
contains_symbolic_reference_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
      return true;

  return false;
}
/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (const_rtx x)
{
  if (!targetm.have_tls)
    return false;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
      return true;
  return false;
}