/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "insn-config.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "addresses.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, machine_mode,
                                                   const_rtx, machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, machine_mode,
                                             const_rtx, machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, machine_mode,
                                                const_rtx, machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, machine_mode, const_rtx,
                                          machine_mode, unsigned int);

rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep [MAX_MODE_INT + 1][MAX_MODE_INT + 1];
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  value_type *base,
                                                  size_t i, value_type x)
{
  if (base == array.stack)
    {
      if (i < LOCAL_ELEMS)
        {
          base[i] = x;
          return base;
        }
      gcc_checking_assert (i == LOCAL_ELEMS);
      /* A previous iteration might also have moved from the stack to the
         heap, in which case the heap array will already be big enough.  */
      if (vec_safe_length (array.heap) <= i)
        vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }
  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}
/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    value_type *base,
                                                    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
         we're likely to want most.  It also allows for the SEQUENCE
         handling below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
        if (format[i] == 'e')
          {
            value_type subx = T::get_value (x->u.fld[i].rt_rtx);
            if (__builtin_expect (end < LOCAL_ELEMS, true))
              base[end++] = subx;
            else
              base = add_single_to_queue (array, base, end++, subx);
          }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
        {
          value_type subx = T::get_value (x->u.fld[i].rt_rtx);
          if (__builtin_expect (end < LOCAL_ELEMS, true))
            base[end++] = subx;
          else
            base = add_single_to_queue (array, base, end++, subx);
        }
      else if (format[i] == 'E')
        {
          unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
          rtx *vec = x->u.fld[i].rt_rtvec->elem;
          if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
            for (unsigned int j = 0; j < length; j++)
              base[end++] = T::get_value (vec[j]);
          else
            for (unsigned int j = 0; j < length; j++)
              base = add_single_to_queue (array, base, end++,
                                          T::get_value (vec[j]));
          if (code == SEQUENCE && end == length)
            /* If the subrtxes of the sequence fill the entire array then
               we know that no other parts of a containing insn are queued.
               The caller is therefore iterating over the sequence as a
               PATTERN (...), so we also want the patterns of the
               subinstructions.  */
            for (unsigned int j = 0; j < length; j++)
              {
                typename T::rtx_type x = T::get_rtx (base[j]);
                if (INSN_P (x))
                  base[j] = T::get_value (PATTERN (x));
              }
        }
  return end - orig_end;
}
template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;
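
/* Illustrative usage sketch (not part of the original file): a typical
   walk over the subrtxes of an rtx with the iterator classes instantiated
   above.  The helper name `count_regs' is hypothetical.

     static int
     count_regs (const_rtx x)
     {
       int n = 0;
       subrtx_iterator::array_type array;
       FOR_EACH_SUBRTX (iter, array, x, ALL)
         if (REG_P (*iter))
           n++;
       return n;
     }  */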
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
/* Compute an approximation for the offset between the register
   FROM and TO for the current function, as it was at the start
   of the routine.  */

static HOST_WIDE_INT
get_initial_register_offset (int from, int to)
{
#ifdef ELIMINABLE_REGS
  static const struct elim_table_t
  {
    const int from;
    const int to;
  } table[] = ELIMINABLE_REGS;
  HOST_WIDE_INT offset1, offset2;
  unsigned int i, j;

  if (to == from)
    return 0;

  /* It is not safe to call INITIAL_ELIMINATION_OFFSET
     before the reload pass.  We need to give at least
     an estimation for the resulting frame size.  */
  if (! reload_completed)
    {
      offset1 = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset1 = - offset1;
#endif
      if (to == STACK_POINTER_REGNUM)
        return offset1;
      else if (from == STACK_POINTER_REGNUM)
        return - offset1;
      else
        return 0;
    }

  for (i = 0; i < ARRAY_SIZE (table); i++)
    if (table[i].from == from)
      {
        if (table[i].to == to)
          {
            INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                        offset1);
            return offset1;
          }
        for (j = 0; j < ARRAY_SIZE (table); j++)
          {
            if (table[j].to == to
                && table[j].from == table[i].to)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return offset1 + offset2;
              }
            if (table[j].from == to
                && table[j].to == table[i].to)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return offset1 - offset2;
              }
          }
      }
    else if (table[i].to == from)
      {
        if (table[i].from == to)
          {
            INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                        offset1);
            return - offset1;
          }
        for (j = 0; j < ARRAY_SIZE (table); j++)
          {
            if (table[j].to == to
                && table[j].from == table[i].from)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return - offset1 + offset2;
              }
            if (table[j].from == to
                && table[j].to == table[i].from)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return - offset1 - offset2;
              }
          }
      }

  /* If the requested register combination was not found,
     try a different more simple combination.  */
  if (from == ARG_POINTER_REGNUM)
    return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
  else if (to == ARG_POINTER_REGNUM)
    return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
  else if (from == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
  else if (to == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
  else
    return 0;

#else
  HOST_WIDE_INT offset;

  if (to == from)
    return 0;

  if (reload_completed)
    {
      INITIAL_FRAME_POINTER_OFFSET (offset);
    }
  else
    {
      offset = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset = - offset;
#endif
    }

  if (to == STACK_POINTER_REGNUM)
    return offset;
  else if (from == STACK_POINTER_REGNUM)
    return - offset;
  else
    return 0;
#endif
}
/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
                         ? tree_to_shwi (DECL_SIZE_UNIT (decl))
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        {
#ifdef RED_ZONE_SIZE
          HOST_WIDE_INT red_zone_size = RED_ZONE_SIZE;
#else
          HOST_WIDE_INT red_zone_size = 0;
#endif
          HOST_WIDE_INT stack_boundary = PREFERRED_STACK_BOUNDARY
                                         / BITS_PER_UNIT;
          HOST_WIDE_INT low_bound, high_bound;

          if (size == 0)
            size = GET_MODE_SIZE (mode);

          if (x == frame_pointer_rtx)
            {
              if (FRAME_GROWS_DOWNWARD)
                {
                  high_bound = STARTING_FRAME_OFFSET;
                  low_bound  = high_bound - get_frame_size ();
                }
              else
                {
                  low_bound  = STARTING_FRAME_OFFSET;
                  high_bound = low_bound + get_frame_size ();
                }
            }
          else if (x == hard_frame_pointer_rtx)
            {
              HOST_WIDE_INT sp_offset
                = get_initial_register_offset (STACK_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);
              HOST_WIDE_INT ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = sp_offset - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = sp_offset + red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else if (x == stack_pointer_rtx)
            {
              HOST_WIDE_INT ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               STACK_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else
            {
              /* We assume that accesses are safe to at least the
                 next stack boundary.
                 Examples are varargs and __builtin_return_address.  */
#if ARGS_GROW_DOWNWARD
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + stack_boundary;
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - crtl->args.size - stack_boundary;
#else
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - stack_boundary;
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + crtl->args.size + stack_boundary;
#endif
            }

          if (offset >= low_bound && offset <= high_bound - size)
            return 0;
          return 1;
        }
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
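
/* Usage sketch (illustrative only, not from the original file): a pass
   deciding whether a load can be evaluated speculatively might guard on
   this predicate.  The name `mem' is hypothetical.

     if (MEM_P (mem) && !rtx_addr_can_trap_p (XEXP (mem, 0)))
       ... the address is provably safe to dereference ...  */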
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
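
/* Usage sketch (illustrative only, not from the original file): given
   x = (const (plus (symbol_ref "s") (const_int 8))), split_const stores
   (symbol_ref "s") in *BASE_OUT and (const_int 8) in *OFFSET_OUT; for any
   other rtx the offset is const0_rtx.

     rtx base, offset;
     split_const (x, &base, &offset);
     if (GET_CODE (base) == SYMBOL_REF && INTVAL (offset) != 0)
       ...  */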
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}
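
/* Usage sketch (illustrative only, not from the original file): counting
   how many times a register is used in an insn pattern while ignoring SET
   destinations.  `insn' and `reg' are hypothetical names.

     int uses = count_occurrences (PATTERN (insn), reg, 0);  */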
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == LABEL_REF_LABEL (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

      /* These are kept unique for a given value.  */
    CASE_CONST_ANY:
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
                    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
                   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Internals of reg_set_between_p.  */
int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
        if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
          return true;

      return false;
    }

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return true;

  return set_of (reg, insn) != NULL_RTX;
}
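
/* Usage sketch (illustrative only, not from the original file): reg_set_p
   answers the question for a single insn, reg_set_between_p for an
   exclusive range of insns.  `reg', `insn' and `tail' are hypothetical.

     if (!reg_set_p (reg, insn)
         && !reg_set_between_p (reg, insn, tail))
       ... REG still holds the same value at TAIL ...  */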
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
1385 find_all_hard_regs (const_rtx x
, HARD_REG_SET
*pset
)
1387 subrtx_iterator::array_type array
;
1388 FOR_EACH_SUBRTX (iter
, array
, x
, NONCONST
)
1390 const_rtx x
= *iter
;
1391 if (REG_P (x
) && REGNO (x
) < FIRST_PSEUDO_REGISTER
)
1392 add_to_hard_reg_set (pset
, GET_MODE (x
), REGNO (x
));
/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *)data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}
/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
        IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}
/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In common case
                 only single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach set first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL;
              break;

            default:
              return NULL;
            }
        }
    }
  return set;
}
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      int c0 = INTVAL (XVECEXP (par, 0, 0));
      HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
          return 0;
      return
        simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                               offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
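
/* Example (illustrative only, not from the original file):
   (set (reg:SI 100) (reg:SI 100)) satisfies set_noop_p, while
   (set (reg:SI 100) (reg:SI 101)) does not; noop_move_p below lifts this
   test from a single SET to whole insns, COND_EXECs and PARALLELs.  */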
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const rtx_insn *insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

bool
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return false;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
           || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
               && x_regno == ARG_POINTER_REGNUM)
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return true;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return true;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return false;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return true;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return true;
        }
    }
  return false;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X cannot
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

  If the item being stored in or clobbered is a SUBREG of a hard register,
  the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
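
/* Usage sketch (illustrative only, not from the original file): the
   canonical note_stores callback pattern, as record_hard_reg_sets above
   also demonstrates.  `mark_dest' and `some_bitmap' are hypothetical.

     static void
     mark_dest (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
     {
       if (REG_P (x))
         bitmap_set_bit ((bitmap) data, REGNO (x));
     }

     ...
     note_stores (PATTERN (insn), mark_dest, some_bitmap);  */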
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}
/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
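
/* Usage sketch (illustrative only, not from the original file): testing
   whether an insn carries a REG_EQUAL note and retrieving its datum.
   `insn' is hypothetical.

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note)
       ... XEXP (note, 0) is the equivalent expression ...  */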
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const rtx_insn *insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}
/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}
/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));
  switch (kind)
    {
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
                                       datum, REG_NOTES (insn));
}
2329 add_shallow_copy_of_reg_note (rtx_insn
*insn
, rtx note
)
2331 if (GET_CODE (note
) == INT_LIST
)
2332 add_int_reg_note (insn
, REG_NOTE_KIND (note
), XINT (note
, 0));
2334 add_reg_note (insn
, REG_NOTE_KIND (note
), XEXP (note
, 0));
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (as_a <rtx_insn *> (insn));
      break;
    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.  */

void
remove_reg_equal_equiv_notes (rtx_insn *insn)
{
  rtx *loc;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        *loc = XEXP (*loc, 1);
      else
        loc = &XEXP (*loc, 1);
    }
}
/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx_insn *insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

bool
in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return true;

  return false;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
{
  rtx_expr_list *temp = *listp;
  rtx_expr_list *prev = NULL;

  while (temp)
    {
      if (node == temp->element ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
{
  rtx_insn_list *temp = *listp;
  rtx_insn_list *prev = NULL;

  while (temp)
    {
      if (node == temp->insn ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state instructions, and thus no
   instructions or register uses should be moved or combined across them.
   This includes only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case CALL:
    case MEM:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_insn_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_insn_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Nonzero if X contains any volatile memory references,
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_refs_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_refs_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Similar to above, except that it also rejects register pre- and
   post-incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case VAR_LOCATION:
      return 0;

    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
         when some combination can't be done.  If we see one, don't think
         that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
    case CALL:
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (side_effects_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (side_effects_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);
  switch (code)
    {
      /* Handle these cases quickly.  */
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST:
    case PC:
    case CC0:
    case REG:
    case SCRATCH:
      return 0;

    case UNSPEC:
      return targetm.unspec_may_trap_p (x, flags);

    case UNSPEC_VOLATILE:
    case ASM_INPUT:
    case TRAP_IF:
      return 1;

    case ASM_OPERANDS:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
          && MEM_VOLATILE_P (x)
          && XEXP (x, 0) == stack_pointer_rtx)
        return 1;
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
             reference; moving it out of context such as when moving code
             when optimizing, might cause its address to become invalid.  */
          code_changed
          || !MEM_NOTRAP_P (x))
        {
          HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
          return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
                                        GET_MODE (x), code_changed);
        }
      return 0;

      /* Division by a non-constant might trap.  */
    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      if (HONOR_SNANS (x))
        return 1;
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
        return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
        return 1;
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
         certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
        break;
      /* ??? There is no machine independent way to check for tests that trap
         when COMPARE is used, though many targets do make this distinction.
         For instance, sparc uses CCFPE for compares which generate exceptions
         and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (x))
        return 1;
      /* But often the compare has some CC mode, so check operand
         modes as well.  */
      if (HONOR_NANS (XEXP (x, 0))
          || HONOR_NANS (XEXP (x, 1)))
        return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (x))
        return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (XEXP (x, 0))
          || HONOR_SNANS (XEXP (x, 1)))
        return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
        return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
        return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (may_trap_p_1 (XEXP (x, i), flags))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = 0; j < XVECLEN (x, i); j++)
            if (may_trap_p_1 (XVECEXP (x, i, j), flags))
              return 1;
        }
    }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}

/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
        if (s->d == 1)
          return *s->ip;
        else
          return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      ((set (reg:SI) (mem:SI (%fp - 7)))
       (set (reg:QI) (mem:QI (%fp - 7))))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}

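/* Illustrative usage (a sketch, not from the original sources): passes
   that move an expression out of its original context ask the stronger
   question,

     if (may_trap_or_fault_p (PATTERN (insn)))
       ;  /* don't hoist INSN out of its block */

   whereas a pass that only re-evaluates an rtx in place can use the
   weaker may_trap_p.  */
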
/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

int
inequality_comparisons_p (const_rtx x)
{
  const char *fmt;
  int len, i;
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case REG:
    case SCRATCH:
    case PC:
    case CC0:
    CASE_CONST_ANY:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      return 0;

    case LT:
    case LTU:
    case GT:
    case GTU:
    case LE:
    case LEU:
    case GE:
    case GEU:
      return 1;

    default:
      break;
    }

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
        {
          if (inequality_comparisons_p (XEXP (x, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (inequality_comparisons_p (XVECEXP (x, i, j)))
              return 1;
        }
    }

  return 0;
}

/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.  */

rtx
replace_rtx (rtx x, rtx from, rtx to)
{
  int i, j;
  const char *fmt;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);

      if (CONST_INT_P (new_rtx))
        {
          x = simplify_subreg (GET_MODE (x), new_rtx,
                               GET_MODE (SUBREG_REG (x)),
                               SUBREG_BYTE (x));
          gcc_assert (x);
        }
      else
        SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);

      if (CONST_INT_P (new_rtx))
        {
          x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
                                        new_rtx, GET_MODE (XEXP (x, 0)));
          gcc_assert (x);
        }
      else
        XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
    }

  return x;
}

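/* Illustrative usage (a sketch, not from the original sources): since
   replace_rtx modifies X in place and may fold SUBREGs or ZERO_EXTENDs
   of constants, callers typically copy shared rtl first and use the
   return value rather than the argument:

     rtx pat = copy_rtx (PATTERN (insn));
     pat = replace_rtx (pat, old_reg, const0_rtx);  */
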
/* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
   the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */

void
replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
{
  /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
  rtx x = *loc;
  if (JUMP_TABLE_DATA_P (x))
    {
      x = PATTERN (x);
      rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
      int len = GET_NUM_ELEM (vec);
      for (int i = 0; i < len; ++i)
        {
          rtx ref = RTVEC_ELT (vec, i);
          if (XEXP (ref, 0) == old_label)
            {
              XEXP (ref, 0) = new_label;
              if (update_label_nuses)
                {
                  ++LABEL_NUSES (new_label);
                  --LABEL_NUSES (old_label);
                }
            }
        }
      return;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by the iterator because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
    JUMP_LABEL (x) = new_label;

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
    {
      rtx *loc = *iter;
      if (rtx x = *loc)
        {
          if (GET_CODE (x) == SYMBOL_REF
              && CONSTANT_POOL_ADDRESS_P (x))
            {
              rtx c = get_pool_constant (x);
              if (rtx_referenced_p (old_label, c))
                {
                  /* Create a copy of constant C; replace the label inside
                     but do not update LABEL_NUSES because uses in constant
                     pool are not counted.  */
                  rtx new_c = copy_rtx (c);
                  replace_label (&new_c, old_label, new_label, false);

                  /* Add the new constant NEW_C to constant pool and replace
                     the old reference to constant by new reference.  */
                  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
                  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
                }
            }

          if ((GET_CODE (x) == LABEL_REF
               || GET_CODE (x) == INSN_LIST)
              && XEXP (x, 0) == old_label)
            {
              XEXP (x, 0) = new_label;
              if (update_label_nuses)
                {
                  ++LABEL_NUSES (new_label);
                  --LABEL_NUSES (old_label);
                }
            }
        }
    }
}

void
replace_label_in_insn (rtx_insn *insn, rtx old_label, rtx new_label,
                       bool update_label_nuses)
{
  rtx insn_as_rtx = insn;
  replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
  gcc_checking_assert (insn_as_rtx == insn);
}

/* Return true if X is referenced in BODY.  */

bool
rtx_referenced_p (const_rtx x, const_rtx body)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, body, ALL)
    if (const_rtx y = *iter)
      {
        /* Check if a label_ref Y refers to label X.  */
        if (GET_CODE (y) == LABEL_REF
            && LABEL_P (x)
            && LABEL_REF_LABEL (y) == x)
          return true;

        if (rtx_equal_p (x, y))
          return true;

        /* If Y is a reference to pool constant traverse the constant.  */
        if (GET_CODE (y) == SYMBOL_REF
            && CONSTANT_POOL_ADDRESS_P (y))
          iter.substitute (get_pool_constant (y));
      }
  return false;
}

/* If INSN is a tablejump return true and store the label (before jump table)
   to *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const rtx_insn *insn, rtx *labelp, rtx_jump_table_data **tablep)
{
  rtx label;
  rtx_insn *table;

  if (!JUMP_P (insn))
    return false;

  label = JUMP_LABEL (insn);
  if (label != NULL_RTX && !ANY_RETURN_P (label)
      && (table = NEXT_INSN (as_a <rtx_insn *> (label))) != NULL_RTX
      && JUMP_TABLE_DATA_P (table))
    {
      if (labelp)
        *labelp = label;
      if (tablep)
        *tablep = as_a <rtx_jump_table_data *> (table);
      return true;
    }
  return false;
}

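/* Illustrative usage (a sketch, not from the original sources):

     rtx_jump_table_data *table;
     if (tablejump_p (insn, NULL, &table))
       {
         rtvec labels = table->get_labels ();
         /* ... walk GET_NUM_ELEM (labels) label refs ...  */
       }

   Passing NULL for LABELP or TABLEP simply skips that output.  */
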
/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
                && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
              || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
          && computed_jump_p_1 (XEXP (x, i)))
        return 1;

      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (computed_jump_p_1 (XVECEXP (x, i, j)))
            return 1;
    }

  return 0;
}

/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const rtx_insn *insn)
{
  int i;
  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
        return 0;

      if (GET_CODE (pat) == PARALLEL)
        {
          int len = XVECLEN (pat, 0);
          int has_use_labelref = 0;

          for (i = len - 1; i >= 0; i--)
            if (GET_CODE (XVECEXP (pat, 0, i)) == USE
                && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
                    == LABEL_REF))
              {
                has_use_labelref = 1;
                break;
              }

          if (! has_use_labelref)
            for (i = len - 1; i >= 0; i--)
              if (GET_CODE (XVECEXP (pat, 0, i)) == SET
                  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
                  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
                return 1;
        }
      else if (GET_CODE (pat) == SET
               && SET_DEST (pat) == pc_rtx
               && computed_jump_p_1 (SET_SRC (pat)))
        return 1;
    }
  return 0;
}

/* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
   the equivalent add insn and pass the result to FN, using DATA as the
   final argument.  */

static int
for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
{
  rtx x = XEXP (mem, 0);
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
        int size = GET_MODE_SIZE (GET_MODE (mem));
        rtx r1 = XEXP (x, 0);
        rtx c = gen_int_mode (size, GET_MODE (r1));
        return fn (mem, x, r1, r1, c, data);
      }

    case PRE_DEC:
    case POST_DEC:
      {
        int size = GET_MODE_SIZE (GET_MODE (mem));
        rtx r1 = XEXP (x, 0);
        rtx c = gen_int_mode (-size, GET_MODE (r1));
        return fn (mem, x, r1, r1, c, data);
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
        rtx r1 = XEXP (x, 0);
        rtx add = XEXP (x, 1);
        return fn (mem, x, r1, add, NULL, data);
      }

    default:
      gcc_unreachable ();
    }
}

/* Traverse *LOC looking for MEMs that have autoinc addresses.
   For each such autoinc operation found, call FN, passing it
   the innermost enclosing MEM, the operation itself, the RTX modified
   by the operation, two RTXs (the second may be NULL) that, once
   added, represent the value to be held by the modified RTX
   afterwards, and DATA.  FN is to return 0 to continue the
   traversal or any other value to have it returned to the caller of
   for_each_inc_dec.  */

int
for_each_inc_dec (rtx x,
                  for_each_inc_dec_fn fn,
                  void *data)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx mem = *iter;
      if (mem
          && MEM_P (mem)
          && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
        {
          int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
          if (res != 0)
            return res;
          iter.skip_subrtxes ();
        }
    }
  return 0;
}

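/* Illustrative sketch of a for_each_inc_dec_fn callback (hypothetical,
   not part of the original sources).  It receives the MEM, the autoinc
   rtx, the modified register, the two addends and the opaque DATA
   pointer; returning 0 continues the walk:

     static int
     count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                    rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
                    rtx srcoff ATTRIBUTE_UNUSED, void *data)
     {
       ++*(int *) data;
       return 0;
     }

     int n = 0;
     for_each_inc_dec (PATTERN (insn), count_autoinc, &n);  */
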
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if ((tem = regno_use_in (regno, XEXP (x, i))))
            return tem;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
            return tem;
    }

  return NULL_RTX;
}

/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The more
   positive the value, the stronger the preference for being the first
   operand.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always become the second operand.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -8;
  if (code == CONST_WIDE_INT)
    return -8;
  if (code == CONST_DOUBLE)
    return -7;
  if (code == CONST_FIXED)
    return -7;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
        return -6;
      if (code == CONST_WIDE_INT)
        return -6;
      if (code == CONST_DOUBLE)
        return -5;
      if (code == CONST_FIXED)
        return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
        return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
         of objects.  Prefer pointer objects over non pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
          || (MEM_P (op) && MEM_POINTER (op)))
        return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
         This helps to make things linear.  In particular,
         (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
         operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
         is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
        return 1;

    default:
      return 0;
    }
}

/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

bool
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
          < commutative_operand_precedence (y));
}

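/* Illustrative usage (a sketch, not from the original sources): the
   canonicalization rule "higher precedence operand first" is applied as

     if (swap_commutative_operands_p (op0, op1))
       std::swap (op0, op1);

   so that, e.g., (plus (const_int 4) (reg)) becomes
   (plus (reg) (const_int 4)).  */
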
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */
int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
        return 1;
    default:
      break;
    }
  return 0;
}

/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */
int
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return 0;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (in, i) - 1; j >= 0; j--)
          if (loc == &XVECEXP (in, i, j)
              || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
            return 1;
    }
  return 0;
}

/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

unsigned int
subreg_lsb_1 (machine_mode outer_mode,
              machine_mode inner_mode,
              unsigned int subreg_byte)
{
  unsigned int bitpos;
  unsigned int byte;
  unsigned int word;

  /* A paradoxical subreg begins at bit position 0.  */
  if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
    return 0;

  if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    /* If the subreg crosses a word boundary ensure that
       it also begins and ends on a word boundary.  */
    gcc_assert (!((subreg_byte % UNITS_PER_WORD
                   + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
                  && (subreg_byte % UNITS_PER_WORD
                      || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));

  if (WORDS_BIG_ENDIAN)
    word = (GET_MODE_SIZE (inner_mode)
            - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
  else
    word = subreg_byte / UNITS_PER_WORD;
  bitpos = word * BITS_PER_WORD;

  if (BYTES_BIG_ENDIAN)
    byte = (GET_MODE_SIZE (inner_mode)
            - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
  else
    byte = subreg_byte % UNITS_PER_WORD;
  bitpos += byte * BITS_PER_UNIT;

  return bitpos;
}

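/* Worked example (illustrative assumption): on a little-endian target
   with 4-byte words, (subreg:QI (reg:DI r) 3) yields

     word    = 3 / UNITS_PER_WORD = 0
     bitpos  = 0 * BITS_PER_WORD  = 0
     byte    = 3 % UNITS_PER_WORD = 3
     bitpos += 3 * BITS_PER_UNIT  = 24

   so the subreg starts at bit 24 of the inner register.  */
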
/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

unsigned int
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
                       SUBREG_BYTE (x));
}

/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.

   Rather than considering one particular inner register (and thus one
   particular "outer" register) in isolation, this function really uses
   XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
   function does not check whether adding INFO->offset to XREGNO gives
   a valid hard register; even if INFO->offset + XREGNO is out of range,
   there might be another register of the same type that is in range.
   Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
   register, since that can depend on things like whether the final
   register number is even or odd.  Callers that want to check whether
   this particular subreg can be replaced by a simple (reg ...) should
   use simplify_subreg_regno.  */

void
subreg_get_info (unsigned int xregno, machine_mode xmode,
                 unsigned int offset, machine_mode ymode,
                 struct subreg_info *info)
{
  int nregs_xmode, nregs_ymode;
  int mode_multiple, nregs_multiple;
  int offset_adj, y_offset, y_offset_adj;
  int regsize_xmode, regsize_ymode;
  bool rknown;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  rknown = false;

  /* If there are holes in a non-scalar mode in registers, we expect
     that it is made up of its units concatenated together.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      machine_mode xmode_unit;

      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      if (GET_MODE_INNER (xmode) == VOIDmode)
        xmode_unit = xmode;
      else
        xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
                  == (GET_MODE_NUNITS (xmode)
                      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs[xregno][xmode]
                  == (hard_regno_nregs[xregno][xmode_unit]
                      * GET_MODE_NUNITS (xmode)));

      /* You can only ask for a SUBREG of a value with holes in the middle
         if you don't cross the holes.  (Such a SUBREG should be done by
         picking a different register class, or doing it in memory if
         necessary.)  An example of a value with holes is XCmode on 32-bit
         x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
         3 for each part, but in memory it's two 128-bit parts.
         Padding is assumed to be at the end (not necessarily the 'high part')
         of each unit.  */
      if ((offset / GET_MODE_SIZE (xmode_unit) + 1
           < GET_MODE_NUNITS (xmode))
          && (offset / GET_MODE_SIZE (xmode_unit)
              != ((offset + GET_MODE_SIZE (ymode) - 1)
                  / GET_MODE_SIZE (xmode_unit))))
        {
          info->representable_p = false;
          rknown = true;
        }
    }
  else
    nregs_xmode = hard_regno_nregs[xregno][xmode];

  nregs_ymode = hard_regno_nregs[xregno][ymode];

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown
      && offset == 0
      && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
         actual hard registers than the original register, we must
         return a negative offset so that we find the proper highpart
         of the register.  */
      if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
          ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
        info->offset = nregs_xmode - nregs_ymode;
      else
        info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
      && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
    {
      regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
      regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
      if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
        {
          info->representable_p = false;
          info->nregs
            = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
          info->offset = offset / regsize_xmode;
          return;
        }
      if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
        {
          info->representable_p = false;
          info->nregs
            = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
          info->offset = offset / regsize_xmode;
          return;
        }
      /* Quick exit for the simple and common case of extracting whole
         subregisters from a multiregister value.  */
      /* ??? It would be better to integrate this into the code below,
         if we can generalize the concept enough and figure out how
         odd-sized modes can coexist with the other weird cases we support.  */
      if (!rknown
          && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
          && regsize_xmode == regsize_ymode
          && (offset % regsize_ymode) == 0)
        {
          info->representable_p = true;
          info->nregs = nregs_ymode;
          info->offset = offset / regsize_ymode;
          gcc_assert (info->offset + info->nregs <= nregs_xmode);
          return;
        }
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
    {
      info->representable_p = true;
      rknown = true;

      if (offset == 0 || nregs_xmode == nregs_ymode)
        {
          info->offset = 0;
          info->nregs = nregs_ymode;
          return;
        }
    }

  /* This should always pass, otherwise we don't know how to verify
     the constraint.  These conditions may be relaxed but
     subreg_regno_offset would need to be redesigned.  */
  gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);

  if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
      && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
    {
      HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
      HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
      HOST_WIDE_INT off_low = offset & (ysize - 1);
      HOST_WIDE_INT off_high = offset & ~(ysize - 1);
      offset = (xsize - ysize - off_high) | off_low;
    }
  /* The XMODE value can be seen as a vector of NREGS_XMODE
     values.  The subreg must represent a lowpart of given field.
     Compute what field it is.  */
  offset_adj = offset;
  offset_adj -= subreg_lowpart_offset (ymode,
                                       mode_for_size (GET_MODE_BITSIZE (xmode)
                                                      / nregs_xmode,
                                                      MODE_INT, 0));

  /* Size of ymode must not be greater than the size of xmode.  */
  mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
  gcc_assert (mode_multiple != 0);

  y_offset = offset / GET_MODE_SIZE (ymode);
  y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
  nregs_multiple = nregs_xmode / nregs_ymode;

  gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((mode_multiple % nregs_multiple) == 0);

  if (!rknown)
    {
      info->representable_p
        = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
      rknown = true;
    }
  info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
  info->nregs = nregs_ymode;
}

/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */
unsigned int
subreg_regno_offset (unsigned int xregno, machine_mode xmode,
                     unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}

/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */
bool
subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
                               unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}

/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
                       unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

#ifdef CANNOT_CHANGE_MODE_CLASS
  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
      /* We can use mode change in LRA for some transformations.  */
      && ! lra_in_progress)
    return -1;
#endif

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && xregno == FRAME_POINTER_REGNUM)
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM
      /* We should convert hard stack register in LRA if it is
         necessary.  */
      && ! lra_in_progress)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how complex FP arguments are passed
     on IA-64 and should be fixed.  See PR target/49226.  */
  if (!HARD_REGNO_MODE_OK (yregno, ymode)
      && HARD_REGNO_MODE_OK (xregno, xmode))
    return -1;

  return (int) yregno;
}

/* Return the final regno that a subreg expression refers to.  */
unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
                                     GET_MODE (subreg),
                                     SUBREG_BYTE (x),
                                     GET_MODE (x));
  return ret;
}

/* Return the number of registers that a subreg expression refers
   to.  */
unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}

/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */

unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
                   &info);
  return info.nregs;
}

struct parms_set_data
{
  int nregs;
  HARD_REG_SET regs;
};

/* Helper function for noticing stores to parameter registers.  */
static void
parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct parms_set_data *const d = (struct parms_set_data *) data;
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
      && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
    {
      CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
      d->nregs--;
    }
}

/* Look backward for first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */
rtx_insn *
find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
{
  struct parms_set_data parm;
  rtx p;
  rtx_insn *before, *first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
        && REG_P (XEXP (XEXP (p, 0), 0)))
      {
        gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

        /* We only care about registers which can hold function
           arguments.  */
        if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
          continue;

        SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
        parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
         another.  Stop in that case.  */
      if (CALL_P (before))
        break;

      /* Our caller needs to either ensure that we will find all sets
         (in case code has not been optimized yet), or take care of
         possible labels by setting BOUNDARY to the preceding
         CODE_LABEL.  */
      if (LABEL_P (before))
        {
          gcc_assert (before == boundary);
          break;
        }

      if (INSN_P (before))
        {
          int nregs_old = parm.nregs;
          note_stores (PATTERN (before), parms_set, &parm);
          /* If we found something that did not set a parameter reg,
             we're done.  Do not keep going, as that might result
             in hoisting an insn before the setting of a pseudo
             that is used by the hoisted insn.  */
          if (nregs_old != parm.nregs)
            first_set = before;
          else
            break;
        }
    }
  return first_set;
}

/* Return true if we should avoid inserting code between INSN and preceding
   call instruction.  */

bool
keep_with_call_p (const rtx_insn *insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
          && fixed_regs[REGNO (SET_DEST (set))]
          && general_operand (SET_SRC (set), VOIDmode))
        return true;
      if (REG_P (SET_SRC (set))
          && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
          && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
        return true;
      /* There may be a stack pop just after the call and before the store
         of the return register.  Search for the actual store when deciding
         if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
        {
          /* This CONST_CAST is okay because next_nonnote_insn just
             returns its argument and we assign it to a const_rtx
             variable.  */
          const rtx_insn *i2
            = next_nonnote_insn (const_cast<rtx_insn *> (insn));
          if (i2 && keep_with_call_p (i2))
            return true;
        }
    }
  return false;
}

/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);
  rtx_jump_table_data *table;

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &table))
    {
      rtvec vec = table->get_labels ();
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
        if (XEXP (RTVEC_ELT (vec, i), 0) == label)
          return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}

/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   X appears as operand OPNO in an expression with code OUTER_CODE.
   SPEED specifies whether costs optimized for speed or size should
   be returned.  */

int
rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;
  int factor;

  if (x == 0)
    return 0;

  /* A size N times larger than UNITS_PER_WORD likely needs N times as
     many insns, taking N times as long.  */
  factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
  if (factor == 0)
    factor = 1;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      /* Multiplication has time-complexity O(N*N), where N is the
         number of units (translated from digits) when using
         schoolbook long multiplication.  */
      total = factor * factor * COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      /* Similarly, complexity for schoolbook long division.  */
      total = factor * factor * COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    case SET:
      /* A SET doesn't have a mode, so let's look at the SET_DEST to get
         the mode for the factor.  */
      factor = GET_MODE_SIZE (GET_MODE (SET_DEST (x))) / UNITS_PER_WORD;
      if (factor == 0)
        factor = 1;
      /* Pass through.  */
    default:
      total = factor * COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
         the mode, the more expensive it is.  */
      if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
        return COSTS_N_INSNS (2 + factor);
      break;

    default:
      if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
        return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), code, i, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        total += rtx_cost (XVECEXP (x, i, j), code, i, speed);

  return total;
}

/* Fill in the structure C with information about both speed and size rtx
   costs for X, which is operand OPNO in an expression with code OUTER.  */

void
get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
                   struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, outer, opno, true);
  c->size = rtx_cost (x, outer, opno, false);
}

/* Return cost of address expression X.
   Expect that X is a properly formed address reference.

   SPEED parameter specifies whether costs optimized for speed or size
   should be returned.  */

int
address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for cost of various unusual addresses, such as operands
     of push instruction.  It is not worthwhile to complicate writing
     of the target hook by such cases.  */

  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, mode, as, speed);
}

/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
{
  return rtx_cost (x, MEM, 0, speed);
}

/* Return the bits of X that are known to possibly be nonzero when X is
   interpreted in MODE.  */

unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, machine_mode mode)
{
  return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
}

/* Return the number of high-order bits of X that are known to be copies
   of the sign bit when X is interpreted in MODE.  */

unsigned int
num_sign_bit_copies (const_rtx x, machine_mode mode)
{
  return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
}

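/* Illustrative examples (assumptions, not from the original sources):

     nonzero_bits ((and:SI (reg) (const_int 0xff)), SImode) == 0xff
     num_sign_bit_copies ((sign_extend:SI (reg:QI)), SImode) == 25

   i.e. the AND can only produce the low eight bits, and the sign
   extension makes the top 24 bits plus the QImode sign bit agree.  */
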
/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, machine_mode mode, const_rtx known_x,
                     machine_mode known_mode,
                     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
        return nonzero_bits1 (x, mode, x0, mode,
                              cached_nonzero_bits (x0, mode, known_x,
                                                   known_mode, known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return nonzero_bits1 (x, mode, x1, mode,
                              cached_nonzero_bits (x1, mode, known_x,
                                                   known_mode, known_ret));

      if (ARITHMETIC_P (x1)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return nonzero_bits1 (x, mode, x0, mode,
                              cached_nonzero_bits (x0, mode, known_x,
                                                   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}

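/* Illustrative note (an assumption, not from the original sources): for
   an rtx such as (plus (reg A) (reg A)) the wrapper computes the bits of
   (reg A) once and hands them to nonzero_bits1 as KNOWN_X/KNOWN_RET, so
   the second occurrence of (reg A) is answered from the precomputed
   value instead of being recursed into; without this, nested shared
   operands would make the walk exponential.  */
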
/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior

/* Given an expression, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is
   an arithmetic operation, we can do better.  */

static unsigned HOST_WIDE_INT
nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
               machine_mode known_mode,
               unsigned HOST_WIDE_INT known_ret)
{
  unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
  unsigned HOST_WIDE_INT inner_nz;
  enum rtx_code code;
  machine_mode inner_mode;
  unsigned int mode_width = GET_MODE_PRECISION (mode);

  /* For floating-point and vector values, assume all bits are needed.  */
  if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return nonzero;

  /* If X is wider than MODE, use its mode instead.  */
  if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
    {
      mode = GET_MODE (x);
      nonzero = GET_MODE_MASK (mode);
      mode_width = GET_MODE_PRECISION (mode);
    }

  if (mode_width > HOST_BITS_PER_WIDE_INT)
    /* Our only callers in this case look for single bit values.  So
       just return the mode mask.  Those tests will then be false.  */
    return nonzero;

#ifndef WORD_REGISTER_OPERATIONS
  /* If MODE is wider than X, but both are a single word for both the host
     and target machines, we can compute this from which bits of the
     object might be nonzero in its own mode, taking into account the fact
     that on many CISC machines, accessing an object in a wider mode
     causes the high-order bits to become undefined.  So they are
     not known to be zero.  */

  if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
      && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
      && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
      && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
    {
      nonzero &= cached_nonzero_bits (x, GET_MODE (x),
                                      known_x, known_mode, known_ret);
      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
      return nonzero;
    }
#endif

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* If pointers extend unsigned and this is a pointer in Pmode, say that
         all the bits above ptr_mode are known to be zero.  */
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
          && REG_POINTER (x))
        nonzero &= GET_MODE_MASK (ptr_mode);
#endif

      /* Include declared information about alignment of pointers.  */
      /* ??? We don't properly preserve REG_POINTER changes across
         pointer-to-integer casts, so we can't trust it except for
         things that we know must be pointers.  See execute/960116-1.c.  */
      if ((x == stack_pointer_rtx
           || x == frame_pointer_rtx
           || x == arg_pointer_rtx)
          && REGNO_POINTER_ALIGN (REGNO (x)))
        {
          unsigned HOST_WIDE_INT alignment
            = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;

#ifdef PUSH_ROUNDING
          /* If PUSH_ROUNDING is defined, it is possible for the
             stack to be momentarily aligned only to that amount,
             so we pick the least alignment.  */
          if (x == stack_pointer_rtx && PUSH_ARGS)
            alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
                             alignment);
#endif

          nonzero &= ~(alignment - 1);
        }

      {
        unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
        rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
                                                  known_mode, known_ret,
                                                  &nonzero_for_hook);

        if (new_rtx)
          nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
                                                   known_mode, known_ret);

        return nonzero_for_hook;
      }

    case CONST_INT:
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
      /* If X is negative in MODE, sign-extend the value.  */
      if (INTVAL (x) > 0
          && mode_width < BITS_PER_WORD
          && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
             != 0)
        return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
#endif

      return UINTVAL (x);

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* In many, if not most, RISC machines, reading a byte from memory
         zeros the rest of the register.  Noticing that fact saves a lot
         of extra zero-extends.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
        nonzero &= GET_MODE_MASK (GET_MODE (x));
#endif
      break;

    case EQ:  case NE:
    case UNEQ:  case LTGT:
    case GT:  case GTU:  case UNGT:
    case LT:  case LTU:  case UNLT:
    case GE:  case GEU:  case UNGE:
    case LE:  case LEU:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If this produces an integer result, we know which bits are set.
         Code here used to clear bits outside the mode of X, but that is
         now done above.  */
      /* Mind that MODE is the mode the caller wants to look at this
         operation in, and not the actual operation mode.  We can wind
         up with (subreg:DI (gt:V4HI x y)), and we don't have anything
         that describes the results of a vector compare.  */
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
          && mode_width <= HOST_BITS_PER_WIDE_INT)
        nonzero = STORE_FLAG_VALUE;
      break;

    case NEG:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
         and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
          == GET_MODE_PRECISION (GET_MODE (x)))
        nonzero = 1;
#endif

      if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
        nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
      break;

    case ABS:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
         and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
          == GET_MODE_PRECISION (GET_MODE (x)))
        nonzero = 1;
#endif
      break;

    case TRUNCATE:
      nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
                                       known_x, known_mode, known_ret)
                  & GET_MODE_MASK (mode));
      break;

    case ZERO_EXTEND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
      break;

    case SIGN_EXTEND:
      /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
         Otherwise, show all the bits in the outer mode but not the inner
         may be nonzero.  */
      inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        {
          inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
          if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
            inner_nz |= (GET_MODE_MASK (mode)
                         & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
        }

      nonzero &= inner_nz;
      break;

    case AND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret)
                 & cached_nonzero_bits (XEXP (x, 1), mode,
                                        known_x, known_mode, known_ret);
      break;

    case XOR:   case IOR:
    case UMIN:  case UMAX:  case SMIN:  case SMAX:
      {
        unsigned HOST_WIDE_INT nonzero0
          = cached_nonzero_bits (XEXP (x, 0), mode,
                                 known_x, known_mode, known_ret);

        /* Don't call nonzero_bits for the second time if it cannot change
           anything.  */
        if ((nonzero & nonzero0) != nonzero)
          nonzero &= nonzero0
                     | cached_nonzero_bits (XEXP (x, 1), mode,
                                            known_x, known_mode, known_ret);
      }
      break;

    case PLUS:  case MINUS:
    case MULT:
    case DIV:   case UDIV:
    case MOD:   case UMOD:
      /* We can apply the rules of arithmetic to compute the number of
         high- and low-order zero bits of these operations.  We start by
         computing the width (position of the highest-order nonzero bit)
         and the number of low-order zero bits for each value.  */
      {
        unsigned HOST_WIDE_INT nz0
          = cached_nonzero_bits (XEXP (x, 0), mode,
                                 known_x, known_mode, known_ret);
        unsigned HOST_WIDE_INT nz1
          = cached_nonzero_bits (XEXP (x, 1), mode,
                                 known_x, known_mode, known_ret);
        int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
        int width0 = floor_log2 (nz0) + 1;
        int width1 = floor_log2 (nz1) + 1;
        int low0 = floor_log2 (nz0 & -nz0);
        int low1 = floor_log2 (nz1 & -nz1);
        unsigned HOST_WIDE_INT op0_maybe_minusp
          = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
        unsigned HOST_WIDE_INT op1_maybe_minusp
          = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
        unsigned int result_width = mode_width;
        int result_low = 0;

        switch (code)
          {
          case PLUS:
            result_width = MAX (width0, width1) + 1;
            result_low = MIN (low0, low1);
            break;
          case MINUS:
            result_low = MIN (low0, low1);
            break;
          case MULT:
            result_width = width0 + width1;
            result_low = low0 + low1;
            break;
          case DIV:
            if (width1 == 0)
              break;
            if (!op0_maybe_minusp && !op1_maybe_minusp)
              result_width = width0;
            break;
          case UDIV:
            if (width1 == 0)
              break;
            result_width = width0;
            break;
          case MOD:
            if (width1 == 0)
              break;
            if (!op0_maybe_minusp && !op1_maybe_minusp)
              result_width = MIN (width0, width1);
            result_low = MIN (low0, low1);
            break;
          case UMOD:
            if (width1 == 0)
              break;
            result_width = MIN (width0, width1);
            result_low = MIN (low0, low1);
            break;
          default:
            gcc_unreachable ();
          }

        if (result_width < mode_width)
          nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;

        if (result_low > 0)
          nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
      }
      break;

    case ZERO_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
        nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
      break;

    case SUBREG:
      /* If this is a SUBREG formed for a promoted variable that has
         been zero-extended, we know that at least the high-order bits
         are zero, though others might be too.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
        nonzero = GET_MODE_MASK (GET_MODE (x))
                  & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
                                         known_x, known_mode, known_ret);

      inner_mode = GET_MODE (SUBREG_REG (x));
      /* If the inner mode is a single word for both the host and target
         machines, we can compute this from which bits of the inner
         object might be nonzero.  */
      if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
          && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
        {
          nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
                                          known_x, known_mode, known_ret);

#if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
          /* If this is a typical RISC machine, we only have to worry
             about the way loads are extended.  */
          if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
               ? val_signbit_known_set_p (inner_mode, nonzero)
               : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
              || !MEM_P (SUBREG_REG (x)))
#endif
            {
              /* On many CISC machines, accessing an object in a wider mode
                 causes the high-order bits to become undefined.  So they are
                 not known to be zero.  */
              if (GET_MODE_PRECISION (GET_MODE (x))
                  > GET_MODE_PRECISION (inner_mode))
                nonzero |= (GET_MODE_MASK (GET_MODE (x))
                            & ~GET_MODE_MASK (inner_mode));
            }
        }
      break;

    case ASHIFTRT:
    case LSHIFTRT:
    case ASHIFT:
    case ROTATE:
      /* The nonzero bits are in two classes: any bits within MODE
         that aren't in GET_MODE (x) are always significant.  The rest of the
         nonzero bits are those that are significant in the operand of
         the shift when shifted the appropriate number of bits.  This
         shows that high-order bits are cleared by the right shift and
         low-order bits by left shifts.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
          && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
        {
          machine_mode inner_mode = GET_MODE (x);
          unsigned int width = GET_MODE_PRECISION (inner_mode);
          int count = INTVAL (XEXP (x, 1));
          unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
          unsigned HOST_WIDE_INT op_nonzero
            = cached_nonzero_bits (XEXP (x, 0), mode,
                                   known_x, known_mode, known_ret);
          unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
          unsigned HOST_WIDE_INT outer = 0;

          if (mode_width > width)
            outer = (op_nonzero & nonzero & ~mode_mask);

          if (code == LSHIFTRT)
            inner >>= count;
          else if (code == ASHIFTRT)
            {
              inner >>= count;

              /* If the sign bit may have been nonzero before the shift, we
                 need to mark all the places it could have been copied to
                 by the shift as possibly nonzero.  */
              if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
                inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
                         << (width - count);
            }
          else if (code == ASHIFT)
            inner <<= count;
          else
            inner = ((inner << (count % width)
                      | (inner >> (width - (count % width)))) & mode_mask);

          nonzero &= (outer | inner);
        }
      break;

    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
         that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
        nonzero
          |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
        nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
         that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
        nonzero
          |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
        nonzero = -1;
      break;

    case CLRSB:
      /* This is at most the number of bits in the mode minus 1.  */
      nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
        unsigned HOST_WIDE_INT nonzero_true
          = cached_nonzero_bits (XEXP (x, 1), mode,
                                 known_x, known_mode, known_ret);

        /* Don't call nonzero_bits for the second time if it cannot change
           anything.  */
        if ((nonzero & nonzero_true) != nonzero)
          nonzero &= nonzero_true
                     | cached_nonzero_bits (XEXP (x, 2), mode,
                                            known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}

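/* Worked example for the PLUS rule above (illustrative assumption): if
   nz0 == 0xff (width 8) and nz1 == 0x0f (width 4), then

     result_width = MAX (8, 4) + 1 = 9
     result_low   = MIN (0, 0)     = 0

   so NONZERO is masked to the low nine bits: the sum of an 8-bit and a
   4-bit value needs at most nine bits.  */
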
/* See the macro definition above.  */
#undef cached_num_sign_bit_copies

/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, machine_mode mode, const_rtx known_x,
                            machine_mode known_mode,
                            unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
        return
          num_sign_bit_copies1 (x, mode, x0, mode,
                                cached_num_sign_bit_copies (x0, mode, known_x,
                                                            known_mode,
                                                            known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return
          num_sign_bit_copies1 (x, mode, x1, mode,
                                cached_num_sign_bit_copies (x1, mode, known_x,
                                                            known_mode,
                                                            known_ret));

      if (ARITHMETIC_P (x1)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return
          num_sign_bit_copies1 (x, mode, x0, mode,
                                cached_num_sign_bit_copies (x0, mode, known_x,
                                                            known_mode,
                                                            known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}

4726 /* Return the number of bits at the high-order end of X that are known to
4727 be equal to the sign bit. X will be used in mode MODE; if MODE is
4728 VOIDmode, X will be used in its own mode. The returned value will always
4729 be between 1 and the number of bits in MODE. */
4732 num_sign_bit_copies1 (const_rtx x
, machine_mode mode
, const_rtx known_x
,
4733 machine_mode known_mode
,
4734 unsigned int known_ret
)
4736 enum rtx_code code
= GET_CODE (x
);
4737 unsigned int bitwidth
= GET_MODE_PRECISION (mode
);
4738 int num0
, num1
, result
;
4739 unsigned HOST_WIDE_INT nonzero
;
4741 /* If we weren't given a mode, use the mode of X. If the mode is still
4742 VOIDmode, we don't know anything. Likewise if one of the modes is
4745 if (mode
== VOIDmode
)
4746 mode
= GET_MODE (x
);
4748 if (mode
== VOIDmode
|| FLOAT_MODE_P (mode
) || FLOAT_MODE_P (GET_MODE (x
))
4749 || VECTOR_MODE_P (GET_MODE (x
)) || VECTOR_MODE_P (mode
))
4752 /* For a smaller object, just ignore the high bits. */
4753 if (bitwidth
< GET_MODE_PRECISION (GET_MODE (x
)))
4755 num0
= cached_num_sign_bit_copies (x
, GET_MODE (x
),
4756 known_x
, known_mode
, known_ret
);
4758 num0
- (int) (GET_MODE_PRECISION (GET_MODE (x
)) - bitwidth
));
4761 if (GET_MODE (x
) != VOIDmode
&& bitwidth
> GET_MODE_PRECISION (GET_MODE (x
)))
4763 #ifndef WORD_REGISTER_OPERATIONS
4764 /* If this machine does not do all register operations on the entire
4765 register and MODE is wider than the mode of X, we can say nothing
4766 at all about the high-order bits. */
4769 /* Likewise on machines that do, if the mode of the object is smaller
4770 than a word and loads of that size don't sign extend, we can say
4771 nothing about the high order bits. */
4772 if (GET_MODE_PRECISION (GET_MODE (x
)) < BITS_PER_WORD
4773 #ifdef LOAD_EXTEND_OP
4774 && LOAD_EXTEND_OP (GET_MODE (x
)) != SIGN_EXTEND
4785 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4786 /* If pointers extend signed and this is a pointer in Pmode, say that
4787 all the bits above ptr_mode are known to be sign bit copies. */
4788 /* As we do not know which address space the pointer is referring to,
4789 we can do this only if the target does not support different pointer
4790 or address modes depending on the address space. */
4791 if (target_default_pointer_address_modes_p ()
4792 && ! POINTERS_EXTEND_UNSIGNED
&& GET_MODE (x
) == Pmode
4793 && mode
== Pmode
&& REG_POINTER (x
))
4794 return GET_MODE_PRECISION (Pmode
) - GET_MODE_PRECISION (ptr_mode
) + 1;
4798 unsigned int copies_for_hook
= 1, copies
= 1;
4799 rtx new_rtx
= rtl_hooks
.reg_num_sign_bit_copies (x
, mode
, known_x
,
4800 known_mode
, known_ret
,
4804 copies
= cached_num_sign_bit_copies (new_rtx
, mode
, known_x
,
4805 known_mode
, known_ret
);
4807 if (copies
> 1 || copies_for_hook
> 1)
4808 return MAX (copies
, copies_for_hook
);
4810 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4815 #ifdef LOAD_EXTEND_OP
4816 /* Some RISC machines sign-extend all loads of smaller than a word. */
4817 if (LOAD_EXTEND_OP (GET_MODE (x
)) == SIGN_EXTEND
)
4818 return MAX (1, ((int) bitwidth
4819 - (int) GET_MODE_PRECISION (GET_MODE (x
)) + 1));
4824 /* If the constant is negative, take its 1's complement and remask.
4825 Then see how many zero bits we have. */
4826 nonzero
= UINTVAL (x
) & GET_MODE_MASK (mode
);
4827 if (bitwidth
<= HOST_BITS_PER_WIDE_INT
4828 && (nonzero
& ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4829 nonzero
= (~nonzero
) & GET_MODE_MASK (mode
);
4831 return (nonzero
== 0 ? bitwidth
: bitwidth
- floor_log2 (nonzero
) - 1);
    case SUBREG:
      /* If this is a SUBREG for a promoted object that is sign-extended
	 and we are looking at it in a wider mode, we know that at least the
	 high-order bits are known to be sign bit copies.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					     known_x, known_mode, known_ret);
	  return MAX ((int) bitwidth
		      - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
		      num0);
	}

      /* For a smaller object, just ignore the high bits.  */
      if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
					     known_x, known_mode, known_ret);
	  return MAX (1, (num0
			  - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
				   - bitwidth)));
	}

#ifdef WORD_REGISTER_OPERATIONS
#ifdef LOAD_EXTEND_OP
      /* For paradoxical SUBREGs on machines where all register operations
	 affect the entire register, just look inside.  Note that we are
	 passing MODE to the recursive call, so the number of sign bit copies
	 will remain relative to that mode, not the inner mode.  */

      /* This works only if loads sign extend.  Otherwise, if we get a
	 reload for the inner part, it may be loaded from the stack, and
	 then we lose all sign bit copies that existed before the store
	 to the stack.  */

      if (paradoxical_subreg_p (x)
	  && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
	  && MEM_P (SUBREG_REG (x)))
	return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					   known_x, known_mode, known_ret);
#endif
#endif
      break;
    case SIGN_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1)))
	return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
      break;

    case SIGN_EXTEND:
      return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
	      + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
					    known_x, known_mode, known_ret));
    case TRUNCATE:
      /* For a smaller object, just ignore the high bits.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
					 known_x, known_mode, known_ret);
      return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
				    - bitwidth)));

    case NOT:
      return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
    case ROTATE:	case ROTATERT:
      /* If we are rotating left by a number of bits less than the number
	 of sign bit copies, we can just subtract that amount from the
	 number.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < (int) bitwidth)
	{
	  num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					     known_x, known_mode, known_ret);
	  return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
				 : (int) bitwidth - INTVAL (XEXP (x, 1))));
	}
      break;

    case NEG:
      /* In general, this subtracts one sign bit copy.  But if the value
	 is known to be positive, the number of sign bit copies is the
	 same as that of the input.  Finally, if the input has just one bit
	 that might be nonzero, all the bits are copies of the sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return num0 > 1 ? num0 - 1 : 1;

      nonzero = nonzero_bits (XEXP (x, 0), mode);
      if (nonzero == 1)
	return bitwidth;

      if (num0 > 1
	  && (((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
	num0--;

      return num0;
    case IOR:   case AND:   case XOR:
    case SMIN:  case SMAX:  case UMIN:  case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
	 MIN and MAX operations always return one of the operands.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      /* If num1 is clearing some of the top bits then regardless of
	 the other term, we are guaranteed to have at least that many
	 high-order zero bits.  */
      if (code == AND
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0)
	return num1;

      /* Similarly for IOR when setting high-order bits.  */
      if (code == IOR
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	return num1;

      return MIN (num0, num1);
    case PLUS:  case MINUS:
      /* For addition and subtraction, we can have a 1-bit carry.  However,
	 if we are subtracting 1 from a positive number, there will not
	 be such a carry.  Furthermore, if the positive number is known to
	 be 0 or 1, we know the result is either -1 or 0.  */

      if (code == PLUS && XEXP (x, 1) == constm1_rtx
	  && bitwidth <= HOST_BITS_PER_WIDE_INT)
	{
	  nonzero = nonzero_bits (XEXP (x, 0), mode);
	  if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
	    return (nonzero == 1 || nonzero == 0 ? bitwidth
		    : bitwidth - floor_log2 (nonzero) - 1);
	}

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      result = MAX (1, MIN (num0, num1) - 1);

      return result;
    case MULT:
      /* The number of bits of the product is the sum of the number of
	 bits of both terms.  However, unless one of the terms is known
	 to be positive, we must allow for an additional bit since negating
	 a negative number can remove one sign bit copy.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
      if (result > 0
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (((nonzero_bits (XEXP (x, 0), mode)
		    & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
		  && ((nonzero_bits (XEXP (x, 1), mode)
		       & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)))
		      != 0))))
	result--;

      return MAX (1, result);
    case UDIV:
      /* The result must be <= the first operand.  If the first operand
	 has the high bit set, we know nothing about the number of sign
	 bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 0), mode)
		& ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
    case UMOD:
      /* The result must be <= the second operand.  If the second operand
	 has (or just might have) the high bit set, we know nothing about
	 the number of sign bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 1), mode)
		& ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
    case DIV:
      /* Similar to unsigned division, except that we have to worry about
	 the case where the divisor is negative, in which case we have
	 to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
	result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
	result--;

      return result;
    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
	 sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;

    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
	  || INTVAL (XEXP (x, 1)) < 0
	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
	  || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
	return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));
    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
					 known_x, known_mode, known_ret);
      return MIN (num0, num1);
    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}
/* Calculate the rtx_cost of a single instruction.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
insn_rtx_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.
     We can't use single_set since we only have the pattern.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      if (set)
		return 0;
	      set = x;
	    }
	}
      if (!set)
	return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
/* Returns estimate on cost of computing SEQ.  */

unsigned
seq_cost (const rtx_insn *seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
	cost += set_rtx_cost (set, speed);
      else
	cost++;
    }

  return cost;
}
/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonizing it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */

rtx
canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
			rtx_insn **earliest,
			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx_insn *prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;

  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	 && op1 == CONST0_RTX (GET_MODE (op0))
	 && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

#ifdef HAVE_cc0
      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || !NONJUMP_INSN_P (prev)
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}
#endif
      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (!REG_P (op0))
	break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */
      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
	  || !NONJUMP_INSN_P (prev)
	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
	  /* In cfglayout mode, there do not have to be labels at the
	     beginning of a block, or jumps at the end, so the previous
	     conditions would not stop us when we reach bb boundary.  */
	  || BLOCK_FOR_INSN (prev) != bb)
	break;

      set = set_of (op0, prev);

      if (set
	  && (GET_CODE (set) != SET
	      || !rtx_equal_p (SET_DEST (set), op0)))
	break;
      /* If this is setting OP0, get what it sets it to if it looks
	 relevant.  */
      if (set)
	{
	  machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* ??? We may not combine comparisons done in a CCmode with
	     comparisons not done in a CCmode.  This is to aid targets
	     like Alpha that have an IEEE compliant EQ instruction, and
	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
	     actually artificial, simply to prevent the combination, but
	     should not affect other platforms.

	     However, we must allow VOIDmode comparisons to match either
	     CCmode or non-CCmode comparison, because some ports have
	     modeless comparisons inside branch patterns.

	     ??? This mode check should perhaps look more like the mode check
	     in simplify_comparison in combine.  */
	  if (((GET_MODE_CLASS (mode) == MODE_CC)
	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
	      && mode != VOIDmode
	      && inner_mode != VOIDmode)
	    break;
	  if (GET_CODE (SET_SRC (set)) == COMPARE
	      || (((code == NE
		    || (code == LT
			&& val_signbit_known_set_p (inner_mode,
						    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		    || (code == LT
			&& SCALAR_FLOAT_MODE_P (inner_mode)
			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			    REAL_VALUE_NEGATIVE (fsfv)))
#endif
		    ))
		  && COMPARISON_P (SET_SRC (set))))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && val_signbit_known_set_p (inner_mode,
						     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     ))
		   && COMPARISON_P (SET_SRC (set)))
	    {
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else if ((code == EQ || code == NE)
		   && GET_CODE (SET_SRC (set)) == XOR)
	    /* Handle sequences like:

	       (set op0 (xor X Y))
	       ...(eq|ne op0 (const_int 0))...

	       in which case:

	       (eq op0 (const_int 0)) reduces to (eq X Y)
	       (ne op0 (const_int 0)) reduces to (ne X Y)

	       This is the form used by MIPS16, for example.  */
	    x = SET_SRC (set);
	  else
	    break;
	}

      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;
      if (x)
	{
	  /* If the caller is expecting the condition to be valid at INSN,
	     make sure X doesn't change before INSN.  */
	  if (valid_at_insn_p)
	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
	      break;
	  if (COMPARISON_P (x))
	    code = GET_CODE (x);
	  if (reverse_code)
	    {
	      code = reversed_comparison_code (x, prev);
	      if (code == UNKNOWN)
		return 0;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }
  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;
  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
      && CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
	{
	case LE:
	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
	    code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
	case GE:
	  if ((const_val & max_val)
	      != ((unsigned HOST_WIDE_INT) 1
		  << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
	    code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
	  break;

	case LEU:
	  if (uconst_val < max_val)
	    code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
	  break;

	default:
	  break;
	}
    }
#ifdef HAVE_cc0
  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;
#endif

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
	       int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
				 allow_cc_mode, valid_at_insn_p);
}
/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  machine_mode mode, in_mode;

  for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
       in_mode = GET_MODE_WIDER_MODE (mode))
    for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
	 mode = GET_MODE_WIDER_MODE (mode))
      {
	machine_mode i;

	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
	   extends to the next widest mode.  */
	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
		    || GET_MODE_WIDER_MODE (mode) == in_mode);

	/* We are in in_mode.  Count how many bits outside of mode
	   have to be copies of the sign-bit.  */
	for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
	  {
	    machine_mode wider = GET_MODE_WIDER_MODE (i);

	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
		/* We can only check sign-bit copies starting from the
		   top-bit.  In order to be able to check the bits we
		   have already seen we pretend that subsequent bits
		   have to be sign-bit copies too.  */
		|| num_sign_bit_copies_in_rep [in_mode][mode])
	      num_sign_bit_copies_in_rep [in_mode][mode]
		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
	  }
      }
}
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}
/* Return true if RTX code CODE has a single sequence of zero or more
   "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
   entry in that case.  */

static bool
setup_reg_subrtx_bounds (unsigned int code)
{
  const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
  unsigned int i = 0;
  for (; format[i] != 'e'; ++i)
    {
      if (!format[i])
	/* No subrtxes.  Leave start and count as 0.  */
	return true;
      if (format[i] == 'E' || format[i] == 'V')
	return false;
    }

  /* Record the sequence of 'e's.  */
  rtx_all_subrtx_bounds[code].start = i;
  do
    ++i;
  while (format[i] == 'e');
  rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
  /* rtl-iter.h relies on this.  */
  gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);

  for (; format[i]; ++i)
    if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
      return false;

  return true;
}
/* Initialize rtx_all_subrtx_bounds.  */
void
init_rtlanal (void)
{
  unsigned int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      if (!setup_reg_subrtx_bounds (i))
	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
      if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
    }

  init_num_sign_bit_copies_in_rep ();
}
/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}
/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */

int
low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
	return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
/* Return the mode of MEM's address.  */

machine_mode
get_address_mode (rtx mem)
{
  machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return mode;
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}
/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.

   TODO: This function needs to be rewritten to work on any size
   integer.  */

void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
	{
	  /* In this case the CONST_INT holds both target words.
	     Extract the bits from it into two word-sized pieces.
	     Sign extend each half to HOST_WIDE_INT.  */
	  unsigned HOST_WIDE_INT low, high;
	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
	  unsigned bits_per_word = BITS_PER_WORD;

	  /* Set sign_bit to the most significant bit of a word.  */
	  sign_bit = 1;
	  sign_bit <<= bits_per_word - 1;

	  /* Set mask so that all bits of the word are set.  We could
	     have used 1 << BITS_PER_WORD instead of basing the
	     calculation on sign_bit.  However, on machines where
	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
	     compiler warning, even though the code would never be
	     executed.  */
	  mask = sign_bit << 1;
	  mask--;

	  /* Set sign_extend as any remaining bits.  */
	  sign_extend = ~mask;

	  /* Pick the lower word and sign-extend it.  */
	  low = INTVAL (value);
	  low &= mask;
	  if (low & sign_bit)
	    low |= sign_extend;

	  /* Pick the higher word, shifted to the least significant
	     bits, and sign-extend it.  */
	  high = INTVAL (value);
	  high >>= bits_per_word - 1;
	  high >>= 1;
	  high &= mask;
	  if (high & sign_bit)
	    high |= sign_extend;

	  /* Store the words in the target machine order.  */
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = GEN_INT (high);
	      *second = GEN_INT (low);
	    }
	  else
	    {
	      *first = GEN_INT (low);
	      *second = GEN_INT (high);
	    }
	}
      else
	{
	  /* The rule for using CONST_INT for a wider mode
	     is that we regard the value as signed.
	     So sign-extend it.  */
	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = high;
	      *second = value;
	    }
	  else
	    {
	      *first = value;
	      *second = high;
	    }
	}
    }
  else if (GET_CODE (value) == CONST_WIDE_INT)
    {
      /* All of this is scary code and needs to be converted to
	 properly work with any size integer.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	}
      else
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	}
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
	{
	  *first = const0_rtx;
	  *second = value;
	}
      else
	{
	  *first = value;
	  *second = const0_rtx;
	}
    }
  else if (GET_MODE (value) == VOIDmode
	   /* This is the old way we did CONST_DOUBLE integers.  */
	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
	 So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
	}
      else
	{
	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
	}
    }
  else
    {
      REAL_VALUE_TYPE r;
      long l[2];

      REAL_VALUE_FROM_CONST_DOUBLE (r, value);

      /* Note, this converts the REAL_VALUE_TYPE to the target's
	 format, splits up the floating point double and outputs
	 exactly 32 bits of it into each of l[0] and l[1] --
	 not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (r, l);

      /* If 32 bits is an entire word for the target, but not for the host,
	 then sign-extend on the host so that the number will look the same
	 way on the host that it would on the target.  See for instance
	 simplify_unary_operation.  The #if is needed to avoid compiler
	 warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
	{
	  if (l[0] & ((long) 1 << 31))
	    l[0] |= ((long) (-1) << 32);
	  if (l[1] & ((long) 1 << 31))
	    l[1] |= ((long) (-1) << 32);
	}
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}
/* Return true if X is a sign_extract or zero_extract from the least
   significant bit.  */

static bool
lsb_bitfield_op_p (rtx x)
{
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
    {
      machine_mode mode = GET_MODE (XEXP (x, 0));
      HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));

      return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
    }
  return false;
}
/* Strip outer address "mutations" from LOC and return a pointer to the
   inner value.  If OUTER_CODE is nonnull, store the code of the innermost
   stripped expression there.

   "Mutations" either convert between modes or apply some kind of
   extension, truncation or alignment.  */

rtx *
strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
  for (;;)
    {
      enum rtx_code code = GET_CODE (*loc);
      if (GET_RTX_CLASS (code) == RTX_UNARY)
	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
	   used to convert between pointer sizes.  */
	loc = &XEXP (*loc, 0);
      else if (lsb_bitfield_op_p (*loc))
	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
	   acts as a combined truncation and extension.  */
	loc = &XEXP (*loc, 0);
      else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
	/* (and ... (const_int -X)) is used to align to X bytes.  */
	loc = &XEXP (*loc, 0);
      else if (code == SUBREG
	       && !OBJECT_P (SUBREG_REG (*loc))
	       && subreg_lowpart_p (*loc))
	/* (subreg (operator ...) ...) inside an AND is used for mode
	   conversion too.  */
	loc = &SUBREG_REG (*loc);
      else
	return loc;
      if (outer_code)
	*outer_code = code;
    }
}
/* Return true if CODE applies some kind of scale.  The scaled value is
   the first operand and the scale is the second.  */

static bool
binary_scale_code_p (enum rtx_code code)
{
  return (code == MULT
	  || code == ASHIFT
	  /* Needed by ARM targets.  */
	  || code == ASHIFTRT
	  || code == LSHIFTRT
	  || code == ROTATE
	  || code == ROTATERT);
}
/* If *INNER can be interpreted as a base, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_base_term (rtx *inner)
{
  if (GET_CODE (*inner) == LO_SUM)
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}
/* If *INNER can be interpreted as an index, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_index_term (rtx *inner)
{
  /* At present, only constant scales are allowed.  */
  if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}
/* Set the segment part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->segment);
  info->segment = loc;
  info->segment_term = inner;
}

/* Set the base part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_base (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->base);
  info->base = loc;
  info->base_term = inner;
}

/* Set the index part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_index (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->index);
  info->index = loc;
  info->index_term = inner;
}

/* Set the displacement part of address INFO to LOC, given that INNER
   is the constant term.  */

static void
set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->disp);
  info->disp = loc;
  info->disp_term = inner;
}
/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
   rest of INFO accordingly.  */

static void
decompose_incdec_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  /* These addresses are only valid when the size of the addressed
     value is known.  */
  gcc_checking_assert (info->mode != VOIDmode);
}
/* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
   of INFO accordingly.  */

static void
decompose_automod_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  rtx plus = XEXP (*info->inner, 1);
  gcc_assert (GET_CODE (plus) == PLUS);

  info->base_term2 = &XEXP (plus, 0);
  gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));

  rtx *step = &XEXP (plus, 1);
  rtx *inner_step = strip_address_mutations (step);
  if (CONSTANT_P (*inner_step))
    set_address_disp (info, step, inner_step);
  else
    set_address_index (info, step, inner_step);
}
/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}
/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, machine_mode mode, addr_space_t as,
	  enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
	 or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
	return base_p ? 1 : -1;
    }
  return 0;
}
/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
					ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
	set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
	set_address_segment (info, loc, inner);
      else
	{
	  /* The only other possibilities are a base or an index.  */
	  rtx *base_term = get_base_term (inner);
	  rtx *index_term = get_index_term (inner);
	  gcc_assert (base_term || index_term);
	  if (!base_term)
	    set_address_index (info, loc, index_term);
	  else if (!index_term)
	    set_address_base (info, loc, base_term);
	  else
	    {
	      gcc_assert (base_term == index_term);
	      ops[out] = loc;
	      inner_ops[out] = base_term;
	      ++out;
	    }
	}
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
	 the base.  If we were confident that another term was the base
	 or index, treat the remaining operand as the other kind.  */
      if (!info->base)
	set_address_base (info, ops[0], inner_ops[0]);
      else
	set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
		    GET_CODE (*ops[1]))
	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
		       GET_CODE (*ops[0])))
	{
	  set_address_base (info, ops[0], inner_ops[0]);
	  set_address_index (info, ops[1], inner_ops[1]);
	}
      else
	{
	  set_address_base (info, ops[1], inner_ops[1]);
	  set_address_index (info, ops[0], inner_ops[0]);
	}
    }
  else
    gcc_assert (out == 0);
}
/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
		   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}
/* Describe address operand LOC in INFO.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}

/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
		     MEM_ADDR_SPACE (x), MEM);
}
/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
		     info->addr_outer_code);
}
/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}
/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}
/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (const_rtx x)
{
  if (!targetm.have_tls)
    return false;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
      return true;
  return false;
}