/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "recog.h"
#include "addresses.h"
#include "rtl-iter.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, machine_mode,
						   const_rtx, machine_mode,
						   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, machine_mode,
					     const_rtx, machine_mode,
					     unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, machine_mode,
						const_rtx, machine_mode,
						unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, machine_mode, const_rtx,
					  machine_mode, unsigned int);
rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];

/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
						  value_type *base,
						  size_t i, value_type x)
{
  if (base == array.stack)
    {
      if (i < LOCAL_ELEMS)
	{
	  base[i] = x;
	  return base;
	}
      gcc_checking_assert (i == LOCAL_ELEMS);
      /* A previous iteration might also have moved from the stack to the
	 heap, in which case the heap array will already be big enough.  */
      if (vec_safe_length (array.heap) <= i)
	vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }
  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}
/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
						    value_type *base,
						    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
	 we're likely to want most.  It also allows for the SEQUENCE
	 handling below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
	if (format[i] == 'e')
	  {
	    value_type subx = T::get_value (x->u.fld[i].rt_rtx);
	    if (__builtin_expect (end < LOCAL_ELEMS, true))
	      base[end++] = subx;
	    else
	      base = add_single_to_queue (array, base, end++, subx);
	  }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
	{
	  value_type subx = T::get_value (x->u.fld[i].rt_rtx);
	  if (__builtin_expect (end < LOCAL_ELEMS, true))
	    base[end++] = subx;
	  else
	    base = add_single_to_queue (array, base, end++, subx);
	}
      else if (format[i] == 'E')
	{
	  unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
	  rtx *vec = x->u.fld[i].rt_rtvec->elem;
	  if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
	    for (unsigned int j = 0; j < length; j++)
	      base[end++] = T::get_value (vec[j]);
	  else
	    for (unsigned int j = 0; j < length; j++)
	      base = add_single_to_queue (array, base, end++,
					  T::get_value (vec[j]));
	  if (code == SEQUENCE && end == length)
	    /* If the subrtxes of the sequence fill the entire array then
	       we know that no other parts of a containing insn are queued.
	       The caller is therefore iterating over the sequence as a
	       PATTERN (...), so we also want the patterns of the
	       subinstructions.  */
	    for (unsigned int j = 0; j < length; j++)
	      {
		typename T::rtx_type x = T::get_rtx (base[j]);
		if (INSN_P (x))
		  base[j] = T::get_value (PATTERN (x));
	      }
	}
  return end - orig_end;
}
template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;
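
/* These instantiations back the FOR_EACH_SUBRTX family of iterators
   declared in rtl-iter.h.  A typical walk over every subrtx of X,
   as used by find_all_hard_regs below, looks like:

     subrtx_iterator::array_type array;
     FOR_EACH_SUBRTX (iter, array, x, NONCONST)
       if (REG_P (*iter))
	 ... handle *iter ...  */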
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  /* The arg pointer varies if it is not a fixed register.  */
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
	 that must happen after a call.  This currently screws up local-alloc
	 into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
	return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	if (rtx_unstable_p (XEXP (x, i)))
	  return 1;
      }
    else if (fmt[i] == 'E')
      {
	int j;
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (rtx_unstable_p (XVECEXP (x, i, j)))
	    return 1;
      }

  return 0;
}
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return false;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return false;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
	 and arg pointers and not just the register number in case we have
	 eliminated the frame and/or arg pointer and are using it
	 for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  /* The arg pointer varies if it is not a fixed register.  */
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return false;
      if (x == pic_offset_table_rtx
	  /* ??? When call-clobbered, the value is stable modulo the restore
	     that must happen after a call.  This currently screws up
	     local-alloc into believing that the restore is not needed, so we
	     must return 0 only if we are called from alias analysis.  */
	  && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
	return false;
      return true;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
	 (in fact it is related specifically to operand 1)
	 during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
	     || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return true;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	if (rtx_varies_p (XEXP (x, i), for_alias))
	  return true;
      }
    else if (fmt[i] == 'E')
      {
	int j;
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
	    return true;
      }

  return false;
}
/* Compute an approximation for the offset between the register
   FROM and TO for the current function, as it was at the start
   of the routine.  */

static HOST_WIDE_INT
get_initial_register_offset (int from, int to)
{
  static const struct elim_table_t
  {
    const int from;
    const int to;
  } table[] = ELIMINABLE_REGS;
  HOST_WIDE_INT offset1, offset2;
  unsigned int i, j;

  if (to == from)
    return 0;

  /* It is not safe to call INITIAL_ELIMINATION_OFFSET
     before the reload pass.  We need to give at least
     an estimation for the resulting frame size.  */
  if (! reload_completed)
    {
      offset1 = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset1 = - offset1;
#endif
      if (to == STACK_POINTER_REGNUM)
	return offset1;
      else if (from == STACK_POINTER_REGNUM)
	return - offset1;
      else
	return 0;
    }

  for (i = 0; i < ARRAY_SIZE (table); i++)
      if (table[i].from == from)
	{
	  if (table[i].to == to)
	    {
	      INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
					  offset1);
	      return offset1;
	    }
	  for (j = 0; j < ARRAY_SIZE (table); j++)
	    {
	      if (table[j].to == to
		  && table[j].from == table[i].to)
		{
		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
					      offset1);
		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
					      offset2);
		  return offset1 + offset2;
		}
	      if (table[j].from == to
		  && table[j].to == table[i].to)
		{
		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
					      offset1);
		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
					      offset2);
		  return offset1 - offset2;
		}
	    }
	}
      else if (table[i].to == from)
	{
	  if (table[i].from == to)
	    {
	      INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
					  offset1);
	      return - offset1;
	    }
	  for (j = 0; j < ARRAY_SIZE (table); j++)
	    {
	      if (table[j].to == to
		  && table[j].from == table[i].from)
		{
		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
					      offset1);
		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
					      offset2);
		  return - offset1 + offset2;
		}
	      if (table[j].from == to
		  && table[j].to == table[i].from)
		{
		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
					      offset1);
		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
					      offset2);
		  return - offset1 - offset2;
		}
	    }
	}

  /* If the requested register combination was not found,
     try a different more simple combination.  */
  if (from == ARG_POINTER_REGNUM)
    return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
  else if (to == ARG_POINTER_REGNUM)
    return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
  else if (from == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
  else if (to == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
  else
    return 0;
}
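
/* E.g. for the offset from the arg pointer to the frame pointer, when
   ELIMINABLE_REGS only describes eliminations of both registers to the
   stack pointer, the code above combines the two eliminations through
   their common target and returns the difference of the two offsets.  */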
/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
		       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
	 the real alignment of %sp.  However, when it does this, the
	 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
	  && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
	actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
	return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
	return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
	{
	  tree decl;
	  HOST_WIDE_INT decl_size;

	  if (offset < 0)
	    return 1;
	  if (size == 0)
	    size = GET_MODE_SIZE (mode);
	  if (size == 0)
	    return offset != 0;

	  /* If the size of the access or of the symbol is unknown,
	     assume the worst.  */
	  decl = SYMBOL_REF_DECL (x);

	  /* Else check that the access is in bounds.  TODO: restructure
	     expr_size/tree_expr_size/int_expr_size and just use the latter.  */
	  if (!decl)
	    decl_size = -1;
	  else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
	    decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
			 ? tree_to_shwi (DECL_SIZE_UNIT (decl))
			 : -1);
	  else if (TREE_CODE (decl) == STRING_CST)
	    decl_size = TREE_STRING_LENGTH (decl);
	  else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
	    decl_size = int_size_in_bytes (TREE_TYPE (decl));
	  else
	    decl_size = -1;

	  return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
	}

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
	 nonsensical offsets.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  || x == stack_pointer_rtx
	  /* The arg pointer varies if it is not a fixed register.  */
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	{
#ifdef RED_ZONE_SIZE
	  HOST_WIDE_INT red_zone_size = RED_ZONE_SIZE;
#else
	  HOST_WIDE_INT red_zone_size = 0;
#endif
	  HOST_WIDE_INT stack_boundary = PREFERRED_STACK_BOUNDARY
					 / BITS_PER_UNIT;
	  HOST_WIDE_INT low_bound, high_bound;

	  if (size == 0)
	    size = GET_MODE_SIZE (mode);

	  if (x == frame_pointer_rtx)
	    {
	      if (FRAME_GROWS_DOWNWARD)
		{
		  high_bound = STARTING_FRAME_OFFSET;
		  low_bound  = high_bound - get_frame_size ();
		}
	      else
		{
		  low_bound  = STARTING_FRAME_OFFSET;
		  high_bound = low_bound + get_frame_size ();
		}
	    }
	  else if (x == hard_frame_pointer_rtx)
	    {
	      HOST_WIDE_INT sp_offset
		= get_initial_register_offset (STACK_POINTER_REGNUM,
					       HARD_FRAME_POINTER_REGNUM);
	      HOST_WIDE_INT ap_offset
		= get_initial_register_offset (ARG_POINTER_REGNUM,
					       HARD_FRAME_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
	      low_bound  = sp_offset - red_zone_size - stack_boundary;
	      high_bound = ap_offset
			   + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
			   + crtl->args.size
#endif
			   + stack_boundary;
#else
	      high_bound = sp_offset + red_zone_size + stack_boundary;
	      low_bound  = ap_offset
			   + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
			   - crtl->args.size
#endif
			   - stack_boundary;
#endif
	    }
	  else if (x == stack_pointer_rtx)
	    {
	      HOST_WIDE_INT ap_offset
		= get_initial_register_offset (ARG_POINTER_REGNUM,
					       STACK_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
	      low_bound  = - red_zone_size - stack_boundary;
	      high_bound = ap_offset
			   + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
			   + crtl->args.size
#endif
			   + stack_boundary;
#else
	      high_bound = red_zone_size + stack_boundary;
	      low_bound  = ap_offset
			   + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
			   - crtl->args.size
#endif
			   - stack_boundary;
#endif
	    }
	  else
	    {
	      /* We assume that accesses are safe to at least the
		 next stack boundary.
		 Examples are varargs and __builtin_return_address.  */
#if ARGS_GROW_DOWNWARD
	      high_bound = FIRST_PARM_OFFSET (current_function_decl)
			   + stack_boundary;
	      low_bound  = FIRST_PARM_OFFSET (current_function_decl)
			   - crtl->args.size - stack_boundary;
#else
	      low_bound  = FIRST_PARM_OFFSET (current_function_decl)
			   - stack_boundary;
	      high_bound = FIRST_PARM_OFFSET (current_function_decl)
			   + crtl->args.size + stack_boundary;
#endif
	    }

	  if (offset >= low_bound && offset <= high_bound - size)
	    return 0;
	  return 1;
	}
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
	return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
				    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
	 - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
	return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
				     size, mode, unaligned_mems))
	return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
				    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
				    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
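
/* So a MEM whose address is the frame pointer plus a small in-range
   constant offset is considered safe, while e.g. a reference through a
   weak SYMBOL_REF is not, since a weak symbol may resolve to a null
   address.  */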
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  || x == stack_pointer_rtx
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
	return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
	  && CONSTANT_P (XEXP (x, 1)))
	return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
	 auto-inc is only allowed in memories, the register must be a
	 pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0)
	return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return false;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	if (rtx_addr_varies_p (XEXP (x, i), for_alias))
	  return true;
      }
    else if (fmt[i] == 'E')
      {
	int j;
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
	    return true;
      }
  return false;
}
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
	   && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
	  && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
	return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
	return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
	  < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
	{
	  *base_out = XEXP (x, 0);
	  *offset_out = XEXP (x, 1);
	  return;
	}
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
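
/* For instance, (const (plus (symbol_ref "x") (const_int 12))) splits
   into base (symbol_ref "x") and offset (const_int 12); anything
   without an obvious offset, e.g. a bare REG, comes back unchanged
   with offset (const_int 0).  */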
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
	count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
	return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
	return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
	{
	case 'e':
	  count += count_occurrences (XEXP (x, i), find, count_dest);
	  break;

	case 'E':
	  for (j = 0; j < XVECLEN (x, i); j++)
	    count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
	  break;
	}
    }
  return count;
}
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == LABEL_REF_LABEL (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
	 and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

      /* These are kept unique for a given value.  */
    CASE_CONST_ANY:
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (in, i) - 1; j >= 0; j--)
	    if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
	      return 1;
	}
      else if (fmt[i] == 'e'
	       && reg_mentioned_p (reg, XEXP (in, i)))
	return 1;
    }

  return 0;
}
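
/* Note that this counts any appearance of REG, whether it is read or
   written; reg_referenced_p below is the variant that does not count a
   plain store into REG as a reference.  */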
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
		    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
	&& (reg_overlap_mentioned_p (reg, PATTERN (insn))
	    || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
	return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
	 of a REG that occupies all of the REG, the insn references X if
	 it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
	  && GET_CODE (SET_DEST (body)) != PC
	  && !REG_P (SET_DEST (body))
	  && ! (GET_CODE (SET_DEST (body)) == SUBREG
		&& REG_P (SUBREG_REG (SET_DEST (body)))
		&& (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
		      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
			 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
	  && reg_overlap_mentioned_p (x, SET_DEST (body)))
	return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
	if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
	  return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
	  return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	if (reg_referenced_p (x, XVECEXP (body, 0, i)))
	  return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
	if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
	  return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
	return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
		   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Return true if REG is set or clobbered inside INSN.  */

int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
	if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
	  return true;

      return false;
    }

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
	  || (CALL_P (insn)
	      && ((REG_P (reg)
		   && REGNO (reg) < FIRST_PSEUDO_REGISTER
		   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
					       GET_MODE (reg), REGNO (reg)))
		  || MEM_P (reg)
		  || find_reg_fusage (insn, CLOBBER, reg)))))
    return true;

  return set_of (reg, insn) != NULL_RTX;
}
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
	return 1;
      if (MEM_READONLY_P (x))
	return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
	if (memory_modified_in_insn_p (x, insn))
	  return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
	return 1;

      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if (modified_between_p (XVECEXP (x, i, j), start, end))
	    return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
	return 1;
      if (MEM_READONLY_P (x))
	return 0;
      if (memory_modified_in_insn_p (x, insn))
	return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
	return 1;

      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if (modified_in_p (XVECEXP (x, i, j), insn))
	    return 1;
    }

  return 0;
}
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
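
/* E.g. for an insn whose pattern is
   (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 1))),
   set_of ((reg:SI 100), insn) returns the SET itself, whereas
   set_of ((reg:SI 101), insn) returns NULL_RTX.  */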
/* Add all hard registers in X to *PSET.  */

void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
	add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}
/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */

void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *) data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}

/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */

void
find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset,
			bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
	IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
	record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}
/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx sub = XVECEXP (pat, 0, i);
	  switch (GET_CODE (sub))
	    {
	    case USE:
	    case CLOBBER:
	      break;

	    case SET:
	      /* We can consider insns having multiple sets, where all
		 but one are dead as single set insns.  In the common case
		 only a single set is present in the pattern so we want
		 to avoid checking for REG_UNUSED notes unless necessary.

		 When we reach a set the first time, we just expect this is
		 the single set we are looking for and only when more
		 sets are found in the insn, we check them.  */
	      if (!set_verified)
		{
		  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
		      && !side_effects_p (set))
		    set = NULL;
		  else
		    set_verified = 1;
		}
	      if (!set)
		set = sub, set_verified = 0;
	      else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
		       || side_effects_p (sub))
		return NULL;
	      break;

	    default:
	      return NULL;
	    }
	}
    }
  return set;
}
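
/* Most callers reach this function through the single_set wrapper in
   rtl.h, which handles the common case of a bare SET pattern inline and
   only falls back to single_set_2 for PARALLEL patterns.  */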
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
	if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
	  {
	    /* If we have already found a SET, then return now.  */
	    if (found)
	      return 1;
	    else
	      found = 1;
	  }
    }

  /* Either zero or one SET.  */
  return 0;
}
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
	   && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
	   && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
	return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      int c0 = INTVAL (XVECEXP (par, 0, 0));
      HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
	if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
	  return 0;
      return
	simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
			       offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
	  && REGNO (src) == REGNO (dst));
}
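
/* The simplest case recognized above is a register copied onto itself,
   e.g. (set (reg:SI 1) (reg:SI 1)); noop_move_p below extends the test
   to whole insns, including PARALLELs of such sets.  */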
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const rtx_insn *insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
	 this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx tem = XVECEXP (pat, 0, i);

	  if (GET_CODE (tem) == USE
	      || GET_CODE (tem) == CLOBBER)
	    continue;

	  if (GET_CODE (tem) != SET || ! set_noop_p (tem))
	    return 0;
	}

      return 1;
    }
  return 0;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

bool
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
		   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return false;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
	 clobber a virtual register.  In fact, we could be more precise,
	 but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
	   || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	       && x_regno == ARG_POINTER_REGNUM)
	   || x_regno == FRAME_POINTER_REGNUM)
	  && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
	return true;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
	 registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
	  && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
	{
	  unsigned int inner_regno = subreg_regno (x);
	  unsigned int inner_endregno
	    = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
			     ? subreg_nregs (x) : 1);

	  return endregno > inner_regno && regno < inner_endregno;
	}
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
	  /* Note setting a SUBREG counts as referring to the REG it is in for
	     a pseudo but not for hard registers since we can
	     treat each word individually.  */
	  && ((GET_CODE (SET_DEST (x)) == SUBREG
	       && loc != &SUBREG_REG (SET_DEST (x))
	       && REG_P (SUBREG_REG (SET_DEST (x)))
	       && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
	       && refers_to_regno_p (regno, endregno,
				     SUBREG_REG (SET_DEST (x)), loc))
	      || (!REG_P (SET_DEST (x))
		  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
	return true;

      if (code == CLOBBER || loc == &SET_SRC (x))
	return false;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
	{
	  if (i == 0)
	    {
	      x = XEXP (x, 0);
	      goto repeat;
	    }
	  else
	    if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
	      return true;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (loc != &XVECEXP (x, i, j)
		&& refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
	      return true;
	}
    }
  return false;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
	regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
			  ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
	const char *fmt;
	int i;

	if (MEM_P (in))
	  return 1;

	fmt = GET_RTX_FORMAT (GET_CODE (in));
	for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
	  if (fmt[i] == 'e')
	    {
	      if (reg_overlap_mentioned_p (x, XEXP (in, i)))
		return 1;
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = XVECLEN (in, i) - 1; j >= 0; --j)
		if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
		  return 1;
	    }

	return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
	int i;

	/* If any register in here refers to it we return true.  */
	for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	  if (XEXP (XVECEXP (x, 0, i), 0) != 0
	      && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
	    return 1;
	return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

  If the item being stored in or clobbered is a SUBREG of a hard register,
  the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
	      && (!REG_P (SUBREG_REG (dest))
		  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
	     || GET_CODE (dest) == ZERO_EXTRACT
	     || GET_CODE (dest) == STRICT_LOW_PART)
	dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
	 each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
	{
	  for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	    if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
	      (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
	}
      else
	(*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
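
/* A typical use is find_all_hard_reg_sets above:
   note_stores (PATTERN (insn), record_hard_reg_sets, &set) calls the
   callback once for each REG, MEM or other destination that the
   pattern stores into or clobbers.  */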
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
	(*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	(*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
	(*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
	rtx dest = SET_DEST (body);

	/* For sets we replace everything in source plus registers in memory
	   expression in store and operands of a ZERO_EXTRACT.  */
	(*fun) (&SET_SRC (body), data);

	if (GET_CODE (dest) == ZERO_EXTRACT)
	  {
	    (*fun) (&XEXP (dest, 1), data);
	    (*fun) (&XEXP (dest, 2), data);
	  }

	while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
	  dest = XEXP (dest, 0);

	if (MEM_P (dest))
	  (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
	    + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
	  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
	       + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}
/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
	 values of functions, and those registers are wrapped in
	 PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	{
	  rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
	  if (inner != NULL_RTX
	      && covers_regno_no_parallel_p (inner, test_regno))
	    return true;
	}

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
	{
	  rtx body = XVECEXP (pattern, 0, i);

	  if (GET_CODE (body) == COND_EXEC)
	    body = COND_EXEC_CODE (body);

	  if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
	      && covers_regno_p (SET_DEST (body), test_regno))
	    return 1;
	}
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
	if (REG_NOTE_KIND (link) == kind)
	  return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}

/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
	/* Verify that it is a register, so that scratch and MEM won't cause a
	   problem here.  */
	&& REG_P (XEXP (link, 0))
	&& REGNO (XEXP (link, 0)) <= regno
	&& END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
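
/* Passing a null DATUM to find_reg_note thus returns the first note of
   the given kind regardless of its datum, which is how noop_move_p
   above looks for any REG_EQUAL note; find_regno_note additionally
   catches notes for multi-register hard regs that merely overlap
   REGNO.  */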
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
	|| REG_NOTE_KIND (link) == REG_EQUIV)
      {
	/* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
	   insns that have multiple sets.  Checking single_set to
	   make sure of this is not the proper check, as explained
	   in the comment in set_unique_reg_note.

	   This should be changed into an assert.  */
	if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
	  return 0;
	return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const rtx_insn *insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
	return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
	   link;
	   link = XEXP (link, 1))
	if (GET_CODE (XEXP (link, 0)) == code
	    && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
	  return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
	 to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  unsigned int end_regno = END_REGNO (datum);
	  unsigned int i;

	  for (i = regno; i < end_regno; i++)
	    if (find_regno_fusage (insn, code, i))
	      return 1;
	}
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
	  && REG_P (reg = XEXP (op, 0))
	  && REGNO (reg) <= regno
	  && END_REGNO (reg) > regno)
	return 1;
    }

  return 0;
}
/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}
/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));
  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
    case REG_TM:
      /* These types of register notes use an INSN_LIST rather than an
	 EXPR_LIST, so that copying is done right and dumps look
	 better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
				       datum, REG_NOTES (insn));
}
/* Add a register note like NOTE to INSN.  */

void
add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
{
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
  else
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
}
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
	{
	  XEXP (link, 1) = XEXP (note, 1);
	  break;
	}

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (as_a <rtx_insn *> (insn));
      break;
    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.  */

void
remove_reg_equal_equiv_notes (rtx_insn *insn)
{
  rtx *loc;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
	*loc = XEXP (*loc, 1);
      else
	loc = &XEXP (*loc, 1);
    }
}
/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  if (!df)
    return;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx_insn *insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
	 or REG_EQUIV note by hacking the list manually rather than calling
	 remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

bool
in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return true;

  return false;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
{
  rtx_expr_list *temp = *listp;
  rtx_expr_list *prev = NULL;

  while (temp)
    {
      if (node == temp->element ())
	{
	  /* Splice the node out of the list.  */
	  if (prev)
	    XEXP (prev, 1) = temp->next ();
	  else
	    *listp = temp->next ();

	  return;
	}

      prev = temp;
      temp = temp->next ();
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
{
  rtx_insn_list *temp = *listp;
  rtx_insn_list *prev = NULL;

  while (temp)
    {
      if (node == temp->insn ())
	{
	  /* Splice the node out of the list.  */
	  if (prev)
	    XEXP (prev, 1) = temp->next ();
	  else
	    *listp = temp->next ();

	  return;
	}

      prev = temp;
      temp = temp->next ();
    }
}
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state, and thus no
   instructions or register uses should be moved or combined across them.
   This includes only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case CALL:
    case MEM:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    if (volatile_insn_p (XEXP (x, i)))
	      return 1;
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      if (volatile_insn_p (XVECEXP (x, i, j)))
		return 1;
	  }
      }
  }
  return 0;
}
/* Nonzero if X contains any volatile memory references,
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    if (volatile_refs_p (XEXP (x, i)))
	      return 1;
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      if (volatile_refs_p (XVECEXP (x, i, j)))
		return 1;
	  }
      }
  }
  return 0;
}
/* Similar to above, except that it also rejects register pre- and post-
   incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case VAR_LOCATION:
      return 0;

    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
	 when some combination can't be done.  If we see one, don't think
	 that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
    case CALL:
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    if (side_effects_p (XEXP (x, i)))
	      return 1;
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      if (side_effects_p (XVECEXP (x, i, j)))
		return 1;
	  }
      }
  }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero value means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);
  switch (code)
    {
      /* Handle these cases quickly.  */
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST:
    case PC:
    case CC0:
    case REG:
    case SCRATCH:
      return 0;

    case UNSPEC:
      return targetm.unspec_may_trap_p (x, flags);

    case UNSPEC_VOLATILE:
    case ASM_INPUT:
    case TRAP_IF:
      return 1;

    case ASM_OPERANDS:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
	  && MEM_VOLATILE_P (x)
	  && XEXP (x, 0) == stack_pointer_rtx)
	return 1;
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
	     reference; moving it out of context such as when moving code
	     when optimizing, might cause its address to become invalid.  */
	  code_changed
	  || !MEM_NOTRAP_P (x))
	{
	  HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
	  return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
					GET_MODE (x), code_changed);
	}
      return 0;

      /* Division by a non-constant might trap.  */
    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      if (HONOR_SNANS (x))
	return 1;
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
	return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
	return 1;
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
	 certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
	break;
      /* ??? There is no machine independent way to check for tests that trap
	 when COMPARE is used, though many targets do make this distinction.
	 For instance, sparc uses CCFPE for compares which generate exceptions
	 and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (x))
	return 1;
      /* But often the compare has some CC mode, so check operand
	 modes as well.  */
      if (HONOR_NANS (XEXP (x, 0))
	  || HONOR_NANS (XEXP (x, 1)))
	return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (x))
	return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (XEXP (x, 0))
	  || HONOR_SNANS (XEXP (x, 1)))
	return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
	return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
	return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (may_trap_p_1 (XEXP (x, i), flags))
	    return 1;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (may_trap_p_1 (XVECEXP (x, i, j), flags))
	      return 1;
	}
    }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}
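/* A minimal usage sketch (hypothetical pass code; EXPR is a candidate
   rtx for speculative execution, hoist_expr_before_loop is a stand-in
   for the caller's own transformation):

     if (!may_trap_p (expr))
       hoist_expr_before_loop (expr);

   Division by a register, floating-point arithmetic under trapping
   math, and non-MEM_NOTRAP_P memory references all fail this test, per
   the cases above.  */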
/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

	struct S { int d; union { char *cp; int *ip; }; };

	int foo(struct S *s)
	{
	  if (s->d == 1)
	    return *s->ip;
	  else
	    return *s->cp;
	}

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

       if (reg:SI) == 1
	 (set (reg:SI) (mem:SI (%fp - 7)))
       else
	 (set (reg:QI) (mem:QI (%fp - 7)))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}
/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

int
inequality_comparisons_p (const_rtx x)
{
  const char *fmt;
  int len, i;
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case REG:
    case SCRATCH:
    case PC:
    case CC0:
    CASE_CONST_ANY:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      return 0;

    case LT:
    case LTU:
    case GT:
    case GTU:
    case LE:
    case LEU:
    case GE:
    case GEU:
      return 1;

    default:
      break;
    }

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
	{
	  if (inequality_comparisons_p (XEXP (x, i)))
	    return 1;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (inequality_comparisons_p (XVECEXP (x, i, j)))
	      return 1;
	}
    }

  return 0;
}
/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.

   ALL_REGS is true if we want to replace all REGs equal to FROM, not just
   those pointer-equal ones.  */

rtx
replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
{
  int i, j;
  const char *fmt;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (all_regs
      && REG_P (x)
      && REG_P (from)
      && REGNO (x) == REGNO (from))
    {
      gcc_assert (GET_MODE (x) == GET_MODE (from));
      return to;
    }
  else if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_subreg (GET_MODE (x), new_rtx,
			       GET_MODE (SUBREG_REG (x)),
			       SUBREG_BYTE (x));
	  gcc_assert (x);
	}
      else
	SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					new_rtx, GET_MODE (XEXP (x, 0)));
	  gcc_assert (x);
	}
      else
	XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
					   from, to, all_regs);
    }

  return x;
}
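/* Because replace_rtx rewrites X in place (see the sharing caveat in the
   comment above), a caller that must preserve the original typically
   operates on a copy.  A sketch, with OLD_REG and NEW_REG standing for
   caller-owned registers:

     rtx body = copy_rtx (PATTERN (insn));
     body = replace_rtx (body, old_reg, new_reg, false);  */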
/* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
   the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */

void
replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
{
  /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
  rtx x = *loc;
  if (JUMP_TABLE_DATA_P (x))
    {
      rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
      int len = GET_NUM_ELEM (vec);
      for (int i = 0; i < len; ++i)
	{
	  rtx ref = RTVEC_ELT (vec, i);
	  if (XEXP (ref, 0) == old_label)
	    {
	      XEXP (ref, 0) = new_label;
	      if (update_label_nuses)
		{
		  ++LABEL_NUSES (new_label);
		  --LABEL_NUSES (old_label);
		}
	    }
	}
      return;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by the iterator because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
    JUMP_LABEL (x) = new_label;

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
    {
      rtx *loc = *iter;
      if (rtx x = *loc)
	{
	  if (GET_CODE (x) == SYMBOL_REF
	      && CONSTANT_POOL_ADDRESS_P (x))
	    {
	      rtx c = get_pool_constant (x);
	      if (rtx_referenced_p (old_label, c))
		{
		  /* Create a copy of constant C; replace the label inside
		     but do not update LABEL_NUSES because uses in constant pool
		     are not counted.  */
		  rtx new_c = copy_rtx (c);
		  replace_label (&new_c, old_label, new_label, false);

		  /* Add the new constant NEW_C to constant pool and replace
		     the old reference to constant by new reference.  */
		  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
		  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
		}
	    }

	  if ((GET_CODE (x) == LABEL_REF
	       || GET_CODE (x) == INSN_LIST)
	      && XEXP (x, 0) == old_label)
	    {
	      XEXP (x, 0) = new_label;
	      if (update_label_nuses)
		{
		  ++LABEL_NUSES (new_label);
		  --LABEL_NUSES (old_label);
		}
	    }
	}
    }
}
void
replace_label_in_insn (rtx_insn *insn, rtx old_label, rtx new_label,
		       bool update_label_nuses)
{
  rtx insn_as_rtx = insn;
  replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
  gcc_checking_assert (insn_as_rtx == insn);
}
/* Return true if X is referenced in BODY.  */

bool
rtx_referenced_p (const_rtx x, const_rtx body)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, body, ALL)
    if (const_rtx y = *iter)
      {
	/* Check if a label_ref Y refers to label X.  */
	if (GET_CODE (y) == LABEL_REF
	    && LABEL_P (x)
	    && LABEL_REF_LABEL (y) == x)
	  return true;

	if (rtx_equal_p (x, y))
	  return true;

	/* If Y is a reference to pool constant traverse the constant.  */
	if (GET_CODE (y) == SYMBOL_REF
	    && CONSTANT_POOL_ADDRESS_P (y))
	  iter.substitute (get_pool_constant (y));
      }
  return false;
}
/* If INSN is a tablejump return true and store the label (before jump table) to
   *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const rtx_insn *insn, rtx *labelp, rtx_jump_table_data **tablep)
{
  rtx label;
  rtx_insn *table;

  if (!JUMP_P (insn))
    return false;

  label = JUMP_LABEL (insn);
  if (label != NULL_RTX && !ANY_RETURN_P (label)
      && (table = NEXT_INSN (as_a <rtx_insn *> (label))) != NULL_RTX
      && JUMP_TABLE_DATA_P (table))
    {
      if (labelp)
	*labelp = label;
      if (tablep)
	*tablep = as_a <rtx_jump_table_data *> (table);
      return true;
    }
  return false;
}
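/* A typical use, as in label_is_jump_target_p further below (TABLE is
   only filled in when the insn really is a tablejump):

     rtx_jump_table_data *table;
     if (tablejump_p (insn, NULL, &table))
       {
	 rtvec labels = table->get_labels ();
	 int n = GET_NUM_ELEM (labels);
	 ...iterate over the N label refs...
       }  */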
/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
		&& CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
	      || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
	  && computed_jump_p_1 (XEXP (x, i)))
	return 1;

      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (computed_jump_p_1 (XVECEXP (x, i, j)))
	    return 1;
    }

  return 0;
}
/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const rtx_insn *insn)
{
  int i;
  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
	return 0;

      if (GET_CODE (pat) == PARALLEL)
	{
	  int len = XVECLEN (pat, 0);
	  int has_use_labelref = 0;

	  for (i = len - 1; i >= 0; i--)
	    if (GET_CODE (XVECEXP (pat, 0, i)) == USE
		&& (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
		    == LABEL_REF))
	      {
		has_use_labelref = 1;
		break;
	      }

	  if (! has_use_labelref)
	    for (i = len - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (pat, 0, i)) == SET
		  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
		  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
		return 1;
	}
      else if (GET_CODE (pat) == SET
	       && SET_DEST (pat) == pc_rtx
	       && computed_jump_p_1 (SET_SRC (pat)))
	return 1;
    }
  return 0;
}
/* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
   the equivalent add insn and pass the result to FN, using DATA as the
   final argument.  */

static int
for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
{
  rtx x = XEXP (mem, 0);
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
	int size = GET_MODE_SIZE (GET_MODE (mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (size, GET_MODE (r1));
	return fn (mem, x, r1, r1, c, data);
      }

    case PRE_DEC:
    case POST_DEC:
      {
	int size = GET_MODE_SIZE (GET_MODE (mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (-size, GET_MODE (r1));
	return fn (mem, x, r1, r1, c, data);
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
	rtx r1 = XEXP (x, 0);
	rtx add = XEXP (x, 1);
	return fn (mem, x, r1, add, NULL, data);
      }

    default:
      gcc_unreachable ();
    }
}
/* Traverse X looking for MEMs that have autoinc addresses.
   For each such autoinc operation found, call FN, passing it
   the innermost enclosing MEM, the operation itself, the RTX modified
   by the operation, two RTXs (the second may be NULL) that, once
   added, represent the value to be held by the modified RTX
   afterwards, and DATA.  FN is to return 0 to continue the
   traversal or any other value to have it returned to the caller of
   for_each_inc_dec.  */

int
for_each_inc_dec (rtx x,
		  for_each_inc_dec_fn fn,
		  void *data)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx mem = *iter;
      if (mem
	  && MEM_P (mem)
	  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
	{
	  int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
	  if (res != 0)
	    return res;
	  iter.skip_subrtxes ();
	}
    }
  return 0;
}
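/* A minimal sketch of a callback (the name COUNT_AUTOINC is hypothetical;
   the six-argument shape matches the for_each_inc_dec_fn calls above,
   which pass MEM, the autoinc op, the modified reg, the addend operands
   and DATA):

     static int
     count_autoinc (rtx, rtx, rtx, rtx, rtx, void *data)
     {
       ++*(int *) data;
       return 0;
     }

     int n = 0;
     for_each_inc_dec (PATTERN (insn), count_autoinc, &n);  */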
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if ((tem = regno_use_in (regno, XEXP (x, i))))
	    return tem;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
	    return tem;
    }

  return NULL_RTX;
}
/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The more
   positive the value, the stronger the preference for being the first
   operand.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always become the second operand.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -10;
  if (code == CONST_WIDE_INT)
    return -9;
  if (code == CONST_DOUBLE)
    return -8;
  if (code == CONST_FIXED)
    return -8;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
	return -7;
      if (code == CONST_WIDE_INT)
	return -6;
      if (code == CONST_DOUBLE)
	return -5;
      if (code == CONST_FIXED)
	return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
	return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
	 of objects.  Prefer pointer objects over non pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
	  || (MEM_P (op) && MEM_POINTER (op)))
	return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
	 This helps to make things linear.  In particular,
	 (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
	 operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
	 is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
	return 1;
      /* FALLTHRU */

    default:
      return 0;
    }
}
/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

bool
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
	  < commutative_operand_precedence (y));
}
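/* The usual idiom built on these precedences (a sketch of caller code):

     if (swap_commutative_operands_p (op0, op1))
       std::swap (op0, op1);

   so that, for example, a REG ends up before a CONST_INT in a
   commutative PLUS.  */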
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */
int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
	return 1;
    default:
      break;
    }
  return 0;
}
/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */
int
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return 0;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
	    return 1;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (in, i) - 1; j >= 0; j--)
	  if (loc == &XVECEXP (in, i, j)
	      || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
	    return 1;
    }
  return 0;
}
/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

unsigned int
subreg_lsb_1 (machine_mode outer_mode,
	      machine_mode inner_mode,
	      unsigned int subreg_byte)
{
  unsigned int bitpos;
  unsigned int byte;
  unsigned int word;

  /* A paradoxical subreg begins at bit position 0.  */
  if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
    return 0;

  if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    /* If the subreg crosses a word boundary ensure that
       it also begins and ends on a word boundary.  */
    gcc_assert (!((subreg_byte % UNITS_PER_WORD
		   + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
		  && (subreg_byte % UNITS_PER_WORD
		      || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));

  if (WORDS_BIG_ENDIAN)
    word = (GET_MODE_SIZE (inner_mode)
	    - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
  else
    word = subreg_byte / UNITS_PER_WORD;
  bitpos = word * BITS_PER_WORD;

  if (BYTES_BIG_ENDIAN)
    byte = (GET_MODE_SIZE (inner_mode)
	    - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
  else
    byte = subreg_byte % UNITS_PER_WORD;
  bitpos += byte * BITS_PER_UNIT;

  return bitpos;
}
/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

unsigned int
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
		       SUBREG_BYTE (x));
}
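/* Worked example (assuming UNITS_PER_WORD == 4, i.e. 4-byte words):
   for (subreg:SI (reg:DI R) 4), subreg_lsb_1 (SImode, DImode, 4)
   yields bit 32 on a little-endian target (word 1 of the DImode value),
   but bit 0 on a fully big-endian target, where byte offset 4 names the
   low-order half.  */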
/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.

   Rather than considering one particular inner register (and thus one
   particular "outer" register) in isolation, this function really uses
   XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
   function does not check whether adding INFO->offset to XREGNO gives
   a valid hard register; even if INFO->offset + XREGNO is out of range,
   there might be another register of the same type that is in range.
   Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
   register, since that can depend on things like whether the final
   register number is even or odd.  Callers that want to check whether
   this particular subreg can be replaced by a simple (reg ...) should
   use simplify_subreg_regno.  */

void
subreg_get_info (unsigned int xregno, machine_mode xmode,
		 unsigned int offset, machine_mode ymode,
		 struct subreg_info *info)
{
  int nregs_xmode, nregs_ymode;
  int mode_multiple, nregs_multiple;
  int offset_adj, y_offset, y_offset_adj;
  int regsize_xmode, regsize_ymode;
  bool rknown;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  rknown = false;

  /* If there are holes in a non-scalar mode in registers, we expect
     that it is made up of its units concatenated together.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      machine_mode xmode_unit;

      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
		  == (GET_MODE_NUNITS (xmode)
		      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs[xregno][xmode]
		  == (hard_regno_nregs[xregno][xmode_unit]
		      * GET_MODE_NUNITS (xmode)));

      /* You can only ask for a SUBREG of a value with holes in the middle
	 if you don't cross the holes.  (Such a SUBREG should be done by
	 picking a different register class, or doing it in memory if
	 necessary.)  An example of a value with holes is XCmode on 32-bit
	 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
	 3 for each part, but in memory it's two 128-bit parts.
	 Padding is assumed to be at the end (not necessarily the 'high part')
	 of each unit.  */
      if ((offset / GET_MODE_SIZE (xmode_unit) + 1
	   < GET_MODE_NUNITS (xmode))
	  && (offset / GET_MODE_SIZE (xmode_unit)
	      != ((offset + GET_MODE_SIZE (ymode) - 1)
		  / GET_MODE_SIZE (xmode_unit))))
	{
	  info->representable_p = false;
	  rknown = true;
	}
    }
  else
    nregs_xmode = hard_regno_nregs[xregno][xmode];

  nregs_ymode = hard_regno_nregs[xregno][ymode];

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown
      && offset == 0
      && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
	 actual hard registers than the original register, we must
	 return a negative offset so that we find the proper highpart
	 of the register.  */
      if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
	  ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
	info->offset = nregs_xmode - nregs_ymode;
      else
	info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
      && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
    {
      regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
      regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
      if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
	{
	  info->representable_p = false;
	  info->nregs
	    = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
	  info->offset = offset / regsize_xmode;
	  rknown = true;
	}
      if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
	{
	  info->representable_p = false;
	  info->nregs
	    = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
	  info->offset = offset / regsize_xmode;
	  rknown = true;
	}
      /* It's not valid to extract a subreg of mode YMODE at OFFSET that
	 would go outside of XMODE.  */
      if (!rknown
	  && GET_MODE_SIZE (ymode) + offset > GET_MODE_SIZE (xmode))
	{
	  info->representable_p = false;
	  info->nregs = nregs_ymode;
	  info->offset = offset / regsize_xmode;
	  return;
	}
      /* Quick exit for the simple and common case of extracting whole
	 subregisters from a multiregister value.  */
      /* ??? It would be better to integrate this into the code below,
	 if we can generalize the concept enough and figure out how
	 odd-sized modes can coexist with the other weird cases we support.  */
      if (!rknown
	  && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
	  && regsize_xmode == regsize_ymode
	  && (offset % regsize_ymode) == 0)
	{
	  info->representable_p = true;
	  info->nregs = nregs_ymode;
	  info->offset = offset / regsize_ymode;
	  gcc_assert (info->offset + info->nregs <= nregs_xmode);
	  return;
	}
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
    {
      info->representable_p = true;
      rknown = true;

      if (offset == 0 || nregs_xmode == nregs_ymode)
	{
	  info->offset = 0;
	  info->nregs = nregs_ymode;
	  return;
	}
    }

  /* This should always pass, otherwise we don't know how to verify
     the constraint.  These conditions may be relaxed but
     subreg_regno_offset would need to be redesigned.  */
  gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);

  if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
      && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
    {
      HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
      HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
      HOST_WIDE_INT off_low = offset & (ysize - 1);
      HOST_WIDE_INT off_high = offset & ~(ysize - 1);
      offset = (xsize - ysize - off_high) | off_low;
    }
  /* The XMODE value can be seen as a vector of NREGS_XMODE
     values.  The subreg must represent a lowpart of given field.
     Compute what field it is.  */
  offset_adj = offset;
  offset_adj -= subreg_lowpart_offset (ymode,
				       mode_for_size (GET_MODE_BITSIZE (xmode)
						      / nregs_xmode,
						      MODE_INT, 0));

  /* Size of ymode must not be greater than the size of xmode.  */
  mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
  gcc_assert (mode_multiple != 0);

  y_offset = offset / GET_MODE_SIZE (ymode);
  y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
  nregs_multiple = nregs_xmode / nregs_ymode;

  gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((mode_multiple % nregs_multiple) == 0);

  if (!rknown)
    {
      info->representable_p
	= (!(y_offset_adj % (mode_multiple / nregs_multiple)));
      rknown = true;
    }
  info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
  info->nregs = nregs_ymode;
}
/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */
unsigned int
subreg_regno_offset (unsigned int xregno, machine_mode xmode,
		     unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}
/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */
bool
subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
			       unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}
/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
		       unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

#ifdef CANNOT_CHANGE_MODE_CLASS
  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
      /* We can use mode change in LRA for some transformations.  */
      && ! lra_in_progress)
    return -1;
#endif

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && xregno == FRAME_POINTER_REGNUM)
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM
      /* We should convert hard stack register in LRA if it is
	 necessary.  */
      && ! lra_in_progress)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how complex FP arguments are passed
     on IA-64 and should be fixed.  See PR target/49226.  */
  if (!HARD_REGNO_MODE_OK (yregno, ymode)
      && HARD_REGNO_MODE_OK (xregno, xmode))
    return -1;

  return (int) yregno;
}
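/* For example, on a typical 32-bit little-endian target where DImode
   occupies two consecutive hard registers, simplify_subreg_regno (R,
   DImode, 4, SImode) returns R + 1, i.e. (subreg:SI (reg:DI R) 4) can
   be rewritten as the plain high register; it returns -1 whenever
   subreg_get_info reports the offset as not representable or one of
   the stack-related checks above fires.  */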
/* Return the final regno that a subreg expression refers to.  */
unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
				     GET_MODE (subreg),
				     SUBREG_BYTE (x),
				     GET_MODE (x));
  return ret;
}
/* Return the number of registers that a subreg expression refers
   to.  */
unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}
/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */

unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
		   &info);
  return info.nregs;
}
struct parms_set_data
{
  int nregs;
  HARD_REG_SET regs;
};

/* Helper function for noticing stores to parameter registers.  */
static void
parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct parms_set_data *const d = (struct parms_set_data *) data;
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
      && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
    {
      CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
      d->nregs--;
    }
}
/* Look backward for first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */
rtx_insn *
find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
{
  struct parms_set_data parm;
  rtx p;
  rtx_insn *before, *first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
	&& REG_P (XEXP (XEXP (p, 0), 0))
	&& !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
      {
	gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

	/* We only care about registers which can hold function
	   arguments.  */
	if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
	  continue;

	SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
	parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
	 another.  Stop in that case.  */
      if (CALL_P (before))
	break;

      /* Our caller must either ensure that we will find all sets
	 (in case code has not been optimized yet), or take care
	 for possible labels in a way by setting boundary to the preceding
	 CODE_LABEL.  */
      if (LABEL_P (before))
	{
	  gcc_assert (before == boundary);
	  break;
	}

      if (INSN_P (before))
	{
	  int nregs_old = parm.nregs;
	  note_stores (PATTERN (before), parms_set, &parm);
	  /* If we found something that did not set a parameter reg,
	     we're done.  Do not keep going, as that might result
	     in hoisting an insn before the setting of a pseudo
	     that is used by the hoisted insn.  */
	  if (nregs_old != parm.nregs)
	    first_set = before;
	  else
	    break;
	}
    }
  return first_set;
}
/* Return true if we should avoid inserting code between INSN and preceding
   call instruction.  */

bool
keep_with_call_p (const rtx_insn *insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
	  && fixed_regs[REGNO (SET_DEST (set))]
	  && general_operand (SET_SRC (set), VOIDmode))
	return true;
      if (REG_P (SET_SRC (set))
	  && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
	  && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	return true;
      /* There may be a stack pop just after the call and before the store
	 of the return register.  Search for the actual store when deciding
	 if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
	{
	  /* This CONST_CAST is okay because next_nonnote_insn just
	     returns its argument and we assign it to a const_rtx
	     variable.  */
	  const rtx_insn *i2
	    = next_nonnote_insn (const_cast<rtx_insn *> (insn));
	  if (i2 && keep_with_call_p (i2))
	    return true;
	}
    }
  return false;
}
/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);
  rtx_jump_table_data *table;

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &table))
    {
      rtvec vec = table->get_labels ();
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
	if (XEXP (RTVEC_ELT (vec, i), 0) == label)
	  return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}
/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   X appears as operand OPNO in an expression with code OUTER_CODE.
   SPEED specifies whether costs optimized for speed or size should
   be returned.  */

int
rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
	  int opno, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;
  int factor;

  if (x == 0)
    return 0;

  if (GET_MODE (x) != VOIDmode)
    mode = GET_MODE (x);

  /* A size N times larger than UNITS_PER_WORD likely needs N times as
     many insns, taking N times as long.  */
  factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
  if (factor == 0)
    factor = 1;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      /* Multiplication has time-complexity O(N*N), where N is the
	 number of units (translated from digits) when using
	 schoolbook long multiplication.  */
      total = factor * factor * COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      /* Similarly, complexity for schoolbook long division.  */
      total = factor * factor * COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    case SET:
      /* A SET doesn't have a mode, so let's look at the SET_DEST to get
	 the mode for the factor.  */
      mode = GET_MODE (SET_DEST (x));
      factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
      if (factor == 0)
	factor = 1;
      /* FALLTHRU */
    default:
      total = factor * COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
	 the mode, the more expensive it is.  */
      if (! MODES_TIEABLE_P (mode, GET_MODE (SUBREG_REG (x))))
	return COSTS_N_INSNS (2 + factor);
      break;

    default:
      if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
	return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), mode, code, i, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);

  return total;
}
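/* A minimal usage sketch: comparing two candidate sources for a SET,
   optimizing for speed or size as the insn's context dictates (A and B
   are caller-owned rtxes of mode MODE):

     bool speed = optimize_insn_for_speed_p ();
     if (rtx_cost (a, mode, SET, 1, speed)
	 < rtx_cost (b, mode, SET, 1, speed))
       ...prefer A...  */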
/* Fill in the structure C with information about both speed and size rtx
   costs for X, which is operand OPNO in an expression with code OUTER.  */

void
get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
		   struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, mode, outer, opno, true);
  c->size = rtx_cost (x, mode, outer, opno, false);
}
/* Return cost of address expression X.
   Expect that X is properly formed address reference.

   SPEED parameter specifies whether costs optimized for speed or size should
   be returned.  */

int
address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for cost of various unusual addresses, such as operands
     of push instruction.  It is not worthwhile to complicate writing
     of the target hook by such cases.  */

  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, mode, as, speed);
}
/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
{
  return rtx_cost (x, Pmode, MEM, 0, speed);
}
unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, machine_mode mode)
{
  return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
}

unsigned int
num_sign_bit_copies (const_rtx x, machine_mode mode)
{
  return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
}
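/* As an illustration of the pair: for (and:SI X (const_int 255)),
   nonzero_bits returns 0xff, and for (sign_extend:DI (reg:SI R)),
   num_sign_bit_copies in DImode is at least 33 (the 32 replicated bits
   plus the original sign bit).  */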
/* Return true if nonzero_bits1 might recurse into both operands
   of X.  */

static inline bool
nonzero_bits_binary_arith_p (const_rtx x)
{
  if (!ARITHMETIC_P (x))
    return false;
  switch (GET_CODE (x))
    {
    case AND:
    case XOR:
    case IOR:
    case UMIN:
    case UMAX:
    case SMIN:
    case SMAX:
    case PLUS:
    case MINUS:
    case MULT:
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      return true;
    default:
      return false;
    }
}
/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, machine_mode mode, const_rtx known_x,
		     machine_mode known_mode,
		     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (nonzero_bits_binary_arith_p (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));

      /* Check the second level.  */
      if (nonzero_bits_binary_arith_p (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return nonzero_bits1 (x, mode, x1, mode,
			      cached_nonzero_bits (x1, mode, known_x,
						   known_mode, known_ret));

      if (nonzero_bits_binary_arith_p (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}
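/* The caching matters for shapes like (plus X (plus X Y)): the
   second-level check above precomputes the bits of the shared X once
   and threads the result through KNOWN_X/KNOWN_RET, so nested
   duplicates do not multiply the work exponentially.  */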
/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
/* Given an expression, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is
   an arithmetic operation, we can do better.  */

static unsigned HOST_WIDE_INT
nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
	       machine_mode known_mode,
	       unsigned HOST_WIDE_INT known_ret)
{
  unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
  unsigned HOST_WIDE_INT inner_nz;
  enum rtx_code code;
  machine_mode inner_mode;
  unsigned int mode_width = GET_MODE_PRECISION (mode);

  /* For floating-point and vector values, assume all bits are needed.  */
  if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return nonzero;

  /* If X is wider than MODE, use its mode instead.  */
  if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
    {
      mode = GET_MODE (x);
      nonzero = GET_MODE_MASK (mode);
      mode_width = GET_MODE_PRECISION (mode);
    }

  if (mode_width > HOST_BITS_PER_WIDE_INT)
    /* Our only callers in this case look for single bit values.  So
       just return the mode mask.  Those tests will then be false.  */
    return nonzero;

  /* If MODE is wider than X, but both are a single word for both the host
     and target machines, we can compute this from which bits of the
     object might be nonzero in its own mode, taking into account the fact
     that on many CISC machines, accessing an object in a wider mode
     causes the high-order bits to become undefined.  So they are
     not known to be zero.  */

  if (!WORD_REGISTER_OPERATIONS
      && GET_MODE (x) != VOIDmode
      && GET_MODE (x) != mode
      && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
      && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
      && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
    {
      nonzero &= cached_nonzero_bits (x, GET_MODE (x),
				      known_x, known_mode, known_ret);
      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
      return nonzero;
    }

  /* Please keep nonzero_bits_binary_arith_p above in sync with
     the code in the switch below.  */
  code = GET_CODE (x);
  switch (code)
    {
    case REG:
#if defined(POINTERS_EXTEND_UNSIGNED)
      /* If pointers extend unsigned and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be zero.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
	  && REG_POINTER (x)
	  && !targetm.have_ptr_extend ())
	nonzero &= GET_MODE_MASK (ptr_mode);
#endif

      /* Include declared information about alignment of pointers.  */
      /* ??? We don't properly preserve REG_POINTER changes across
	 pointer-to-integer casts, so we can't trust it except for
	 things that we know must be pointers.  See execute/960116-1.c.  */
      if ((x == stack_pointer_rtx
	   || x == frame_pointer_rtx
	   || x == arg_pointer_rtx)
	  && REGNO_POINTER_ALIGN (REGNO (x)))
	{
	  unsigned HOST_WIDE_INT alignment
	    = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;

#ifdef PUSH_ROUNDING
	  /* If PUSH_ROUNDING is defined, it is possible for the
	     stack to be momentarily aligned only to that amount,
	     so we pick the least alignment.  */
	  if (x == stack_pointer_rtx && PUSH_ARGS)
	    alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
			     alignment);
#endif

	  nonzero &= ~(alignment - 1);
	}

      {
	unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
	rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
						  known_mode, known_ret,
						  &nonzero_for_hook);

	if (new_rtx)
	  nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
						   known_mode, known_ret);

	return nonzero_for_hook;
      }

    case CONST_INT:
      /* If X is negative in MODE, sign-extend the value.  */
      if (SHORT_IMMEDIATES_SIGN_EXTEND && INTVAL (x) > 0
	  && mode_width < BITS_PER_WORD
	  && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1)))
	     != 0)
	return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);

      return UINTVAL (x);

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* In many, if not most, RISC machines, reading a byte from memory
	 zeros the rest of the register.  Noticing that fact saves a lot
	 of extra zero-extends.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
	nonzero &= GET_MODE_MASK (GET_MODE (x));
#endif
      break;

    case EQ:  case NE:
    case UNEQ:  case LTGT:
    case GT:  case GTU:  case UNGT:
    case LT:  case LTU:  case UNLT:
    case GE:  case GEU:  case UNGE:
    case LE:  case LEU:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If this produces an integer result, we know which bits are set.
	 Code here used to clear bits outside the mode of X, but that is
	 now done above.  */
      /* Mind that MODE is the mode the caller wants to look at this
	 operation in, and not the actual operation mode.  We can wind
	 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
	 that describes the results of a vector compare.  */
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
	  && mode_width <= HOST_BITS_PER_WIDE_INT)
	nonzero = STORE_FLAG_VALUE;
      break;

    case NEG:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
	 and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
	  == GET_MODE_PRECISION (GET_MODE (x)))
	nonzero = 1;
#endif

      if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
	nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
      break;

    case ABS:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
	 and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
	  == GET_MODE_PRECISION (GET_MODE (x)))
	nonzero = 1;
#endif
      break;

    case TRUNCATE:
      nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
				       known_x, known_mode, known_ret)
		  & GET_MODE_MASK (mode));
      break;

    case ZERO_EXTEND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
	nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
      break;

    case SIGN_EXTEND:
      /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
	 Otherwise, show all the bits in the outer mode but not the inner
	 may be nonzero.  */
      inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
	{
	  inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
	  if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
	    inner_nz |= (GET_MODE_MASK (mode)
			 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
	}

      nonzero &= inner_nz;
      break;

    case AND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret)
      		 & cached_nonzero_bits (XEXP (x, 1), mode,
					known_x, known_mode, known_ret);
      break;

    case XOR:   case IOR:
    case UMIN:  case UMAX:  case SMIN:  case SMAX:
      {
	unsigned HOST_WIDE_INT nonzero0
	   = cached_nonzero_bits (XEXP (x, 0), mode,
				  known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero0) != nonzero)
	  nonzero &= nonzero0
      		     | cached_nonzero_bits (XEXP (x, 1), mode,
					    known_x, known_mode, known_ret);
      }
      break;

    case PLUS:  case MINUS:
    case MULT:
    case DIV:   case UDIV:
    case MOD:   case UMOD:
      /* We can apply the rules of arithmetic to compute the number of
	 high- and low-order zero bits of these operations.  We start by
	 computing the width (position of the highest-order nonzero bit)
	 and the number of low-order zero bits for each value.  */
      {
	unsigned HOST_WIDE_INT nz0
	  = cached_nonzero_bits (XEXP (x, 0), mode,
				 known_x, known_mode, known_ret);
	unsigned HOST_WIDE_INT nz1
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);
	int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
	int width0 = floor_log2 (nz0) + 1;
	int width1 = floor_log2 (nz1) + 1;
	int low0 = ctz_or_zero (nz0);
	int low1 = ctz_or_zero (nz1);
	unsigned HOST_WIDE_INT op0_maybe_minusp
	  = nz0 & (HOST_WIDE_INT_1U << sign_index);
	unsigned HOST_WIDE_INT op1_maybe_minusp
	  = nz1 & (HOST_WIDE_INT_1U << sign_index);
	unsigned int result_width = mode_width;
	int result_low = 0;

	switch (code)
	  {
	  case PLUS:
	    result_width = MAX (width0, width1) + 1;
	    result_low = MIN (low0, low1);
	    break;
	  case MINUS:
	    result_low = MIN (low0, low1);
	    break;
	  case MULT:
	    result_width = width0 + width1;
	    result_low = low0 + low1;
	    break;
	  case DIV:
	    if (width1 == 0)
	      break;
	    if (!op0_maybe_minusp && !op1_maybe_minusp)
	      result_width = width0;
	    break;
	  case UDIV:
	    if (width1 == 0)
	      break;
	    result_width = width0;
	    break;
	  case MOD:
	    if (width1 == 0)
	      break;
	    if (!op0_maybe_minusp && !op1_maybe_minusp)
	      result_width = MIN (width0, width1);
	    result_low = MIN (low0, low1);
	    break;
	  case UMOD:
	    if (width1 == 0)
	      break;
	    result_width = MIN (width0, width1);
	    result_low = MIN (low0, low1);
	    break;
	  default:
	    gcc_unreachable ();
	  }

	if (result_width < mode_width)
	  nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;

	if (result_low > 0)
	  nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
      }
      break;

    case ZERO_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
	nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
      break;

    case SUBREG:
      /* If this is a SUBREG formed for a promoted variable that has
	 been zero-extended, we know that at least the high-order bits
	 are zero, though others might be too.  */
      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
	nonzero = GET_MODE_MASK (GET_MODE (x))
		  & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
					 known_x, known_mode, known_ret);

      inner_mode = GET_MODE (SUBREG_REG (x));
      /* If the inner mode is a single word for both the host and target
	 machines, we can compute this from which bits of the inner
	 object might be nonzero.  */
      if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
	  && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
	{
	  nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
					  known_x, known_mode, known_ret);

#ifdef LOAD_EXTEND_OP
	  /* If this is a typical RISC machine, we only have to worry
	     about the way loads are extended.  */
	  if (WORD_REGISTER_OPERATIONS
	      && ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
		   ? val_signbit_known_set_p (inner_mode, nonzero)
		   : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
		  || !MEM_P (SUBREG_REG (x))))
#endif
	    {
	      /* On many CISC machines, accessing an object in a wider mode
		 causes the high-order bits to become undefined.  So they are
		 not known to be zero.  */
	      if (GET_MODE_PRECISION (GET_MODE (x))
		  > GET_MODE_PRECISION (inner_mode))
		nonzero |= (GET_MODE_MASK (GET_MODE (x))
			    & ~GET_MODE_MASK (inner_mode));
	    }
	}
      break;

    case ASHIFTRT:
    case LSHIFTRT:
    case ASHIFT:
    case ROTATE:
      /* The nonzero bits are in two classes: any bits within MODE
	 that aren't in GET_MODE (x) are always significant.  The rest of the
	 nonzero bits are those that are significant in the operand of
	 the shift when shifted the appropriate number of bits.  This
	 shows that high-order bits are cleared by the right shift and
	 low-order bits by left shifts.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
	{
	  machine_mode inner_mode = GET_MODE (x);
	  unsigned int width = GET_MODE_PRECISION (inner_mode);
	  int count = INTVAL (XEXP (x, 1));
	  unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
	  unsigned HOST_WIDE_INT op_nonzero
	    = cached_nonzero_bits (XEXP (x, 0), mode,
				   known_x, known_mode, known_ret);
	  unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
	  unsigned HOST_WIDE_INT outer = 0;

	  if (mode_width > width)
	    outer = (op_nonzero & nonzero & ~mode_mask);

	  if (code == LSHIFTRT)
	    inner >>= count;
	  else if (code == ASHIFTRT)
	    {
	      inner >>= count;

	      /* If the sign bit may have been nonzero before the shift, we
		 need to mark all the places it could have been copied to
		 by the shift as possibly nonzero.  */
	      if (inner & (HOST_WIDE_INT_1U << (width - 1 - count)))
		inner |= ((HOST_WIDE_INT_1U << count) - 1)
			 << (width - count);
	    }
	  else if (code == ASHIFT)
	    inner <<= count;
	  else
	    inner = ((inner << (count % width)
		      | (inner >> (width - (count % width)))) & mode_mask);

	  nonzero &= (outer | inner);
	}
      break;

    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CLRSB:
      /* This is at most the number of bits in the mode minus 1.  */
      nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
	unsigned HOST_WIDE_INT nonzero_true
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero_true) != nonzero)
	  nonzero &= nonzero_true
       		     | cached_nonzero_bits (XEXP (x, 2), mode,
					    known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}
/* See the macro definition above.  */
#undef cached_num_sign_bit_copies


/* Return true if num_sign_bit_copies1 might recurse into both operands
   of X.  */

static inline bool
num_sign_bit_copies_binary_arith_p (const_rtx x)
{
  if (!ARITHMETIC_P (x))
    return false;
  switch (GET_CODE (x))
    {
    case IOR:
    case AND:
    case XOR:
    case SMIN:
    case SMAX:
    case UMIN:
    case UMAX:
    case PLUS:
    case MINUS:
    case MULT:
      return true;
    default:
      return false;
    }
}
/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, machine_mode mode, const_rtx known_x,
			    machine_mode known_mode,
			    unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (num_sign_bit_copies_binary_arith_p (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));

      /* Check the second level.  */
      if (num_sign_bit_copies_binary_arith_p (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x1, mode,
				cached_num_sign_bit_copies (x1, mode, known_x,
							    known_mode,
							    known_ret));

      if (num_sign_bit_copies_binary_arith_p (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}
4780 /* Return the number of bits at the high-order end of X that are known to
4781 be equal to the sign bit. X will be used in mode MODE; if MODE is
4782 VOIDmode, X will be used in its own mode. The returned value will always
4783 be between 1 and the number of bits in MODE. */
4786 num_sign_bit_copies1 (const_rtx x
, machine_mode mode
, const_rtx known_x
,
4787 machine_mode known_mode
,
4788 unsigned int known_ret
)
4790 enum rtx_code code
= GET_CODE (x
);
4791 unsigned int bitwidth
= GET_MODE_PRECISION (mode
);
4792 int num0
, num1
, result
;
4793 unsigned HOST_WIDE_INT nonzero
;
4795 /* If we weren't given a mode, use the mode of X. If the mode is still
4796 VOIDmode, we don't know anything. Likewise if one of the modes is
4799 if (mode
== VOIDmode
)
4800 mode
= GET_MODE (x
);
4802 if (mode
== VOIDmode
|| FLOAT_MODE_P (mode
) || FLOAT_MODE_P (GET_MODE (x
))
4803 || VECTOR_MODE_P (GET_MODE (x
)) || VECTOR_MODE_P (mode
))
4806 /* For a smaller object, just ignore the high bits. */
4807 if (bitwidth
< GET_MODE_PRECISION (GET_MODE (x
)))
4809 num0
= cached_num_sign_bit_copies (x
, GET_MODE (x
),
4810 known_x
, known_mode
, known_ret
);
4812 num0
- (int) (GET_MODE_PRECISION (GET_MODE (x
)) - bitwidth
));
4815 if (GET_MODE (x
) != VOIDmode
&& bitwidth
> GET_MODE_PRECISION (GET_MODE (x
)))
4817 /* If this machine does not do all register operations on the entire
4818 register and MODE is wider than the mode of X, we can say nothing
4819 at all about the high-order bits. */
4820 if (!WORD_REGISTER_OPERATIONS
)
4823 /* Likewise on machines that do, if the mode of the object is smaller
4824 than a word and loads of that size don't sign extend, we can say
4825 nothing about the high order bits. */
4826 if (GET_MODE_PRECISION (GET_MODE (x
)) < BITS_PER_WORD
4827 #ifdef LOAD_EXTEND_OP
4828 && LOAD_EXTEND_OP (GET_MODE (x
)) != SIGN_EXTEND
4834 /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
4835 the code in the switch below. */
4840 #if defined(POINTERS_EXTEND_UNSIGNED)
4841 /* If pointers extend signed and this is a pointer in Pmode, say that
4842 all the bits above ptr_mode are known to be sign bit copies. */
4843 /* As we do not know which address space the pointer is referring to,
4844 we can do this only if the target does not support different pointer
4845 or address modes depending on the address space. */
4846 if (target_default_pointer_address_modes_p ()
4847 && ! POINTERS_EXTEND_UNSIGNED
&& GET_MODE (x
) == Pmode
4848 && mode
== Pmode
&& REG_POINTER (x
)
4849 && !targetm
.have_ptr_extend ())
4850 return GET_MODE_PRECISION (Pmode
) - GET_MODE_PRECISION (ptr_mode
) + 1;
4854 unsigned int copies_for_hook
= 1, copies
= 1;
4855 rtx new_rtx
= rtl_hooks
.reg_num_sign_bit_copies (x
, mode
, known_x
,
4856 known_mode
, known_ret
,
4860 copies
= cached_num_sign_bit_copies (new_rtx
, mode
, known_x
,
4861 known_mode
, known_ret
);
4863 if (copies
> 1 || copies_for_hook
> 1)
4864 return MAX (copies
, copies_for_hook
);
4866 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
    case MEM:
#ifdef LOAD_EXTEND_OP
      /* Some RISC machines sign-extend all loads of smaller than a word.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
	return MAX (1, ((int) bitwidth
			- (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
#endif
      break;

    case CONST_INT:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

    case SUBREG:
      /* If this is a SUBREG for a promoted object that is sign-extended
	 and we are looking at it in a wider mode, we know that at least the
	 high-order bits are known to be sign bit copies.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					     known_x, known_mode, known_ret);
	  return MAX ((int) bitwidth
		      - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
		      num0);
	}

      /* For a smaller object, just ignore the high bits.  */
      if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
					     known_x, known_mode, known_ret);
	  return MAX (1, (num0
			  - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
				   - bitwidth)));
	}

#ifdef LOAD_EXTEND_OP
      /* For paradoxical SUBREGs on machines where all register operations
	 affect the entire register, just look inside.  Note that we are
	 passing MODE to the recursive call, so the number of sign bit copies
	 will remain relative to that mode, not the inner mode.  */

      /* This works only if loads sign extend.  Otherwise, if we get a
	 reload for the inner part, it may be loaded from the stack, and
	 then we lose all sign bit copies that existed before the store
	 to the stack.  */

      if (WORD_REGISTER_OPERATIONS
	  && paradoxical_subreg_p (x)
	  && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
	  && MEM_P (SUBREG_REG (x)))
	return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					   known_x, known_mode, known_ret);
#endif
      break;
    case SIGN_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1)))
	return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
      break;

    case SIGN_EXTEND:
      return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
	      + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
					    known_x, known_mode, known_ret));

    case TRUNCATE:
      /* For a smaller object, just ignore the high bits.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
					 known_x, known_mode, known_ret);
      return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
				    - bitwidth)));

    case NOT:
      return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);

    case ROTATE:       case ROTATERT:
      /* If we are rotating left by a number of bits less than the number
	 of sign bit copies, we can just subtract that amount from the
	 number.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < (int) bitwidth)
	{
	  num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					     known_x, known_mode, known_ret);
	  return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
				 : (int) bitwidth - INTVAL (XEXP (x, 1))));
	}
      break;

    case NEG:
      /* In general, this subtracts one sign bit copy.  But if the value
	 is known to be positive, the number of sign bit copies is the
	 same as that of the input.  Finally, if the input has just one bit
	 that might be nonzero, all the bits are copies of the sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return num0 > 1 ? num0 - 1 : 1;

      nonzero = nonzero_bits (XEXP (x, 0), mode);
      if (nonzero == 1)
	return bitwidth;

      if (num0 > 1
	  && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
	num0--;

      return num0;
    case IOR:   case AND:   case XOR:
    case SMIN:  case SMAX:  case UMIN:  case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
	 MIN and MAX operations always return one of the operands.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      /* If num1 is clearing some of the top bits then regardless of
	 the other term, we are guaranteed to have at least that many
	 high-order zero bits.  */
      if (code == AND
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
	return num1;

      /* Similarly for IOR when setting high-order bits.  */
      if (code == IOR
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return num1;

      return MIN (num0, num1);

    case PLUS:  case MINUS:
      /* For addition and subtraction, we can have a 1-bit carry.  However,
	 if we are subtracting 1 from a positive number, there will not
	 be such a carry.  Furthermore, if the positive number is known to
	 be 0 or 1, we know the result is either -1 or 0.  */

      if (code == PLUS && XEXP (x, 1) == constm1_rtx
	  && bitwidth <= HOST_BITS_PER_WIDE_INT)
	{
	  nonzero = nonzero_bits (XEXP (x, 0), mode);
	  if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
	    return (nonzero == 1 || nonzero == 0 ? bitwidth
		    : bitwidth - floor_log2 (nonzero) - 1);
	}

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      result = MAX (1, MIN (num0, num1) - 1);

      return result;

    case MULT:
      /* The number of bits of the product is the sum of the number of
	 bits of both terms.  However, unless one of the terms is known
	 to be positive, we must allow for an additional bit since negating
	 a negative number can remove one sign bit copy.  */

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
      if (result > 0
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (((nonzero_bits (XEXP (x, 0), mode)
		    & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
		  && ((nonzero_bits (XEXP (x, 1), mode)
		       & (HOST_WIDE_INT_1U << (bitwidth - 1)))
		      != 0))))
	result--;

      return MAX (1, result);
    case UDIV:
      /* The result must be <= the first operand.  If the first operand
	 has the high bit set, we know nothing about the number of sign
	 bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 0), mode)
		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);

    case UMOD:
      /* The result must be <= the second operand.  If the second operand
	 has (or just might have) the high bit set, we know nothing about
	 the number of sign bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 1), mode)
		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);

    case DIV:
      /* Similar to unsigned division, except that we have to worry about
	 the case where the divisor is negative, in which case we have
	 to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
	result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
	result--;

      return result;

    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
	 sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;

    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
	  || INTVAL (XEXP (x, 1)) < 0
	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
	  || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
	return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));

    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
					 known_x, known_mode, known_ret);
      return MIN (num0, num1);
    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}
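
/* Illustrative sketch (not used by GCC): the CONST_INT rule above,
   restated on plain host integers for a fixed 16-bit width.  A
   negative constant is complemented first, so that counting leading
   zero bits counts sign-bit copies.  The helper name and the fixed
   width are ours, purely for illustration.  */

static unsigned int ATTRIBUTE_UNUSED
sign_bit_copies_of_const16 (unsigned HOST_WIDE_INT val)
{
  const unsigned int bitwidth = 16;
  unsigned HOST_WIDE_INT nonzero = val & 0xffff;

  /* If the sign bit is set, complement and remask, exactly as the
     CONST_INT case does.  */
  if (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1)))
    nonzero = (~nonzero) & 0xffff;

  /* E.g. 0x0001 -> 15 copies, 0xfffe -> 15, 0x00ff -> 8, 0 -> 16.  */
  return nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1;
}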
/* Calculate the rtx_cost of a single instruction.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
insn_rtx_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.
     We can't use single_set since we only have the pattern.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      if (set)
		return 0;
	      set = x;
	    }
	}
      if (!set)
	return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
/* Returns estimate on cost of computing SEQ.  */

unsigned
seq_cost (const rtx_insn *seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
	cost += set_rtx_cost (set, speed);
      else
	cost++;
    }

  return cost;
}
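
/* Illustrative sketch (not part of GCC): a typical use of seq_cost is
   to generate a candidate expansion and keep it only if it is cheap
   enough.  COSTS_N_INSNS is the real macro; the threshold of four
   instructions here is an arbitrary choice of ours.  */

static bool ATTRIBUTE_UNUSED
sequence_cheap_enough_p (rtx_insn *seq, bool speed)
{
  /* Accept the sequence if it is estimated to cost no more than
     four "typical" instructions.  */
  return seq_cost (seq, speed) <= (unsigned) COSTS_N_INSNS (4);
}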
/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonizing it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */

rtx
canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
			rtx_insn **earliest,
			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx_insn *prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;

  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	 && op1 == CONST0_RTX (GET_MODE (op0))
	 && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || !NONJUMP_INSN_P (prev)
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}

      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (!REG_P (op0))
	break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
	  || !NONJUMP_INSN_P (prev)
	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
	  /* In cfglayout mode, there do not have to be labels at the
	     beginning of a block, or jumps at the end, so the previous
	     conditions would not stop us when we reach bb boundary.  */
	  || BLOCK_FOR_INSN (prev) != bb)
	break;

      set = set_of (op0, prev);

      if (set
	  && (GET_CODE (set) != SET
	      || !rtx_equal_p (SET_DEST (set), op0)))
	break;

      /* If this is setting OP0, get what it sets it to if it looks
	 relevant.  */
      if (set)
	{
	  machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* ??? We may not combine comparisons done in a CCmode with
	     comparisons not done in a CCmode.  This is to aid targets
	     like Alpha that have an IEEE compliant EQ instruction, and
	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
	     actually artificial, simply to prevent the combination, but
	     should not affect other platforms.

	     However, we must allow VOIDmode comparisons to match either
	     CCmode or non-CCmode comparison, because some ports have
	     modeless comparisons inside branch patterns.

	     ??? This mode check should perhaps look more like the mode check
	     in simplify_comparison in combine.  */
	  if (((GET_MODE_CLASS (mode) == MODE_CC)
	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
	      && mode != VOIDmode
	      && inner_mode != VOIDmode)
	    break;
	  if (GET_CODE (SET_SRC (set)) == COMPARE
	      || (((code == NE
		    || (code == LT
			&& val_signbit_known_set_p (inner_mode,
						    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		    || (code == LT
			&& SCALAR_FLOAT_MODE_P (inner_mode)
			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			    REAL_VALUE_NEGATIVE (fsfv)))
#endif
		    ))
		  && COMPARISON_P (SET_SRC (set))))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && val_signbit_known_set_p (inner_mode,
						     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     ))
		   && COMPARISON_P (SET_SRC (set)))
	    {
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else if ((code == EQ || code == NE)
		   && GET_CODE (SET_SRC (set)) == XOR)
	    /* Handle sequences like:

	       (set op0 (xor X Y))
	       ...(eq|ne op0 (const_int 0))...

	       in which case:

	       (eq op0 (const_int 0)) reduces to (eq X Y)
	       (ne op0 (const_int 0)) reduces to (ne X Y)

	       This is the form used by MIPS16, for example.  */
	    x = SET_SRC (set);
	  else
	    break;
	}

      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;

      if (x)
	{
	  /* If the caller is expecting the condition to be valid at INSN,
	     make sure X doesn't change before INSN.  */
	  if (valid_at_insn_p)
	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
	      break;
	  if (COMPARISON_P (x))
	    code = GET_CODE (x);
	  if (reverse_code)
	    {
	      code = reversed_comparison_code (x, prev);
	      if (code == UNKNOWN)
		return 0;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }

  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
      && CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
	{
	case LE:
	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
	    code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
	case GE:
	  if ((const_val & max_val)
	      != (HOST_WIDE_INT_1U
		  << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
	    code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
	  break;

	case LEU:
	  if (uconst_val < max_val)
	    code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
	  break;

	default:
	  break;
	}
    }

  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
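
/* Illustrative sketch (not used by GCC): rule (4) above on plain host
   integers.  (LE x C) becomes (LT x C+1) provided C+1 does not wrap
   around in the mode, mirroring the LE arm of the switch.  The mode
   is fixed at 8 bits and the helper name is ours.  */

static bool ATTRIBUTE_UNUSED
canonicalize_le_to_lt_8bit (HOST_WIDE_INT *const_val)
{
  const unsigned HOST_WIDE_INT max_val = 0xff;	/* GET_MODE_MASK of QImode.  */

  /* (LE x 0x7f) must stay as-is: 0x7f is the largest signed 8-bit
     value, so 0x80 would wrap around.  Anything smaller can be
     bumped.  */
  if ((unsigned HOST_WIDE_INT) *const_val == max_val >> 1)
    return false;

  (*const_val)++;		/* The caller now tests LT instead of LE.  */
  return true;
}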
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
	       int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
				 allow_cc_mode, valid_at_insn_p);
}
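
/* Illustrative sketch (not part of GCC): dumping the canonicalized
   condition of a conditional jump.  get_condition, JUMP_P and
   print_rtl_single are the real APIs; passing a null EARLIEST asks
   for a condition that is valid at the jump itself.  The helper name
   is ours.  */

static void ATTRIBUTE_UNUSED
dump_jump_condition (FILE *file, rtx_insn *insn)
{
  if (!JUMP_P (insn))
    return;

  rtx cond = get_condition (insn, NULL, false, true);
  if (cond)
    /* COND is e.g. (lt (reg:SI 100) (const_int 10)), with any
       constant already moved to the second operand.  */
    print_rtl_single (file, cond);
}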
/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  machine_mode mode, in_mode;

  for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
       in_mode = GET_MODE_WIDER_MODE (mode))
    for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
	 mode = GET_MODE_WIDER_MODE (mode))
      {
	machine_mode i;

	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
	   extends to the next widest mode.  */
	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
		    || GET_MODE_WIDER_MODE (mode) == in_mode);

	/* We are in in_mode.  Count how many bits outside of mode
	   have to be copies of the sign-bit.  */
	for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
	  {
	    machine_mode wider = GET_MODE_WIDER_MODE (i);

	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
		/* We can only check sign-bit copies starting from the
		   top-bit.  In order to be able to check the bits we
		   have already seen we pretend that subsequent bits
		   have to be sign-bit copies too.  */
		|| num_sign_bit_copies_in_rep [in_mode][mode])
	      num_sign_bit_copies_in_rep [in_mode][mode]
		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
	  }
      }
}
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}
/* Return true if RTX code CODE has a single sequence of zero or more
   "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
   entry in that case.  */

static bool
setup_reg_subrtx_bounds (unsigned int code)
{
  const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
  unsigned int i = 0;
  for (; format[i] != 'e'; ++i)
    {
      if (!format[i])
	/* No subrtxes.  Leave start and count as 0.  */
	return true;
      if (format[i] == 'E' || format[i] == 'V')
	return false;
    }

  /* Record the sequence of 'e's.  */
  rtx_all_subrtx_bounds[code].start = i;
  do
    ++i;
  while (format[i] == 'e');
  rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
  /* rtl-iter.h relies on this.  */
  gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);

  for (; format[i]; ++i)
    if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
      return false;

  return true;
}
/* Initialize rtx_all_subrtx_bounds.  */
void
init_rtlanal (void)
{
  unsigned int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      if (!setup_reg_subrtx_bounds (i))
	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
      if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
    }

  init_num_sign_bit_copies_in_rep ();
}
/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}
/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */

int
low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
	return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
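
/* Illustrative sketch (not used by GCC): why exact_log2 (m + 1) gives
   the field length.  A low bitmask of length N is 2^N - 1, so M + 1
   is a power of two exactly for such masks; anything else makes
   exact_log2 return -1.  The helper name is ours.  */

static int ATTRIBUTE_UNUSED
low_bitmask_len_examples (void)
{
  int len;

  len = low_bitmask_len (VOIDmode, 0xff);	/* 0xff + 1 == 0x100: 8.  */
  gcc_checking_assert (len == 8);

  len = low_bitmask_len (VOIDmode, 0xf0);	/* Not low-order: -1.  */
  gcc_checking_assert (len == -1);

  return len;
}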
/* Return the mode of MEM's address.  */

machine_mode
get_address_mode (rtx mem)
{
  machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return mode;
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}
/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.

   TODO: This function needs to be rewritten to work on any size
   integer.  */

void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
	{
	  /* In this case the CONST_INT holds both target words.
	     Extract the bits from it into two word-sized pieces.
	     Sign extend each half to HOST_WIDE_INT.  */
	  unsigned HOST_WIDE_INT low, high;
	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
	  unsigned bits_per_word = BITS_PER_WORD;

	  /* Set sign_bit to the most significant bit of a word.  */
	  sign_bit = 1;
	  sign_bit <<= bits_per_word - 1;

	  /* Set mask so that all bits of the word are set.  We could
	     have used 1 << BITS_PER_WORD instead of basing the
	     calculation on sign_bit.  However, on machines where
	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
	     compiler warning, even though the code would never be
	     executed.  */
	  mask = sign_bit << 1;
	  mask--;

	  /* Set sign_extend as any remaining bits.  */
	  sign_extend = ~mask;

	  /* Pick the lower word and sign-extend it.  */
	  low = INTVAL (value);
	  low &= mask;
	  if (low & sign_bit)
	    low |= sign_extend;

	  /* Pick the higher word, shifted to the least significant
	     bits, and sign-extend it.  */
	  high = INTVAL (value);
	  high >>= bits_per_word - 1;
	  high >>= 1;
	  high &= mask;
	  if (high & sign_bit)
	    high |= sign_extend;

	  /* Store the words in the target machine order.  */
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = GEN_INT (high);
	      *second = GEN_INT (low);
	    }
	  else
	    {
	      *first = GEN_INT (low);
	      *second = GEN_INT (high);
	    }
	}
      else
	{
	  /* The rule for using CONST_INT for a wider mode
	     is that we regard the value as signed.
	     So sign-extend it.  */
	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = high;
	      *second = value;
	    }
	  else
	    {
	      *first = value;
	      *second = high;
	    }
	}
    }
  else if (GET_CODE (value) == CONST_WIDE_INT)
    {
      /* All of this is scary code and needs to be converted to
	 properly work with any size integer.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	}
      else
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	}
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
	{
	  *first = const0_rtx;
	  *second = value;
	}
      else
	{
	  *first = value;
	  *second = const0_rtx;
	}
    }
  else if (GET_MODE (value) == VOIDmode
	   /* This is the old way we did CONST_DOUBLE integers.  */
	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
	 So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
	}
      else
	{
	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
	}
    }
  else
    {
      long l[2];

      /* Note, this converts the REAL_VALUE_TYPE to the target's
	 format, splits up the floating point double and outputs
	 exactly 32 bits of it into each of l[0] and l[1] --
	 not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);

      /* If 32 bits is an entire word for the target, but not for the host,
	 then sign-extend on the host so that the number will look the same
	 way on the host that it would on the target.  See for instance
	 simplify_unary_operation.  The #if is needed to avoid compiler
	 warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
	{
	  if (l[0] & ((long) 1 << 31))
	    l[0] |= ((unsigned long) (-1) << 32);
	  if (l[1] & ((long) 1 << 31))
	    l[1] |= ((unsigned long) (-1) << 32);
	}
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}
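
/* Illustrative sketch (not used by GCC): the CONST_INT path above,
   restated for a 64-bit host and 32-bit target words.  The helper
   name and the fixed sizes are ours; split_double itself takes the
   word size from BITS_PER_WORD.  Assumes HOST_BITS_PER_WIDE_INT is
   64.  */

static void ATTRIBUTE_UNUSED
split_hwi_into_32bit_words (HOST_WIDE_INT val,
			    HOST_WIDE_INT *low, HOST_WIDE_INT *high)
{
  const unsigned HOST_WIDE_INT mask = 0xffffffff;
  const unsigned HOST_WIDE_INT sign_bit = HOST_WIDE_INT_1U << 31;
  const unsigned HOST_WIDE_INT sign_extend = ~mask;
  unsigned HOST_WIDE_INT lo = (unsigned HOST_WIDE_INT) val & mask;
  unsigned HOST_WIDE_INT hi = ((unsigned HOST_WIDE_INT) val >> 32) & mask;

  /* Sign-extend each 32-bit half to the full host width, as the
     CONST_INT case does with MASK, SIGN_BIT and SIGN_EXTEND.  */
  if (lo & sign_bit)
    lo |= sign_extend;
  if (hi & sign_bit)
    hi |= sign_extend;

  *low = (HOST_WIDE_INT) lo;
  *high = (HOST_WIDE_INT) hi;
  /* E.g. val == -1 yields *low == -1 and *high == -1; a
     WORDS_BIG_ENDIAN target stores *high first, as split_double
     does.  */
}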
/* Return true if X is a sign_extract or zero_extract from the least
   significant bit.  */

static bool
lsb_bitfield_op_p (rtx x)
{
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
    {
      machine_mode mode = GET_MODE (XEXP (x, 0));
      HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));

      return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
    }
  return false;
}
/* Strip outer address "mutations" from LOC and return a pointer to the
   inner value.  If OUTER_CODE is nonnull, store the code of the innermost
   stripped expression there.

   "Mutations" either convert between modes or apply some kind of
   extension, truncation or alignment.  */

rtx *
strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
  for (;;)
    {
      enum rtx_code code = GET_CODE (*loc);
      if (GET_RTX_CLASS (code) == RTX_UNARY)
	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
	   used to convert between pointer sizes.  */
	loc = &XEXP (*loc, 0);
      else if (lsb_bitfield_op_p (*loc))
	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
	   acts as a combined truncation and extension.  */
	loc = &XEXP (*loc, 0);
      else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
	/* (and ... (const_int -X)) is used to align to X bytes.  */
	loc = &XEXP (*loc, 0);
      else if (code == SUBREG
	       && !OBJECT_P (SUBREG_REG (*loc))
	       && subreg_lowpart_p (*loc))
	/* (subreg (operator ...) ...) inside and is used for mode
	   conversion too.  */
	loc = &SUBREG_REG (*loc);
      else
	return loc;
      if (outer_code)
	*outer_code = code;
    }
}
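
/* Illustrative sketch (not part of GCC): for an address such as
   (and:DI (subreg:DI (plus:SI R1 R2) 0) (const_int -4)),
   strip_address_mutations steps through the AND (alignment) and the
   lowpart SUBREG (mode conversion) and returns a pointer to the PLUS.
   The helper name is ours.  */

static bool ATTRIBUTE_UNUSED
mutated_plus_p (rtx *loc)
{
  /* OUTER is only written when something is stripped, so start it at
     UNKNOWN.  After the call it holds the innermost stripped code
     (e.g. AND or SUBREG in the example above).  */
  enum rtx_code outer = UNKNOWN;
  rtx *inner = strip_address_mutations (loc, &outer);

  return GET_CODE (*inner) == PLUS;
}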
/* Return true if CODE applies some kind of scale.  The scaled value is
   the first operand and the scale is the second.  */

static bool
binary_scale_code_p (enum rtx_code code)
{
  return (code == MULT
	  || code == ASHIFT
	  /* Needed by ARM targets.  */
	  || code == ASHIFTRT
	  || code == LSHIFTRT
	  || code == ROTATE
	  || code == ROTATERT);
}
/* If *INNER can be interpreted as a base, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_base_term (rtx *inner)
{
  if (GET_CODE (*inner) == LO_SUM)
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}
/* If *INNER can be interpreted as an index, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_index_term (rtx *inner)
{
  /* At present, only constant scales are allowed.  */
  if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}
/* Set the segment part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->segment);
  info->segment = loc;
  info->segment_term = inner;
}

/* Set the base part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_base (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->base);
  info->base = loc;
  info->base_term = inner;
}

/* Set the index part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_index (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->index);
  info->index = loc;
  info->index_term = inner;
}

/* Set the displacement part of address INFO to LOC, given that INNER
   is the constant term.  */

static void
set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->disp);
  info->disp = loc;
  info->disp_term = inner;
}
/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
   rest of INFO accordingly.  */

static void
decompose_incdec_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  /* These addresses are only valid when the size of the addressed
     value is known.  */
  gcc_checking_assert (info->mode != VOIDmode);
}
/* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
   of INFO accordingly.  */

static void
decompose_automod_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  rtx plus = XEXP (*info->inner, 1);
  gcc_assert (GET_CODE (plus) == PLUS);

  info->base_term2 = &XEXP (plus, 0);
  gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));

  rtx *step = &XEXP (plus, 1);
  rtx *inner_step = strip_address_mutations (step);
  if (CONSTANT_P (*inner_step))
    set_address_disp (info, step, inner_step);
  else
    set_address_index (info, step, inner_step);
}
/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}
/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, machine_mode mode, addr_space_t as,
	  enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
	 or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
	return base_p ? 1 : -1;
    }
  return 0;
}
/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
					ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
	set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
	set_address_segment (info, loc, inner);
      else
	{
	  /* The only other possibilities are a base or an index.  */
	  rtx *base_term = get_base_term (inner);
	  rtx *index_term = get_index_term (inner);
	  gcc_assert (base_term || index_term);
	  if (!base_term)
	    set_address_index (info, loc, index_term);
	  else if (!index_term)
	    set_address_base (info, loc, base_term);
	  else
	    {
	      gcc_assert (base_term == index_term);
	      ops[out] = loc;
	      inner_ops[out] = base_term;
	      ++out;
	    }
	}
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
	 the base.  If we were confident that another term was the base
	 or index, treat the remaining operand as the other kind.  */
      if (!info->base)
	set_address_base (info, ops[0], inner_ops[0]);
      else
	set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
		    GET_CODE (*ops[1]))
	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
		       GET_CODE (*ops[0])))
	{
	  set_address_base (info, ops[0], inner_ops[0]);
	  set_address_index (info, ops[1], inner_ops[1]);
	}
      else
	{
	  set_address_base (info, ops[1], inner_ops[1]);
	  set_address_index (info, ops[0], inner_ops[0]);
	}
    }
  else
    gcc_assert (out == 0);
}
/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
		   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}
6227 decompose_lea_address (struct address_info
*info
, rtx
*loc
)
6229 decompose_address (info
, loc
, VOIDmode
, ADDR_SPACE_GENERIC
, ADDRESS
);
6232 /* Describe the address of MEM X in INFO. */
6235 decompose_mem_address (struct address_info
*info
, rtx x
)
6237 gcc_assert (MEM_P (x
));
6238 decompose_address (info
, &XEXP (x
, 0), GET_MODE (x
),
6239 MEM_ADDR_SPACE (x
), MEM
);
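
/* Illustrative sketch (not part of GCC): decomposing a MEM.  For
   (mem:SI (plus:SI (mult:SI (reg:SI 101) (const_int 4)) (reg:SI 100)))
   the INFO fields end up with *info.base pointing at (reg 100),
   *info.index at the (mult ...) term and info.disp null.  The helper
   name is ours.  */

static bool ATTRIBUTE_UNUSED
mem_has_base_plus_index_p (rtx mem)
{
  struct address_info info;

  decompose_mem_address (&info, mem);
  return info.base != NULL && info.index != NULL && !info.autoinc_p;
}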
/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
		     info->addr_outer_code);
}
/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}
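
/* Illustrative sketch (not used by GCC): the two scale encodings that
   get_index_scale recognizes.  (mult R (const_int 4)) and
   (ashift R (const_int 2)) both describe a scale of 4, since
   1 << 2 == 4; a bare R has scale 1.  The helper name is ours, and
   both addresses are assumed to have an index part, since
   get_index_scale dereferences info->index.  */

static bool ATTRIBUTE_UNUSED
index_scales_match_p (const struct address_info *a,
		      const struct address_info *b)
{
  /* Nonzero scales compare meaningfully even when one address uses
     MULT and the other uses ASHIFT.  */
  HOST_WIDE_INT sa = get_index_scale (a);
  HOST_WIDE_INT sb = get_index_scale (b);
  return sa != 0 && sa == sb;
}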
/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}
/* Return true if RTL X contains a SYMBOL_REF.  */

bool
contains_symbol_ref_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter))
      return true;

  return false;
}
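
/* Illustrative sketch (not part of GCC): FOR_EACH_SUBRTX as used above
   generalizes to any predicate over subexpressions.  This variant
   counts SYMBOL_REFs instead of stopping at the first one; the helper
   name is ours.  */

static int ATTRIBUTE_UNUSED
count_symbol_refs (const_rtx x)
{
  int n = 0;
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter))
      n++;
  return n;
}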
/* Return true if RTL X contains a SYMBOL_REF or LABEL_REF.  */

bool
contains_symbolic_reference_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
      return true;

  return false;
}
/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (const_rtx x)
{
  if (!targetm.have_tls)
    return false;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
      return true;
  return false;
}