1 /* Analyze RTL for GNU compiler.
2 Copyright (C) 1987-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "diagnostic-core.h"
26 #include "hard-reg-set.h"
27 #include "rtl.h"
28 #include "insn-config.h"
29 #include "recog.h"
30 #include "target.h"
31 #include "output.h"
32 #include "tm_p.h"
33 #include "flags.h"
34 #include "regs.h"
35 #include "function.h"
36 #include "df.h"
37 #include "tree.h"
38 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
39 #include "addresses.h"
40 #include "rtl-iter.h"
42 /* Forward declarations */
43 static void set_of_1 (rtx, const_rtx, void *);
44 static bool covers_regno_p (const_rtx, unsigned int);
45 static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
46 static int computed_jump_p_1 (const_rtx);
47 static void parms_set (rtx, const_rtx, void *);
49 static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, enum machine_mode,
50 const_rtx, enum machine_mode,
51 unsigned HOST_WIDE_INT);
52 static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, enum machine_mode,
53 const_rtx, enum machine_mode,
54 unsigned HOST_WIDE_INT);
55 static unsigned int cached_num_sign_bit_copies (const_rtx, enum machine_mode, const_rtx,
56 enum machine_mode,
57 unsigned int);
58 static unsigned int num_sign_bit_copies1 (const_rtx, enum machine_mode, const_rtx,
59 enum machine_mode, unsigned int);
61 /* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
62 -1 if a code has no such operand. */
63 static int non_rtx_starting_operands[NUM_RTX_CODE];
65 rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
66 rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
68 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
69 If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
70 SIGN_EXTEND then while narrowing we also have to enforce the
71 representation and sign-extend the value to mode DESTINATION_REP.
73 If the value is already sign-extended to DESTINATION_REP mode we
74 can just switch to DESTINATION mode on it. For each pair of
75 integral modes SOURCE and DESTINATION, when truncating from SOURCE
76 to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
77 contains the number of high-order bits in SOURCE that have to be
78 copies of the sign-bit so that we can do this mode-switch to
79 DESTINATION. */
81 static unsigned int
82 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
84 /* Store X into index I of ARRAY. ARRAY is known to have at least I
85 elements. Return the new base of ARRAY. */
87 template <typename T>
88 typename T::value_type *
89 generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
90 value_type *base,
91 size_t i, value_type x)
93 if (base == array.stack)
95 if (i < LOCAL_ELEMS)
97 base[i] = x;
98 return base;
100 gcc_checking_assert (i == LOCAL_ELEMS);
101 vec_safe_grow (array.heap, i + 1);
102 base = array.heap->address ();
103 memcpy (base, array.stack, sizeof (array.stack));
104 base[LOCAL_ELEMS] = x;
105 return base;
107 unsigned int length = array.heap->length ();
108 if (length > i)
110 gcc_checking_assert (base == array.heap->address ());
111 base[i] = x;
112 return base;
114 else
116 gcc_checking_assert (i == length);
117 vec_safe_push (array.heap, x);
118 return array.heap->address ();
122 /* Add the subrtxes of X to worklist ARRAY, starting at END. Return the
123 number of elements added to the worklist. */
125 template <typename T>
126 size_t
127 generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
128 value_type *base,
129 size_t end, rtx_type x)
131 enum rtx_code code = GET_CODE (x);
132 const char *format = GET_RTX_FORMAT (code);
133 size_t orig_end = end;
134 if (__builtin_expect (INSN_P (x), false))
136 /* Put the pattern at the top of the queue, since that's what
137 we're likely to want most. It also allows for the SEQUENCE
138 code below. */
139 for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
140 if (format[i] == 'e')
142 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
143 if (__builtin_expect (end < LOCAL_ELEMS, true))
144 base[end++] = subx;
145 else
146 base = add_single_to_queue (array, base, end++, subx);
149 else
150 for (int i = 0; format[i]; ++i)
151 if (format[i] == 'e')
153 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
154 if (__builtin_expect (end < LOCAL_ELEMS, true))
155 base[end++] = subx;
156 else
157 base = add_single_to_queue (array, base, end++, subx);
159 else if (format[i] == 'E')
161 unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
162 rtx *vec = x->u.fld[i].rt_rtvec->elem;
163 if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
164 for (unsigned int j = 0; j < length; j++)
165 base[end++] = T::get_value (vec[j]);
166 else
167 for (unsigned int j = 0; j < length; j++)
168 base = add_single_to_queue (array, base, end++,
169 T::get_value (vec[j]));
170 if (code == SEQUENCE && end == length)
171 /* If the subrtxes of the sequence fill the entire array then
172 we know that no other parts of a containing insn are queued.
173 The caller is therefore iterating over the sequence as a
174 PATTERN (...), so we also want the patterns of the
175 subinstructions. */
176 for (unsigned int j = 0; j < length; j++)
178 typename T::rtx_type x = T::get_rtx (base[j]);
179 if (INSN_P (x))
180 base[j] = T::get_value (PATTERN (x));
183 return end - orig_end;
186 template <typename T>
187 void
188 generic_subrtx_iterator <T>::free_array (array_type &array)
190 vec_free (array.heap);
193 template <typename T>
194 const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;
196 template class generic_subrtx_iterator <const_rtx_accessor>;
197 template class generic_subrtx_iterator <rtx_var_accessor>;
198 template class generic_subrtx_iterator <rtx_ptr_accessor>;
200 /* Return 1 if the value of X is unstable
201 (would be different at a different point in the program).
202 The frame pointer, arg pointer, etc. are considered stable
203 (within one function) and so is anything marked `unchanging'. */
206 rtx_unstable_p (const_rtx x)
208 const RTX_CODE code = GET_CODE (x);
209 int i;
210 const char *fmt;
212 switch (code)
214 case MEM:
215 return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
217 case CONST:
218 CASE_CONST_ANY:
219 case SYMBOL_REF:
220 case LABEL_REF:
221 return 0;
223 case REG:
224 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
225 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
226 /* The arg pointer varies if it is not a fixed register. */
227 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
228 return 0;
229 /* ??? When call-clobbered, the value is stable modulo the restore
230 that must happen after a call. This currently screws up local-alloc
231 into believing that the restore is not needed. */
232 if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
233 return 0;
234 return 1;
236 case ASM_OPERANDS:
237 if (MEM_VOLATILE_P (x))
238 return 1;
240 /* Fall through. */
242 default:
243 break;
246 fmt = GET_RTX_FORMAT (code);
247 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
248 if (fmt[i] == 'e')
250 if (rtx_unstable_p (XEXP (x, i)))
251 return 1;
253 else if (fmt[i] == 'E')
255 int j;
256 for (j = 0; j < XVECLEN (x, i); j++)
257 if (rtx_unstable_p (XVECEXP (x, i, j)))
258 return 1;
261 return 0;
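/* Usage sketch (illustrative only, not part of this file's interface):
   a pass that wants to evaluate a memory reference at a different point
   in the function can use rtx_unstable_p to reject addresses whose
   value might differ there.  MEM is an assumed local:

     rtx addr = XEXP (mem, 0);
     if (rtx_unstable_p (addr))
       return false;

   Constants and the frame pointer are considered stable; ordinary
   registers and writable MEMs are not.  */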
264 /* Return 1 if X has a value that can vary even between two
265 executions of the program. 0 means X can be compared reliably
266 against certain constants or near-constants.
267 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
268 zero, we are slightly more conservative.
269 The frame pointer and the arg pointer are considered constant. */
271 bool
272 rtx_varies_p (const_rtx x, bool for_alias)
274 RTX_CODE code;
275 int i;
276 const char *fmt;
278 if (!x)
279 return 0;
281 code = GET_CODE (x);
282 switch (code)
284 case MEM:
285 return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
287 case CONST:
288 CASE_CONST_ANY:
289 case SYMBOL_REF:
290 case LABEL_REF:
291 return 0;
293 case REG:
294 /* Note that we have to test for the actual rtx used for the frame
295 and arg pointers and not just the register number in case we have
296 eliminated the frame and/or arg pointer and are using it
297 for pseudos. */
298 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
299 /* The arg pointer varies if it is not a fixed register. */
300 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
301 return 0;
302 if (x == pic_offset_table_rtx
303 /* ??? When call-clobbered, the value is stable modulo the restore
304 that must happen after a call. This currently screws up
305 local-alloc into believing that the restore is not needed, so we
306 must return 0 only if we are called from alias analysis. */
307 && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
308 return 0;
309 return 1;
311 case LO_SUM:
312 /* The operand 0 of a LO_SUM is considered constant
313 (in fact it is related specifically to operand 1)
314 during alias analysis. */
315 return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
316 || rtx_varies_p (XEXP (x, 1), for_alias);
318 case ASM_OPERANDS:
319 if (MEM_VOLATILE_P (x))
320 return 1;
322 /* Fall through. */
324 default:
325 break;
328 fmt = GET_RTX_FORMAT (code);
329 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
330 if (fmt[i] == 'e')
332 if (rtx_varies_p (XEXP (x, i), for_alias))
333 return 1;
335 else if (fmt[i] == 'E')
337 int j;
338 for (j = 0; j < XVECLEN (x, i); j++)
339 if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
340 return 1;
343 return 0;
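/* Usage sketch (illustrative only): alias analysis passes FOR_ALIAS as
   true so that LO_SUM bases and the PIC register are treated as stable:

     bool may_vary = rtx_varies_p (XEXP (mem, 0), true);

   MEM is assumed to be the memory reference under analysis.  */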
346 /* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
347 bytes can cause a trap. MODE is the mode of the MEM (not that of X) and
348 UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
349 references on strict alignment machines. */
351 static int
352 rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
353 enum machine_mode mode, bool unaligned_mems)
355 enum rtx_code code = GET_CODE (x);
357 /* The offset must be a multiple of the mode size if we are considering
358 unaligned memory references on strict alignment machines. */
359 if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
361 HOST_WIDE_INT actual_offset = offset;
363 #ifdef SPARC_STACK_BOUNDARY_HACK
364 /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
365 the real alignment of %sp. However, when it does this, the
366 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
367 if (SPARC_STACK_BOUNDARY_HACK
368 && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
369 actual_offset -= STACK_POINTER_OFFSET;
370 #endif
372 if (actual_offset % GET_MODE_SIZE (mode) != 0)
373 return 1;
376 switch (code)
378 case SYMBOL_REF:
379 if (SYMBOL_REF_WEAK (x))
380 return 1;
381 if (!CONSTANT_POOL_ADDRESS_P (x))
383 tree decl;
384 HOST_WIDE_INT decl_size;
386 if (offset < 0)
387 return 1;
388 if (size == 0)
389 size = GET_MODE_SIZE (mode);
390 if (size == 0)
391 return offset != 0;
393 /* If the size of the access or of the symbol is unknown,
394 assume the worst. */
395 decl = SYMBOL_REF_DECL (x);
397 /* Else check that the access is in bounds. TODO: restructure
398 expr_size/tree_expr_size/int_expr_size and just use the latter. */
399 if (!decl)
400 decl_size = -1;
401 else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
402 decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
403 ? tree_to_shwi (DECL_SIZE_UNIT (decl))
404 : -1);
405 else if (TREE_CODE (decl) == STRING_CST)
406 decl_size = TREE_STRING_LENGTH (decl);
407 else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
408 decl_size = int_size_in_bytes (TREE_TYPE (decl));
409 else
410 decl_size = -1;
412 return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
415 return 0;
417 case LABEL_REF:
418 return 0;
420 case REG:
421 /* Stack references are assumed not to trap, but we need to deal with
422 nonsensical offsets. */
423 if (x == frame_pointer_rtx)
425 HOST_WIDE_INT adj_offset = offset - STARTING_FRAME_OFFSET;
426 if (size == 0)
427 size = GET_MODE_SIZE (mode);
428 if (FRAME_GROWS_DOWNWARD)
430 if (adj_offset < frame_offset || adj_offset + size - 1 >= 0)
431 return 1;
433 else
435 if (adj_offset < 0 || adj_offset + size - 1 >= frame_offset)
436 return 1;
438 return 0;
440 /* ??? Need to add a similar guard for nonsensical offsets. */
441 if (x == hard_frame_pointer_rtx
442 || x == stack_pointer_rtx
443 /* The arg pointer varies if it is not a fixed register. */
444 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
445 return 0;
446 /* All of the virtual frame registers are stack references. */
447 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
448 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
449 return 0;
450 return 1;
452 case CONST:
453 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
454 mode, unaligned_mems);
456 case PLUS:
457 /* An address is assumed not to trap if:
458 - it is the pic register plus a constant. */
459 if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
460 return 0;
462 /* - or it is an address that can't trap plus a constant integer. */
463 if (CONST_INT_P (XEXP (x, 1))
464 && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
465 size, mode, unaligned_mems))
466 return 0;
468 return 1;
470 case LO_SUM:
471 case PRE_MODIFY:
472 return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
473 mode, unaligned_mems);
475 case PRE_DEC:
476 case PRE_INC:
477 case POST_DEC:
478 case POST_INC:
479 case POST_MODIFY:
480 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
481 mode, unaligned_mems);
483 default:
484 break;
487 /* If it isn't one of the case above, it can cause a trap. */
488 return 1;
491 /* Return nonzero if the use of X as an address in a MEM can cause a trap. */
494 rtx_addr_can_trap_p (const_rtx x)
496 return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
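/* Usage sketch (illustrative only): speculation checks ask whether
   dereferencing a MEM's address could fault.  X is an assumed local:

     if (MEM_P (x) && rtx_addr_can_trap_p (XEXP (x, 0)))
       return false;

   Stack, frame and constant-pool addresses are reported as safe;
   anything unrecognized is assumed to be able to trap.  */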
499 /* Return true if X is an address that is known to not be zero. */
501 bool
502 nonzero_address_p (const_rtx x)
504 const enum rtx_code code = GET_CODE (x);
506 switch (code)
508 case SYMBOL_REF:
509 return !SYMBOL_REF_WEAK (x);
511 case LABEL_REF:
512 return true;
514 case REG:
515 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
516 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
517 || x == stack_pointer_rtx
518 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
519 return true;
520 /* All of the virtual frame registers are stack references. */
521 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
522 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
523 return true;
524 return false;
526 case CONST:
527 return nonzero_address_p (XEXP (x, 0));
529 case PLUS:
530 /* Handle PIC references. */
531 if (XEXP (x, 0) == pic_offset_table_rtx
532 && CONSTANT_P (XEXP (x, 1)))
533 return true;
534 return false;
536 case PRE_MODIFY:
537 /* Similar to the above; allow positive offsets. Further, since
538 auto-inc is only allowed in memories, the register must be a
539 pointer. */
540 if (CONST_INT_P (XEXP (x, 1))
541 && INTVAL (XEXP (x, 1)) > 0)
542 return true;
543 return nonzero_address_p (XEXP (x, 0));
545 case PRE_INC:
546 /* Similarly. Further, the offset is always positive. */
547 return true;
549 case PRE_DEC:
550 case POST_DEC:
551 case POST_INC:
552 case POST_MODIFY:
553 return nonzero_address_p (XEXP (x, 0));
555 case LO_SUM:
556 return nonzero_address_p (XEXP (x, 1));
558 default:
559 break;
 562 /* If it isn't one of the cases above, it might be zero. */
563 return false;
566 /* Return 1 if X refers to a memory location whose address
567 cannot be compared reliably with constant addresses,
568 or if X refers to a BLKmode memory object.
569 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
570 zero, we are slightly more conservative. */
572 bool
573 rtx_addr_varies_p (const_rtx x, bool for_alias)
575 enum rtx_code code;
576 int i;
577 const char *fmt;
579 if (x == 0)
580 return 0;
582 code = GET_CODE (x);
583 if (code == MEM)
584 return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
586 fmt = GET_RTX_FORMAT (code);
587 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
588 if (fmt[i] == 'e')
590 if (rtx_addr_varies_p (XEXP (x, i), for_alias))
591 return 1;
593 else if (fmt[i] == 'E')
595 int j;
596 for (j = 0; j < XVECLEN (x, i); j++)
597 if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
598 return 1;
600 return 0;
603 /* Return the CALL in X if there is one. */
606 get_call_rtx_from (rtx x)
608 if (INSN_P (x))
609 x = PATTERN (x);
610 if (GET_CODE (x) == PARALLEL)
611 x = XVECEXP (x, 0, 0);
612 if (GET_CODE (x) == SET)
613 x = SET_SRC (x);
614 if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
615 return x;
616 return NULL_RTX;
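/* Usage sketch (illustrative only): to find the symbol a CALL_INSN
   calls, if any, unwrap the CALL's MEM operand.  INSN is an assumed
   insn and handle_named_call a hypothetical caller-provided function:

     rtx call = get_call_rtx_from (insn);
     if (call)
       {
         rtx addr = XEXP (XEXP (call, 0), 0);
         if (GET_CODE (addr) == SYMBOL_REF)
           handle_named_call (addr);
       }
  */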
619 /* Return the value of the integer term in X, if one is apparent;
620 otherwise return 0.
621 Only obvious integer terms are detected.
622 This is used in cse.c with the `related_value' field. */
624 HOST_WIDE_INT
625 get_integer_term (const_rtx x)
627 if (GET_CODE (x) == CONST)
628 x = XEXP (x, 0);
630 if (GET_CODE (x) == MINUS
631 && CONST_INT_P (XEXP (x, 1)))
632 return - INTVAL (XEXP (x, 1));
633 if (GET_CODE (x) == PLUS
634 && CONST_INT_P (XEXP (x, 1)))
635 return INTVAL (XEXP (x, 1));
636 return 0;
639 /* If X is a constant, return the value sans apparent integer term;
640 otherwise return 0.
641 Only obvious integer terms are detected. */
644 get_related_value (const_rtx x)
646 if (GET_CODE (x) != CONST)
647 return 0;
648 x = XEXP (x, 0);
649 if (GET_CODE (x) == PLUS
650 && CONST_INT_P (XEXP (x, 1)))
651 return XEXP (x, 0);
652 else if (GET_CODE (x) == MINUS
653 && CONST_INT_P (XEXP (x, 1)))
654 return XEXP (x, 0);
655 return 0;
658 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
659 to somewhere in the same object or object_block as SYMBOL. */
661 bool
662 offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
664 tree decl;
666 if (GET_CODE (symbol) != SYMBOL_REF)
667 return false;
669 if (offset == 0)
670 return true;
672 if (offset > 0)
674 if (CONSTANT_POOL_ADDRESS_P (symbol)
675 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
676 return true;
678 decl = SYMBOL_REF_DECL (symbol);
679 if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
680 return true;
683 if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
684 && SYMBOL_REF_BLOCK (symbol)
685 && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
686 && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
687 < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
688 return true;
690 return false;
693 /* Split X into a base and a constant offset, storing them in *BASE_OUT
694 and *OFFSET_OUT respectively. */
696 void
697 split_const (rtx x, rtx *base_out, rtx *offset_out)
699 if (GET_CODE (x) == CONST)
701 x = XEXP (x, 0);
702 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
704 *base_out = XEXP (x, 0);
705 *offset_out = XEXP (x, 1);
706 return;
709 *base_out = x;
710 *offset_out = const0_rtx;
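/* Usage sketch (illustrative only): given an address such as
   (const (plus (symbol_ref "a") (const_int 8))), split_const recovers
   the two parts:

     rtx base, offset;
     split_const (x, &base, &offset);

   after which BASE is the SYMBOL_REF and INTVAL (offset) is 8; for a
   plain X the base is X itself and the offset is const0_rtx.  */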
713 /* Return the number of places FIND appears within X. If COUNT_DEST is
714 zero, we do not count occurrences inside the destination of a SET. */
717 count_occurrences (const_rtx x, const_rtx find, int count_dest)
719 int i, j;
720 enum rtx_code code;
721 const char *format_ptr;
722 int count;
724 if (x == find)
725 return 1;
727 code = GET_CODE (x);
729 switch (code)
731 case REG:
732 CASE_CONST_ANY:
733 case SYMBOL_REF:
734 case CODE_LABEL:
735 case PC:
736 case CC0:
737 return 0;
739 case EXPR_LIST:
740 count = count_occurrences (XEXP (x, 0), find, count_dest);
741 if (XEXP (x, 1))
742 count += count_occurrences (XEXP (x, 1), find, count_dest);
743 return count;
745 case MEM:
746 if (MEM_P (find) && rtx_equal_p (x, find))
747 return 1;
748 break;
750 case SET:
751 if (SET_DEST (x) == find && ! count_dest)
752 return count_occurrences (SET_SRC (x), find, count_dest);
753 break;
755 default:
756 break;
759 format_ptr = GET_RTX_FORMAT (code);
760 count = 0;
762 for (i = 0; i < GET_RTX_LENGTH (code); i++)
764 switch (*format_ptr++)
766 case 'e':
767 count += count_occurrences (XEXP (x, i), find, count_dest);
768 break;
770 case 'E':
771 for (j = 0; j < XVECLEN (x, i); j++)
772 count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
773 break;
776 return count;
780 /* Return TRUE if OP is a register or subreg of a register that
781 holds an unsigned quantity. Otherwise, return FALSE. */
783 bool
784 unsigned_reg_p (rtx op)
786 if (REG_P (op)
787 && REG_EXPR (op)
788 && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
789 return true;
791 if (GET_CODE (op) == SUBREG
792 && SUBREG_PROMOTED_SIGN (op))
793 return true;
795 return false;
799 /* Nonzero if register REG appears somewhere within IN.
800 Also works if REG is not a register; in this case it checks
801 for a subexpression of IN that is Lisp "equal" to REG. */
804 reg_mentioned_p (const_rtx reg, const_rtx in)
806 const char *fmt;
807 int i;
808 enum rtx_code code;
810 if (in == 0)
811 return 0;
813 if (reg == in)
814 return 1;
816 if (GET_CODE (in) == LABEL_REF)
817 return reg == LABEL_REF_LABEL (in);
819 code = GET_CODE (in);
821 switch (code)
823 /* Compare registers by number. */
824 case REG:
825 return REG_P (reg) && REGNO (in) == REGNO (reg);
827 /* These codes have no constituent expressions
828 and are unique. */
829 case SCRATCH:
830 case CC0:
831 case PC:
832 return 0;
834 CASE_CONST_ANY:
835 /* These are kept unique for a given value. */
836 return 0;
838 default:
839 break;
842 if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
843 return 1;
845 fmt = GET_RTX_FORMAT (code);
847 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
849 if (fmt[i] == 'E')
851 int j;
852 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
853 if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
854 return 1;
856 else if (fmt[i] == 'e'
857 && reg_mentioned_p (reg, XEXP (in, i)))
858 return 1;
860 return 0;
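/* Usage sketch (illustrative only): combine-style passes use this to
   check that a register they intend to substitute does not already
   occur in the target pattern:

     if (reg_mentioned_p (new_reg, PATTERN (insn)))
       give_up ();

   NEW_REG, INSN and give_up are assumed to belong to the caller.  */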
863 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
864 no CODE_LABEL insn. */
867 no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
869 rtx_insn *p;
870 if (beg == end)
871 return 0;
872 for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
873 if (LABEL_P (p))
874 return 0;
875 return 1;
878 /* Nonzero if register REG is used in an insn between
879 FROM_INSN and TO_INSN (exclusive of those two). */
882 reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
883 const rtx_insn *to_insn)
885 rtx_insn *insn;
887 if (from_insn == to_insn)
888 return 0;
890 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
891 if (NONDEBUG_INSN_P (insn)
892 && (reg_overlap_mentioned_p (reg, PATTERN (insn))
893 || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
894 return 1;
895 return 0;
898 /* Nonzero if the old value of X, a register, is referenced in BODY. If X
899 is entirely replaced by a new value and the only use is as a SET_DEST,
900 we do not consider it a reference. */
903 reg_referenced_p (const_rtx x, const_rtx body)
905 int i;
907 switch (GET_CODE (body))
909 case SET:
910 if (reg_overlap_mentioned_p (x, SET_SRC (body)))
911 return 1;
913 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
914 of a REG that occupies all of the REG, the insn references X if
915 it is mentioned in the destination. */
916 if (GET_CODE (SET_DEST (body)) != CC0
917 && GET_CODE (SET_DEST (body)) != PC
918 && !REG_P (SET_DEST (body))
919 && ! (GET_CODE (SET_DEST (body)) == SUBREG
920 && REG_P (SUBREG_REG (SET_DEST (body)))
921 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
922 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
923 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
924 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
925 && reg_overlap_mentioned_p (x, SET_DEST (body)))
926 return 1;
927 return 0;
929 case ASM_OPERANDS:
930 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
931 if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
932 return 1;
933 return 0;
935 case CALL:
936 case USE:
937 case IF_THEN_ELSE:
938 return reg_overlap_mentioned_p (x, body);
940 case TRAP_IF:
941 return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
943 case PREFETCH:
944 return reg_overlap_mentioned_p (x, XEXP (body, 0));
946 case UNSPEC:
947 case UNSPEC_VOLATILE:
948 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
949 if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
950 return 1;
951 return 0;
953 case PARALLEL:
954 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
955 if (reg_referenced_p (x, XVECEXP (body, 0, i)))
956 return 1;
957 return 0;
959 case CLOBBER:
960 if (MEM_P (XEXP (body, 0)))
961 if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
962 return 1;
963 return 0;
965 case COND_EXEC:
966 if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
967 return 1;
968 return reg_referenced_p (x, COND_EXEC_CODE (body));
970 default:
971 return 0;
975 /* Nonzero if register REG is set or clobbered in an insn between
976 FROM_INSN and TO_INSN (exclusive of those two). */
979 reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
980 const rtx_insn *to_insn)
982 const rtx_insn *insn;
984 if (from_insn == to_insn)
985 return 0;
987 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
988 if (INSN_P (insn) && reg_set_p (reg, insn))
989 return 1;
990 return 0;
993 /* Internals of reg_set_between_p. */
995 reg_set_p (const_rtx reg, const_rtx insn)
997 /* We can be passed an insn or part of one. If we are passed an insn,
998 check if a side-effect of the insn clobbers REG. */
999 if (INSN_P (insn)
1000 && (FIND_REG_INC_NOTE (insn, reg)
1001 || (CALL_P (insn)
1002 && ((REG_P (reg)
1003 && REGNO (reg) < FIRST_PSEUDO_REGISTER
1004 && overlaps_hard_reg_set_p (regs_invalidated_by_call,
1005 GET_MODE (reg), REGNO (reg)))
1006 || MEM_P (reg)
1007 || find_reg_fusage (insn, CLOBBER, reg)))))
1008 return 1;
1010 return set_of (reg, insn) != NULL_RTX;
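/* Usage sketch (illustrative only, simplified): to check that moving an
   insn that uses REG down past OTHER keeps REG's value, one can combine
   the two tests above.  REG, INSN, OTHER and SAFE_TO_MOVE are assumed
   locals of the caller, and real passes also check uses and memory:

     if (reg_set_p (reg, other)
         || reg_set_between_p (reg, insn, other))
       safe_to_move = false;
  */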
1013 /* Similar to reg_set_between_p, but check all registers in X. Return 0
1014 only if none of them are modified between START and END. Return 1 if
1015 X contains a MEM; this routine does use memory aliasing. */
1018 modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
1020 const enum rtx_code code = GET_CODE (x);
1021 const char *fmt;
1022 int i, j;
1023 rtx_insn *insn;
1025 if (start == end)
1026 return 0;
1028 switch (code)
1030 CASE_CONST_ANY:
1031 case CONST:
1032 case SYMBOL_REF:
1033 case LABEL_REF:
1034 return 0;
1036 case PC:
1037 case CC0:
1038 return 1;
1040 case MEM:
1041 if (modified_between_p (XEXP (x, 0), start, end))
1042 return 1;
1043 if (MEM_READONLY_P (x))
1044 return 0;
1045 for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
1046 if (memory_modified_in_insn_p (x, insn))
1047 return 1;
1048 return 0;
1049 break;
1051 case REG:
1052 return reg_set_between_p (x, start, end);
1054 default:
1055 break;
1058 fmt = GET_RTX_FORMAT (code);
1059 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1061 if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
1062 return 1;
1064 else if (fmt[i] == 'E')
1065 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1066 if (modified_between_p (XVECEXP (x, i, j), start, end))
1067 return 1;
1070 return 0;
1073 /* Similar to reg_set_p, but check all registers in X. Return 0 only if none
1074 of them are modified in INSN. Return 1 if X contains a MEM; this routine
1075 does use memory aliasing. */
1078 modified_in_p (const_rtx x, const_rtx insn)
1080 const enum rtx_code code = GET_CODE (x);
1081 const char *fmt;
1082 int i, j;
1084 switch (code)
1086 CASE_CONST_ANY:
1087 case CONST:
1088 case SYMBOL_REF:
1089 case LABEL_REF:
1090 return 0;
1092 case PC:
1093 case CC0:
1094 return 1;
1096 case MEM:
1097 if (modified_in_p (XEXP (x, 0), insn))
1098 return 1;
1099 if (MEM_READONLY_P (x))
1100 return 0;
1101 if (memory_modified_in_insn_p (x, insn))
1102 return 1;
1103 return 0;
1104 break;
1106 case REG:
1107 return reg_set_p (x, insn);
1109 default:
1110 break;
1113 fmt = GET_RTX_FORMAT (code);
1114 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1116 if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
1117 return 1;
1119 else if (fmt[i] == 'E')
1120 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1121 if (modified_in_p (XVECEXP (x, i, j), insn))
1122 return 1;
1125 return 0;
1128 /* Helper function for set_of. */
1129 struct set_of_data
1131 const_rtx found;
1132 const_rtx pat;
1135 static void
1136 set_of_1 (rtx x, const_rtx pat, void *data1)
1138 struct set_of_data *const data = (struct set_of_data *) (data1);
1139 if (rtx_equal_p (x, data->pat)
1140 || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
1141 data->found = pat;
 1144 /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
1145 (either directly or via STRICT_LOW_PART and similar modifiers). */
1146 const_rtx
1147 set_of (const_rtx pat, const_rtx insn)
1149 struct set_of_data data;
1150 data.found = NULL_RTX;
1151 data.pat = pat;
1152 note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1153 return data.found;
 1156 /* Add all hard registers in X to *PSET. */
1157 void
1158 find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
1160 subrtx_iterator::array_type array;
1161 FOR_EACH_SUBRTX (iter, array, x, NONCONST)
1163 const_rtx x = *iter;
1164 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
1165 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1169 /* This function, called through note_stores, collects sets and
1170 clobbers of hard registers in a HARD_REG_SET, which is pointed to
1171 by DATA. */
1172 void
1173 record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
1175 HARD_REG_SET *pset = (HARD_REG_SET *)data;
1176 if (REG_P (x) && HARD_REGISTER_P (x))
1177 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1180 /* Examine INSN, and compute the set of hard registers written by it.
1181 Store it in *PSET. Should only be called after reload. */
1182 void
1183 find_all_hard_reg_sets (const_rtx insn, HARD_REG_SET *pset, bool implicit)
1185 rtx link;
1187 CLEAR_HARD_REG_SET (*pset);
1188 note_stores (PATTERN (insn), record_hard_reg_sets, pset);
1189 if (CALL_P (insn))
1191 if (implicit)
1192 IOR_HARD_REG_SET (*pset, call_used_reg_set);
1194 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1195 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1197 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1198 if (REG_NOTE_KIND (link) == REG_INC)
1199 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1202 /* Like record_hard_reg_sets, but called through note_uses. */
1203 void
1204 record_hard_reg_uses (rtx *px, void *data)
1206 find_all_hard_regs (*px, (HARD_REG_SET *) data);
1209 /* Given an INSN, return a SET expression if this insn has only a single SET.
 1210 It may also have CLOBBERs, USEs, or SETs whose output
1211 will not be used, which we ignore. */
1214 single_set_2 (const rtx_insn *insn, const_rtx pat)
1216 rtx set = NULL;
1217 int set_verified = 1;
1218 int i;
1220 if (GET_CODE (pat) == PARALLEL)
1222 for (i = 0; i < XVECLEN (pat, 0); i++)
1224 rtx sub = XVECEXP (pat, 0, i);
1225 switch (GET_CODE (sub))
1227 case USE:
1228 case CLOBBER:
1229 break;
1231 case SET:
1232 /* We can consider insns having multiple sets, where all
1233 but one are dead as single set insns. In common case
1234 only single set is present in the pattern so we want
1235 to avoid checking for REG_UNUSED notes unless necessary.
1237 When we reach set first time, we just expect this is
1238 the single set we are looking for and only when more
1239 sets are found in the insn, we check them. */
1240 if (!set_verified)
1242 if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1243 && !side_effects_p (set))
1244 set = NULL;
1245 else
1246 set_verified = 1;
1248 if (!set)
1249 set = sub, set_verified = 0;
1250 else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1251 || side_effects_p (sub))
1252 return NULL_RTX;
1253 break;
1255 default:
1256 return NULL_RTX;
1260 return set;
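/* Usage sketch (illustrative only): most callers reach this through the
   single_set wrapper declared in rtl.h and then look at the two halves
   of the SET.  record_value is a hypothetical caller-provided function:

     rtx set = single_set (insn);
     if (set && REG_P (SET_DEST (set)))
       record_value (SET_DEST (set), SET_SRC (set));
  */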
1263 /* Given an INSN, return nonzero if it has more than one SET, else return
1264 zero. */
1267 multiple_sets (const_rtx insn)
1269 int found;
1270 int i;
1272 /* INSN must be an insn. */
1273 if (! INSN_P (insn))
1274 return 0;
1276 /* Only a PARALLEL can have multiple SETs. */
1277 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1279 for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1280 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1282 /* If we have already found a SET, then return now. */
1283 if (found)
1284 return 1;
1285 else
1286 found = 1;
1290 /* Either zero or one SET. */
1291 return 0;
1294 /* Return nonzero if the destination of SET equals the source
1295 and there are no side effects. */
1298 set_noop_p (const_rtx set)
1300 rtx src = SET_SRC (set);
1301 rtx dst = SET_DEST (set);
1303 if (dst == pc_rtx && src == pc_rtx)
1304 return 1;
1306 if (MEM_P (dst) && MEM_P (src))
1307 return rtx_equal_p (dst, src) && !side_effects_p (dst);
1309 if (GET_CODE (dst) == ZERO_EXTRACT)
1310 return rtx_equal_p (XEXP (dst, 0), src)
1311 && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1312 && !side_effects_p (src);
1314 if (GET_CODE (dst) == STRICT_LOW_PART)
1315 dst = XEXP (dst, 0);
1317 if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1319 if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
1320 return 0;
1321 src = SUBREG_REG (src);
1322 dst = SUBREG_REG (dst);
 1325 /* It is a NOOP if the destination overlaps with the selected src vector
1326 elements. */
1327 if (GET_CODE (src) == VEC_SELECT
1328 && REG_P (XEXP (src, 0)) && REG_P (dst)
1329 && HARD_REGISTER_P (XEXP (src, 0))
1330 && HARD_REGISTER_P (dst))
1332 int i;
1333 rtx par = XEXP (src, 1);
1334 rtx src0 = XEXP (src, 0);
1335 int c0 = INTVAL (XVECEXP (par, 0, 0));
1336 HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
1338 for (i = 1; i < XVECLEN (par, 0); i++)
1339 if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
1340 return 0;
1341 return
1342 simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
1343 offset, GET_MODE (dst)) == (int) REGNO (dst);
1346 return (REG_P (src) && REG_P (dst)
1347 && REGNO (src) == REGNO (dst));
1350 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1351 value to itself. */
1354 noop_move_p (const_rtx insn)
1356 rtx pat = PATTERN (insn);
1358 if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1359 return 1;
1361 /* Insns carrying these notes are useful later on. */
1362 if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1363 return 0;
1365 /* Check the code to be executed for COND_EXEC. */
1366 if (GET_CODE (pat) == COND_EXEC)
1367 pat = COND_EXEC_CODE (pat);
1369 if (GET_CODE (pat) == SET && set_noop_p (pat))
1370 return 1;
1372 if (GET_CODE (pat) == PARALLEL)
1374 int i;
1375 /* If nothing but SETs of registers to themselves,
1376 this insn can also be deleted. */
1377 for (i = 0; i < XVECLEN (pat, 0); i++)
1379 rtx tem = XVECEXP (pat, 0, i);
1381 if (GET_CODE (tem) == USE
1382 || GET_CODE (tem) == CLOBBER)
1383 continue;
1385 if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1386 return 0;
1389 return 1;
1391 return 0;
1395 /* Return nonzero if register in range [REGNO, ENDREGNO)
1396 appears either explicitly or implicitly in X
1397 other than being stored into.
1399 References contained within the substructure at LOC do not count.
1400 LOC may be zero, meaning don't ignore anything. */
1403 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1404 rtx *loc)
1406 int i;
1407 unsigned int x_regno;
1408 RTX_CODE code;
1409 const char *fmt;
1411 repeat:
 1412 /* The contents of a REG_NONNEG note are always zero, so we must come here
1413 upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
1414 if (x == 0)
1415 return 0;
1417 code = GET_CODE (x);
1419 switch (code)
1421 case REG:
1422 x_regno = REGNO (x);
 1424 /* If we are modifying the stack, frame, or argument pointer, it will
1425 clobber a virtual register. In fact, we could be more precise,
1426 but it isn't worth it. */
1427 if ((x_regno == STACK_POINTER_REGNUM
1428 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1429 || x_regno == ARG_POINTER_REGNUM
1430 #endif
1431 || x_regno == FRAME_POINTER_REGNUM)
1432 && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1433 return 1;
1435 return endregno > x_regno && regno < END_REGNO (x);
1437 case SUBREG:
1438 /* If this is a SUBREG of a hard reg, we can see exactly which
1439 registers are being modified. Otherwise, handle normally. */
1440 if (REG_P (SUBREG_REG (x))
1441 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1443 unsigned int inner_regno = subreg_regno (x);
1444 unsigned int inner_endregno
1445 = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1446 ? subreg_nregs (x) : 1);
1448 return endregno > inner_regno && regno < inner_endregno;
1450 break;
1452 case CLOBBER:
1453 case SET:
1454 if (&SET_DEST (x) != loc
1455 /* Note setting a SUBREG counts as referring to the REG it is in for
1456 a pseudo but not for hard registers since we can
1457 treat each word individually. */
1458 && ((GET_CODE (SET_DEST (x)) == SUBREG
1459 && loc != &SUBREG_REG (SET_DEST (x))
1460 && REG_P (SUBREG_REG (SET_DEST (x)))
1461 && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1462 && refers_to_regno_p (regno, endregno,
1463 SUBREG_REG (SET_DEST (x)), loc))
1464 || (!REG_P (SET_DEST (x))
1465 && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1466 return 1;
1468 if (code == CLOBBER || loc == &SET_SRC (x))
1469 return 0;
1470 x = SET_SRC (x);
1471 goto repeat;
1473 default:
1474 break;
1477 /* X does not match, so try its subexpressions. */
1479 fmt = GET_RTX_FORMAT (code);
1480 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1482 if (fmt[i] == 'e' && loc != &XEXP (x, i))
1484 if (i == 0)
1486 x = XEXP (x, 0);
1487 goto repeat;
1489 else
1490 if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1491 return 1;
1493 else if (fmt[i] == 'E')
1495 int j;
1496 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1497 if (loc != &XVECEXP (x, i, j)
1498 && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1499 return 1;
1502 return 0;
1505 /* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
1506 we check if any register number in X conflicts with the relevant register
1507 numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
1508 contains a MEM (we don't bother checking for memory addresses that can't
 1509 conflict because we expect this to be a rare case). */
1512 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1514 unsigned int regno, endregno;
1516 /* If either argument is a constant, then modifying X can not
 1517 affect IN. Here we look at IN; we can profitably combine
1518 CONSTANT_P (x) with the switch statement below. */
1519 if (CONSTANT_P (in))
1520 return 0;
1522 recurse:
1523 switch (GET_CODE (x))
1525 case STRICT_LOW_PART:
1526 case ZERO_EXTRACT:
1527 case SIGN_EXTRACT:
1528 /* Overly conservative. */
1529 x = XEXP (x, 0);
1530 goto recurse;
1532 case SUBREG:
1533 regno = REGNO (SUBREG_REG (x));
1534 if (regno < FIRST_PSEUDO_REGISTER)
1535 regno = subreg_regno (x);
1536 endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1537 ? subreg_nregs (x) : 1);
1538 goto do_reg;
1540 case REG:
1541 regno = REGNO (x);
1542 endregno = END_REGNO (x);
1543 do_reg:
1544 return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1546 case MEM:
1548 const char *fmt;
1549 int i;
1551 if (MEM_P (in))
1552 return 1;
1554 fmt = GET_RTX_FORMAT (GET_CODE (in));
1555 for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1556 if (fmt[i] == 'e')
1558 if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1559 return 1;
1561 else if (fmt[i] == 'E')
1563 int j;
1564 for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1565 if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1566 return 1;
1569 return 0;
1572 case SCRATCH:
1573 case PC:
1574 case CC0:
1575 return reg_mentioned_p (x, in);
1577 case PARALLEL:
1579 int i;
1581 /* If any register in here refers to it we return true. */
1582 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1583 if (XEXP (XVECEXP (x, 0, i), 0) != 0
1584 && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1585 return 1;
1586 return 0;
1589 default:
1590 gcc_assert (CONSTANT_P (x));
1591 return 0;
1595 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1596 (X would be the pattern of an insn). DATA is an arbitrary pointer,
1597 ignored by note_stores, but passed to FUN.
1599 FUN receives three arguments:
1600 1. the REG, MEM, CC0 or PC being stored in or clobbered,
1601 2. the SET or CLOBBER rtx that does the store,
1602 3. the pointer DATA provided to note_stores.
1604 If the item being stored in or clobbered is a SUBREG of a hard register,
1605 the SUBREG will be passed. */
1607 void
1608 note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
1610 int i;
1612 if (GET_CODE (x) == COND_EXEC)
1613 x = COND_EXEC_CODE (x);
1615 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1617 rtx dest = SET_DEST (x);
1619 while ((GET_CODE (dest) == SUBREG
1620 && (!REG_P (SUBREG_REG (dest))
1621 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1622 || GET_CODE (dest) == ZERO_EXTRACT
1623 || GET_CODE (dest) == STRICT_LOW_PART)
1624 dest = XEXP (dest, 0);
1626 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1627 each of whose first operand is a register. */
1628 if (GET_CODE (dest) == PARALLEL)
1630 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1631 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1632 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1634 else
1635 (*fun) (dest, x, data);
1638 else if (GET_CODE (x) == PARALLEL)
1639 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1640 note_stores (XVECEXP (x, 0, i), fun, data);
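/* Usage sketch (illustrative only): a typical caller defines a small
   callback and hands note_stores the pattern of an insn.  The callback
   body, the mark_reg_stored helper and struct pass_data_t below are
   hypothetical:

     static void
     mark_stores (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
     {
       if (REG_P (x))
         mark_reg_stored (x, (struct pass_data_t *) data);
     }

     ...
     note_stores (PATTERN (insn), mark_stores, &my_data);
  */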
 1643 /* Like note_stores, but call FUN for each expression that is being
1644 referenced in PBODY, a pointer to the PATTERN of an insn. We only call
1645 FUN for each expression, not any interior subexpressions. FUN receives a
1646 pointer to the expression and the DATA passed to this function.
1648 Note that this is not quite the same test as that done in reg_referenced_p
1649 since that considers something as being referenced if it is being
1650 partially set, while we do not. */
1652 void
1653 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1655 rtx body = *pbody;
1656 int i;
1658 switch (GET_CODE (body))
1660 case COND_EXEC:
1661 (*fun) (&COND_EXEC_TEST (body), data);
1662 note_uses (&COND_EXEC_CODE (body), fun, data);
1663 return;
1665 case PARALLEL:
1666 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1667 note_uses (&XVECEXP (body, 0, i), fun, data);
1668 return;
1670 case SEQUENCE:
1671 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1672 note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1673 return;
1675 case USE:
1676 (*fun) (&XEXP (body, 0), data);
1677 return;
1679 case ASM_OPERANDS:
1680 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1681 (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
1682 return;
1684 case TRAP_IF:
1685 (*fun) (&TRAP_CONDITION (body), data);
1686 return;
1688 case PREFETCH:
1689 (*fun) (&XEXP (body, 0), data);
1690 return;
1692 case UNSPEC:
1693 case UNSPEC_VOLATILE:
1694 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1695 (*fun) (&XVECEXP (body, 0, i), data);
1696 return;
1698 case CLOBBER:
1699 if (MEM_P (XEXP (body, 0)))
1700 (*fun) (&XEXP (XEXP (body, 0), 0), data);
1701 return;
1703 case SET:
1705 rtx dest = SET_DEST (body);
1707 /* For sets we replace everything in source plus registers in memory
1708 expression in store and operands of a ZERO_EXTRACT. */
1709 (*fun) (&SET_SRC (body), data);
1711 if (GET_CODE (dest) == ZERO_EXTRACT)
1713 (*fun) (&XEXP (dest, 1), data);
1714 (*fun) (&XEXP (dest, 2), data);
1717 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
1718 dest = XEXP (dest, 0);
1720 if (MEM_P (dest))
1721 (*fun) (&XEXP (dest, 0), data);
1723 return;
1725 default:
1726 /* All the other possibilities never store. */
1727 (*fun) (pbody, data);
1728 return;
1732 /* Return nonzero if X's old contents don't survive after INSN.
1733 This will be true if X is (cc0) or if X is a register and
1734 X dies in INSN or because INSN entirely sets X.
1736 "Entirely set" means set directly and not through a SUBREG, or
1737 ZERO_EXTRACT, so no trace of the old contents remains.
1738 Likewise, REG_INC does not count.
1740 REG may be a hard or pseudo reg. Renumbering is not taken into account,
1741 but for this use that makes no difference, since regs don't overlap
1742 during their lifetimes. Therefore, this function may be used
1743 at any time after deaths have been computed.
1745 If REG is a hard reg that occupies multiple machine registers, this
1746 function will only return 1 if each of those registers will be replaced
1747 by INSN. */
1750 dead_or_set_p (const_rtx insn, const_rtx x)
1752 unsigned int regno, end_regno;
1753 unsigned int i;
1755 /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
1756 if (GET_CODE (x) == CC0)
1757 return 1;
1759 gcc_assert (REG_P (x));
1761 regno = REGNO (x);
1762 end_regno = END_REGNO (x);
1763 for (i = regno; i < end_regno; i++)
1764 if (! dead_or_set_regno_p (insn, i))
1765 return 0;
1767 return 1;
1770 /* Return TRUE iff DEST is a register or subreg of a register and
1771 doesn't change the number of words of the inner register, and any
1772 part of the register is TEST_REGNO. */
1774 static bool
1775 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
1777 unsigned int regno, endregno;
1779 if (GET_CODE (dest) == SUBREG
1780 && (((GET_MODE_SIZE (GET_MODE (dest))
1781 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
1782 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
1783 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
1784 dest = SUBREG_REG (dest);
1786 if (!REG_P (dest))
1787 return false;
1789 regno = REGNO (dest);
1790 endregno = END_REGNO (dest);
1791 return (test_regno >= regno && test_regno < endregno);
1794 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
1795 any member matches the covers_regno_no_parallel_p criteria. */
1797 static bool
1798 covers_regno_p (const_rtx dest, unsigned int test_regno)
1800 if (GET_CODE (dest) == PARALLEL)
1802 /* Some targets place small structures in registers for return
1803 values of functions, and those registers are wrapped in
1804 PARALLELs that we may see as the destination of a SET. */
1805 int i;
1807 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1809 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
1810 if (inner != NULL_RTX
1811 && covers_regno_no_parallel_p (inner, test_regno))
1812 return true;
1815 return false;
1817 else
1818 return covers_regno_no_parallel_p (dest, test_regno);
1821 /* Utility function for dead_or_set_p to check an individual register. */
1824 dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
1826 const_rtx pattern;
1828 /* See if there is a death note for something that includes TEST_REGNO. */
1829 if (find_regno_note (insn, REG_DEAD, test_regno))
1830 return 1;
1832 if (CALL_P (insn)
1833 && find_regno_fusage (insn, CLOBBER, test_regno))
1834 return 1;
1836 pattern = PATTERN (insn);
1838 /* If a COND_EXEC is not executed, the value survives. */
1839 if (GET_CODE (pattern) == COND_EXEC)
1840 return 0;
1842 if (GET_CODE (pattern) == SET)
1843 return covers_regno_p (SET_DEST (pattern), test_regno);
1844 else if (GET_CODE (pattern) == PARALLEL)
1846 int i;
1848 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
1850 rtx body = XVECEXP (pattern, 0, i);
1852 if (GET_CODE (body) == COND_EXEC)
1853 body = COND_EXEC_CODE (body);
1855 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
1856 && covers_regno_p (SET_DEST (body), test_regno))
1857 return 1;
1861 return 0;
1864 /* Return the reg-note of kind KIND in insn INSN, if there is one.
1865 If DATUM is nonzero, look for one whose datum is DATUM. */
1868 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
1870 rtx link;
1872 gcc_checking_assert (insn);
1874 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1875 if (! INSN_P (insn))
1876 return 0;
1877 if (datum == 0)
1879 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1880 if (REG_NOTE_KIND (link) == kind)
1881 return link;
1882 return 0;
1885 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1886 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
1887 return link;
1888 return 0;
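/* Usage sketch (illustrative only): a pass that wants the known value
   of an insn's destination can look for a REG_EQUAL note.
   use_equivalent_value is a hypothetical caller-provided function:

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note)
       use_equivalent_value (XEXP (note, 0));
  */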
1891 /* Return the reg-note of kind KIND in insn INSN which applies to register
1892 number REGNO, if any. Return 0 if there is no such reg-note. Note that
1893 the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
1894 it might be the case that the note overlaps REGNO. */
1897 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
1899 rtx link;
1901 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1902 if (! INSN_P (insn))
1903 return 0;
1905 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1906 if (REG_NOTE_KIND (link) == kind
1907 /* Verify that it is a register, so that scratch and MEM won't cause a
1908 problem here. */
1909 && REG_P (XEXP (link, 0))
1910 && REGNO (XEXP (link, 0)) <= regno
1911 && END_REGNO (XEXP (link, 0)) > regno)
1912 return link;
1913 return 0;
1916 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
1917 has such a note. */
1920 find_reg_equal_equiv_note (const_rtx insn)
1922 rtx link;
1924 if (!INSN_P (insn))
1925 return 0;
1927 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1928 if (REG_NOTE_KIND (link) == REG_EQUAL
1929 || REG_NOTE_KIND (link) == REG_EQUIV)
1931 /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
1932 insns that have multiple sets. Checking single_set to
1933 make sure of this is not the proper check, as explained
1934 in the comment in set_unique_reg_note.
1936 This should be changed into an assert. */
1937 if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
1938 return 0;
1939 return link;
1941 return NULL;
1944 /* Check whether INSN is a single_set whose source is known to be
1945 equivalent to a constant. Return that constant if so, otherwise
1946 return null. */
1949 find_constant_src (const rtx_insn *insn)
1951 rtx note, set, x;
1953 set = single_set (insn);
1954 if (set)
1956 x = avoid_constant_pool_reference (SET_SRC (set));
1957 if (CONSTANT_P (x))
1958 return x;
1961 note = find_reg_equal_equiv_note (insn);
1962 if (note && CONSTANT_P (XEXP (note, 0)))
1963 return XEXP (note, 0);
1965 return NULL_RTX;
1968 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
1969 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1972 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
1974 /* If it's not a CALL_INSN, it can't possibly have a
1975 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
1976 if (!CALL_P (insn))
1977 return 0;
1979 gcc_assert (datum);
1981 if (!REG_P (datum))
1983 rtx link;
1985 for (link = CALL_INSN_FUNCTION_USAGE (insn);
1986 link;
1987 link = XEXP (link, 1))
1988 if (GET_CODE (XEXP (link, 0)) == code
1989 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
1990 return 1;
1992 else
1994 unsigned int regno = REGNO (datum);
1996 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1997 to pseudo registers, so don't bother checking. */
1999 if (regno < FIRST_PSEUDO_REGISTER)
2001 unsigned int end_regno = END_HARD_REGNO (datum);
2002 unsigned int i;
2004 for (i = regno; i < end_regno; i++)
2005 if (find_regno_fusage (insn, code, i))
2006 return 1;
2010 return 0;
2013 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2014 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2017 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2019 rtx link;
2021 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2022 to pseudo registers, so don't bother checking. */
2024 if (regno >= FIRST_PSEUDO_REGISTER
2025 || !CALL_P (insn) )
2026 return 0;
2028 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2030 rtx op, reg;
2032 if (GET_CODE (op = XEXP (link, 0)) == code
2033 && REG_P (reg = XEXP (op, 0))
2034 && REGNO (reg) <= regno
2035 && END_HARD_REGNO (reg) > regno)
2036 return 1;
2039 return 0;
2043 /* Return true if KIND is an integer REG_NOTE. */
2045 static bool
2046 int_reg_note_p (enum reg_note kind)
2048 return kind == REG_BR_PROB;
2051 /* Allocate a register note with kind KIND and datum DATUM. LIST is
2052 stored as the pointer to the next register note. */
2055 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2057 rtx note;
2059 gcc_checking_assert (!int_reg_note_p (kind));
2060 switch (kind)
2062 case REG_CC_SETTER:
2063 case REG_CC_USER:
2064 case REG_LABEL_TARGET:
2065 case REG_LABEL_OPERAND:
2066 case REG_TM:
2067 /* These types of register notes use an INSN_LIST rather than an
2068 EXPR_LIST, so that copying is done right and dumps look
2069 better. */
2070 note = alloc_INSN_LIST (datum, list);
2071 PUT_REG_NOTE_KIND (note, kind);
2072 break;
2074 default:
2075 note = alloc_EXPR_LIST (kind, datum, list);
2076 break;
2079 return note;
2082 /* Add register note with kind KIND and datum DATUM to INSN. */
2084 void
2085 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2087 REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
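/* Usage sketch (illustrative only): after emitting an insn whose result
   is known to equal some expression VAL, a pass can record that fact
   for later consumers (REG_EQUAL notes specifically are usually added
   through set_unique_reg_note in emit-rtl.c).  INSN and VAL are assumed
   to belong to the caller:

     add_reg_note (insn, REG_EQUAL, copy_rtx (val));
  */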
2090 /* Add an integer register note with kind KIND and datum DATUM to INSN. */
2092 void
2093 add_int_reg_note (rtx insn, enum reg_note kind, int datum)
2095 gcc_checking_assert (int_reg_note_p (kind));
2096 REG_NOTES (insn) = gen_rtx_INT_LIST ((enum machine_mode) kind,
2097 datum, REG_NOTES (insn));
2100 /* Add a register note like NOTE to INSN. */
2102 void
2103 add_shallow_copy_of_reg_note (rtx insn, rtx note)
2105 if (GET_CODE (note) == INT_LIST)
2106 add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2107 else
2108 add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2111 /* Remove register note NOTE from the REG_NOTES of INSN. */
2113 void
2114 remove_note (rtx insn, const_rtx note)
2116 rtx link;
2118 if (note == NULL_RTX)
2119 return;
2121 if (REG_NOTES (insn) == note)
2122 REG_NOTES (insn) = XEXP (note, 1);
2123 else
2124 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2125 if (XEXP (link, 1) == note)
2127 XEXP (link, 1) = XEXP (note, 1);
2128 break;
2131 switch (REG_NOTE_KIND (note))
2133 case REG_EQUAL:
2134 case REG_EQUIV:
2135 df_notes_rescan (as_a <rtx_insn *> (insn));
2136 break;
2137 default:
2138 break;
2142 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes. */
2144 void
2145 remove_reg_equal_equiv_notes (rtx insn)
2147 rtx *loc;
2149 loc = &REG_NOTES (insn);
2150 while (*loc)
2152 enum reg_note kind = REG_NOTE_KIND (*loc);
2153 if (kind == REG_EQUAL || kind == REG_EQUIV)
2154 *loc = XEXP (*loc, 1);
2155 else
2156 loc = &XEXP (*loc, 1);
2160 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO. */
2162 void
2163 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2165 df_ref eq_use;
2167 if (!df)
2168 return;
2170 /* This loop is a little tricky. We cannot just go down the chain because
2171 it is being modified by some actions in the loop. So we just iterate
2172 over the head. We plan to drain the list anyway. */
2173 while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2175 rtx_insn *insn = DF_REF_INSN (eq_use);
2176 rtx note = find_reg_equal_equiv_note (insn);
2178 /* This assert is generally triggered when someone deletes a REG_EQUAL
2179 or REG_EQUIV note by hacking the list manually rather than calling
2180 remove_note. */
2181 gcc_assert (note);
2183 remove_note (insn, note);
2187 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2188 return 1 if it is found. A simple equality test is used to determine if
2189 NODE matches. */
2192 in_expr_list_p (const_rtx listp, const_rtx node)
2194 const_rtx x;
2196 for (x = listp; x; x = XEXP (x, 1))
2197 if (node == XEXP (x, 0))
2198 return 1;
2200 return 0;
2203 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2204 remove that entry from the list if it is found.
2206 A simple equality test is used to determine if NODE matches. */
2208 void
2209 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2211 rtx_expr_list *temp = *listp;
2212 rtx prev = NULL_RTX;
2214 while (temp)
2216 if (node == temp->element ())
2218 /* Splice the node out of the list. */
2219 if (prev)
2220 XEXP (prev, 1) = temp->next ();
2221 else
2222 *listp = temp->next ();
2224 return;
2227 prev = temp;
2228 temp = temp->next ();
2232 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2233 remove that entry from the list if it is found.
2235 A simple equality test is used to determine if NODE matches. */
2237 void
2238 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2240 rtx_insn_list *temp = *listp;
2241 rtx prev = NULL;
2243 while (temp)
2245 if (node == temp->insn ())
2247 /* Splice the node out of the list. */
2248 if (prev)
2249 XEXP (prev, 1) = temp->next ();
2250 else
2251 *listp = temp->next ();
2253 return;
2256 prev = temp;
2257 temp = temp->next ();
2261 /* Nonzero if X contains any volatile instructions. These are instructions
2262 which may cause unpredictable machine state, and thus no
2263 instructions or register uses should be moved or combined across them.
2264 This includes only volatile asms and UNSPEC_VOLATILE instructions. */
2267 volatile_insn_p (const_rtx x)
2269 const RTX_CODE code = GET_CODE (x);
2270 switch (code)
2272 case LABEL_REF:
2273 case SYMBOL_REF:
2274 case CONST:
2275 CASE_CONST_ANY:
2276 case CC0:
2277 case PC:
2278 case REG:
2279 case SCRATCH:
2280 case CLOBBER:
2281 case ADDR_VEC:
2282 case ADDR_DIFF_VEC:
2283 case CALL:
2284 case MEM:
2285 return 0;
2287 case UNSPEC_VOLATILE:
2288 return 1;
2290 case ASM_INPUT:
2291 case ASM_OPERANDS:
2292 if (MEM_VOLATILE_P (x))
2293 return 1;
2295 default:
2296 break;
2299 /* Recursively scan the operands of this expression. */
2302 const char *const fmt = GET_RTX_FORMAT (code);
2303 int i;
2305 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2307 if (fmt[i] == 'e')
2309 if (volatile_insn_p (XEXP (x, i)))
2310 return 1;
2312 else if (fmt[i] == 'E')
2314 int j;
2315 for (j = 0; j < XVECLEN (x, i); j++)
2316 if (volatile_insn_p (XVECEXP (x, i, j)))
2317 return 1;
2321 return 0;
2324 /* Nonzero if X contains any volatile memory references,
2325 UNSPEC_VOLATILE operations, or volatile ASM_OPERANDS expressions. */
2328 volatile_refs_p (const_rtx x)
2330 const RTX_CODE code = GET_CODE (x);
2331 switch (code)
2333 case LABEL_REF:
2334 case SYMBOL_REF:
2335 case CONST:
2336 CASE_CONST_ANY:
2337 case CC0:
2338 case PC:
2339 case REG:
2340 case SCRATCH:
2341 case CLOBBER:
2342 case ADDR_VEC:
2343 case ADDR_DIFF_VEC:
2344 return 0;
2346 case UNSPEC_VOLATILE:
2347 return 1;
2349 case MEM:
2350 case ASM_INPUT:
2351 case ASM_OPERANDS:
2352 if (MEM_VOLATILE_P (x))
2353 return 1;
2355 default:
2356 break;
2359 /* Recursively scan the operands of this expression. */
2362 const char *const fmt = GET_RTX_FORMAT (code);
2363 int i;
2365 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2367 if (fmt[i] == 'e')
2369 if (volatile_refs_p (XEXP (x, i)))
2370 return 1;
2372 else if (fmt[i] == 'E')
2374 int j;
2375 for (j = 0; j < XVECLEN (x, i); j++)
2376 if (volatile_refs_p (XVECEXP (x, i, j)))
2377 return 1;
2381 return 0;
2384 /* Similar to above, except that it also rejects register pre- and post-
2385 incrementing. */
2388 side_effects_p (const_rtx x)
2390 const RTX_CODE code = GET_CODE (x);
2391 switch (code)
2393 case LABEL_REF:
2394 case SYMBOL_REF:
2395 case CONST:
2396 CASE_CONST_ANY:
2397 case CC0:
2398 case PC:
2399 case REG:
2400 case SCRATCH:
2401 case ADDR_VEC:
2402 case ADDR_DIFF_VEC:
2403 case VAR_LOCATION:
2404 return 0;
2406 case CLOBBER:
2407 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2408 when some combination can't be done. If we see one, don't think
2409 that we can simplify the expression. */
2410 return (GET_MODE (x) != VOIDmode);
2412 case PRE_INC:
2413 case PRE_DEC:
2414 case POST_INC:
2415 case POST_DEC:
2416 case PRE_MODIFY:
2417 case POST_MODIFY:
2418 case CALL:
2419 case UNSPEC_VOLATILE:
2420 return 1;
2422 case MEM:
2423 case ASM_INPUT:
2424 case ASM_OPERANDS:
2425 if (MEM_VOLATILE_P (x))
2426 return 1;
2428 default:
2429 break;
2432 /* Recursively scan the operands of this expression. */
2435 const char *fmt = GET_RTX_FORMAT (code);
2436 int i;
2438 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2440 if (fmt[i] == 'e')
2442 if (side_effects_p (XEXP (x, i)))
2443 return 1;
2445 else if (fmt[i] == 'E')
2447 int j;
2448 for (j = 0; j < XVECLEN (x, i); j++)
2449 if (side_effects_p (XVECEXP (x, i, j)))
2450 return 1;
2454 return 0;
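/* To see how the three predicates above differ, consider a hypothetical
   post-increment load built with a pseudo REG used as the address:

     rtx mem = gen_rtx_MEM (SImode, gen_rtx_POST_INC (Pmode, reg));

   As long as the MEM is not marked volatile, volatile_insn_p (mem) and
   volatile_refs_p (mem) both return 0, but side_effects_p (mem) returns
   1 because the POST_INC address modifies REG.  */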
2457 /* Return nonzero if evaluating rtx X might cause a trap.
2458 FLAGS controls how to consider MEMs. A nonzero value means the context
2459 of the access may have changed from the original, such that the
2460 address may have become invalid. */
2463 may_trap_p_1 (const_rtx x, unsigned flags)
2465 int i;
2466 enum rtx_code code;
2467 const char *fmt;
2469 /* We make no distinction currently, but this function is part of
2470 the internal target-hooks ABI so we keep the parameter as
2471 "unsigned flags". */
2472 bool code_changed = flags != 0;
2474 if (x == 0)
2475 return 0;
2476 code = GET_CODE (x);
2477 switch (code)
2479 /* Handle these cases quickly. */
2480 CASE_CONST_ANY:
2481 case SYMBOL_REF:
2482 case LABEL_REF:
2483 case CONST:
2484 case PC:
2485 case CC0:
2486 case REG:
2487 case SCRATCH:
2488 return 0;
2490 case UNSPEC:
2491 return targetm.unspec_may_trap_p (x, flags);
2493 case UNSPEC_VOLATILE:
2494 case ASM_INPUT:
2495 case TRAP_IF:
2496 return 1;
2498 case ASM_OPERANDS:
2499 return MEM_VOLATILE_P (x);
2501 /* Memory ref can trap unless it's a static var or a stack slot. */
2502 case MEM:
2503 /* Recognize specific pattern of stack checking probes. */
2504 if (flag_stack_check
2505 && MEM_VOLATILE_P (x)
2506 && XEXP (x, 0) == stack_pointer_rtx)
2507 return 1;
2508 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2509 reference; moving it out of context, such as when moving code
2510 during optimization, might cause its address to become invalid. */
2511 code_changed
2512 || !MEM_NOTRAP_P (x))
2514 HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
2515 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2516 GET_MODE (x), code_changed);
2519 return 0;
2521 /* Division by a non-constant might trap. */
2522 case DIV:
2523 case MOD:
2524 case UDIV:
2525 case UMOD:
2526 if (HONOR_SNANS (GET_MODE (x)))
2527 return 1;
2528 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2529 return flag_trapping_math;
2530 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2531 return 1;
2532 break;
2534 case EXPR_LIST:
2535 /* An EXPR_LIST is used to represent a function call. This
2536 certainly may trap. */
2537 return 1;
2539 case GE:
2540 case GT:
2541 case LE:
2542 case LT:
2543 case LTGT:
2544 case COMPARE:
2545 /* Some floating point comparisons may trap. */
2546 if (!flag_trapping_math)
2547 break;
2548 /* ??? There is no machine independent way to check for tests that trap
2549 when COMPARE is used, though many targets do make this distinction.
2550 For instance, sparc uses CCFPE for compares which generate exceptions
2551 and CCFP for compares which do not generate exceptions. */
2552 if (HONOR_NANS (GET_MODE (x)))
2553 return 1;
2554 /* But often the compare has some CC mode, so check operand
2555 modes as well. */
2556 if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
2557 || HONOR_NANS (GET_MODE (XEXP (x, 1))))
2558 return 1;
2559 break;
2561 case EQ:
2562 case NE:
2563 if (HONOR_SNANS (GET_MODE (x)))
2564 return 1;
2565 /* Often comparison is CC mode, so check operand modes. */
2566 if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
2567 || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
2568 return 1;
2569 break;
2571 case FIX:
2572 /* Conversion of floating point might trap. */
2573 if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
2574 return 1;
2575 break;
2577 case NEG:
2578 case ABS:
2579 case SUBREG:
2580 /* These operations don't trap even with floating point. */
2581 break;
2583 default:
2584 /* Any floating arithmetic may trap. */
2585 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2586 return 1;
2589 fmt = GET_RTX_FORMAT (code);
2590 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2592 if (fmt[i] == 'e')
2594 if (may_trap_p_1 (XEXP (x, i), flags))
2595 return 1;
2597 else if (fmt[i] == 'E')
2599 int j;
2600 for (j = 0; j < XVECLEN (x, i); j++)
2601 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2602 return 1;
2605 return 0;
2608 /* Return nonzero if evaluating rtx X might cause a trap. */
2611 may_trap_p (const_rtx x)
2613 return may_trap_p_1 (x, 0);
2616 /* Same as above, but additionally return nonzero if evaluating rtx X might
2617 cause a fault. We define a fault for the purpose of this function as an
2618 erroneous execution condition that cannot be encountered during the normal
2619 execution of a valid program; the typical example is an unaligned memory
2620 access on a strict alignment machine. The compiler guarantees that it
2621 doesn't generate code that will fault from a valid program, but this
2622 guarantee doesn't mean anything for individual instructions. Consider
2623 the following example:
2625 struct S { int d; union { char *cp; int *ip; }; };
2627 int foo(struct S *s)
2629 if (s->d == 1)
2630 return *s->ip;
2631 else
2632 return *s->cp;
2635 on a strict alignment machine. In a valid program, foo will never be
2636 invoked on a structure for which d is equal to 1 and the underlying
2637 unique field of the union is not aligned on a 4-byte boundary, but the
2638 expression *s->ip might cause a fault if considered individually.
2640 At the RTL level, potentially problematic expressions will almost always
2641 verify may_trap_p; for example, the above dereference can be emitted as
2642 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2643 However, suppose that foo is inlined in a caller that causes s->cp to
2644 point to a local character variable and guarantees that s->d is not set
2645 to 1; foo may have been effectively translated into pseudo-RTL as:
2647 if ((reg:SI) == 1)
2648 (set (reg:SI) (mem:SI (%fp - 7)))
2649 else
2650 (set (reg:QI) (mem:QI (%fp - 7)))
2652 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2653 memory reference to a stack slot, but it will certainly cause a fault
2654 on a strict alignment machine. */
2657 may_trap_or_fault_p (const_rtx x)
2659 return may_trap_p_1 (x, 1);
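/* Small worked examples, assuming a pseudo register REG of SImode:

     may_trap_p (gen_rtx_DIV (SImode, reg, const0_rtx))    returns 1
     may_trap_p (gen_rtx_DIV (SImode, reg, GEN_INT (4)))    returns 0
     may_trap_p (gen_rtx_PLUS (SImode, reg, reg))           returns 0

   The first divides by a constant zero, the second by a nonzero
   constant, and the third is plain integer arithmetic that cannot
   trap.  */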
2662 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2663 i.e., an inequality. */
2666 inequality_comparisons_p (const_rtx x)
2668 const char *fmt;
2669 int len, i;
2670 const enum rtx_code code = GET_CODE (x);
2672 switch (code)
2674 case REG:
2675 case SCRATCH:
2676 case PC:
2677 case CC0:
2678 CASE_CONST_ANY:
2679 case CONST:
2680 case LABEL_REF:
2681 case SYMBOL_REF:
2682 return 0;
2684 case LT:
2685 case LTU:
2686 case GT:
2687 case GTU:
2688 case LE:
2689 case LEU:
2690 case GE:
2691 case GEU:
2692 return 1;
2694 default:
2695 break;
2698 len = GET_RTX_LENGTH (code);
2699 fmt = GET_RTX_FORMAT (code);
2701 for (i = 0; i < len; i++)
2703 if (fmt[i] == 'e')
2705 if (inequality_comparisons_p (XEXP (x, i)))
2706 return 1;
2708 else if (fmt[i] == 'E')
2710 int j;
2711 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2712 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2713 return 1;
2717 return 0;
2720 /* Replace any occurrence of FROM in X with TO. The function does
2721 not descend into CONST_DOUBLE when doing the replacement.
2723 Note that copying is not done, so X must not be shared unless all copies
2724 are to be modified. */
2727 replace_rtx (rtx x, rtx from, rtx to)
2729 int i, j;
2730 const char *fmt;
2732 if (x == from)
2733 return to;
2735 /* Allow this function to make replacements in EXPR_LISTs. */
2736 if (x == 0)
2737 return 0;
2739 if (GET_CODE (x) == SUBREG)
2741 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);
2743 if (CONST_INT_P (new_rtx))
2745 x = simplify_subreg (GET_MODE (x), new_rtx,
2746 GET_MODE (SUBREG_REG (x)),
2747 SUBREG_BYTE (x));
2748 gcc_assert (x);
2750 else
2751 SUBREG_REG (x) = new_rtx;
2753 return x;
2755 else if (GET_CODE (x) == ZERO_EXTEND)
2757 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);
2759 if (CONST_INT_P (new_rtx))
2761 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2762 new_rtx, GET_MODE (XEXP (x, 0)));
2763 gcc_assert (x);
2765 else
2766 XEXP (x, 0) = new_rtx;
2768 return x;
2771 fmt = GET_RTX_FORMAT (GET_CODE (x));
2772 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2774 if (fmt[i] == 'e')
2775 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
2776 else if (fmt[i] == 'E')
2777 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2778 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
2781 return x;
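/* Because replace_rtx modifies X in place (except when X itself equals
   FROM), callers that need to preserve sharing typically copy first.
   A sketch with hypothetical rtxes PAT, OLD_REG and NEW_REG:

     rtx pat_copy = copy_rtx (pat);
     pat_copy = replace_rtx (pat_copy, old_reg, new_reg);

   The return value must be used: if PAT_COPY itself had been OLD_REG,
   the replacement would be visible only in the returned rtx.  */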
2784 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL. Also track
2785 the change in LABEL_NUSES if UPDATE_LABEL_NUSES. */
2787 void
2788 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
2790 /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long. */
2791 rtx x = *loc;
2792 if (JUMP_TABLE_DATA_P (x))
2794 x = PATTERN (x);
2795 rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
2796 int len = GET_NUM_ELEM (vec);
2797 for (int i = 0; i < len; ++i)
2799 rtx ref = RTVEC_ELT (vec, i);
2800 if (XEXP (ref, 0) == old_label)
2802 XEXP (ref, 0) = new_label;
2803 if (update_label_nuses)
2805 ++LABEL_NUSES (new_label);
2806 --LABEL_NUSES (old_label);
2810 return;
2813 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
2814 field. This is not handled by the iterator because it doesn't
2815 handle unprinted ('0') fields. */
2816 if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
2817 JUMP_LABEL (x) = new_label;
2819 subrtx_ptr_iterator::array_type array;
2820 FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
2822 rtx *loc = *iter;
2823 if (rtx x = *loc)
2825 if (GET_CODE (x) == SYMBOL_REF
2826 && CONSTANT_POOL_ADDRESS_P (x))
2828 rtx c = get_pool_constant (x);
2829 if (rtx_referenced_p (old_label, c))
2831 /* Create a copy of constant C; replace the label inside
2832 but do not update LABEL_NUSES because uses in constant pool
2833 are not counted. */
2834 rtx new_c = copy_rtx (c);
2835 replace_label (&new_c, old_label, new_label, false);
2837 /* Add the new constant NEW_C to the constant pool and replace
2838 the old reference to the constant with the new reference. */
2839 rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
2840 *loc = replace_rtx (x, x, XEXP (new_mem, 0));
2844 if ((GET_CODE (x) == LABEL_REF
2845 || GET_CODE (x) == INSN_LIST)
2846 && XEXP (x, 0) == old_label)
2848 XEXP (x, 0) = new_label;
2849 if (update_label_nuses)
2851 ++LABEL_NUSES (new_label);
2852 --LABEL_NUSES (old_label);
2859 void
2860 replace_label_in_insn (rtx_insn *insn, rtx old_label, rtx new_label,
2861 bool update_label_nuses)
2863 rtx insn_as_rtx = insn;
2864 replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
2865 gcc_checking_assert (insn_as_rtx == insn);
2868 /* Return true if X is referenced in BODY. */
2870 bool
2871 rtx_referenced_p (const_rtx x, const_rtx body)
2873 subrtx_iterator::array_type array;
2874 FOR_EACH_SUBRTX (iter, array, body, ALL)
2875 if (const_rtx y = *iter)
2877 /* Check if a label_ref Y refers to label X. */
2878 if (GET_CODE (y) == LABEL_REF
2879 && LABEL_P (x)
2880 && LABEL_REF_LABEL (y) == x)
2881 return true;
2883 if (rtx_equal_p (x, y))
2884 return true;
2886 /* If Y is a reference to a pool constant, traverse the constant. */
2887 if (GET_CODE (y) == SYMBOL_REF
2888 && CONSTANT_POOL_ADDRESS_P (y))
2889 iter.substitute (get_pool_constant (y));
2891 return false;
2894 /* If INSN is a tablejump return true and store the label (before jump table) to
2895 *LABELP and the jump table to *TABLEP. LABELP and TABLEP may be NULL. */
2897 bool
2898 tablejump_p (const rtx_insn *insn, rtx *labelp, rtx_jump_table_data **tablep)
2900 rtx label, table;
2902 if (!JUMP_P (insn))
2903 return false;
2905 label = JUMP_LABEL (insn);
2906 if (label != NULL_RTX && !ANY_RETURN_P (label)
2907 && (table = NEXT_INSN (as_a <rtx_insn *> (label))) != NULL_RTX
2908 && JUMP_TABLE_DATA_P (table))
2910 if (labelp)
2911 *labelp = label;
2912 if (tablep)
2913 *tablep = as_a <rtx_jump_table_data *> (table);
2914 return true;
2916 return false;
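/* A typical use, assuming a hypothetical jump insn INSN and a
   hypothetical helper handle_label: fetch the label that precedes the
   jump table together with the table itself, then walk its label
   vector much as label_is_jump_target_p does below:

     rtx label;
     rtx_jump_table_data *table;
     if (tablejump_p (insn, &label, &table))
       {
         rtvec vec = table->get_labels ();
         for (int i = 0; i < GET_NUM_ELEM (vec); i++)
           handle_label (XEXP (RTVEC_ELT (vec, i), 0));
       }  */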
2919 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
2920 constant that is not in the constant pool and not in the condition
2921 of an IF_THEN_ELSE. */
2923 static int
2924 computed_jump_p_1 (const_rtx x)
2926 const enum rtx_code code = GET_CODE (x);
2927 int i, j;
2928 const char *fmt;
2930 switch (code)
2932 case LABEL_REF:
2933 case PC:
2934 return 0;
2936 case CONST:
2937 CASE_CONST_ANY:
2938 case SYMBOL_REF:
2939 case REG:
2940 return 1;
2942 case MEM:
2943 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
2944 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
2946 case IF_THEN_ELSE:
2947 return (computed_jump_p_1 (XEXP (x, 1))
2948 || computed_jump_p_1 (XEXP (x, 2)));
2950 default:
2951 break;
2954 fmt = GET_RTX_FORMAT (code);
2955 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2957 if (fmt[i] == 'e'
2958 && computed_jump_p_1 (XEXP (x, i)))
2959 return 1;
2961 else if (fmt[i] == 'E')
2962 for (j = 0; j < XVECLEN (x, i); j++)
2963 if (computed_jump_p_1 (XVECEXP (x, i, j)))
2964 return 1;
2967 return 0;
2970 /* Return nonzero if INSN is an indirect jump (aka computed jump).
2972 Tablejumps and casesi insns are not considered indirect jumps;
2973 we can recognize them by a (use (label_ref)). */
2976 computed_jump_p (const_rtx insn)
2978 int i;
2979 if (JUMP_P (insn))
2981 rtx pat = PATTERN (insn);
2983 /* If we have a JUMP_LABEL set, we're not a computed jump. */
2984 if (JUMP_LABEL (insn) != NULL)
2985 return 0;
2987 if (GET_CODE (pat) == PARALLEL)
2989 int len = XVECLEN (pat, 0);
2990 int has_use_labelref = 0;
2992 for (i = len - 1; i >= 0; i--)
2993 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
2994 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
2995 == LABEL_REF))
2997 has_use_labelref = 1;
2998 break;
3001 if (! has_use_labelref)
3002 for (i = len - 1; i >= 0; i--)
3003 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3004 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3005 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3006 return 1;
3008 else if (GET_CODE (pat) == SET
3009 && SET_DEST (pat) == pc_rtx
3010 && computed_jump_p_1 (SET_SRC (pat)))
3011 return 1;
3013 return 0;
3016 /* Optimized loop of for_each_rtx, trying to avoid useless recursive
3017 calls. Processes the subexpressions of EXP and passes them to F. */
3018 static int
3019 for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
3021 int result, i, j;
3022 const char *format = GET_RTX_FORMAT (GET_CODE (exp));
3023 rtx *x;
3025 for (; format[n] != '\0'; n++)
3027 switch (format[n])
3029 case 'e':
3030 /* Call F on X. */
3031 x = &XEXP (exp, n);
3032 result = (*f) (x, data);
3033 if (result == -1)
3034 /* Do not traverse sub-expressions. */
3035 continue;
3036 else if (result != 0)
3037 /* Stop the traversal. */
3038 return result;
3040 if (*x == NULL_RTX)
3041 /* There are no sub-expressions. */
3042 continue;
3044 i = non_rtx_starting_operands[GET_CODE (*x)];
3045 if (i >= 0)
3047 result = for_each_rtx_1 (*x, i, f, data);
3048 if (result != 0)
3049 return result;
3051 break;
3053 case 'V':
3054 case 'E':
3055 if (XVEC (exp, n) == 0)
3056 continue;
3057 for (j = 0; j < XVECLEN (exp, n); ++j)
3059 /* Call F on X. */
3060 x = &XVECEXP (exp, n, j);
3061 result = (*f) (x, data);
3062 if (result == -1)
3063 /* Do not traverse sub-expressions. */
3064 continue;
3065 else if (result != 0)
3066 /* Stop the traversal. */
3067 return result;
3069 if (*x == NULL_RTX)
3070 /* There are no sub-expressions. */
3071 continue;
3073 i = non_rtx_starting_operands[GET_CODE (*x)];
3074 if (i >= 0)
3076 result = for_each_rtx_1 (*x, i, f, data);
3077 if (result != 0)
3078 return result;
3081 break;
3083 default:
3084 /* Nothing to do. */
3085 break;
3089 return 0;
3092 /* Traverse X via depth-first search, calling F for each
3093 sub-expression (including X itself). F is also passed the DATA.
3094 If F returns -1, do not traverse sub-expressions, but continue
3095 traversing the rest of the tree. If F ever returns any other
3096 nonzero value, stop the traversal, and return the value returned
3097 by F. Otherwise, return 0. This function does not traverse inside
3098 tree structure that contains RTX_EXPRs, or into sub-expressions
3099 whose format code is `0' since it is not known whether or not those
3100 codes are actually RTL.
3102 This routine is very general, and could (should?) be used to
3103 implement many of the other routines in this file. */
3106 for_each_rtx (rtx *x, rtx_function f, void *data)
3108 int result;
3109 int i;
3111 /* Call F on X. */
3112 result = (*f) (x, data);
3113 if (result == -1)
3114 /* Do not traverse sub-expressions. */
3115 return 0;
3116 else if (result != 0)
3117 /* Stop the traversal. */
3118 return result;
3120 if (*x == NULL_RTX)
3121 /* There are no sub-expressions. */
3122 return 0;
3124 i = non_rtx_starting_operands[GET_CODE (*x)];
3125 if (i < 0)
3126 return 0;
3128 return for_each_rtx_1 (*x, i, f, data);
3131 /* Like "for_each_rtx", but for calling on an rtx_insn **. */
3134 for_each_rtx_in_insn (rtx_insn **insn, rtx_function f, void *data)
3136 rtx insn_as_rtx = *insn;
3137 int result;
3139 result = for_each_rtx (&insn_as_rtx, f, data);
3141 if (insn_as_rtx != *insn)
3142 *insn = safe_as_a <rtx_insn *> (insn_as_rtx);
3144 return result;
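/* A sketch of an rtx_function callback for for_each_rtx, assuming the
   usual typedef int (*rtx_function) (rtx *, void *).  It counts MEMs
   and returns -1 so the walker does not descend into their addresses;
   PAT is a hypothetical pattern:

     static int
     count_mems_1 (rtx *x, void *data)
     {
       if (*x && MEM_P (*x))
         {
           ++*(int *) data;
           return -1;
         }
       return 0;
     }

     int n = 0;
     for_each_rtx (&pat, count_mems_1, &n);  */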
3149 /* MEM has a PRE/POST-INC/DEC/MODIFY address X. Extract the operands of
3150 the equivalent add insn and pass the result to FN, using DATA as the
3151 final argument. */
3153 static int
3154 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3156 rtx x = XEXP (mem, 0);
3157 switch (GET_CODE (x))
3159 case PRE_INC:
3160 case POST_INC:
3162 int size = GET_MODE_SIZE (GET_MODE (mem));
3163 rtx r1 = XEXP (x, 0);
3164 rtx c = gen_int_mode (size, GET_MODE (r1));
3165 return fn (mem, x, r1, r1, c, data);
3168 case PRE_DEC:
3169 case POST_DEC:
3171 int size = GET_MODE_SIZE (GET_MODE (mem));
3172 rtx r1 = XEXP (x, 0);
3173 rtx c = gen_int_mode (-size, GET_MODE (r1));
3174 return fn (mem, x, r1, r1, c, data);
3177 case PRE_MODIFY:
3178 case POST_MODIFY:
3180 rtx r1 = XEXP (x, 0);
3181 rtx add = XEXP (x, 1);
3182 return fn (mem, x, r1, add, NULL, data);
3185 default:
3186 gcc_unreachable ();
3190 /* Traverse *LOC looking for MEMs that have autoinc addresses.
3191 For each such autoinc operation found, call FN, passing it
3192 the innermost enclosing MEM, the operation itself, the RTX modified
3193 by the operation, two RTXs (the second may be NULL) that, once
3194 added, represent the value to be held by the modified RTX
3195 afterwards, and DATA. FN is to return 0 to continue the
3196 traversal or any other value to have it returned to the caller of
3197 for_each_inc_dec. */
3200 for_each_inc_dec (rtx x,
3201 for_each_inc_dec_fn fn,
3202 void *data)
3204 subrtx_var_iterator::array_type array;
3205 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3207 rtx mem = *iter;
3208 if (mem
3209 && MEM_P (mem)
3210 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3212 int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3213 if (res != 0)
3214 return res;
3215 iter.skip_subrtxes ();
3218 return 0;
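/* A sketch of a for_each_inc_dec callback, assuming the
   for_each_inc_dec_fn typedef of this era takes (rtx mem, rtx op,
   rtx dest, rtx src, rtx srcoff, void *arg).  For a
   (mem:SI (post_inc (reg R))) on a target where SImode is 4 bytes it
   is called with DEST and SRC both equal to R and SRCOFF equal to
   (const_int 4), i.e. the autoinc is presented as "R = R + 4"; for
   PRE_MODIFY/POST_MODIFY, SRC is the second operand of the MODIFY and
   SRCOFF is NULL.  RECORD_ADD is a hypothetical recording helper:

     static int
     note_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                   rtx dest, rtx src, rtx srcoff, void *arg)
     {
       record_add (arg, dest, src, srcoff);
       return 0;
     }  */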
3222 /* Searches X for any reference to REGNO, returning the rtx of the
3223 reference found if any. Otherwise, returns NULL_RTX. */
3226 regno_use_in (unsigned int regno, rtx x)
3228 const char *fmt;
3229 int i, j;
3230 rtx tem;
3232 if (REG_P (x) && REGNO (x) == regno)
3233 return x;
3235 fmt = GET_RTX_FORMAT (GET_CODE (x));
3236 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3238 if (fmt[i] == 'e')
3240 if ((tem = regno_use_in (regno, XEXP (x, i))))
3241 return tem;
3243 else if (fmt[i] == 'E')
3244 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3245 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3246 return tem;
3249 return NULL_RTX;
3252 /* Return a value indicating whether OP, an operand of a commutative
3253 operation, is preferred as the first or second operand. The higher
3254 the value, the stronger the preference for being the first operand.
3255 We use negative values to indicate a preference for the second operand
3256 and positive values for the first operand. */
3259 commutative_operand_precedence (rtx op)
3261 enum rtx_code code = GET_CODE (op);
3263 /* Constants always become the second operand. Prefer "nice" constants. */
3264 if (code == CONST_INT)
3265 return -8;
3266 if (code == CONST_WIDE_INT)
3267 return -8;
3268 if (code == CONST_DOUBLE)
3269 return -7;
3270 if (code == CONST_FIXED)
3271 return -7;
3272 op = avoid_constant_pool_reference (op);
3273 code = GET_CODE (op);
3275 switch (GET_RTX_CLASS (code))
3277 case RTX_CONST_OBJ:
3278 if (code == CONST_INT)
3279 return -6;
3280 if (code == CONST_WIDE_INT)
3281 return -6;
3282 if (code == CONST_DOUBLE)
3283 return -5;
3284 if (code == CONST_FIXED)
3285 return -5;
3286 return -4;
3288 case RTX_EXTRA:
3289 /* SUBREGs of objects should come second. */
3290 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3291 return -3;
3292 return 0;
3294 case RTX_OBJ:
3295 /* Complex expressions should come first, so decrease the priority
3296 of objects. Prefer pointer objects over non-pointer objects. */
3297 if ((REG_P (op) && REG_POINTER (op))
3298 || (MEM_P (op) && MEM_POINTER (op)))
3299 return -1;
3300 return -2;
3302 case RTX_COMM_ARITH:
3303 /* Prefer operands that are themselves commutative to be first.
3304 This helps to make things linear. In particular,
3305 (and (and (reg) (reg)) (not (reg))) is canonical. */
3306 return 4;
3308 case RTX_BIN_ARITH:
3309 /* If only one operand is a binary expression, it will be the first
3310 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3311 is canonical, although it will usually be further simplified. */
3312 return 2;
3314 case RTX_UNARY:
3315 /* Then prefer NEG and NOT. */
3316 if (code == NEG || code == NOT)
3317 return 1;
3319 default:
3320 return 0;
3324 /* Return 1 iff it is necessary to swap the operands of a commutative
3325 operation in order to canonicalize the expression. */
3327 bool
3328 swap_commutative_operands_p (rtx x, rtx y)
3330 return (commutative_operand_precedence (x)
3331 < commutative_operand_precedence (y));
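/* A concrete consequence of the precedence values above, assuming a
   non-pointer pseudo REG:

     swap_commutative_operands_p (GEN_INT (7), reg)   returns true
     swap_commutative_operands_p (reg, GEN_INT (7))   returns false

   CONST_INT scores -8 and such a REG scores -2, so canonicalization
   puts the constant second, e.g. (plus (reg) (const_int 7)).  */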
3334 /* Return 1 if X is an autoincrement side effect and the register is
3335 not the stack pointer. */
3337 auto_inc_p (const_rtx x)
3339 switch (GET_CODE (x))
3341 case PRE_INC:
3342 case POST_INC:
3343 case PRE_DEC:
3344 case POST_DEC:
3345 case PRE_MODIFY:
3346 case POST_MODIFY:
3347 /* There are no REG_INC notes for SP. */
3348 if (XEXP (x, 0) != stack_pointer_rtx)
3349 return 1;
3350 default:
3351 break;
3353 return 0;
3356 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3358 loc_mentioned_in_p (rtx *loc, const_rtx in)
3360 enum rtx_code code;
3361 const char *fmt;
3362 int i, j;
3364 if (!in)
3365 return 0;
3367 code = GET_CODE (in);
3368 fmt = GET_RTX_FORMAT (code);
3369 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3371 if (fmt[i] == 'e')
3373 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3374 return 1;
3376 else if (fmt[i] == 'E')
3377 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3378 if (loc == &XVECEXP (in, i, j)
3379 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3380 return 1;
3382 return 0;
3385 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3386 and SUBREG_BYTE, return the bit offset where the subreg begins
3387 (counting from the least significant bit of the operand). */
3389 unsigned int
3390 subreg_lsb_1 (enum machine_mode outer_mode,
3391 enum machine_mode inner_mode,
3392 unsigned int subreg_byte)
3394 unsigned int bitpos;
3395 unsigned int byte;
3396 unsigned int word;
3398 /* A paradoxical subreg begins at bit position 0. */
3399 if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
3400 return 0;
3402 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3403 /* If the subreg crosses a word boundary ensure that
3404 it also begins and ends on a word boundary. */
3405 gcc_assert (!((subreg_byte % UNITS_PER_WORD
3406 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3407 && (subreg_byte % UNITS_PER_WORD
3408 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3410 if (WORDS_BIG_ENDIAN)
3411 word = (GET_MODE_SIZE (inner_mode)
3412 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3413 else
3414 word = subreg_byte / UNITS_PER_WORD;
3415 bitpos = word * BITS_PER_WORD;
3417 if (BYTES_BIG_ENDIAN)
3418 byte = (GET_MODE_SIZE (inner_mode)
3419 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3420 else
3421 byte = subreg_byte % UNITS_PER_WORD;
3422 bitpos += byte * BITS_PER_UNIT;
3424 return bitpos;
3427 /* Given a subreg X, return the bit offset where the subreg begins
3428 (counting from the least significant bit of the reg). */
3430 unsigned int
3431 subreg_lsb (const_rtx x)
3433 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3434 SUBREG_BYTE (x));
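/* Two worked examples for subreg_lsb_1, assuming UNITS_PER_WORD == 4
   and BITS_PER_WORD == 32 (a typical 32-bit target):

     subreg_lsb_1 (SImode, DImode, 4)
       little endian:  word = 1, byte = 0  ->  bit offset 32
       big endian:     word = 0, byte = 0  ->  bit offset 0

   So (subreg:SI (reg:DI R) 4) names the most significant half of R on
   a little-endian target and the least significant half on a
   big-endian one.  */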
3437 /* Fill in information about a subreg of a hard register.
3438 xregno - A regno of an inner hard subreg_reg (or what will become one).
3439 xmode - The mode of xregno.
3440 offset - The byte offset.
3441 ymode - The mode of a top level SUBREG (or what may become one).
3442 info - Pointer to structure to fill in.
3444 Rather than considering one particular inner register (and thus one
3445 particular "outer" register) in isolation, this function really uses
3446 XREGNO as a model for a sequence of isomorphic hard registers. Thus the
3447 function does not check whether adding INFO->offset to XREGNO gives
3448 a valid hard register; even if INFO->offset + XREGNO is out of range,
3449 there might be another register of the same type that is in range.
3450 Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
3451 register, since that can depend on things like whether the final
3452 register number is even or odd. Callers that want to check whether
3453 this particular subreg can be replaced by a simple (reg ...) should
3454 use simplify_subreg_regno. */
3456 void
3457 subreg_get_info (unsigned int xregno, enum machine_mode xmode,
3458 unsigned int offset, enum machine_mode ymode,
3459 struct subreg_info *info)
3461 int nregs_xmode, nregs_ymode;
3462 int mode_multiple, nregs_multiple;
3463 int offset_adj, y_offset, y_offset_adj;
3464 int regsize_xmode, regsize_ymode;
3465 bool rknown;
3467 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3469 rknown = false;
3471 /* If there are holes in a non-scalar mode in registers, we expect
3472 that it is made up of its units concatenated together. */
3473 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3475 enum machine_mode xmode_unit;
3477 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3478 if (GET_MODE_INNER (xmode) == VOIDmode)
3479 xmode_unit = xmode;
3480 else
3481 xmode_unit = GET_MODE_INNER (xmode);
3482 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3483 gcc_assert (nregs_xmode
3484 == (GET_MODE_NUNITS (xmode)
3485 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3486 gcc_assert (hard_regno_nregs[xregno][xmode]
3487 == (hard_regno_nregs[xregno][xmode_unit]
3488 * GET_MODE_NUNITS (xmode)));
3490 /* You can only ask for a SUBREG of a value with holes in the middle
3491 if you don't cross the holes. (Such a SUBREG should be done by
3492 picking a different register class, or doing it in memory if
3493 necessary.) An example of a value with holes is XCmode on 32-bit
3494 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3495 3 for each part, but in memory it's two 128-bit parts.
3496 Padding is assumed to be at the end (not necessarily the 'high part')
3497 of each unit. */
3498 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3499 < GET_MODE_NUNITS (xmode))
3500 && (offset / GET_MODE_SIZE (xmode_unit)
3501 != ((offset + GET_MODE_SIZE (ymode) - 1)
3502 / GET_MODE_SIZE (xmode_unit))))
3504 info->representable_p = false;
3505 rknown = true;
3508 else
3509 nregs_xmode = hard_regno_nregs[xregno][xmode];
3511 nregs_ymode = hard_regno_nregs[xregno][ymode];
3513 /* Paradoxical subregs are otherwise valid. */
3514 if (!rknown
3515 && offset == 0
3516 && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
3518 info->representable_p = true;
3519 /* If this is a big endian paradoxical subreg, which uses more
3520 actual hard registers than the original register, we must
3521 return a negative offset so that we find the proper highpart
3522 of the register. */
3523 if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3524 ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
3525 info->offset = nregs_xmode - nregs_ymode;
3526 else
3527 info->offset = 0;
3528 info->nregs = nregs_ymode;
3529 return;
3532 /* If registers store different numbers of bits in the different
3533 modes, we cannot generally form this subreg. */
3534 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3535 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3536 && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
3537 && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
3539 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3540 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3541 if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
3543 info->representable_p = false;
3544 info->nregs
3545 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3546 info->offset = offset / regsize_xmode;
3547 return;
3549 if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
3551 info->representable_p = false;
3552 info->nregs
3553 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3554 info->offset = offset / regsize_xmode;
3555 return;
3559 /* Lowpart subregs are otherwise valid. */
3560 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3562 info->representable_p = true;
3563 rknown = true;
3565 if (offset == 0 || nregs_xmode == nregs_ymode)
3567 info->offset = 0;
3568 info->nregs = nregs_ymode;
3569 return;
3573 /* This should always pass, otherwise we don't know how to verify
3574 the constraint. These conditions may be relaxed but
3575 subreg_regno_offset would need to be redesigned. */
3576 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3577 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3579 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
3580 && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
3582 HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
3583 HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
3584 HOST_WIDE_INT off_low = offset & (ysize - 1);
3585 HOST_WIDE_INT off_high = offset & ~(ysize - 1);
3586 offset = (xsize - ysize - off_high) | off_low;
3588 /* The XMODE value can be seen as a vector of NREGS_XMODE
3589 values. The subreg must represent a lowpart of a given field.
3590 Compute which field it is. */
3591 offset_adj = offset;
3592 offset_adj -= subreg_lowpart_offset (ymode,
3593 mode_for_size (GET_MODE_BITSIZE (xmode)
3594 / nregs_xmode,
3595 MODE_INT, 0));
3597 /* Size of ymode must not be greater than the size of xmode. */
3598 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3599 gcc_assert (mode_multiple != 0);
3601 y_offset = offset / GET_MODE_SIZE (ymode);
3602 y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
3603 nregs_multiple = nregs_xmode / nregs_ymode;
3605 gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
3606 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3608 if (!rknown)
3610 info->representable_p = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
3611 rknown = true;
3613 info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
3614 info->nregs = nregs_ymode;
3617 /* This function returns the regno offset of a subreg expression.
3618 xregno - A regno of an inner hard subreg_reg (or what will become one).
3619 xmode - The mode of xregno.
3620 offset - The byte offset.
3621 ymode - The mode of a top level SUBREG (or what may become one).
3622 RETURN - The regno offset which would be used. */
3623 unsigned int
3624 subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
3625 unsigned int offset, enum machine_mode ymode)
3627 struct subreg_info info;
3628 subreg_get_info (xregno, xmode, offset, ymode, &info);
3629 return info.offset;
3632 /* This function returns true when the offset is representable via
3633 subreg_offset in the given regno.
3634 xregno - A regno of an inner hard subreg_reg (or what will become one).
3635 xmode - The mode of xregno.
3636 offset - The byte offset.
3637 ymode - The mode of a top level SUBREG (or what may become one).
3638 RETURN - Whether the offset is representable. */
3639 bool
3640 subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
3641 unsigned int offset, enum machine_mode ymode)
3643 struct subreg_info info;
3644 subreg_get_info (xregno, xmode, offset, ymode, &info);
3645 return info.representable_p;
3648 /* Return the number of a YMODE register to which
3650 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3652 can be simplified. Return -1 if the subreg can't be simplified.
3654 XREGNO is a hard register number. */
3657 simplify_subreg_regno (unsigned int xregno, enum machine_mode xmode,
3658 unsigned int offset, enum machine_mode ymode)
3660 struct subreg_info info;
3661 unsigned int yregno;
3663 #ifdef CANNOT_CHANGE_MODE_CLASS
3664 /* Give the backend a chance to disallow the mode change. */
3665 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3666 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3667 && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
3668 /* We can use mode change in LRA for some transformations. */
3669 && ! lra_in_progress)
3670 return -1;
3671 #endif
3673 /* We shouldn't simplify stack-related registers. */
3674 if ((!reload_completed || frame_pointer_needed)
3675 && xregno == FRAME_POINTER_REGNUM)
3676 return -1;
3678 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3679 && xregno == ARG_POINTER_REGNUM)
3680 return -1;
3682 if (xregno == STACK_POINTER_REGNUM
3683 /* We should convert the hard stack register in LRA if it is
3684 possible. */
3685 && ! lra_in_progress)
3686 return -1;
3688 /* Try to get the register offset. */
3689 subreg_get_info (xregno, xmode, offset, ymode, &info);
3690 if (!info.representable_p)
3691 return -1;
3693 /* Make sure that the offsetted register value is in range. */
3694 yregno = xregno + info.offset;
3695 if (!HARD_REGISTER_NUM_P (yregno))
3696 return -1;
3698 /* See whether (reg:YMODE YREGNO) is valid.
3700 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3701 This is a kludge to work around how complex FP arguments are passed
3702 on IA-64 and should be fixed. See PR target/49226. */
3703 if (!HARD_REGNO_MODE_OK (yregno, ymode)
3704 && HARD_REGNO_MODE_OK (xregno, xmode))
3705 return -1;
3707 return (int) yregno;
3710 /* Return the final regno that a subreg expression refers to. */
3711 unsigned int
3712 subreg_regno (const_rtx x)
3714 unsigned int ret;
3715 rtx subreg = SUBREG_REG (x);
3716 int regno = REGNO (subreg);
3718 ret = regno + subreg_regno_offset (regno,
3719 GET_MODE (subreg),
3720 SUBREG_BYTE (x),
3721 GET_MODE (x));
3722 return ret;
3726 /* Return the number of registers that a subreg expression refers
3727 to. */
3728 unsigned int
3729 subreg_nregs (const_rtx x)
3731 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3734 /* Return the number of registers that a subreg expression with register
3735 number REGNO refers to. This is a copy of subreg_nregs above, changed
3736 so that the regno can be passed in. */
3738 unsigned int
3739 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3741 struct subreg_info info;
3742 rtx subreg = SUBREG_REG (x);
3744 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3745 &info);
3746 return info.nregs;
3750 struct parms_set_data
3752 int nregs;
3753 HARD_REG_SET regs;
3756 /* Helper function for noticing stores to parameter registers. */
3757 static void
3758 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3760 struct parms_set_data *const d = (struct parms_set_data *) data;
3761 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3762 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3764 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3765 d->nregs--;
3769 /* Look backward for the first parameter to be loaded.
3770 Note that loads of all parameters will not necessarily be
3771 found if CSE has eliminated some of them (e.g., an argument
3772 to the outer function is passed down as a parameter).
3773 Do not skip BOUNDARY. */
3774 rtx_insn *
3775 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
3777 struct parms_set_data parm;
3778 rtx p;
3779 rtx_insn *before, *first_set;
3781 /* Since different machines initialize their parameter registers
3782 in different orders, assume nothing. Collect the set of all
3783 parameter registers. */
3784 CLEAR_HARD_REG_SET (parm.regs);
3785 parm.nregs = 0;
3786 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3787 if (GET_CODE (XEXP (p, 0)) == USE
3788 && REG_P (XEXP (XEXP (p, 0), 0)))
3790 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3792 /* We only care about registers which can hold function
3793 arguments. */
3794 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3795 continue;
3797 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3798 parm.nregs++;
3800 before = call_insn;
3801 first_set = call_insn;
3803 /* Search backward for the first set of a register in this set. */
3804 while (parm.nregs && before != boundary)
3806 before = PREV_INSN (before);
3808 /* It is possible that some loads got CSEed from one call to
3809 another. Stop in that case. */
3810 if (CALL_P (before))
3811 break;
3813 /* Our caller must either ensure that we will find all sets
3814 (in case the code has not been optimized yet), or take care
3815 of possible labels by setting BOUNDARY to the preceding
3816 CODE_LABEL. */
3817 if (LABEL_P (before))
3819 gcc_assert (before == boundary);
3820 break;
3823 if (INSN_P (before))
3825 int nregs_old = parm.nregs;
3826 note_stores (PATTERN (before), parms_set, &parm);
3827 /* If we found something that did not set a parameter reg,
3828 we're done. Do not keep going, as that might result
3829 in hoisting an insn before the setting of a pseudo
3830 that is used by the hoisted insn. */
3831 if (nregs_old != parm.nregs)
3832 first_set = before;
3833 else
3834 break;
3837 return first_set;
3840 /* Return true if we should avoid inserting code between INSN and preceding
3841 call instruction. */
3843 bool
3844 keep_with_call_p (const rtx_insn *insn)
3846 rtx set;
3848 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3850 if (REG_P (SET_DEST (set))
3851 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3852 && fixed_regs[REGNO (SET_DEST (set))]
3853 && general_operand (SET_SRC (set), VOIDmode))
3854 return true;
3855 if (REG_P (SET_SRC (set))
3856 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
3857 && REG_P (SET_DEST (set))
3858 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3859 return true;
3860 /* There may be a stack pop just after the call and before the store
3861 of the return register. Search for the actual store when deciding
3862 if we can break or not. */
3863 if (SET_DEST (set) == stack_pointer_rtx)
3865 /* This CONST_CAST is okay because next_nonnote_insn just
3866 returns its argument and we assign it to a const_rtx
3867 variable. */
3868 const rtx_insn *i2
3869 = next_nonnote_insn (const_cast<rtx_insn *> (insn));
3870 if (i2 && keep_with_call_p (i2))
3871 return true;
3874 return false;
3877 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3878 to non-complex jumps. That is, direct unconditional, conditional,
3879 and tablejumps, but not computed jumps or returns. It also does
3880 not apply to the fallthru case of a conditional jump. */
3882 bool
3883 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
3885 rtx tmp = JUMP_LABEL (jump_insn);
3886 rtx_jump_table_data *table;
3888 if (label == tmp)
3889 return true;
3891 if (tablejump_p (jump_insn, NULL, &table))
3893 rtvec vec = table->get_labels ();
3894 int i, veclen = GET_NUM_ELEM (vec);
3896 for (i = 0; i < veclen; ++i)
3897 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
3898 return true;
3901 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
3902 return true;
3904 return false;
3908 /* Return an estimate of the cost of computing rtx X.
3909 One use is in cse, to decide which expression to keep in the hash table.
3910 Another is in rtl generation, to pick the cheapest way to multiply.
3911 Other uses like the latter are expected in the future.
3913 X appears as operand OPNO in an expression with code OUTER_CODE.
3914 SPEED specifies whether costs optimized for speed or size should
3915 be returned. */
3918 rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
3920 int i, j;
3921 enum rtx_code code;
3922 const char *fmt;
3923 int total;
3924 int factor;
3926 if (x == 0)
3927 return 0;
3929 /* A size N times larger than UNITS_PER_WORD likely needs N times as
3930 many insns, taking N times as long. */
3931 factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
3932 if (factor == 0)
3933 factor = 1;
3935 /* Compute the default costs of certain things.
3936 Note that targetm.rtx_costs can override the defaults. */
3938 code = GET_CODE (x);
3939 switch (code)
3941 case MULT:
3942 /* Multiplication has time-complexity O(N*N), where N is the
3943 number of units (translated from digits) when using
3944 schoolbook long multiplication. */
3945 total = factor * factor * COSTS_N_INSNS (5);
3946 break;
3947 case DIV:
3948 case UDIV:
3949 case MOD:
3950 case UMOD:
3951 /* Similarly, complexity for schoolbook long division. */
3952 total = factor * factor * COSTS_N_INSNS (7);
3953 break;
3954 case USE:
3955 /* Used in combine.c as a marker. */
3956 total = 0;
3957 break;
3958 case SET:
3959 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
3960 the mode for the factor. */
3961 factor = GET_MODE_SIZE (GET_MODE (SET_DEST (x))) / UNITS_PER_WORD;
3962 if (factor == 0)
3963 factor = 1;
3964 /* Fall through. */
3965 default:
3966 total = factor * COSTS_N_INSNS (1);
3969 switch (code)
3971 case REG:
3972 return 0;
3974 case SUBREG:
3975 total = 0;
3976 /* If we can't tie these modes, make this expensive. The larger
3977 the mode, the more expensive it is. */
3978 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
3979 return COSTS_N_INSNS (2 + factor);
3980 break;
3982 default:
3983 if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
3984 return total;
3985 break;
3988 /* Sum the costs of the sub-rtx's, plus the cost of this operation,
3989 which is already in TOTAL. */
3991 fmt = GET_RTX_FORMAT (code);
3992 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3993 if (fmt[i] == 'e')
3994 total += rtx_cost (XEXP (x, i), code, i, speed);
3995 else if (fmt[i] == 'E')
3996 for (j = 0; j < XVECLEN (x, i); j++)
3997 total += rtx_cost (XVECEXP (x, i, j), code, i, speed);
3999 return total;
4002 /* Fill in the structure C with information about both speed and size rtx
4003 costs for X, which is operand OPNO in an expression with code OUTER. */
4005 void
4006 get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
4007 struct full_rtx_costs *c)
4009 c->speed = rtx_cost (x, outer, opno, true);
4010 c->size = rtx_cost (x, outer, opno, false);
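/* A minimal sketch of how a pass might use the combined costs,
   assuming a hypothetical rtx X that will become the source of a SET
   and a hypothetical BUDGET threshold:

     struct full_rtx_costs c;
     get_full_rtx_cost (x, SET, 1, &c);
     if ((optimize_insn_for_speed_p () ? c.speed : c.size) <= budget)
       ...use X...

   c.speed and c.size are simply the rtx_cost values computed with
   SPEED equal to true and false respectively.  */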
4014 /* Return the cost of address expression X.
4015 X is expected to be a properly formed address reference.
4017 The SPEED parameter specifies whether costs optimized for speed or size
4018 should be returned. */
4021 address_cost (rtx x, enum machine_mode mode, addr_space_t as, bool speed)
4023 /* We may be asked for the cost of various unusual addresses, such as the
4024 operands of a push instruction. It is not worthwhile to complicate the
4025 target hook with such cases. */
4027 if (!memory_address_addr_space_p (mode, x, as))
4028 return 1000;
4030 return targetm.address_cost (x, mode, as, speed);
4033 /* If the target doesn't override, compute the cost as with arithmetic. */
4036 default_address_cost (rtx x, enum machine_mode, addr_space_t, bool speed)
4038 return rtx_cost (x, MEM, 0, speed);
4042 unsigned HOST_WIDE_INT
4043 nonzero_bits (const_rtx x, enum machine_mode mode)
4045 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
4048 unsigned int
4049 num_sign_bit_copies (const_rtx x, enum machine_mode mode)
4051 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
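/* Small worked examples, assuming a pseudo REG whose bits are unknown:

     nonzero_bits (gen_rtx_AND (SImode, reg, GEN_INT (255)), SImode)
       returns 0xff, because the AND case below intersects the nonzero
       masks of both operands;

     num_sign_bit_copies (gen_rtx_SIGN_EXTEND (DImode, si_reg), DImode)
       returns at least 33 when DImode is 64 bits wide, because sign
       extension from a 32-bit SImode pseudo SI_REG replicates the sign
       bit through the upper 32 bits.  */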
4054 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4055 It avoids exponential behavior in nonzero_bits1 when X has
4056 identical subexpressions on the first or the second level. */
4058 static unsigned HOST_WIDE_INT
4059 cached_nonzero_bits (const_rtx x, enum machine_mode mode, const_rtx known_x,
4060 enum machine_mode known_mode,
4061 unsigned HOST_WIDE_INT known_ret)
4063 if (x == known_x && mode == known_mode)
4064 return known_ret;
4066 /* Try to find identical subexpressions. If found call
4067 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4068 precomputed value for the subexpression as KNOWN_RET. */
4070 if (ARITHMETIC_P (x))
4072 rtx x0 = XEXP (x, 0);
4073 rtx x1 = XEXP (x, 1);
4075 /* Check the first level. */
4076 if (x0 == x1)
4077 return nonzero_bits1 (x, mode, x0, mode,
4078 cached_nonzero_bits (x0, mode, known_x,
4079 known_mode, known_ret));
4081 /* Check the second level. */
4082 if (ARITHMETIC_P (x0)
4083 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4084 return nonzero_bits1 (x, mode, x1, mode,
4085 cached_nonzero_bits (x1, mode, known_x,
4086 known_mode, known_ret));
4088 if (ARITHMETIC_P (x1)
4089 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4090 return nonzero_bits1 (x, mode, x0, mode,
4091 cached_nonzero_bits (x0, mode, known_x,
4092 known_mode, known_ret));
4095 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4098 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4099 We don't let nonzero_bits recur into num_sign_bit_copies, because that
4100 is less useful. We can't allow both, because that results in exponential
4101 run time recursion. There is a nullstone testcase that triggered
4102 this. This macro avoids accidental uses of num_sign_bit_copies. */
4103 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4105 /* Given an expression, X, compute which bits in X can be nonzero.
4106 We don't care about bits outside of those defined in MODE.
4108 For most X this is simply GET_MODE_MASK (MODE), but if X is
4109 an arithmetic operation, we can do better. */
4111 static unsigned HOST_WIDE_INT
4112 nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
4113 enum machine_mode known_mode,
4114 unsigned HOST_WIDE_INT known_ret)
4116 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4117 unsigned HOST_WIDE_INT inner_nz;
4118 enum rtx_code code;
4119 enum machine_mode inner_mode;
4120 unsigned int mode_width = GET_MODE_PRECISION (mode);
4122 /* For floating-point and vector values, assume all bits are needed. */
4123 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
4124 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4125 return nonzero;
4127 /* If X is wider than MODE, use its mode instead. */
4128 if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
4130 mode = GET_MODE (x);
4131 nonzero = GET_MODE_MASK (mode);
4132 mode_width = GET_MODE_PRECISION (mode);
4135 if (mode_width > HOST_BITS_PER_WIDE_INT)
4136 /* Our only callers in this case look for single bit values. So
4137 just return the mode mask. Those tests will then be false. */
4138 return nonzero;
4140 #ifndef WORD_REGISTER_OPERATIONS
4141 /* If MODE is wider than X, but both are a single word for both the host
4142 and target machines, we can compute this from which bits of the
4143 object might be nonzero in its own mode, taking into account the fact
4144 that on many CISC machines, accessing an object in a wider mode
4145 causes the high-order bits to become undefined. So they are
4146 not known to be zero. */
4148 if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
4149 && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
4150 && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
4151 && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
4153 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
4154 known_x, known_mode, known_ret);
4155 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
4156 return nonzero;
4158 #endif
4160 code = GET_CODE (x);
4161 switch (code)
4163 case REG:
4164 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4165 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4166 all the bits above ptr_mode are known to be zero. */
4167 /* As we do not know which address space the pointer is referring to,
4168 we can do this only if the target does not support different pointer
4169 or address modes depending on the address space. */
4170 if (target_default_pointer_address_modes_p ()
4171 && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4172 && REG_POINTER (x))
4173 nonzero &= GET_MODE_MASK (ptr_mode);
4174 #endif
4176 /* Include declared information about alignment of pointers. */
4177 /* ??? We don't properly preserve REG_POINTER changes across
4178 pointer-to-integer casts, so we can't trust it except for
4179 things that we know must be pointers. See execute/960116-1.c. */
4180 if ((x == stack_pointer_rtx
4181 || x == frame_pointer_rtx
4182 || x == arg_pointer_rtx)
4183 && REGNO_POINTER_ALIGN (REGNO (x)))
4185 unsigned HOST_WIDE_INT alignment
4186 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4188 #ifdef PUSH_ROUNDING
4189 /* If PUSH_ROUNDING is defined, it is possible for the
4190 stack to be momentarily aligned only to that amount,
4191 so we pick the least alignment. */
4192 if (x == stack_pointer_rtx && PUSH_ARGS)
4193 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
4194 alignment);
4195 #endif
4197 nonzero &= ~(alignment - 1);
4201 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4202 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
4203 known_mode, known_ret,
4204 &nonzero_for_hook);
4206 if (new_rtx)
4207 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4208 known_mode, known_ret);
4210 return nonzero_for_hook;
4213 case CONST_INT:
4214 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
4215 /* If X is negative in MODE, sign-extend the value. */
4216 if (INTVAL (x) > 0
4217 && mode_width < BITS_PER_WORD
4218 && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
4219 != 0)
4220 return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4221 #endif
4223 return UINTVAL (x);
4225 case MEM:
4226 #ifdef LOAD_EXTEND_OP
4227 /* On many, if not most, RISC machines, reading a byte from memory
4228 zeros the rest of the register. Noticing that fact saves a lot
4229 of extra zero-extends. */
4230 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
4231 nonzero &= GET_MODE_MASK (GET_MODE (x));
4232 #endif
4233 break;
4235 case EQ: case NE:
4236 case UNEQ: case LTGT:
4237 case GT: case GTU: case UNGT:
4238 case LT: case LTU: case UNLT:
4239 case GE: case GEU: case UNGE:
4240 case LE: case LEU: case UNLE:
4241 case UNORDERED: case ORDERED:
4242 /* If this produces an integer result, we know which bits are set.
4243 Code here used to clear bits outside the mode of X, but that is
4244 now done above. */
4245 /* Mind that MODE is the mode the caller wants to look at this
4246 operation in, and not the actual operation mode. We can wind
4247 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4248 that describes the results of a vector compare. */
4249 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
4250 && mode_width <= HOST_BITS_PER_WIDE_INT)
4251 nonzero = STORE_FLAG_VALUE;
4252 break;
4254 case NEG:
4255 #if 0
4256 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4257 and num_sign_bit_copies. */
4258 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4259 == GET_MODE_PRECISION (GET_MODE (x)))
4260 nonzero = 1;
4261 #endif
4263 if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
4264 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
4265 break;
4267 case ABS:
4268 #if 0
4269 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4270 and num_sign_bit_copies. */
4271 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4272 == GET_MODE_PRECISION (GET_MODE (x)))
4273 nonzero = 1;
4274 #endif
4275 break;
4277 case TRUNCATE:
4278 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4279 known_x, known_mode, known_ret)
4280 & GET_MODE_MASK (mode));
4281 break;
4283 case ZERO_EXTEND:
4284 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4285 known_x, known_mode, known_ret);
4286 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4287 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4288 break;
4290 case SIGN_EXTEND:
4291 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4292 Otherwise, show all the bits in the outer mode but not the inner
4293 may be nonzero. */
4294 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4295 known_x, known_mode, known_ret);
4296 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4298 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4299 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4300 inner_nz |= (GET_MODE_MASK (mode)
4301 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4304 nonzero &= inner_nz;
4305 break;
4307 case AND:
4308 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4309 known_x, known_mode, known_ret)
4310 & cached_nonzero_bits (XEXP (x, 1), mode,
4311 known_x, known_mode, known_ret);
4312 break;
4314 case XOR: case IOR:
4315 case UMIN: case UMAX: case SMIN: case SMAX:
4317 unsigned HOST_WIDE_INT nonzero0
4318 = cached_nonzero_bits (XEXP (x, 0), mode,
4319 known_x, known_mode, known_ret);
4321 /* Don't call nonzero_bits for the second time if it cannot change
4322 anything. */
4323 if ((nonzero & nonzero0) != nonzero)
4324 nonzero &= nonzero0
4325 | cached_nonzero_bits (XEXP (x, 1), mode,
4326 known_x, known_mode, known_ret);
4328 break;
4330 case PLUS: case MINUS:
4331 case MULT:
4332 case DIV: case UDIV:
4333 case MOD: case UMOD:
4334 /* We can apply the rules of arithmetic to compute the number of
4335 high- and low-order zero bits of these operations. We start by
4336 computing the width (position of the highest-order nonzero bit)
4337 and the number of low-order zero bits for each value. */
4339 unsigned HOST_WIDE_INT nz0
4340 = cached_nonzero_bits (XEXP (x, 0), mode,
4341 known_x, known_mode, known_ret);
4342 unsigned HOST_WIDE_INT nz1
4343 = cached_nonzero_bits (XEXP (x, 1), mode,
4344 known_x, known_mode, known_ret);
4345 int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
4346 int width0 = floor_log2 (nz0) + 1;
4347 int width1 = floor_log2 (nz1) + 1;
4348 int low0 = floor_log2 (nz0 & -nz0);
4349 int low1 = floor_log2 (nz1 & -nz1);
4350 unsigned HOST_WIDE_INT op0_maybe_minusp
4351 = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4352 unsigned HOST_WIDE_INT op1_maybe_minusp
4353 = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4354 unsigned int result_width = mode_width;
4355 int result_low = 0;
4357 switch (code)
4359 case PLUS:
4360 result_width = MAX (width0, width1) + 1;
4361 result_low = MIN (low0, low1);
4362 break;
4363 case MINUS:
4364 result_low = MIN (low0, low1);
4365 break;
4366 case MULT:
4367 result_width = width0 + width1;
4368 result_low = low0 + low1;
4369 break;
4370 case DIV:
4371 if (width1 == 0)
4372 break;
4373 if (!op0_maybe_minusp && !op1_maybe_minusp)
4374 result_width = width0;
4375 break;
4376 case UDIV:
4377 if (width1 == 0)
4378 break;
4379 result_width = width0;
4380 break;
4381 case MOD:
4382 if (width1 == 0)
4383 break;
4384 if (!op0_maybe_minusp && !op1_maybe_minusp)
4385 result_width = MIN (width0, width1);
4386 result_low = MIN (low0, low1);
4387 break;
4388 case UMOD:
4389 if (width1 == 0)
4390 break;
4391 result_width = MIN (width0, width1);
4392 result_low = MIN (low0, low1);
4393 break;
4394 default:
4395 gcc_unreachable ();
4398 if (result_width < mode_width)
4399 nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;
4401 if (result_low > 0)
4402 nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
4404 break;
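/* Illustrative example (editorial note, not part of the original source):
   in SImode, suppose a PLUS has operands with
     nz0 = 0x000000f0  (width0 = 8, low0 = 4)
     nz1 = 0x00000030  (width1 = 6, low1 = 4).
   Then result_width = MAX (8, 6) + 1 = 9 and result_low = MIN (4, 4) = 4,
   so the two masking steps above reduce NONZERO to
   ((1 << 9) - 1) & ~((1 << 4) - 1) = 0x1f0: only bits 4..8 of the sum
   can be nonzero.  */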
4406 case ZERO_EXTRACT:
4407 if (CONST_INT_P (XEXP (x, 1))
4408 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4409 nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
4410 break;
4412 case SUBREG:
4413 /* If this is a SUBREG formed for a promoted variable that has
4414 been zero-extended, we know that at least the high-order bits
4415 are zero, though others might be too. */
4417 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
4418 nonzero = GET_MODE_MASK (GET_MODE (x))
4419 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
4420 known_x, known_mode, known_ret);
4422 inner_mode = GET_MODE (SUBREG_REG (x));
4423 /* If the inner mode is a single word for both the host and target
4424 machines, we can compute this from which bits of the inner
4425 object might be nonzero. */
4426 if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
4427 && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
4429 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4430 known_x, known_mode, known_ret);
4432 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
4433 /* If this is a typical RISC machine, we only have to worry
4434 about the way loads are extended. */
4435 if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
4436 ? val_signbit_known_set_p (inner_mode, nonzero)
4437 : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
4438 || !MEM_P (SUBREG_REG (x)))
4439 #endif
4441 /* On many CISC machines, accessing an object in a wider mode
4442 causes the high-order bits to become undefined. So they are
4443 not known to be zero. */
4444 if (GET_MODE_PRECISION (GET_MODE (x))
4445 > GET_MODE_PRECISION (inner_mode))
4446 nonzero |= (GET_MODE_MASK (GET_MODE (x))
4447 & ~GET_MODE_MASK (inner_mode));
4450 break;
4452 case ASHIFTRT:
4453 case LSHIFTRT:
4454 case ASHIFT:
4455 case ROTATE:
4456 /* The nonzero bits are in two classes: any bits within MODE
4457 that aren't in GET_MODE (x) are always significant. The rest of the
4458 nonzero bits are those that are significant in the operand of
4459 the shift when shifted the appropriate number of bits. This
4460 shows that high-order bits are cleared by the right shift and
4461 low-order bits by left shifts. */
4462 if (CONST_INT_P (XEXP (x, 1))
4463 && INTVAL (XEXP (x, 1)) >= 0
4464 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4465 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4467 enum machine_mode inner_mode = GET_MODE (x);
4468 unsigned int width = GET_MODE_PRECISION (inner_mode);
4469 int count = INTVAL (XEXP (x, 1));
4470 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
4471 unsigned HOST_WIDE_INT op_nonzero
4472 = cached_nonzero_bits (XEXP (x, 0), mode,
4473 known_x, known_mode, known_ret);
4474 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4475 unsigned HOST_WIDE_INT outer = 0;
4477 if (mode_width > width)
4478 outer = (op_nonzero & nonzero & ~mode_mask);
4480 if (code == LSHIFTRT)
4481 inner >>= count;
4482 else if (code == ASHIFTRT)
4484 inner >>= count;
4486 /* If the sign bit may have been nonzero before the shift, we
4487 need to mark all the places it could have been copied to
4488 by the shift as possibly nonzero. */
4489 if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
4490 inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
4491 << (width - count);
4493 else if (code == ASHIFT)
4494 inner <<= count;
4495 else
4496 inner = ((inner << (count % width)
4497 | (inner >> (width - (count % width)))) & mode_mask);
4499 nonzero &= (outer | inner);
4501 break;
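/* Illustrative example (editorial note, not part of the original source):
   for (lshiftrt:SI X (const_int 4)) with nonzero_bits (X) = 0x0000ff00,
   the code above computes inner = 0x0000ff00 >> 4 = 0x00000ff0 and
   outer = 0, so only bits 4..11 of the result can be nonzero.  An ASHIFT
   by 4 of the same operand gives inner = 0x000ff000 instead.  */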
4503 case FFS:
4504 case POPCOUNT:
4505 /* This is at most the number of bits in the mode. */
4506 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4507 break;
4509 case CLZ:
4510 /* If CLZ has a known value at zero, then the nonzero bits are
4511 that value, plus the number of bits in the mode minus one. */
4512 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4513 nonzero
4514 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4515 else
4516 nonzero = -1;
4517 break;
4519 case CTZ:
4520 /* If CTZ has a known value at zero, then the nonzero bits are
4521 that value, plus the number of bits in the mode minus one. */
4522 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4523 nonzero
4524 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4525 else
4526 nonzero = -1;
4527 break;
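/* Illustrative example (editorial note, not part of the original source):
   if CLZ_DEFINED_VALUE_AT_ZERO stores 32 as the value at zero for SImode
   (mode_width 32), the result of CLZ always lies in 0..32, and the mask
   computed above is 32 | ((1 << floor_log2 (32)) - 1) = 0x20 | 0x1f = 0x3f.
   The CTZ case is analogous.  */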
4529 case CLRSB:
4530 /* This is at most the number of bits in the mode minus 1. */
4531 nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4532 break;
4534 case PARITY:
4535 nonzero = 1;
4536 break;
4538 case IF_THEN_ELSE:
4540 unsigned HOST_WIDE_INT nonzero_true
4541 = cached_nonzero_bits (XEXP (x, 1), mode,
4542 known_x, known_mode, known_ret);
4544 /* Don't call nonzero_bits for the second time if it cannot change
4545 anything. */
4546 if ((nonzero & nonzero_true) != nonzero)
4547 nonzero &= nonzero_true
4548 | cached_nonzero_bits (XEXP (x, 2), mode,
4549 known_x, known_mode, known_ret);
4551 break;
4553 default:
4554 break;
4557 return nonzero;
4560 /* See the macro definition above. */
4561 #undef cached_num_sign_bit_copies
4564 /* The function cached_num_sign_bit_copies is a wrapper around
4565 num_sign_bit_copies1. It avoids exponential behavior in
4566 num_sign_bit_copies1 when X has identical subexpressions on the
4567 first or the second level. */
4569 static unsigned int
4570 cached_num_sign_bit_copies (const_rtx x, enum machine_mode mode, const_rtx known_x,
4571 enum machine_mode known_mode,
4572 unsigned int known_ret)
4574 if (x == known_x && mode == known_mode)
4575 return known_ret;
4577 /* Try to find identical subexpressions. If found call
4578 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4579 the precomputed value for the subexpression as KNOWN_RET. */
4581 if (ARITHMETIC_P (x))
4583 rtx x0 = XEXP (x, 0);
4584 rtx x1 = XEXP (x, 1);
4586 /* Check the first level. */
4587 if (x0 == x1)
4588 return
4589 num_sign_bit_copies1 (x, mode, x0, mode,
4590 cached_num_sign_bit_copies (x0, mode, known_x,
4591 known_mode,
4592 known_ret));
4594 /* Check the second level. */
4595 if (ARITHMETIC_P (x0)
4596 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4597 return
4598 num_sign_bit_copies1 (x, mode, x1, mode,
4599 cached_num_sign_bit_copies (x1, mode, known_x,
4600 known_mode,
4601 known_ret));
4603 if (ARITHMETIC_P (x1)
4604 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4605 return
4606 num_sign_bit_copies1 (x, mode, x0, mode,
4607 cached_num_sign_bit_copies (x0, mode, known_x,
4608 known_mode,
4609 known_ret));
4612 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
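/* Editorial note (not part of the original source): the sharing checks above
   matter for shapes such as (plus X (ashift X C)).  X is evaluated once, the
   result is passed down as KNOWN_X/KNOWN_RET, and num_sign_bit_copies1 reuses
   it for the second occurrence of X instead of recursing again, which is what
   keeps the walk from becoming exponential.  */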
4615 /* Return the number of bits at the high-order end of X that are known to
4616 be equal to the sign bit. X will be used in mode MODE; if MODE is
4617 VOIDmode, X will be used in its own mode. The returned value will always
4618 be between 1 and the number of bits in MODE. */
4620 static unsigned int
4621 num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
4622 enum machine_mode known_mode,
4623 unsigned int known_ret)
4625 enum rtx_code code = GET_CODE (x);
4626 unsigned int bitwidth = GET_MODE_PRECISION (mode);
4627 int num0, num1, result;
4628 unsigned HOST_WIDE_INT nonzero;
4630 /* If we weren't given a mode, use the mode of X. If the mode is still
4631 VOIDmode, we don't know anything. Likewise if one of the modes is
4632 floating-point. */
4634 if (mode == VOIDmode)
4635 mode = GET_MODE (x);
4637 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
4638 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4639 return 1;
4641 /* For a smaller object, just ignore the high bits. */
4642 if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
4644 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4645 known_x, known_mode, known_ret);
4646 return MAX (1,
4647 num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
4650 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
4652 #ifndef WORD_REGISTER_OPERATIONS
4653 /* If this machine does not do all register operations on the entire
4654 register and MODE is wider than the mode of X, we can say nothing
4655 at all about the high-order bits. */
4656 return 1;
4657 #else
4658 /* Likewise on machines that do, if the mode of the object is smaller
4659 than a word and loads of that size don't sign extend, we can say
4660 nothing about the high order bits. */
4661 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
4662 #ifdef LOAD_EXTEND_OP
4663 && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
4664 #endif
4666 return 1;
4667 #endif
4670 switch (code)
4672 case REG:
4674 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4675 /* If pointers extend signed and this is a pointer in Pmode, say that
4676 all the bits above ptr_mode are known to be sign bit copies. */
4677 /* As we do not know which address space the pointer is referring to,
4678 we can do this only if the target does not support different pointer
4679 or address modes depending on the address space. */
4680 if (target_default_pointer_address_modes_p ()
4681 && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4682 && mode == Pmode && REG_POINTER (x))
4683 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
4684 #endif
4687 unsigned int copies_for_hook = 1, copies = 1;
4688 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4689 known_mode, known_ret,
4690 &copies_for_hook);
4692 if (new_rtx)
4693 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
4694 known_mode, known_ret);
4696 if (copies > 1 || copies_for_hook > 1)
4697 return MAX (copies, copies_for_hook);
4699 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4701 break;
4703 case MEM:
4704 #ifdef LOAD_EXTEND_OP
4705 /* Some RISC machines sign-extend all loads of smaller than a word. */
4706 if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
4707 return MAX (1, ((int) bitwidth
4708 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
4709 #endif
4710 break;
4712 case CONST_INT:
4713 /* If the constant is negative, take its 1's complement and remask.
4714 Then see how many zero bits we have. */
4715 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
4716 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4717 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4718 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4720 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
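/* Illustrative example (editorial note, not part of the original source):
   for (const_int 7) considered in HImode (bitwidth 16), nonzero is 7, the
   sign bit is clear, and the result is 16 - floor_log2 (7) - 1 = 13: bits
   15..3 are all copies of the (zero) sign bit.  */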
4722 case SUBREG:
4723 /* If this is a SUBREG for a promoted object that is sign-extended
4724 and we are looking at it in a wider mode, we know that at least the
4725 high-order bits are known to be sign bit copies. */
4727 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
4729 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4730 known_x, known_mode, known_ret);
4731 return MAX ((int) bitwidth
4732 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
4733 num0);
4736 /* For a smaller object, just ignore the high bits. */
4737 if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
4739 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4740 known_x, known_mode, known_ret);
4741 return MAX (1, (num0
4742 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
4743 - bitwidth)));
4746 #ifdef WORD_REGISTER_OPERATIONS
4747 #ifdef LOAD_EXTEND_OP
4748 /* For paradoxical SUBREGs on machines where all register operations
4749 affect the entire register, just look inside. Note that we are
4750 passing MODE to the recursive call, so the number of sign bit copies
4751 will remain relative to that mode, not the inner mode. */
4753 /* This works only if loads sign extend. Otherwise, if we get a
4754 reload for the inner part, it may be loaded from the stack, and
4755 then we lose all sign bit copies that existed before the store
4756 to the stack. */
4758 if (paradoxical_subreg_p (x)
4759 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
4760 && MEM_P (SUBREG_REG (x)))
4761 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4762 known_x, known_mode, known_ret);
4763 #endif
4764 #endif
4765 break;
4767 case SIGN_EXTRACT:
4768 if (CONST_INT_P (XEXP (x, 1)))
4769 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4770 break;
4772 case SIGN_EXTEND:
4773 return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4774 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4775 known_x, known_mode, known_ret));
4777 case TRUNCATE:
4778 /* For a smaller object, just ignore the high bits. */
4779 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4780 known_x, known_mode, known_ret);
4781 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4782 - bitwidth)));
4784 case NOT:
4785 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4786 known_x, known_mode, known_ret);
4788 case ROTATE: case ROTATERT:
4789 /* If we are rotating left by a number of bits less than the number
4790 of sign bit copies, we can just subtract that amount from the
4791 number. */
4792 if (CONST_INT_P (XEXP (x, 1))
4793 && INTVAL (XEXP (x, 1)) >= 0
4794 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4796 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4797 known_x, known_mode, known_ret);
4798 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
4799 : (int) bitwidth - INTVAL (XEXP (x, 1))));
4801 break;
4803 case NEG:
4804 /* In general, this subtracts one sign bit copy. But if the value
4805 is known to be positive, the number of sign bit copies is the
4806 same as that of the input. Finally, if the input has just one bit
4807 that might be nonzero, all the bits are copies of the sign bit. */
4808 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4809 known_x, known_mode, known_ret);
4810 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4811 return num0 > 1 ? num0 - 1 : 1;
4813 nonzero = nonzero_bits (XEXP (x, 0), mode);
4814 if (nonzero == 1)
4815 return bitwidth;
4817 if (num0 > 1
4818 && (((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
4819 num0--;
4821 return num0;
4823 case IOR: case AND: case XOR:
4824 case SMIN: case SMAX: case UMIN: case UMAX:
4825 /* Logical operations will preserve the number of sign-bit copies.
4826 MIN and MAX operations always return one of the operands. */
4827 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4828 known_x, known_mode, known_ret);
4829 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4830 known_x, known_mode, known_ret);
4832 /* If num1 is clearing some of the top bits then regardless of
4833 the other term, we are guaranteed to have at least that many
4834 high-order zero bits. */
4835 if (code == AND
4836 && num1 > 1
4837 && bitwidth <= HOST_BITS_PER_WIDE_INT
4838 && CONST_INT_P (XEXP (x, 1))
4839 && (UINTVAL (XEXP (x, 1))
4840 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0)
4841 return num1;
4843 /* Similarly for IOR when setting high-order bits. */
4844 if (code == IOR
4845 && num1 > 1
4846 && bitwidth <= HOST_BITS_PER_WIDE_INT
4847 && CONST_INT_P (XEXP (x, 1))
4848 && (UINTVAL (XEXP (x, 1))
4849 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4850 return num1;
4852 return MIN (num0, num1);
4854 case PLUS: case MINUS:
4855 /* For addition and subtraction, we can have a 1-bit carry. However,
4856 if we are subtracting 1 from a positive number, there will not
4857 be such a carry. Furthermore, if the positive number is known to
4858 be 0 or 1, we know the result is either -1 or 0. */
4860 if (code == PLUS && XEXP (x, 1) == constm1_rtx
4861 && bitwidth <= HOST_BITS_PER_WIDE_INT)
4863 nonzero = nonzero_bits (XEXP (x, 0), mode);
4864 if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
4865 return (nonzero == 1 || nonzero == 0 ? bitwidth
4866 : bitwidth - floor_log2 (nonzero) - 1);
4869 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4870 known_x, known_mode, known_ret);
4871 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4872 known_x, known_mode, known_ret);
4873 result = MAX (1, MIN (num0, num1) - 1);
4875 return result;
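/* Illustrative example (editorial note, not part of the original source):
   for (plus:SI X (const_int -1)) where nonzero_bits (X) == 1, i.e. X is
   known to be 0 or 1, the early return above yields bitwidth: the sum is
   either -1 or 0, so every bit is a copy of the sign bit.  */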
4877 case MULT:
4878 /* The number of bits of the product is the sum of the number of
4879 bits of both terms. However, unless one of the terms is known
4880 to be positive, we must allow for an additional bit since negating
4881 a negative number can remove one sign bit copy. */
4883 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4884 known_x, known_mode, known_ret);
4885 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4886 known_x, known_mode, known_ret);
4888 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
4889 if (result > 0
4890 && (bitwidth > HOST_BITS_PER_WIDE_INT
4891 || (((nonzero_bits (XEXP (x, 0), mode)
4892 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4893 && ((nonzero_bits (XEXP (x, 1), mode)
4894 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)))
4895 != 0))))
4896 result--;
4898 return MAX (1, result);
4900 case UDIV:
4901 /* The result must be <= the first operand. If the first operand
4902 has the high bit set, we know nothing about the number of sign
4903 bit copies. */
4904 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4905 return 1;
4906 else if ((nonzero_bits (XEXP (x, 0), mode)
4907 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4908 return 1;
4909 else
4910 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4911 known_x, known_mode, known_ret);
4913 case UMOD:
4914 /* The result must be <= the second operand. If the second operand
4915 has (or just might have) the high bit set, we know nothing about
4916 the number of sign bit copies. */
4917 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4918 return 1;
4919 else if ((nonzero_bits (XEXP (x, 1), mode)
4920 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4921 return 1;
4922 else
4923 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
4924 known_x, known_mode, known_ret);
4926 case DIV:
4927 /* Similar to unsigned division, except that we have to worry about
4928 the case where the divisor is negative, in which case we have
4929 to add 1. */
4930 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4931 known_x, known_mode, known_ret);
4932 if (result > 1
4933 && (bitwidth > HOST_BITS_PER_WIDE_INT
4934 || (nonzero_bits (XEXP (x, 1), mode)
4935 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4936 result--;
4938 return result;
4940 case MOD:
4941 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4942 known_x, known_mode, known_ret);
4943 if (result > 1
4944 && (bitwidth > HOST_BITS_PER_WIDE_INT
4945 || (nonzero_bits (XEXP (x, 1), mode)
4946 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4947 result--;
4949 return result;
4951 case ASHIFTRT:
4952 /* Shifts by a constant add to the number of bits equal to the
4953 sign bit. */
4954 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4955 known_x, known_mode, known_ret);
4956 if (CONST_INT_P (XEXP (x, 1))
4957 && INTVAL (XEXP (x, 1)) > 0
4958 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4959 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
4961 return num0;
4963 case ASHIFT:
4964 /* Left shifts destroy copies. */
4965 if (!CONST_INT_P (XEXP (x, 1))
4966 || INTVAL (XEXP (x, 1)) < 0
4967 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
4968 || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
4969 return 1;
4971 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4972 known_x, known_mode, known_ret);
4973 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
4975 case IF_THEN_ELSE:
4976 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4977 known_x, known_mode, known_ret);
4978 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
4979 known_x, known_mode, known_ret);
4980 return MIN (num0, num1);
4982 case EQ: case NE: case GE: case GT: case LE: case LT:
4983 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
4984 case GEU: case GTU: case LEU: case LTU:
4985 case UNORDERED: case ORDERED:
4986 /* If STORE_FLAG_VALUE is negative, take its 1's complement and remask.
4987 Then see how many zero bits we have. */
4988 nonzero = STORE_FLAG_VALUE;
4989 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4990 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4991 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4993 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4995 default:
4996 break;
4999 /* If we haven't been able to figure it out by one of the above rules,
5000 see if some of the high-order bits are known to be zero. If so,
5001 count those bits and return one less than that amount. If we can't
5002 safely compute the mask for this mode, always return BITWIDTH. */
5004 bitwidth = GET_MODE_PRECISION (mode);
5005 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5006 return 1;
5008 nonzero = nonzero_bits (x, mode);
5009 return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
5010 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
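/* Illustrative example of the fallback above (editorial note, not part of
   the original source): if nonzero_bits (x, SImode) == 0x000000ff, the sign
   bit is known to be clear and the function returns
   32 - floor_log2 (0xff) - 1 = 24 sign-bit copies (bits 31..8).  */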
5013 /* Calculate the rtx_cost of a single instruction. A return value of
5014 zero indicates an instruction pattern without a known cost. */
5016 int
5017 insn_rtx_cost (rtx pat, bool speed)
5019 int i, cost;
5020 rtx set;
5022 /* Extract the single set rtx from the instruction pattern.
5023 We can't use single_set since we only have the pattern. */
5024 if (GET_CODE (pat) == SET)
5025 set = pat;
5026 else if (GET_CODE (pat) == PARALLEL)
5028 set = NULL_RTX;
5029 for (i = 0; i < XVECLEN (pat, 0); i++)
5031 rtx x = XVECEXP (pat, 0, i);
5032 if (GET_CODE (x) == SET)
5034 if (set)
5035 return 0;
5036 set = x;
5039 if (!set)
5040 return 0;
5042 else
5043 return 0;
5045 cost = set_src_cost (SET_SRC (set), speed);
5046 return cost > 0 ? cost : COSTS_N_INSNS (1);
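/* Illustrative example (editorial note, not part of the original source):
   for a pattern such as
     (parallel [(set (reg:SI 0) (plus:SI (reg:SI 1) (reg:SI 2)))
                (clobber (reg:CC 17))])
   the loop above finds the single SET, and the returned cost is the
   set_src_cost of the PLUS, falling back to COSTS_N_INSNS (1) if that
   cost is zero.  */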
5049 /* Given an insn INSN and condition COND, return the condition in a
5050 canonical form to simplify testing by callers. Specifically:
5052 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
5053 (2) Both operands will be machine operands; (cc0) will have been replaced.
5054 (3) If an operand is a constant, it will be the second operand.
5055 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
5056 for GE, GEU, and LEU.
5058 If the condition cannot be understood, or is an inequality floating-point
5059 comparison which needs to be reversed, 0 will be returned.
5061 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
5063 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5064 insn used in locating the condition was found. If a replacement test
5065 of the condition is desired, it should be placed in front of that
5066 insn and we will be sure that the inputs are still valid.
5068 If WANT_REG is nonzero, we wish the condition to be relative to that
5069 register, if possible. Therefore, do not canonicalize the condition
5070 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
5071 to be a compare to a CC mode register.
5073 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
5074 and at INSN. */
5076 rtx
5077 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
5078 rtx_insn **earliest,
5079 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5081 enum rtx_code code;
5082 rtx_insn *prev = insn;
5083 const_rtx set;
5084 rtx tem;
5085 rtx op0, op1;
5086 int reverse_code = 0;
5087 enum machine_mode mode;
5088 basic_block bb = BLOCK_FOR_INSN (insn);
5090 code = GET_CODE (cond);
5091 mode = GET_MODE (cond);
5092 op0 = XEXP (cond, 0);
5093 op1 = XEXP (cond, 1);
5095 if (reverse)
5096 code = reversed_comparison_code (cond, insn);
5097 if (code == UNKNOWN)
5098 return 0;
5100 if (earliest)
5101 *earliest = insn;
5103 /* If we are comparing a register with zero, see if the register is set
5104 in the previous insn to a COMPARE or a comparison operation. Perform
5105 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
5106 in cse.c */
5108 while ((GET_RTX_CLASS (code) == RTX_COMPARE
5109 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5110 && op1 == CONST0_RTX (GET_MODE (op0))
5111 && op0 != want_reg)
5113 /* Set nonzero when we find something of interest. */
5114 rtx x = 0;
5116 #ifdef HAVE_cc0
5117 /* If comparison with cc0, import actual comparison from compare
5118 insn. */
5119 if (op0 == cc0_rtx)
5121 if ((prev = prev_nonnote_insn (prev)) == 0
5122 || !NONJUMP_INSN_P (prev)
5123 || (set = single_set (prev)) == 0
5124 || SET_DEST (set) != cc0_rtx)
5125 return 0;
5127 op0 = SET_SRC (set);
5128 op1 = CONST0_RTX (GET_MODE (op0));
5129 if (earliest)
5130 *earliest = prev;
5132 #endif
5134 /* If this is a COMPARE, pick up the two things being compared. */
5135 if (GET_CODE (op0) == COMPARE)
5137 op1 = XEXP (op0, 1);
5138 op0 = XEXP (op0, 0);
5139 continue;
5141 else if (!REG_P (op0))
5142 break;
5144 /* Go back to the previous insn. Stop if it is not an INSN. We also
5145 stop if it isn't a single set or if it has a REG_INC note because
5146 we don't want to bother dealing with it. */
5148 prev = prev_nonnote_nondebug_insn (prev);
5150 if (prev == 0
5151 || !NONJUMP_INSN_P (prev)
5152 || FIND_REG_INC_NOTE (prev, NULL_RTX)
5153 /* In cfglayout mode, there do not have to be labels at the
5154 beginning of a block, or jumps at the end, so the previous
5155 conditions would not stop us when we reach bb boundary. */
5156 || BLOCK_FOR_INSN (prev) != bb)
5157 break;
5159 set = set_of (op0, prev);
5161 if (set
5162 && (GET_CODE (set) != SET
5163 || !rtx_equal_p (SET_DEST (set), op0)))
5164 break;
5166 /* If this is setting OP0, get what it sets it to if it looks
5167 relevant. */
5168 if (set)
5170 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
5171 #ifdef FLOAT_STORE_FLAG_VALUE
5172 REAL_VALUE_TYPE fsfv;
5173 #endif
5175 /* ??? We may not combine comparisons done in a CCmode with
5176 comparisons not done in a CCmode. This is to aid targets
5177 like Alpha that have an IEEE compliant EQ instruction, and
5178 a non-IEEE compliant BEQ instruction. The use of CCmode is
5179 actually artificial, simply to prevent the combination, but
5180 should not affect other platforms.
5182 However, we must allow VOIDmode comparisons to match either
5183 CCmode or non-CCmode comparison, because some ports have
5184 modeless comparisons inside branch patterns.
5186 ??? This mode check should perhaps look more like the mode check
5187 in simplify_comparison in combine. */
5188 if (((GET_MODE_CLASS (mode) == MODE_CC)
5189 != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5190 && mode != VOIDmode
5191 && inner_mode != VOIDmode)
5192 break;
5193 if (GET_CODE (SET_SRC (set)) == COMPARE
5194 || (((code == NE
5195 || (code == LT
5196 && val_signbit_known_set_p (inner_mode,
5197 STORE_FLAG_VALUE))
5198 #ifdef FLOAT_STORE_FLAG_VALUE
5199 || (code == LT
5200 && SCALAR_FLOAT_MODE_P (inner_mode)
5201 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5202 REAL_VALUE_NEGATIVE (fsfv)))
5203 #endif
5205 && COMPARISON_P (SET_SRC (set))))
5206 x = SET_SRC (set);
5207 else if (((code == EQ
5208 || (code == GE
5209 && val_signbit_known_set_p (inner_mode,
5210 STORE_FLAG_VALUE))
5211 #ifdef FLOAT_STORE_FLAG_VALUE
5212 || (code == GE
5213 && SCALAR_FLOAT_MODE_P (inner_mode)
5214 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5215 REAL_VALUE_NEGATIVE (fsfv)))
5216 #endif
5218 && COMPARISON_P (SET_SRC (set)))
5220 reverse_code = 1;
5221 x = SET_SRC (set);
5223 else if ((code == EQ || code == NE)
5224 && GET_CODE (SET_SRC (set)) == XOR)
5225 /* Handle sequences like:
5227 (set op0 (xor X Y))
5228 ...(eq|ne op0 (const_int 0))...
5230 in which case:
5232 (eq op0 (const_int 0)) reduces to (eq X Y)
5233 (ne op0 (const_int 0)) reduces to (ne X Y)
5235 This is the form used by MIPS16, for example. */
5236 x = SET_SRC (set);
5237 else
5238 break;
5241 else if (reg_set_p (op0, prev))
5242 /* If this sets OP0, but not directly, we have to give up. */
5243 break;
5245 if (x)
5247 /* If the caller is expecting the condition to be valid at INSN,
5248 make sure X doesn't change before INSN. */
5249 if (valid_at_insn_p)
5250 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5251 break;
5252 if (COMPARISON_P (x))
5253 code = GET_CODE (x);
5254 if (reverse_code)
5256 code = reversed_comparison_code (x, prev);
5257 if (code == UNKNOWN)
5258 return 0;
5259 reverse_code = 0;
5262 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5263 if (earliest)
5264 *earliest = prev;
5268 /* If constant is first, put it last. */
5269 if (CONSTANT_P (op0))
5270 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5272 /* If OP0 is the result of a comparison, we weren't able to find what
5273 was really being compared, so fail. */
5274 if (!allow_cc_mode
5275 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5276 return 0;
5278 /* Canonicalize any ordered comparison with integers involving equality
5279 if we can do computations in the relevant mode and we do not
5280 overflow. */
5282 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
5283 && CONST_INT_P (op1)
5284 && GET_MODE (op0) != VOIDmode
5285 && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
5287 HOST_WIDE_INT const_val = INTVAL (op1);
5288 unsigned HOST_WIDE_INT uconst_val = const_val;
5289 unsigned HOST_WIDE_INT max_val
5290 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
5292 switch (code)
5294 case LE:
5295 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5296 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
5297 break;
5299 /* When cross-compiling, const_val might be sign-extended from
5300 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
5301 case GE:
5302 if ((const_val & max_val)
5303 != ((unsigned HOST_WIDE_INT) 1
5304 << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
5305 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
5306 break;
5308 case LEU:
5309 if (uconst_val < max_val)
5310 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
5311 break;
5313 case GEU:
5314 if (uconst_val != 0)
5315 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
5316 break;
5318 default:
5319 break;
5323 /* Never return CC0; return zero instead. */
5324 if (CC0_P (op0))
5325 return 0;
5327 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
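/* Illustrative example (editorial note, not part of the original source):
   given the condition (leu:SI (reg:SI 0) (const_int 3)), the switch above
   rewrites it as (ltu:SI (reg:SI 0) (const_int 4)); a condition with the
   constant first, such as (lt (const_int 0) (reg:SI 0)), is first swapped
   to (gt (reg:SI 0) (const_int 0)).  */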
5330 /* Given a jump insn JUMP, return the condition that will cause it to branch
5331 to its JUMP_LABEL. If the condition cannot be understood, or is an
5332 inequality floating-point comparison which needs to be reversed, 0 will
5333 be returned.
5335 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5336 insn used in locating the condition was found. If a replacement test
5337 of the condition is desired, it should be placed in front of that
5338 insn and we will be sure that the inputs are still valid. If EARLIEST
5339 is null, the returned condition will be valid at INSN.
5341 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5342 compare CC mode register.
5344 VALID_AT_INSN_P is the same as for canonicalize_condition. */
5346 rtx
5347 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
5348 int valid_at_insn_p)
5350 rtx cond;
5351 int reverse;
5352 rtx set;
5354 /* If this is not a standard conditional jump, we can't parse it. */
5355 if (!JUMP_P (jump)
5356 || ! any_condjump_p (jump))
5357 return 0;
5358 set = pc_set (jump);
5360 cond = XEXP (SET_SRC (set), 0);
5362 /* If this branches to JUMP_LABEL when the condition is false, reverse
5363 the condition. */
5364 reverse
5365 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5366 && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
5368 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5369 allow_cc_mode, valid_at_insn_p);
5372 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5373 TARGET_MODE_REP_EXTENDED.
5375 Note that we assume that the property of
5376 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5377 narrower than mode B. I.e., if A is a mode narrower than B then in
5378 order to be able to operate on it in mode B, mode A needs to
5379 satisfy the requirements set by the representation of mode B. */
5381 static void
5382 init_num_sign_bit_copies_in_rep (void)
5384 enum machine_mode mode, in_mode;
5386 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
5387 in_mode = GET_MODE_WIDER_MODE (mode))
5388 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
5389 mode = GET_MODE_WIDER_MODE (mode))
5391 enum machine_mode i;
5393 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5394 extends to the next widest mode. */
5395 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5396 || GET_MODE_WIDER_MODE (mode) == in_mode);
5398 /* We are in in_mode. Count how many bits outside of mode
5399 have to be copies of the sign-bit. */
5400 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
5402 enum machine_mode wider = GET_MODE_WIDER_MODE (i);
5404 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5405 /* We can only check sign-bit copies starting from the
5406 top-bit. In order to be able to check the bits we
5407 have already seen we pretend that subsequent bits
5408 have to be sign-bit copies too. */
5409 || num_sign_bit_copies_in_rep [in_mode][mode])
5410 num_sign_bit_copies_in_rep [in_mode][mode]
5411 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5416 /* Suppose that truncation from the machine mode of X to MODE is not a
5417 no-op. See if there is anything special about X so that we can
5418 assume it already contains a truncated value of MODE. */
5420 bool
5421 truncated_to_mode (enum machine_mode mode, const_rtx x)
5423 /* This register has already been used in MODE without explicit
5424 truncation. */
5425 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5426 return true;
5428 /* See if we already satisfy the requirements of MODE. If yes we
5429 can just switch to MODE. */
5430 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5431 && (num_sign_bit_copies (x, GET_MODE (x))
5432 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5433 return true;
5435 return false;
5438 /* Return true if RTX code CODE has a single sequence of zero or more
5439 "e" operands and no rtvec operands. Initialize its rtx_all_subrtx_bounds
5440 entry in that case. */
5442 static bool
5443 setup_reg_subrtx_bounds (unsigned int code)
5445 const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
5446 unsigned int i = 0;
5447 for (; format[i] != 'e'; ++i)
5449 if (!format[i])
5450 /* No subrtxes. Leave start and count as 0. */
5451 return true;
5452 if (format[i] == 'E' || format[i] == 'V')
5453 return false;
5456 /* Record the sequence of 'e's. */
5457 rtx_all_subrtx_bounds[code].start = i;
5459 ++i;
5460 while (format[i] == 'e');
5461 rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
5462 /* rtl-iter.h relies on this. */
5463 gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
5465 for (; format[i]; ++i)
5466 if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
5467 return false;
5469 return true;
5472 /* Initialize non_rtx_starting_operands, which is used to speed up
5473 for_each_rtx, and rtx_all_subrtx_bounds. */
5474 void
5475 init_rtlanal (void)
5477 int i;
5478 for (i = 0; i < NUM_RTX_CODE; i++)
5480 const char *format = GET_RTX_FORMAT (i);
5481 const char *first = strpbrk (format, "eEV");
5482 non_rtx_starting_operands[i] = first ? first - format : -1;
5483 if (!setup_reg_subrtx_bounds (i))
5484 rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
5485 if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
5486 rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
5489 init_num_sign_bit_copies_in_rep ();
5492 /* Check whether this is a constant pool constant. */
5493 bool
5494 constant_pool_constant_p (rtx x)
5496 x = avoid_constant_pool_reference (x);
5497 return CONST_DOUBLE_P (x);
5500 /* If M is a bitmask that selects a field of low-order bits within an item but
5501 not the entire word, return the length of the field. Return -1 otherwise.
5502 M is used in machine mode MODE. */
5504 int
5505 low_bitmask_len (enum machine_mode mode, unsigned HOST_WIDE_INT m)
5507 if (mode != VOIDmode)
5509 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
5510 return -1;
5511 m &= GET_MODE_MASK (mode);
5514 return exact_log2 (m + 1);
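/* Illustrative examples (editorial note, not part of the original source):
   in SImode, m == 0x3f gives exact_log2 (0x40) == 6, a six-bit low-order
   field, while m == 0x18 gives exact_log2 (0x19) == -1 because the mask
   does not start at bit 0.  */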
5517 /* Return the mode of MEM's address. */
5519 enum machine_mode
5520 get_address_mode (rtx mem)
5522 enum machine_mode mode;
5524 gcc_assert (MEM_P (mem));
5525 mode = GET_MODE (XEXP (mem, 0));
5526 if (mode != VOIDmode)
5527 return mode;
5528 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5531 /* Split up a CONST_DOUBLE or integer constant rtx
5532 into two rtx's for single words,
5533 storing in *FIRST the word that comes first in memory in the target
5534 and in *SECOND the other.
5536 TODO: This function needs to be rewritten to work on any size
5537 integer. */
5539 void
5540 split_double (rtx value, rtx *first, rtx *second)
5542 if (CONST_INT_P (value))
5544 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5546 /* In this case the CONST_INT holds both target words.
5547 Extract the bits from it into two word-sized pieces.
5548 Sign extend each half to HOST_WIDE_INT. */
5549 unsigned HOST_WIDE_INT low, high;
5550 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5551 unsigned bits_per_word = BITS_PER_WORD;
5553 /* Set sign_bit to the most significant bit of a word. */
5554 sign_bit = 1;
5555 sign_bit <<= bits_per_word - 1;
5557 /* Set mask so that all bits of the word are set. We could
5558 have used 1 << BITS_PER_WORD instead of basing the
5559 calculation on sign_bit. However, on machines where
5560 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5561 compiler warning, even though the code would never be
5562 executed. */
5563 mask = sign_bit << 1;
5564 mask--;
5566 /* Set sign_extend as any remaining bits. */
5567 sign_extend = ~mask;
5569 /* Pick the lower word and sign-extend it. */
5570 low = INTVAL (value);
5571 low &= mask;
5572 if (low & sign_bit)
5573 low |= sign_extend;
5575 /* Pick the higher word, shifted to the least significant
5576 bits, and sign-extend it. */
5577 high = INTVAL (value);
5578 high >>= bits_per_word - 1;
5579 high >>= 1;
5580 high &= mask;
5581 if (high & sign_bit)
5582 high |= sign_extend;
5584 /* Store the words in the target machine order. */
5585 if (WORDS_BIG_ENDIAN)
5587 *first = GEN_INT (high);
5588 *second = GEN_INT (low);
5590 else
5592 *first = GEN_INT (low);
5593 *second = GEN_INT (high);
5596 else
5598 /* The rule for using CONST_INT for a wider mode
5599 is that we regard the value as signed.
5600 So sign-extend it. */
5601 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
5602 if (WORDS_BIG_ENDIAN)
5604 *first = high;
5605 *second = value;
5607 else
5609 *first = value;
5610 *second = high;
5614 else if (GET_CODE (value) == CONST_WIDE_INT)
5616 /* All of this is scary code and needs to be converted to
5617 properly work with any size integer. */
5618 gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
5619 if (WORDS_BIG_ENDIAN)
5621 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5622 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5624 else
5626 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5627 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5630 else if (!CONST_DOUBLE_P (value))
5632 if (WORDS_BIG_ENDIAN)
5634 *first = const0_rtx;
5635 *second = value;
5637 else
5639 *first = value;
5640 *second = const0_rtx;
5643 else if (GET_MODE (value) == VOIDmode
5644 /* This is the old way we did CONST_DOUBLE integers. */
5645 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
5647 /* In an integer, the words are defined as most and least significant.
5648 So order them by the target's convention. */
5649 if (WORDS_BIG_ENDIAN)
5651 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
5652 *second = GEN_INT (CONST_DOUBLE_LOW (value));
5654 else
5656 *first = GEN_INT (CONST_DOUBLE_LOW (value));
5657 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
5660 else
5662 REAL_VALUE_TYPE r;
5663 long l[2];
5664 REAL_VALUE_FROM_CONST_DOUBLE (r, value);
5666 /* Note, this converts the REAL_VALUE_TYPE to the target's
5667 format, splits up the floating point double and outputs
5668 exactly 32 bits of it into each of l[0] and l[1] --
5669 not necessarily BITS_PER_WORD bits. */
5670 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
5672 /* If 32 bits is an entire word for the target, but not for the host,
5673 then sign-extend on the host so that the number will look the same
5674 way on the host that it would on the target. See for instance
5675 simplify_unary_operation. The #if is needed to avoid compiler
5676 warnings. */
5678 #if HOST_BITS_PER_LONG > 32
5679 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
5681 if (l[0] & ((long) 1 << 31))
5682 l[0] |= ((long) (-1) << 32);
5683 if (l[1] & ((long) 1 << 31))
5684 l[1] |= ((long) (-1) << 32);
5686 #endif
5688 *first = GEN_INT (l[0]);
5689 *second = GEN_INT (l[1]);
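/* Illustrative example (editorial note, not part of the original source):
   on a 32-bit little-endian target with a 64-bit HOST_WIDE_INT, splitting
   (const_int 0x100000002) yields *FIRST == (const_int 2) and
   *SECOND == (const_int 1); with WORDS_BIG_ENDIAN the two are swapped.  */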
5693 /* Return true if X is a sign_extract or zero_extract from the least
5694 significant bit. */
5696 static bool
5697 lsb_bitfield_op_p (rtx x)
5699 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
5701 enum machine_mode mode = GET_MODE (XEXP (x, 0));
5702 HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
5703 HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
5705 return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
5707 return false;
5710 /* Strip outer address "mutations" from LOC and return a pointer to the
5711 inner value. If OUTER_CODE is nonnull, store the code of the innermost
5712 stripped expression there.
5714 "Mutations" either convert between modes or apply some kind of
5715 extension, truncation or alignment. */
5717 rtx *
5718 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
5720 for (;;)
5722 enum rtx_code code = GET_CODE (*loc);
5723 if (GET_RTX_CLASS (code) == RTX_UNARY)
5724 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
5725 used to convert between pointer sizes. */
5726 loc = &XEXP (*loc, 0);
5727 else if (lsb_bitfield_op_p (*loc))
5728 /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
5729 acts as a combined truncation and extension. */
5730 loc = &XEXP (*loc, 0);
5731 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
5732 /* (and ... (const_int -X)) is used to align to X bytes. */
5733 loc = &XEXP (*loc, 0);
5734 else if (code == SUBREG
5735 && !OBJECT_P (SUBREG_REG (*loc))
5736 && subreg_lowpart_p (*loc))
5737 /* (subreg (operator ...) ...) inside an address is used for mode
5738 conversion too. */
5739 loc = &SUBREG_REG (*loc);
5740 else
5741 return loc;
5742 if (outer_code)
5743 *outer_code = code;
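/* Illustrative example (editorial note, not part of the original source):
   for *LOC == (and:DI (zero_extend:DI (reg:SI R)) (const_int -8)), the loop
   above first steps through the alignment AND, then through the ZERO_EXTEND,
   and returns a pointer to the (reg:SI R) term, leaving *OUTER_CODE set to
   ZERO_EXTEND.  */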
5747 /* Return true if CODE applies some kind of scale. The scaled value is
5748 the first operand and the scale is the second. */
5750 static bool
5751 binary_scale_code_p (enum rtx_code code)
5753 return (code == MULT
5754 || code == ASHIFT
5755 /* Needed by ARM targets. */
5756 || code == ASHIFTRT
5757 || code == LSHIFTRT
5758 || code == ROTATE
5759 || code == ROTATERT);
5762 /* If *INNER can be interpreted as a base, return a pointer to the inner term
5763 (see address_info). Return null otherwise. */
5765 static rtx *
5766 get_base_term (rtx *inner)
5768 if (GET_CODE (*inner) == LO_SUM)
5769 inner = strip_address_mutations (&XEXP (*inner, 0));
5770 if (REG_P (*inner)
5771 || MEM_P (*inner)
5772 || GET_CODE (*inner) == SUBREG)
5773 return inner;
5774 return 0;
5777 /* If *INNER can be interpreted as an index, return a pointer to the inner term
5778 (see address_info). Return null otherwise. */
5780 static rtx *
5781 get_index_term (rtx *inner)
5783 /* At present, only constant scales are allowed. */
5784 if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
5785 inner = strip_address_mutations (&XEXP (*inner, 0));
5786 if (REG_P (*inner)
5787 || MEM_P (*inner)
5788 || GET_CODE (*inner) == SUBREG)
5789 return inner;
5790 return 0;
5793 /* Set the segment part of address INFO to LOC, given that INNER is the
5794 unmutated value. */
5796 static void
5797 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
5799 gcc_assert (!info->segment);
5800 info->segment = loc;
5801 info->segment_term = inner;
5804 /* Set the base part of address INFO to LOC, given that INNER is the
5805 unmutated value. */
5807 static void
5808 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
5810 gcc_assert (!info->base);
5811 info->base = loc;
5812 info->base_term = inner;
5815 /* Set the index part of address INFO to LOC, given that INNER is the
5816 unmutated value. */
5818 static void
5819 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
5821 gcc_assert (!info->index);
5822 info->index = loc;
5823 info->index_term = inner;
5826 /* Set the displacement part of address INFO to LOC, given that INNER
5827 is the constant term. */
5829 static void
5830 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
5832 gcc_assert (!info->disp);
5833 info->disp = loc;
5834 info->disp_term = inner;
5837 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
5838 rest of INFO accordingly. */
5840 static void
5841 decompose_incdec_address (struct address_info *info)
5843 info->autoinc_p = true;
5845 rtx *base = &XEXP (*info->inner, 0);
5846 set_address_base (info, base, base);
5847 gcc_checking_assert (info->base == info->base_term);
5849 /* These addresses are only valid when the size of the addressed
5850 value is known. */
5851 gcc_checking_assert (info->mode != VOIDmode);
5854 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
5855 of INFO accordingly. */
5857 static void
5858 decompose_automod_address (struct address_info *info)
5860 info->autoinc_p = true;
5862 rtx *base = &XEXP (*info->inner, 0);
5863 set_address_base (info, base, base);
5864 gcc_checking_assert (info->base == info->base_term);
5866 rtx plus = XEXP (*info->inner, 1);
5867 gcc_assert (GET_CODE (plus) == PLUS);
5869 info->base_term2 = &XEXP (plus, 0);
5870 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
5872 rtx *step = &XEXP (plus, 1);
5873 rtx *inner_step = strip_address_mutations (step);
5874 if (CONSTANT_P (*inner_step))
5875 set_address_disp (info, step, inner_step);
5876 else
5877 set_address_index (info, step, inner_step);
5880 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
5881 values in [PTR, END). Return a pointer to the end of the used array. */
5883 static rtx **
5884 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
5886 rtx x = *loc;
5887 if (GET_CODE (x) == PLUS)
5889 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
5890 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
5892 else
5894 gcc_assert (ptr != end);
5895 *ptr++ = loc;
5897 return ptr;
5900 /* Evaluate the likelihood of X being a base or index value, returning
5901 positive if it is likely to be a base, negative if it is likely to be
5902 an index, and 0 if we can't tell. Make the magnitude of the return
5903 value reflect the amount of confidence we have in the answer.
5905 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
5907 static int
5908 baseness (rtx x, enum machine_mode mode, addr_space_t as,
5909 enum rtx_code outer_code, enum rtx_code index_code)
5911 /* Believe *_POINTER unless the address shape requires otherwise. */
5912 if (REG_P (x) && REG_POINTER (x))
5913 return 2;
5914 if (MEM_P (x) && MEM_POINTER (x))
5915 return 2;
5917 if (REG_P (x) && HARD_REGISTER_P (x))
5919 /* X is a hard register. If it only fits one of the base
5920 or index classes, choose that interpretation. */
5921 int regno = REGNO (x);
5922 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
5923 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
5924 if (base_p != index_p)
5925 return base_p ? 1 : -1;
5927 return 0;
5930 /* INFO->INNER describes a normal, non-automodified address.
5931 Fill in the rest of INFO accordingly. */
5933 static void
5934 decompose_normal_address (struct address_info *info)
5936 /* Treat the address as the sum of up to four values. */
5937 rtx *ops[4];
5938 size_t n_ops = extract_plus_operands (info->inner, ops,
5939 ops + ARRAY_SIZE (ops)) - ops;
5941 /* If there is more than one component, any base component is in a PLUS. */
5942 if (n_ops > 1)
5943 info->base_outer_code = PLUS;
5945 /* Try to classify each sum operand now. Leave those that could be
5946 either a base or an index in OPS. */
5947 rtx *inner_ops[4];
5948 size_t out = 0;
5949 for (size_t in = 0; in < n_ops; ++in)
5951 rtx *loc = ops[in];
5952 rtx *inner = strip_address_mutations (loc);
5953 if (CONSTANT_P (*inner))
5954 set_address_disp (info, loc, inner);
5955 else if (GET_CODE (*inner) == UNSPEC)
5956 set_address_segment (info, loc, inner);
5957 else
5959 /* The only other possibilities are a base or an index. */
5960 rtx *base_term = get_base_term (inner);
5961 rtx *index_term = get_index_term (inner);
5962 gcc_assert (base_term || index_term);
5963 if (!base_term)
5964 set_address_index (info, loc, index_term);
5965 else if (!index_term)
5966 set_address_base (info, loc, base_term);
5967 else
5969 gcc_assert (base_term == index_term);
5970 ops[out] = loc;
5971 inner_ops[out] = base_term;
5972 ++out;
5977 /* Classify the remaining OPS members as bases and indexes. */
5978 if (out == 1)
5980 /* If we haven't seen a base or an index yet, assume that this is
5981 the base. If we were confident that another term was the base
5982 or index, treat the remaining operand as the other kind. */
5983 if (!info->base)
5984 set_address_base (info, ops[0], inner_ops[0]);
5985 else
5986 set_address_index (info, ops[0], inner_ops[0]);
5988 else if (out == 2)
5990 /* In the event of a tie, assume the base comes first. */
5991 if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
5992 GET_CODE (*ops[1]))
5993 >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
5994 GET_CODE (*ops[0])))
5996 set_address_base (info, ops[0], inner_ops[0]);
5997 set_address_index (info, ops[1], inner_ops[1]);
5999 else
6001 set_address_base (info, ops[1], inner_ops[1]);
6002 set_address_index (info, ops[0], inner_ops[0]);
6005 else
6006 gcc_assert (out == 0);
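/* Illustrative example (editorial note, not part of the original source):
   for an address such as
     (plus (plus (mult (reg A) (const_int 4)) (reg B)) (const_int 12))
   extract_plus_operands yields three terms; the MULT can only be an index,
   the constant becomes the displacement, and the remaining (reg B) is then
   taken as the base.  */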
6009 /* Describe address *LOC in *INFO. MODE is the mode of the addressed value,
6010 or VOIDmode if not known. AS is the address space associated with LOC.
6011 OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise. */
6013 void
6014 decompose_address (struct address_info *info, rtx *loc, enum machine_mode mode,
6015 addr_space_t as, enum rtx_code outer_code)
6017 memset (info, 0, sizeof (*info));
6018 info->mode = mode;
6019 info->as = as;
6020 info->addr_outer_code = outer_code;
6021 info->outer = loc;
6022 info->inner = strip_address_mutations (loc, &outer_code);
6023 info->base_outer_code = outer_code;
6024 switch (GET_CODE (*info->inner))
6026 case PRE_DEC:
6027 case PRE_INC:
6028 case POST_DEC:
6029 case POST_INC:
6030 decompose_incdec_address (info);
6031 break;
6033 case PRE_MODIFY:
6034 case POST_MODIFY:
6035 decompose_automod_address (info);
6036 break;
6038 default:
6039 decompose_normal_address (info);
6040 break;
6044 /* Describe address operand LOC in INFO. */
6046 void
6047 decompose_lea_address (struct address_info *info, rtx *loc)
6049 decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
6052 /* Describe the address of MEM X in INFO. */
6054 void
6055 decompose_mem_address (struct address_info *info, rtx x)
6057 gcc_assert (MEM_P (x));
6058 decompose_address (info, &XEXP (x, 0), GET_MODE (x),
6059 MEM_ADDR_SPACE (x), MEM);
6062 /* Update INFO after a change to the address it describes. */
6064 void
6065 update_address (struct address_info *info)
6067 decompose_address (info, info->outer, info->mode, info->as,
6068 info->addr_outer_code);
6071 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
6072 more complicated than that. */
6074 HOST_WIDE_INT
6075 get_index_scale (const struct address_info *info)
6077 rtx index = *info->index;
6078 if (GET_CODE (index) == MULT
6079 && CONST_INT_P (XEXP (index, 1))
6080 && info->index_term == &XEXP (index, 0))
6081 return INTVAL (XEXP (index, 1));
6083 if (GET_CODE (index) == ASHIFT
6084 && CONST_INT_P (XEXP (index, 1))
6085 && info->index_term == &XEXP (index, 0))
6086 return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));
6088 if (info->index == info->index_term)
6089 return 1;
6091 return 0;
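/* Illustrative examples (editorial note, not part of the original source):
   with *INFO->INDEX == (mult (reg R) (const_int 4)) the scale is 4; with
   (ashift (reg R) (const_int 2)) it is 1 << 2 == 4; and when the index term
   is the bare register the scale is 1.  */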
6094 /* Return the "index code" of INFO, in the form required by
6095 ok_for_base_p_1. */
6097 enum rtx_code
6098 get_index_code (const struct address_info *info)
6100 if (info->index)
6101 return GET_CODE (*info->index);
6103 if (info->disp)
6104 return GET_CODE (*info->disp);
6106 return SCRATCH;
6109 /* Return true if X contains a thread-local symbol. */
6111 bool
6112 tls_referenced_p (const_rtx x)
6114 if (!targetm.have_tls)
6115 return false;
6117 subrtx_iterator::array_type array;
6118 FOR_EACH_SUBRTX (iter, array, x, ALL)
6119 if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
6120 return true;
6121 return false;