1 /* Analyze RTL for GNU compiler.
2 Copyright (C) 1987-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "diagnostic-core.h"
26 #include "hard-reg-set.h"
27 #include "rtl.h"
28 #include "insn-config.h"
29 #include "recog.h"
30 #include "target.h"
31 #include "output.h"
32 #include "tm_p.h"
33 #include "flags.h"
34 #include "regs.h"
35 #include "function.h"
36 #include "df.h"
37 #include "tree.h"
38 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
39 #include "addresses.h"
40 #include "rtl-iter.h"
42 /* Forward declarations */
43 static void set_of_1 (rtx, const_rtx, void *);
44 static bool covers_regno_p (const_rtx, unsigned int);
45 static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
46 static int computed_jump_p_1 (const_rtx);
47 static void parms_set (rtx, const_rtx, void *);
49 static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, enum machine_mode,
50 const_rtx, enum machine_mode,
51 unsigned HOST_WIDE_INT);
52 static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, enum machine_mode,
53 const_rtx, enum machine_mode,
54 unsigned HOST_WIDE_INT);
55 static unsigned int cached_num_sign_bit_copies (const_rtx, enum machine_mode, const_rtx,
56 enum machine_mode,
57 unsigned int);
58 static unsigned int num_sign_bit_copies1 (const_rtx, enum machine_mode, const_rtx,
59 enum machine_mode, unsigned int);
61 /* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
62 -1 if a code has no such operand. */
63 static int non_rtx_starting_operands[NUM_RTX_CODE];
65 rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
66 rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
68 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
69 If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
70 SIGN_EXTEND then while narrowing we also have to enforce the
71 representation and sign-extend the value to mode DESTINATION_REP.
73 If the value is already sign-extended to DESTINATION_REP mode we
74 can just switch to DESTINATION mode on it. For each pair of
75 integral modes SOURCE and DESTINATION, when truncating from SOURCE
76 to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
77 contains the number of high-order bits in SOURCE that have to be
78 copies of the sign-bit so that we can do this mode-switch to
79 DESTINATION. */
81 static unsigned int
82 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
84 /* Store X into index I of ARRAY. ARRAY is known to have at least I
85 elements. Return the new base of ARRAY. */
87 template <typename T>
88 typename T::value_type *
89 generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
90 value_type *base,
91 size_t i, value_type x)
93 if (base == array.stack)
95 if (i < LOCAL_ELEMS)
97 base[i] = x;
98 return base;
100 gcc_checking_assert (i == LOCAL_ELEMS);
101 vec_safe_grow (array.heap, i + 1);
102 base = array.heap->address ();
103 memcpy (base, array.stack, sizeof (array.stack));
104 base[LOCAL_ELEMS] = x;
105 return base;
107 unsigned int length = array.heap->length ();
108 if (length > i)
110 gcc_checking_assert (base == array.heap->address ());
111 base[i] = x;
112 return base;
114 else
116 gcc_checking_assert (i == length);
117 vec_safe_push (array.heap, x);
118 return array.heap->address ();
122 /* Add the subrtxes of X to worklist ARRAY, starting at END. Return the
123 number of elements added to the worklist. */
125 template <typename T>
126 size_t
127 generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
128 value_type *base,
129 size_t end, rtx_type x)
131 const char *format = GET_RTX_FORMAT (GET_CODE (x));
132 size_t orig_end = end;
133 for (int i = 0; format[i]; ++i)
134 if (format[i] == 'e')
136 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
137 if (__builtin_expect (end < LOCAL_ELEMS, true))
138 base[end++] = subx;
139 else
140 base = add_single_to_queue (array, base, end++, subx);
142 else if (format[i] == 'E')
144 int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
145 rtx *vec = x->u.fld[i].rt_rtvec->elem;
146 if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
147 for (int j = 0; j < length; j++)
148 base[end++] = T::get_value (vec[j]);
149 else
150 for (int j = 0; j < length; j++)
151 base = add_single_to_queue (array, base, end++,
152 T::get_value (vec[j]));
154 return end - orig_end;
157 template <typename T>
158 void
159 generic_subrtx_iterator <T>::free_array (array_type &array)
161 vec_free (array.heap);
164 template <typename T>
165 const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;
167 template class generic_subrtx_iterator <const_rtx_accessor>;
168 template class generic_subrtx_iterator <rtx_var_accessor>;
169 template class generic_subrtx_iterator <rtx_ptr_accessor>;
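/* Illustrative sketch, not from the original sources (guarded out with
   "#if 0"): the template machinery above backs the FOR_EACH_SUBRTX
   iterators declared in rtl-iter.h.  A client walks an expression roughly
   like this; the helper name `count_mems_in' is hypothetical.  */
#if 0
static int
count_mems_in (const_rtx x)
{
  int n = 0;
  subrtx_iterator::array_type array;
  /* Visit X itself and every sub-rtx of X, including vector elements.  */
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    if (MEM_P (*iter))
      n++;
  return n;
}
#endif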
171 /* Return 1 if the value of X is unstable
172 (would be different at a different point in the program).
173 The frame pointer, arg pointer, etc. are considered stable
174 (within one function) and so is anything marked `unchanging'. */
177 rtx_unstable_p (const_rtx x)
179 const RTX_CODE code = GET_CODE (x);
180 int i;
181 const char *fmt;
183 switch (code)
185 case MEM:
186 return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
188 case CONST:
189 CASE_CONST_ANY:
190 case SYMBOL_REF:
191 case LABEL_REF:
192 return 0;
194 case REG:
195 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
196 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
197 /* The arg pointer varies if it is not a fixed register. */
198 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
199 return 0;
200 /* ??? When call-clobbered, the value is stable modulo the restore
201 that must happen after a call. This currently screws up local-alloc
202 into believing that the restore is not needed. */
203 if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
204 return 0;
205 return 1;
207 case ASM_OPERANDS:
208 if (MEM_VOLATILE_P (x))
209 return 1;
211 /* Fall through. */
213 default:
214 break;
217 fmt = GET_RTX_FORMAT (code);
218 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
219 if (fmt[i] == 'e')
221 if (rtx_unstable_p (XEXP (x, i)))
222 return 1;
224 else if (fmt[i] == 'E')
226 int j;
227 for (j = 0; j < XVECLEN (x, i); j++)
228 if (rtx_unstable_p (XVECEXP (x, i, j)))
229 return 1;
232 return 0;
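/* Illustrative sketch, not from the original sources (guarded out with
   "#if 0"): a pass that wants to evaluate a load somewhere else might use
   rtx_unstable_p to check that the address computes the same value at any
   point in the function.  MEM is a hypothetical MEM rtx from the caller.  */
#if 0
static bool
stable_readonly_mem_p (const_rtx mem)
{
  /* A read-only MEM whose address is stable yields the same value
     wherever it is evaluated within the function.  */
  return MEM_READONLY_P (mem) && !rtx_unstable_p (XEXP (mem, 0));
}
#endif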
235 /* Return 1 if X has a value that can vary even between two
236 executions of the program. 0 means X can be compared reliably
237 against certain constants or near-constants.
238 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
239 zero, we are slightly more conservative.
240 The frame pointer and the arg pointer are considered constant. */
242 bool
243 rtx_varies_p (const_rtx x, bool for_alias)
245 RTX_CODE code;
246 int i;
247 const char *fmt;
249 if (!x)
250 return 0;
252 code = GET_CODE (x);
253 switch (code)
255 case MEM:
256 return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
258 case CONST:
259 CASE_CONST_ANY:
260 case SYMBOL_REF:
261 case LABEL_REF:
262 return 0;
264 case REG:
265 /* Note that we have to test for the actual rtx used for the frame
266 and arg pointers and not just the register number in case we have
267 eliminated the frame and/or arg pointer and are using it
268 for pseudos. */
269 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
270 /* The arg pointer varies if it is not a fixed register. */
271 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
272 return 0;
273 if (x == pic_offset_table_rtx
274 /* ??? When call-clobbered, the value is stable modulo the restore
275 that must happen after a call. This currently screws up
276 local-alloc into believing that the restore is not needed, so we
277 must return 0 only if we are called from alias analysis. */
278 && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
279 return 0;
280 return 1;
282 case LO_SUM:
283 /* The operand 0 of a LO_SUM is considered constant
284 (in fact it is related specifically to operand 1)
285 during alias analysis. */
286 return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
287 || rtx_varies_p (XEXP (x, 1), for_alias);
289 case ASM_OPERANDS:
290 if (MEM_VOLATILE_P (x))
291 return 1;
293 /* Fall through. */
295 default:
296 break;
299 fmt = GET_RTX_FORMAT (code);
300 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
301 if (fmt[i] == 'e')
303 if (rtx_varies_p (XEXP (x, i), for_alias))
304 return 1;
306 else if (fmt[i] == 'E')
308 int j;
309 for (j = 0; j < XVECLEN (x, i); j++)
310 if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
311 return 1;
314 return 0;
317 /* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
318 bytes can cause a trap. MODE is the mode of the MEM (not that of X) and
319 UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
320 references on strict alignment machines. */
322 static int
323 rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
324 enum machine_mode mode, bool unaligned_mems)
326 enum rtx_code code = GET_CODE (x);
328 /* The offset must be a multiple of the mode size if we are considering
329 unaligned memory references on strict alignment machines. */
330 if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
332 HOST_WIDE_INT actual_offset = offset;
334 #ifdef SPARC_STACK_BOUNDARY_HACK
335 /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
336 the real alignment of %sp. However, when it does this, the
337 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
338 if (SPARC_STACK_BOUNDARY_HACK
339 && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
340 actual_offset -= STACK_POINTER_OFFSET;
341 #endif
343 if (actual_offset % GET_MODE_SIZE (mode) != 0)
344 return 1;
347 switch (code)
349 case SYMBOL_REF:
350 if (SYMBOL_REF_WEAK (x))
351 return 1;
352 if (!CONSTANT_POOL_ADDRESS_P (x))
354 tree decl;
355 HOST_WIDE_INT decl_size;
357 if (offset < 0)
358 return 1;
359 if (size == 0)
360 size = GET_MODE_SIZE (mode);
361 if (size == 0)
362 return offset != 0;
364 /* If the size of the access or of the symbol is unknown,
365 assume the worst. */
366 decl = SYMBOL_REF_DECL (x);
368 /* Else check that the access is in bounds. TODO: restructure
369 expr_size/tree_expr_size/int_expr_size and just use the latter. */
370 if (!decl)
371 decl_size = -1;
372 else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
373 decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
374 ? tree_to_shwi (DECL_SIZE_UNIT (decl))
375 : -1);
376 else if (TREE_CODE (decl) == STRING_CST)
377 decl_size = TREE_STRING_LENGTH (decl);
378 else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
379 decl_size = int_size_in_bytes (TREE_TYPE (decl));
380 else
381 decl_size = -1;
383 return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
386 return 0;
388 case LABEL_REF:
389 return 0;
391 case REG:
392 /* Stack references are assumed not to trap, but we need to deal with
393 nonsensical offsets. */
394 if (x == frame_pointer_rtx)
396 HOST_WIDE_INT adj_offset = offset - STARTING_FRAME_OFFSET;
397 if (size == 0)
398 size = GET_MODE_SIZE (mode);
399 if (FRAME_GROWS_DOWNWARD)
401 if (adj_offset < frame_offset || adj_offset + size - 1 >= 0)
402 return 1;
404 else
406 if (adj_offset < 0 || adj_offset + size - 1 >= frame_offset)
407 return 1;
409 return 0;
411 /* ??? Need to add a similar guard for nonsensical offsets. */
412 if (x == hard_frame_pointer_rtx
413 || x == stack_pointer_rtx
414 /* The arg pointer varies if it is not a fixed register. */
415 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
416 return 0;
417 /* All of the virtual frame registers are stack references. */
418 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
419 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
420 return 0;
421 return 1;
423 case CONST:
424 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
425 mode, unaligned_mems);
427 case PLUS:
428 /* An address is assumed not to trap if:
429 - it is the pic register plus a constant. */
430 if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
431 return 0;
433 /* - or it is an address that can't trap plus a constant integer. */
434 if (CONST_INT_P (XEXP (x, 1))
435 && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
436 size, mode, unaligned_mems))
437 return 0;
439 return 1;
441 case LO_SUM:
442 case PRE_MODIFY:
443 return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
444 mode, unaligned_mems);
446 case PRE_DEC:
447 case PRE_INC:
448 case POST_DEC:
449 case POST_INC:
450 case POST_MODIFY:
451 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
452 mode, unaligned_mems);
454 default:
455 break;
458 /* If it isn't one of the cases above, it can cause a trap. */
459 return 1;
462 /* Return nonzero if the use of X as an address in a MEM can cause a trap. */
465 rtx_addr_can_trap_p (const_rtx x)
467 return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
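/* Illustrative sketch, not from the original sources (guarded out with
   "#if 0"): clients that speculate loads typically reject volatile accesses
   and then ask whether the address itself can fault.  MEM is a hypothetical
   MEM rtx.  */
#if 0
static bool
load_can_be_speculated_p (const_rtx mem)
{
  return !MEM_VOLATILE_P (mem) && !rtx_addr_can_trap_p (XEXP (mem, 0));
}
#endif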
470 /* Return true if X is an address that is known to not be zero. */
472 bool
473 nonzero_address_p (const_rtx x)
475 const enum rtx_code code = GET_CODE (x);
477 switch (code)
479 case SYMBOL_REF:
480 return !SYMBOL_REF_WEAK (x);
482 case LABEL_REF:
483 return true;
485 case REG:
486 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
487 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
488 || x == stack_pointer_rtx
489 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
490 return true;
491 /* All of the virtual frame registers are stack references. */
492 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
493 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
494 return true;
495 return false;
497 case CONST:
498 return nonzero_address_p (XEXP (x, 0));
500 case PLUS:
501 /* Handle PIC references. */
502 if (XEXP (x, 0) == pic_offset_table_rtx
503 && CONSTANT_P (XEXP (x, 1)))
504 return true;
505 return false;
507 case PRE_MODIFY:
508 /* Similar to the above; allow positive offsets. Further, since
509 auto-inc is only allowed in memories, the register must be a
510 pointer. */
511 if (CONST_INT_P (XEXP (x, 1))
512 && INTVAL (XEXP (x, 1)) > 0)
513 return true;
514 return nonzero_address_p (XEXP (x, 0));
516 case PRE_INC:
517 /* Similarly. Further, the offset is always positive. */
518 return true;
520 case PRE_DEC:
521 case POST_DEC:
522 case POST_INC:
523 case POST_MODIFY:
524 return nonzero_address_p (XEXP (x, 0));
526 case LO_SUM:
527 return nonzero_address_p (XEXP (x, 1));
529 default:
530 break;
533 /* If it isn't one of the cases above, it might be zero. */
534 return false;
537 /* Return 1 if X refers to a memory location whose address
538 cannot be compared reliably with constant addresses,
539 or if X refers to a BLKmode memory object.
540 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
541 zero, we are slightly more conservative. */
543 bool
544 rtx_addr_varies_p (const_rtx x, bool for_alias)
546 enum rtx_code code;
547 int i;
548 const char *fmt;
550 if (x == 0)
551 return 0;
553 code = GET_CODE (x);
554 if (code == MEM)
555 return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
557 fmt = GET_RTX_FORMAT (code);
558 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
559 if (fmt[i] == 'e')
561 if (rtx_addr_varies_p (XEXP (x, i), for_alias))
562 return 1;
564 else if (fmt[i] == 'E')
566 int j;
567 for (j = 0; j < XVECLEN (x, i); j++)
568 if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
569 return 1;
571 return 0;
574 /* Return the CALL in X if there is one. */
577 get_call_rtx_from (rtx x)
579 if (INSN_P (x))
580 x = PATTERN (x);
581 if (GET_CODE (x) == PARALLEL)
582 x = XVECEXP (x, 0, 0);
583 if (GET_CODE (x) == SET)
584 x = SET_SRC (x);
585 if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
586 return x;
587 return NULL_RTX;
590 /* Return the value of the integer term in X, if one is apparent;
591 otherwise return 0.
592 Only obvious integer terms are detected.
593 This is used in cse.c with the `related_value' field. */
595 HOST_WIDE_INT
596 get_integer_term (const_rtx x)
598 if (GET_CODE (x) == CONST)
599 x = XEXP (x, 0);
601 if (GET_CODE (x) == MINUS
602 && CONST_INT_P (XEXP (x, 1)))
603 return - INTVAL (XEXP (x, 1));
604 if (GET_CODE (x) == PLUS
605 && CONST_INT_P (XEXP (x, 1)))
606 return INTVAL (XEXP (x, 1));
607 return 0;
610 /* If X is a constant, return the value sans apparent integer term;
611 otherwise return 0.
612 Only obvious integer terms are detected. */
615 get_related_value (const_rtx x)
617 if (GET_CODE (x) != CONST)
618 return 0;
619 x = XEXP (x, 0);
620 if (GET_CODE (x) == PLUS
621 && CONST_INT_P (XEXP (x, 1)))
622 return XEXP (x, 0);
623 else if (GET_CODE (x) == MINUS
624 && CONST_INT_P (XEXP (x, 1)))
625 return XEXP (x, 0);
626 return 0;
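/* Illustrative sketch, not from the original sources (guarded out with
   "#if 0"): for (const (plus (symbol_ref "x") (const_int 16))),
   get_integer_term returns 16 and get_related_value returns the SYMBOL_REF,
   so two constants built from the same base can be recognized as related.
   The helper name is hypothetical.  */
#if 0
static bool
related_constants_p (const_rtx x, const_rtx y)
{
  rtx xbase = get_related_value (x);
  rtx ybase = get_related_value (y);
  /* Related constants share a base and differ only in the integer term.  */
  return xbase != 0 && ybase != 0 && rtx_equal_p (xbase, ybase);
}
#endif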
629 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
630 to somewhere in the same object or object_block as SYMBOL. */
632 bool
633 offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
635 tree decl;
637 if (GET_CODE (symbol) != SYMBOL_REF)
638 return false;
640 if (offset == 0)
641 return true;
643 if (offset > 0)
645 if (CONSTANT_POOL_ADDRESS_P (symbol)
646 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
647 return true;
649 decl = SYMBOL_REF_DECL (symbol);
650 if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
651 return true;
654 if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
655 && SYMBOL_REF_BLOCK (symbol)
656 && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
657 && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
658 < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
659 return true;
661 return false;
664 /* Split X into a base and a constant offset, storing them in *BASE_OUT
665 and *OFFSET_OUT respectively. */
667 void
668 split_const (rtx x, rtx *base_out, rtx *offset_out)
670 if (GET_CODE (x) == CONST)
672 x = XEXP (x, 0);
673 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
675 *base_out = XEXP (x, 0);
676 *offset_out = XEXP (x, 1);
677 return;
680 *base_out = x;
681 *offset_out = const0_rtx;
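/* Illustrative sketch, not from the original sources (guarded out with
   "#if 0"): split_const is the usual way to peel a displacement off an
   address before comparing bases.  For (const (plus (symbol_ref "x")
   (const_int 8))) it stores the SYMBOL_REF in *BASE_OUT and (const_int 8)
   in *OFFSET_OUT; for anything else it stores X and const0_rtx.  */
#if 0
static bool
same_base_address_p (rtx addr1, rtx addr2)
{
  rtx base1, offset1, base2, offset2;
  split_const (addr1, &base1, &offset1);
  split_const (addr2, &base2, &offset2);
  return rtx_equal_p (base1, base2);
}
#endif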
684 /* Return the number of places FIND appears within X. If COUNT_DEST is
685 zero, we do not count occurrences inside the destination of a SET. */
688 count_occurrences (const_rtx x, const_rtx find, int count_dest)
690 int i, j;
691 enum rtx_code code;
692 const char *format_ptr;
693 int count;
695 if (x == find)
696 return 1;
698 code = GET_CODE (x);
700 switch (code)
702 case REG:
703 CASE_CONST_ANY:
704 case SYMBOL_REF:
705 case CODE_LABEL:
706 case PC:
707 case CC0:
708 return 0;
710 case EXPR_LIST:
711 count = count_occurrences (XEXP (x, 0), find, count_dest);
712 if (XEXP (x, 1))
713 count += count_occurrences (XEXP (x, 1), find, count_dest);
714 return count;
716 case MEM:
717 if (MEM_P (find) && rtx_equal_p (x, find))
718 return 1;
719 break;
721 case SET:
722 if (SET_DEST (x) == find && ! count_dest)
723 return count_occurrences (SET_SRC (x), find, count_dest);
724 break;
726 default:
727 break;
730 format_ptr = GET_RTX_FORMAT (code);
731 count = 0;
733 for (i = 0; i < GET_RTX_LENGTH (code); i++)
735 switch (*format_ptr++)
737 case 'e':
738 count += count_occurrences (XEXP (x, i), find, count_dest);
739 break;
741 case 'E':
742 for (j = 0; j < XVECLEN (x, i); j++)
743 count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
744 break;
747 return count;
751 /* Return TRUE if OP is a register or subreg of a register that
752 holds an unsigned quantity. Otherwise, return FALSE. */
754 bool
755 unsigned_reg_p (rtx op)
757 if (REG_P (op)
758 && REG_EXPR (op)
759 && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
760 return true;
762 if (GET_CODE (op) == SUBREG
763 && SUBREG_PROMOTED_SIGN (op))
764 return true;
766 return false;
770 /* Nonzero if register REG appears somewhere within IN.
771 Also works if REG is not a register; in this case it checks
772 for a subexpression of IN that is Lisp "equal" to REG. */
775 reg_mentioned_p (const_rtx reg, const_rtx in)
777 const char *fmt;
778 int i;
779 enum rtx_code code;
781 if (in == 0)
782 return 0;
784 if (reg == in)
785 return 1;
787 if (GET_CODE (in) == LABEL_REF)
788 return reg == LABEL_REF_LABEL (in);
790 code = GET_CODE (in);
792 switch (code)
794 /* Compare registers by number. */
795 case REG:
796 return REG_P (reg) && REGNO (in) == REGNO (reg);
798 /* These codes have no constituent expressions
799 and are unique. */
800 case SCRATCH:
801 case CC0:
802 case PC:
803 return 0;
805 CASE_CONST_ANY:
806 /* These are kept unique for a given value. */
807 return 0;
809 default:
810 break;
813 if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
814 return 1;
816 fmt = GET_RTX_FORMAT (code);
818 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
820 if (fmt[i] == 'E')
822 int j;
823 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
824 if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
825 return 1;
827 else if (fmt[i] == 'e'
828 && reg_mentioned_p (reg, XEXP (in, i)))
829 return 1;
831 return 0;
834 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
835 no CODE_LABEL insn. */
838 no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
840 rtx_insn *p;
841 if (beg == end)
842 return 0;
843 for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
844 if (LABEL_P (p))
845 return 0;
846 return 1;
849 /* Nonzero if register REG is used in an insn between
850 FROM_INSN and TO_INSN (exclusive of those two). */
853 reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
854 const rtx_insn *to_insn)
856 rtx_insn *insn;
858 if (from_insn == to_insn)
859 return 0;
861 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
862 if (NONDEBUG_INSN_P (insn)
863 && (reg_overlap_mentioned_p (reg, PATTERN (insn))
864 || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
865 return 1;
866 return 0;
869 /* Nonzero if the old value of X, a register, is referenced in BODY. If X
870 is entirely replaced by a new value and the only use is as a SET_DEST,
871 we do not consider it a reference. */
874 reg_referenced_p (const_rtx x, const_rtx body)
876 int i;
878 switch (GET_CODE (body))
880 case SET:
881 if (reg_overlap_mentioned_p (x, SET_SRC (body)))
882 return 1;
884 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
885 of a REG that occupies all of the REG, the insn references X if
886 it is mentioned in the destination. */
887 if (GET_CODE (SET_DEST (body)) != CC0
888 && GET_CODE (SET_DEST (body)) != PC
889 && !REG_P (SET_DEST (body))
890 && ! (GET_CODE (SET_DEST (body)) == SUBREG
891 && REG_P (SUBREG_REG (SET_DEST (body)))
892 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
893 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
894 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
895 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
896 && reg_overlap_mentioned_p (x, SET_DEST (body)))
897 return 1;
898 return 0;
900 case ASM_OPERANDS:
901 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
902 if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
903 return 1;
904 return 0;
906 case CALL:
907 case USE:
908 case IF_THEN_ELSE:
909 return reg_overlap_mentioned_p (x, body);
911 case TRAP_IF:
912 return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
914 case PREFETCH:
915 return reg_overlap_mentioned_p (x, XEXP (body, 0));
917 case UNSPEC:
918 case UNSPEC_VOLATILE:
919 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
920 if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
921 return 1;
922 return 0;
924 case PARALLEL:
925 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
926 if (reg_referenced_p (x, XVECEXP (body, 0, i)))
927 return 1;
928 return 0;
930 case CLOBBER:
931 if (MEM_P (XEXP (body, 0)))
932 if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
933 return 1;
934 return 0;
936 case COND_EXEC:
937 if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
938 return 1;
939 return reg_referenced_p (x, COND_EXEC_CODE (body));
941 default:
942 return 0;
946 /* Nonzero if register REG is set or clobbered in an insn between
947 FROM_INSN and TO_INSN (exclusive of those two). */
950 reg_set_between_p (const_rtx reg, const_rtx uncast_from_insn, const_rtx to_insn)
952 const rtx_insn *from_insn =
953 safe_as_a <const rtx_insn *> (uncast_from_insn);
954 const rtx_insn *insn;
956 if (from_insn == to_insn)
957 return 0;
959 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
960 if (INSN_P (insn) && reg_set_p (reg, insn))
961 return 1;
962 return 0;
965 /* Internals of reg_set_between_p. */
967 reg_set_p (const_rtx reg, const_rtx insn)
969 /* We can be passed an insn or part of one. If we are passed an insn,
970 check if a side-effect of the insn clobbers REG. */
971 if (INSN_P (insn)
972 && (FIND_REG_INC_NOTE (insn, reg)
973 || (CALL_P (insn)
974 && ((REG_P (reg)
975 && REGNO (reg) < FIRST_PSEUDO_REGISTER
976 && overlaps_hard_reg_set_p (regs_invalidated_by_call,
977 GET_MODE (reg), REGNO (reg)))
978 || MEM_P (reg)
979 || find_reg_fusage (insn, CLOBBER, reg)))))
980 return 1;
982 return set_of (reg, insn) != NULL_RTX;
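/* Illustrative sketch, not from the original sources (guarded out with
   "#if 0"): a client can combine the predicates above to ask whether the
   value REG holds at FROM still holds when TO is reached.  FROM and TO are
   hypothetical insns in the same basic block.  */
#if 0
static bool
reg_value_survives_p (const_rtx reg, const rtx_insn *from, const rtx_insn *to)
{
  /* The value survives if no insn strictly between FROM and TO sets or
     clobbers REG.  */
  return !reg_set_between_p (reg, from, to);
}
#endif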
985 /* Similar to reg_set_between_p, but check all registers in X. Return 0
986 only if none of them are modified between START and END. Return 1 if
987 X contains a MEM; this routine does use memory aliasing. */
990 modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
992 const enum rtx_code code = GET_CODE (x);
993 const char *fmt;
994 int i, j;
995 rtx_insn *insn;
997 if (start == end)
998 return 0;
1000 switch (code)
1002 CASE_CONST_ANY:
1003 case CONST:
1004 case SYMBOL_REF:
1005 case LABEL_REF:
1006 return 0;
1008 case PC:
1009 case CC0:
1010 return 1;
1012 case MEM:
1013 if (modified_between_p (XEXP (x, 0), start, end))
1014 return 1;
1015 if (MEM_READONLY_P (x))
1016 return 0;
1017 for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
1018 if (memory_modified_in_insn_p (x, insn))
1019 return 1;
1020 return 0;
1021 break;
1023 case REG:
1024 return reg_set_between_p (x, start, end);
1026 default:
1027 break;
1030 fmt = GET_RTX_FORMAT (code);
1031 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1033 if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
1034 return 1;
1036 else if (fmt[i] == 'E')
1037 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1038 if (modified_between_p (XVECEXP (x, i, j), start, end))
1039 return 1;
1042 return 0;
1045 /* Similar to reg_set_p, but check all registers in X. Return 0 only if none
1046 of them are modified in INSN. Return 1 if X contains a MEM; this routine
1047 does use memory aliasing. */
1050 modified_in_p (const_rtx x, const_rtx insn)
1052 const enum rtx_code code = GET_CODE (x);
1053 const char *fmt;
1054 int i, j;
1056 switch (code)
1058 CASE_CONST_ANY:
1059 case CONST:
1060 case SYMBOL_REF:
1061 case LABEL_REF:
1062 return 0;
1064 case PC:
1065 case CC0:
1066 return 1;
1068 case MEM:
1069 if (modified_in_p (XEXP (x, 0), insn))
1070 return 1;
1071 if (MEM_READONLY_P (x))
1072 return 0;
1073 if (memory_modified_in_insn_p (x, insn))
1074 return 1;
1075 return 0;
1076 break;
1078 case REG:
1079 return reg_set_p (x, insn);
1081 default:
1082 break;
1085 fmt = GET_RTX_FORMAT (code);
1086 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1088 if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
1089 return 1;
1091 else if (fmt[i] == 'E')
1092 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1093 if (modified_in_p (XVECEXP (x, i, j), insn))
1094 return 1;
1097 return 0;
1100 /* Helper function for set_of. */
1101 struct set_of_data
1103 const_rtx found;
1104 const_rtx pat;
1107 static void
1108 set_of_1 (rtx x, const_rtx pat, void *data1)
1110 struct set_of_data *const data = (struct set_of_data *) (data1);
1111 if (rtx_equal_p (x, data->pat)
1112 || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
1113 data->found = pat;
1116 /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
1117 (either directly or via STRICT_LOW_PART and similar modifiers). */
1118 const_rtx
1119 set_of (const_rtx pat, const_rtx insn)
1121 struct set_of_data data;
1122 data.found = NULL_RTX;
1123 data.pat = pat;
1124 note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1125 return data.found;
1128 /* Add all hard registers in X to *PSET. */
1129 void
1130 find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
1132 subrtx_iterator::array_type array;
1133 FOR_EACH_SUBRTX (iter, array, x, NONCONST)
1135 const_rtx x = *iter;
1136 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
1137 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1141 /* This function, called through note_stores, collects sets and
1142 clobbers of hard registers in a HARD_REG_SET, which is pointed to
1143 by DATA. */
1144 void
1145 record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
1147 HARD_REG_SET *pset = (HARD_REG_SET *)data;
1148 if (REG_P (x) && HARD_REGISTER_P (x))
1149 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1152 /* Examine INSN, and compute the set of hard registers written by it.
1153 Store it in *PSET. Should only be called after reload. */
1154 void
1155 find_all_hard_reg_sets (const_rtx insn, HARD_REG_SET *pset, bool implicit)
1157 rtx link;
1159 CLEAR_HARD_REG_SET (*pset);
1160 note_stores (PATTERN (insn), record_hard_reg_sets, pset);
1161 if (CALL_P (insn))
1163 if (implicit)
1164 IOR_HARD_REG_SET (*pset, call_used_reg_set);
1166 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1167 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1169 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1170 if (REG_NOTE_KIND (link) == REG_INC)
1171 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1174 /* Like record_hard_reg_sets, but called through note_uses. */
1175 void
1176 record_hard_reg_uses (rtx *px, void *data)
1178 find_all_hard_regs (*px, (HARD_REG_SET *) data);
1181 /* Given an INSN, return a SET expression if this insn has only a single SET.
1182 It may also have CLOBBERs, USEs, or SETs whose output
1183 will not be used, which we ignore. */
1186 single_set_2 (const rtx_insn *insn, const_rtx pat)
1188 rtx set = NULL;
1189 int set_verified = 1;
1190 int i;
1192 if (GET_CODE (pat) == PARALLEL)
1194 for (i = 0; i < XVECLEN (pat, 0); i++)
1196 rtx sub = XVECEXP (pat, 0, i);
1197 switch (GET_CODE (sub))
1199 case USE:
1200 case CLOBBER:
1201 break;
1203 case SET:
1204 /* We can consider insns having multiple sets, where all
1205 but one are dead as single set insns. In common case
1206 only single set is present in the pattern so we want
1207 to avoid checking for REG_UNUSED notes unless necessary.
1209 When we reach set first time, we just expect this is
1210 the single set we are looking for and only when more
1211 sets are found in the insn, we check them. */
1212 if (!set_verified)
1214 if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1215 && !side_effects_p (set))
1216 set = NULL;
1217 else
1218 set_verified = 1;
1220 if (!set)
1221 set = sub, set_verified = 0;
1222 else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1223 || side_effects_p (sub))
1224 return NULL_RTX;
1225 break;
1227 default:
1228 return NULL_RTX;
1232 return set;
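/* Illustrative sketch, not from the original sources (guarded out with
   "#if 0"): most passes reach single_set_2 through the single_set wrapper
   in rtl.h and then inspect the SET's operands.  INSN is a hypothetical
   insn.  */
#if 0
static bool
simple_reg_copy_p (rtx_insn *insn)
{
  rtx set = single_set (insn);
  /* NULL means no SET, or several SETs whose results are all live.  */
  if (!set)
    return false;
  return REG_P (SET_DEST (set)) && REG_P (SET_SRC (set));
}
#endif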
1235 /* Given an INSN, return nonzero if it has more than one SET, else return
1236 zero. */
1239 multiple_sets (const_rtx insn)
1241 int found;
1242 int i;
1244 /* INSN must be an insn. */
1245 if (! INSN_P (insn))
1246 return 0;
1248 /* Only a PARALLEL can have multiple SETs. */
1249 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1251 for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1252 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1254 /* If we have already found a SET, then return now. */
1255 if (found)
1256 return 1;
1257 else
1258 found = 1;
1262 /* Either zero or one SET. */
1263 return 0;
1266 /* Return nonzero if the destination of SET equals the source
1267 and there are no side effects. */
1270 set_noop_p (const_rtx set)
1272 rtx src = SET_SRC (set);
1273 rtx dst = SET_DEST (set);
1275 if (dst == pc_rtx && src == pc_rtx)
1276 return 1;
1278 if (MEM_P (dst) && MEM_P (src))
1279 return rtx_equal_p (dst, src) && !side_effects_p (dst);
1281 if (GET_CODE (dst) == ZERO_EXTRACT)
1282 return rtx_equal_p (XEXP (dst, 0), src)
1283 && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1284 && !side_effects_p (src);
1286 if (GET_CODE (dst) == STRICT_LOW_PART)
1287 dst = XEXP (dst, 0);
1289 if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1291 if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
1292 return 0;
1293 src = SUBREG_REG (src);
1294 dst = SUBREG_REG (dst);
1297 /* It is a NOOP if destination overlaps with selected src vector
1298 elements. */
1299 if (GET_CODE (src) == VEC_SELECT
1300 && REG_P (XEXP (src, 0)) && REG_P (dst)
1301 && HARD_REGISTER_P (XEXP (src, 0))
1302 && HARD_REGISTER_P (dst))
1304 int i;
1305 rtx par = XEXP (src, 1);
1306 rtx src0 = XEXP (src, 0);
1307 int c0 = INTVAL (XVECEXP (par, 0, 0));
1308 HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
1310 for (i = 1; i < XVECLEN (par, 0); i++)
1311 if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
1312 return 0;
1313 return
1314 simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
1315 offset, GET_MODE (dst)) == (int) REGNO (dst);
1318 return (REG_P (src) && REG_P (dst)
1319 && REGNO (src) == REGNO (dst));
1322 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1323 value to itself. */
1326 noop_move_p (const_rtx insn)
1328 rtx pat = PATTERN (insn);
1330 if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1331 return 1;
1333 /* Insns carrying these notes are useful later on. */
1334 if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1335 return 0;
1337 /* Check the code to be executed for COND_EXEC. */
1338 if (GET_CODE (pat) == COND_EXEC)
1339 pat = COND_EXEC_CODE (pat);
1341 if (GET_CODE (pat) == SET && set_noop_p (pat))
1342 return 1;
1344 if (GET_CODE (pat) == PARALLEL)
1346 int i;
1347 /* If nothing but SETs of registers to themselves,
1348 this insn can also be deleted. */
1349 for (i = 0; i < XVECLEN (pat, 0); i++)
1351 rtx tem = XVECEXP (pat, 0, i);
1353 if (GET_CODE (tem) == USE
1354 || GET_CODE (tem) == CLOBBER)
1355 continue;
1357 if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1358 return 0;
1361 return 1;
1363 return 0;
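/* Illustrative sketch, not from the original sources (guarded out with
   "#if 0"): cleanup passes use noop_move_p to spot moves whose deletion
   cannot change program behavior.  INSN is a hypothetical insn;
   delete_insn is declared in rtl.h.  */
#if 0
static void
maybe_delete_noop_move (rtx_insn *insn)
{
  if (NONJUMP_INSN_P (insn) && noop_move_p (insn))
    delete_insn (insn);
}
#endif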
1367 /* Return nonzero if register in range [REGNO, ENDREGNO)
1368 appears either explicitly or implicitly in X
1369 other than being stored into.
1371 References contained within the substructure at LOC do not count.
1372 LOC may be zero, meaning don't ignore anything. */
1375 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1376 rtx *loc)
1378 int i;
1379 unsigned int x_regno;
1380 RTX_CODE code;
1381 const char *fmt;
1383 repeat:
1384 /* The contents of a REG_NONNEG note is always zero, so we must come here
1385 upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
1386 if (x == 0)
1387 return 0;
1389 code = GET_CODE (x);
1391 switch (code)
1393 case REG:
1394 x_regno = REGNO (x);
1396 /* If we are modifying the stack, frame, or argument pointer, it will
1397 clobber a virtual register. In fact, we could be more precise,
1398 but it isn't worth it. */
1399 if ((x_regno == STACK_POINTER_REGNUM
1400 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1401 || x_regno == ARG_POINTER_REGNUM
1402 #endif
1403 || x_regno == FRAME_POINTER_REGNUM)
1404 && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1405 return 1;
1407 return endregno > x_regno && regno < END_REGNO (x);
1409 case SUBREG:
1410 /* If this is a SUBREG of a hard reg, we can see exactly which
1411 registers are being modified. Otherwise, handle normally. */
1412 if (REG_P (SUBREG_REG (x))
1413 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1415 unsigned int inner_regno = subreg_regno (x);
1416 unsigned int inner_endregno
1417 = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1418 ? subreg_nregs (x) : 1);
1420 return endregno > inner_regno && regno < inner_endregno;
1422 break;
1424 case CLOBBER:
1425 case SET:
1426 if (&SET_DEST (x) != loc
1427 /* Note setting a SUBREG counts as referring to the REG it is in for
1428 a pseudo but not for hard registers since we can
1429 treat each word individually. */
1430 && ((GET_CODE (SET_DEST (x)) == SUBREG
1431 && loc != &SUBREG_REG (SET_DEST (x))
1432 && REG_P (SUBREG_REG (SET_DEST (x)))
1433 && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1434 && refers_to_regno_p (regno, endregno,
1435 SUBREG_REG (SET_DEST (x)), loc))
1436 || (!REG_P (SET_DEST (x))
1437 && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1438 return 1;
1440 if (code == CLOBBER || loc == &SET_SRC (x))
1441 return 0;
1442 x = SET_SRC (x);
1443 goto repeat;
1445 default:
1446 break;
1449 /* X does not match, so try its subexpressions. */
1451 fmt = GET_RTX_FORMAT (code);
1452 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1454 if (fmt[i] == 'e' && loc != &XEXP (x, i))
1456 if (i == 0)
1458 x = XEXP (x, 0);
1459 goto repeat;
1461 else
1462 if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1463 return 1;
1465 else if (fmt[i] == 'E')
1467 int j;
1468 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1469 if (loc != &XVECEXP (x, i, j)
1470 && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1471 return 1;
1474 return 0;
1477 /* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
1478 we check if any register number in X conflicts with the relevant register
1479 numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
1480 contains a MEM (we don't bother checking for memory addresses that can't
1481 conflict because we expect this to be a rare case). */
1484 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1486 unsigned int regno, endregno;
1488 /* If either argument is a constant, then modifying X can not
1489 affect IN. Here we look at IN, we can profitably combine
1490 CONSTANT_P (x) with the switch statement below. */
1491 if (CONSTANT_P (in))
1492 return 0;
1494 recurse:
1495 switch (GET_CODE (x))
1497 case STRICT_LOW_PART:
1498 case ZERO_EXTRACT:
1499 case SIGN_EXTRACT:
1500 /* Overly conservative. */
1501 x = XEXP (x, 0);
1502 goto recurse;
1504 case SUBREG:
1505 regno = REGNO (SUBREG_REG (x));
1506 if (regno < FIRST_PSEUDO_REGISTER)
1507 regno = subreg_regno (x);
1508 endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1509 ? subreg_nregs (x) : 1);
1510 goto do_reg;
1512 case REG:
1513 regno = REGNO (x);
1514 endregno = END_REGNO (x);
1515 do_reg:
1516 return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1518 case MEM:
1520 const char *fmt;
1521 int i;
1523 if (MEM_P (in))
1524 return 1;
1526 fmt = GET_RTX_FORMAT (GET_CODE (in));
1527 for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1528 if (fmt[i] == 'e')
1530 if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1531 return 1;
1533 else if (fmt[i] == 'E')
1535 int j;
1536 for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1537 if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1538 return 1;
1541 return 0;
1544 case SCRATCH:
1545 case PC:
1546 case CC0:
1547 return reg_mentioned_p (x, in);
1549 case PARALLEL:
1551 int i;
1553 /* If any register in here refers to it we return true. */
1554 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1555 if (XEXP (XVECEXP (x, 0, i), 0) != 0
1556 && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1557 return 1;
1558 return 0;
1561 default:
1562 gcc_assert (CONSTANT_P (x));
1563 return 0;
1567 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1568 (X would be the pattern of an insn). DATA is an arbitrary pointer,
1569 ignored by note_stores, but passed to FUN.
1571 FUN receives three arguments:
1572 1. the REG, MEM, CC0 or PC being stored in or clobbered,
1573 2. the SET or CLOBBER rtx that does the store,
1574 3. the pointer DATA provided to note_stores.
1576 If the item being stored in or clobbered is a SUBREG of a hard register,
1577 the SUBREG will be passed. */
1579 void
1580 note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
1582 int i;
1584 if (GET_CODE (x) == COND_EXEC)
1585 x = COND_EXEC_CODE (x);
1587 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1589 rtx dest = SET_DEST (x);
1591 while ((GET_CODE (dest) == SUBREG
1592 && (!REG_P (SUBREG_REG (dest))
1593 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1594 || GET_CODE (dest) == ZERO_EXTRACT
1595 || GET_CODE (dest) == STRICT_LOW_PART)
1596 dest = XEXP (dest, 0);
1598 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1599 each of whose first operand is a register. */
1600 if (GET_CODE (dest) == PARALLEL)
1602 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1603 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1604 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1606 else
1607 (*fun) (dest, x, data);
1610 else if (GET_CODE (x) == PARALLEL)
1611 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1612 note_stores (XVECEXP (x, 0, i), fun, data);
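/* Illustrative sketch, not from the original sources (guarded out with
   "#if 0"): a typical note_stores client passes a callback with the
   signature documented above and threads its state through DATA, much as
   record_hard_reg_sets does.  The struct and helper names are
   hypothetical.  */
#if 0
struct reg_store_search { rtx reg; bool found; };

static void
note_reg_stored (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
{
  struct reg_store_search *search = (struct reg_store_search *) data;
  if (REG_P (dest) && REGNO (dest) == REGNO (search->reg))
    search->found = true;
}

static bool
insn_stores_reg_p (rtx_insn *insn, rtx reg)
{
  struct reg_store_search search;
  search.reg = reg;
  search.found = false;
  note_stores (PATTERN (insn), note_reg_stored, &search);
  return search.found;
}
#endif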
1615 /* Like note_stores, but call FUN for each expression that is being
1616 referenced in PBODY, a pointer to the PATTERN of an insn. We only call
1617 FUN for each expression, not any interior subexpressions. FUN receives a
1618 pointer to the expression and the DATA passed to this function.
1620 Note that this is not quite the same test as that done in reg_referenced_p
1621 since that considers something as being referenced if it is being
1622 partially set, while we do not. */
1624 void
1625 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1627 rtx body = *pbody;
1628 int i;
1630 switch (GET_CODE (body))
1632 case COND_EXEC:
1633 (*fun) (&COND_EXEC_TEST (body), data);
1634 note_uses (&COND_EXEC_CODE (body), fun, data);
1635 return;
1637 case PARALLEL:
1638 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1639 note_uses (&XVECEXP (body, 0, i), fun, data);
1640 return;
1642 case SEQUENCE:
1643 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1644 note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1645 return;
1647 case USE:
1648 (*fun) (&XEXP (body, 0), data);
1649 return;
1651 case ASM_OPERANDS:
1652 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1653 (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
1654 return;
1656 case TRAP_IF:
1657 (*fun) (&TRAP_CONDITION (body), data);
1658 return;
1660 case PREFETCH:
1661 (*fun) (&XEXP (body, 0), data);
1662 return;
1664 case UNSPEC:
1665 case UNSPEC_VOLATILE:
1666 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1667 (*fun) (&XVECEXP (body, 0, i), data);
1668 return;
1670 case CLOBBER:
1671 if (MEM_P (XEXP (body, 0)))
1672 (*fun) (&XEXP (XEXP (body, 0), 0), data);
1673 return;
1675 case SET:
1677 rtx dest = SET_DEST (body);
1679 /* For sets we replace everything in source plus registers in memory
1680 expression in store and operands of a ZERO_EXTRACT. */
1681 (*fun) (&SET_SRC (body), data);
1683 if (GET_CODE (dest) == ZERO_EXTRACT)
1685 (*fun) (&XEXP (dest, 1), data);
1686 (*fun) (&XEXP (dest, 2), data);
1689 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
1690 dest = XEXP (dest, 0);
1692 if (MEM_P (dest))
1693 (*fun) (&XEXP (dest, 0), data);
1695 return;
1697 default:
1698 /* All the other possibilities never store. */
1699 (*fun) (pbody, data);
1700 return;
1704 /* Return nonzero if X's old contents don't survive after INSN.
1705 This will be true if X is (cc0) or if X is a register and
1706 X dies in INSN or because INSN entirely sets X.
1708 "Entirely set" means set directly and not through a SUBREG, or
1709 ZERO_EXTRACT, so no trace of the old contents remains.
1710 Likewise, REG_INC does not count.
1712 REG may be a hard or pseudo reg. Renumbering is not taken into account,
1713 but for this use that makes no difference, since regs don't overlap
1714 during their lifetimes. Therefore, this function may be used
1715 at any time after deaths have been computed.
1717 If REG is a hard reg that occupies multiple machine registers, this
1718 function will only return 1 if each of those registers will be replaced
1719 by INSN. */
1722 dead_or_set_p (const_rtx insn, const_rtx x)
1724 unsigned int regno, end_regno;
1725 unsigned int i;
1727 /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
1728 if (GET_CODE (x) == CC0)
1729 return 1;
1731 gcc_assert (REG_P (x));
1733 regno = REGNO (x);
1734 end_regno = END_REGNO (x);
1735 for (i = regno; i < end_regno; i++)
1736 if (! dead_or_set_regno_p (insn, i))
1737 return 0;
1739 return 1;
1742 /* Return TRUE iff DEST is a register or subreg of a register and
1743 doesn't change the number of words of the inner register, and any
1744 part of the register is TEST_REGNO. */
1746 static bool
1747 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
1749 unsigned int regno, endregno;
1751 if (GET_CODE (dest) == SUBREG
1752 && (((GET_MODE_SIZE (GET_MODE (dest))
1753 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
1754 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
1755 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
1756 dest = SUBREG_REG (dest);
1758 if (!REG_P (dest))
1759 return false;
1761 regno = REGNO (dest);
1762 endregno = END_REGNO (dest);
1763 return (test_regno >= regno && test_regno < endregno);
1766 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
1767 any member matches the covers_regno_no_parallel_p criteria. */
1769 static bool
1770 covers_regno_p (const_rtx dest, unsigned int test_regno)
1772 if (GET_CODE (dest) == PARALLEL)
1774 /* Some targets place small structures in registers for return
1775 values of functions, and those registers are wrapped in
1776 PARALLELs that we may see as the destination of a SET. */
1777 int i;
1779 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1781 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
1782 if (inner != NULL_RTX
1783 && covers_regno_no_parallel_p (inner, test_regno))
1784 return true;
1787 return false;
1789 else
1790 return covers_regno_no_parallel_p (dest, test_regno);
1793 /* Utility function for dead_or_set_p to check an individual register. */
1796 dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
1798 const_rtx pattern;
1800 /* See if there is a death note for something that includes TEST_REGNO. */
1801 if (find_regno_note (insn, REG_DEAD, test_regno))
1802 return 1;
1804 if (CALL_P (insn)
1805 && find_regno_fusage (insn, CLOBBER, test_regno))
1806 return 1;
1808 pattern = PATTERN (insn);
1810 /* If a COND_EXEC is not executed, the value survives. */
1811 if (GET_CODE (pattern) == COND_EXEC)
1812 return 0;
1814 if (GET_CODE (pattern) == SET)
1815 return covers_regno_p (SET_DEST (pattern), test_regno);
1816 else if (GET_CODE (pattern) == PARALLEL)
1818 int i;
1820 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
1822 rtx body = XVECEXP (pattern, 0, i);
1824 if (GET_CODE (body) == COND_EXEC)
1825 body = COND_EXEC_CODE (body);
1827 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
1828 && covers_regno_p (SET_DEST (body), test_regno))
1829 return 1;
1833 return 0;
1836 /* Return the reg-note of kind KIND in insn INSN, if there is one.
1837 If DATUM is nonzero, look for one whose datum is DATUM. */
1840 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
1842 rtx link;
1844 gcc_checking_assert (insn);
1846 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1847 if (! INSN_P (insn))
1848 return 0;
1849 if (datum == 0)
1851 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1852 if (REG_NOTE_KIND (link) == kind)
1853 return link;
1854 return 0;
1857 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1858 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
1859 return link;
1860 return 0;
1863 /* Return the reg-note of kind KIND in insn INSN which applies to register
1864 number REGNO, if any. Return 0 if there is no such reg-note. Note that
1865 the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
1866 it might be the case that the note overlaps REGNO. */
1869 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
1871 rtx link;
1873 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1874 if (! INSN_P (insn))
1875 return 0;
1877 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1878 if (REG_NOTE_KIND (link) == kind
1879 /* Verify that it is a register, so that scratch and MEM won't cause a
1880 problem here. */
1881 && REG_P (XEXP (link, 0))
1882 && REGNO (XEXP (link, 0)) <= regno
1883 && END_REGNO (XEXP (link, 0)) > regno)
1884 return link;
1885 return 0;
1888 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
1889 has such a note. */
1892 find_reg_equal_equiv_note (const_rtx insn)
1894 rtx link;
1896 if (!INSN_P (insn))
1897 return 0;
1899 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1900 if (REG_NOTE_KIND (link) == REG_EQUAL
1901 || REG_NOTE_KIND (link) == REG_EQUIV)
1903 /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
1904 insns that have multiple sets. Checking single_set to
1905 make sure of this is not the proper check, as explained
1906 in the comment in set_unique_reg_note.
1908 This should be changed into an assert. */
1909 if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
1910 return 0;
1911 return link;
1913 return NULL;
1916 /* Check whether INSN is a single_set whose source is known to be
1917 equivalent to a constant. Return that constant if so, otherwise
1918 return null. */
1921 find_constant_src (const rtx_insn *insn)
1923 rtx note, set, x;
1925 set = single_set (insn);
1926 if (set)
1928 x = avoid_constant_pool_reference (SET_SRC (set));
1929 if (CONSTANT_P (x))
1930 return x;
1933 note = find_reg_equal_equiv_note (insn);
1934 if (note && CONSTANT_P (XEXP (note, 0)))
1935 return XEXP (note, 0);
1937 return NULL_RTX;
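/* Illustrative sketch, not from the original sources (guarded out with
   "#if 0"): callers normally probe the note list through find_reg_note
   rather than walking REG_NOTES by hand.  INSN is a hypothetical insn.  */
#if 0
static bool
insn_has_equiv_note_p (const_rtx insn)
{
  return find_reg_note (insn, REG_EQUIV, NULL_RTX) != NULL_RTX;
}
#endif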
1940 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
1941 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1944 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
1946 /* If it's not a CALL_INSN, it can't possibly have a
1947 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
1948 if (!CALL_P (insn))
1949 return 0;
1951 gcc_assert (datum);
1953 if (!REG_P (datum))
1955 rtx link;
1957 for (link = CALL_INSN_FUNCTION_USAGE (insn);
1958 link;
1959 link = XEXP (link, 1))
1960 if (GET_CODE (XEXP (link, 0)) == code
1961 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
1962 return 1;
1964 else
1966 unsigned int regno = REGNO (datum);
1968 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1969 to pseudo registers, so don't bother checking. */
1971 if (regno < FIRST_PSEUDO_REGISTER)
1973 unsigned int end_regno = END_HARD_REGNO (datum);
1974 unsigned int i;
1976 for (i = regno; i < end_regno; i++)
1977 if (find_regno_fusage (insn, code, i))
1978 return 1;
1982 return 0;
1985 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
1986 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1989 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
1991 rtx link;
1993 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1994 to pseudo registers, so don't bother checking. */
1996 if (regno >= FIRST_PSEUDO_REGISTER
1997 || !CALL_P (insn) )
1998 return 0;
2000 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2002 rtx op, reg;
2004 if (GET_CODE (op = XEXP (link, 0)) == code
2005 && REG_P (reg = XEXP (op, 0))
2006 && REGNO (reg) <= regno
2007 && END_HARD_REGNO (reg) > regno)
2008 return 1;
2011 return 0;
2015 /* Return true if KIND is an integer REG_NOTE. */
2017 static bool
2018 int_reg_note_p (enum reg_note kind)
2020 return kind == REG_BR_PROB;
2023 /* Allocate a register note with kind KIND and datum DATUM. LIST is
2024 stored as the pointer to the next register note. */
2027 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2029 rtx note;
2031 gcc_checking_assert (!int_reg_note_p (kind));
2032 switch (kind)
2034 case REG_CC_SETTER:
2035 case REG_CC_USER:
2036 case REG_LABEL_TARGET:
2037 case REG_LABEL_OPERAND:
2038 case REG_TM:
2039 /* These types of register notes use an INSN_LIST rather than an
2040 EXPR_LIST, so that copying is done right and dumps look
2041 better. */
2042 note = alloc_INSN_LIST (datum, list);
2043 PUT_REG_NOTE_KIND (note, kind);
2044 break;
2046 default:
2047 note = alloc_EXPR_LIST (kind, datum, list);
2048 break;
2051 return note;
2054 /* Add register note with kind KIND and datum DATUM to INSN. */
2056 void
2057 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2059 REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
2062 /* Add an integer register note with kind KIND and datum DATUM to INSN. */
2064 void
2065 add_int_reg_note (rtx insn, enum reg_note kind, int datum)
2067 gcc_checking_assert (int_reg_note_p (kind));
2068 REG_NOTES (insn) = gen_rtx_INT_LIST ((enum machine_mode) kind,
2069 datum, REG_NOTES (insn));
2072 /* Add a register note like NOTE to INSN. */
2074 void
2075 add_shallow_copy_of_reg_note (rtx insn, rtx note)
2077 if (GET_CODE (note) == INT_LIST)
2078 add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2079 else
2080 add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2083 /* Remove register note NOTE from the REG_NOTES of INSN. */
2085 void
2086 remove_note (rtx insn, const_rtx note)
2088 rtx link;
2090 if (note == NULL_RTX)
2091 return;
2093 if (REG_NOTES (insn) == note)
2094 REG_NOTES (insn) = XEXP (note, 1);
2095 else
2096 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2097 if (XEXP (link, 1) == note)
2099 XEXP (link, 1) = XEXP (note, 1);
2100 break;
2103 switch (REG_NOTE_KIND (note))
2105 case REG_EQUAL:
2106 case REG_EQUIV:
2107 df_notes_rescan (as_a <rtx_insn *> (insn));
2108 break;
2109 default:
2110 break;
2114 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes. */
2116 void
2117 remove_reg_equal_equiv_notes (rtx insn)
2119 rtx *loc;
2121 loc = &REG_NOTES (insn);
2122 while (*loc)
2124 enum reg_note kind = REG_NOTE_KIND (*loc);
2125 if (kind == REG_EQUAL || kind == REG_EQUIV)
2126 *loc = XEXP (*loc, 1);
2127 else
2128 loc = &XEXP (*loc, 1);
2132 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO. */
2134 void
2135 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2137 df_ref eq_use;
2139 if (!df)
2140 return;
2142 /* This loop is a little tricky. We cannot just go down the chain because
2143 it is being modified by some actions in the loop. So we just iterate
2144 over the head. We plan to drain the list anyway. */
2145 while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2147 rtx_insn *insn = DF_REF_INSN (eq_use);
2148 rtx note = find_reg_equal_equiv_note (insn);
2150 /* This assert is generally triggered when someone deletes a REG_EQUAL
2151 or REG_EQUIV note by hacking the list manually rather than calling
2152 remove_note. */
2153 gcc_assert (note);
2155 remove_note (insn, note);
2159 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2160 return 1 if it is found. A simple equality test is used to determine if
2161 NODE matches. */
2164 in_expr_list_p (const_rtx listp, const_rtx node)
2166 const_rtx x;
2168 for (x = listp; x; x = XEXP (x, 1))
2169 if (node == XEXP (x, 0))
2170 return 1;
2172 return 0;
2175 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2176 remove that entry from the list if it is found.
2178 A simple equality test is used to determine if NODE matches. */
2180 void
2181 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2183 rtx_expr_list *temp = *listp;
2184 rtx prev = NULL_RTX;
2186 while (temp)
2188 if (node == temp->element ())
2190 /* Splice the node out of the list. */
2191 if (prev)
2192 XEXP (prev, 1) = temp->next ();
2193 else
2194 *listp = temp->next ();
2196 return;
2199 prev = temp;
2200 temp = temp->next ();
2204 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2205 remove that entry from the list if it is found.
2207 A simple equality test is used to determine if NODE matches. */
2209 void
2210 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2212 rtx_insn_list *temp = *listp;
2213 rtx prev = NULL;
2215 while (temp)
2217 if (node == temp->insn ())
2219 /* Splice the node out of the list. */
2220 if (prev)
2221 XEXP (prev, 1) = temp->next ();
2222 else
2223 *listp = temp->next ();
2225 return;
2228 prev = temp;
2229 temp = temp->next ();
2233 /* Nonzero if X contains any volatile instructions. These are instructions
2234 which may cause unpredictable machine state, and thus no
2235 instructions or register uses should be moved or combined across them.
2236 This includes only volatile asms and UNSPEC_VOLATILE instructions. */
2239 volatile_insn_p (const_rtx x)
2241 const RTX_CODE code = GET_CODE (x);
2242 switch (code)
2244 case LABEL_REF:
2245 case SYMBOL_REF:
2246 case CONST:
2247 CASE_CONST_ANY:
2248 case CC0:
2249 case PC:
2250 case REG:
2251 case SCRATCH:
2252 case CLOBBER:
2253 case ADDR_VEC:
2254 case ADDR_DIFF_VEC:
2255 case CALL:
2256 case MEM:
2257 return 0;
2259 case UNSPEC_VOLATILE:
2260 return 1;
2262 case ASM_INPUT:
2263 case ASM_OPERANDS:
2264 if (MEM_VOLATILE_P (x))
2265 return 1;
2267 default:
2268 break;
2271 /* Recursively scan the operands of this expression. */
2274 const char *const fmt = GET_RTX_FORMAT (code);
2275 int i;
2277 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2279 if (fmt[i] == 'e')
2281 if (volatile_insn_p (XEXP (x, i)))
2282 return 1;
2284 else if (fmt[i] == 'E')
2286 int j;
2287 for (j = 0; j < XVECLEN (x, i); j++)
2288 if (volatile_insn_p (XVECEXP (x, i, j)))
2289 return 1;
2293 return 0;
2296 /* Nonzero if X contains any volatile memory references
2297 UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions. */
2300 volatile_refs_p (const_rtx x)
2302 const RTX_CODE code = GET_CODE (x);
2303 switch (code)
2305 case LABEL_REF:
2306 case SYMBOL_REF:
2307 case CONST:
2308 CASE_CONST_ANY:
2309 case CC0:
2310 case PC:
2311 case REG:
2312 case SCRATCH:
2313 case CLOBBER:
2314 case ADDR_VEC:
2315 case ADDR_DIFF_VEC:
2316 return 0;
2318 case UNSPEC_VOLATILE:
2319 return 1;
2321 case MEM:
2322 case ASM_INPUT:
2323 case ASM_OPERANDS:
2324 if (MEM_VOLATILE_P (x))
2325 return 1;
2327 default:
2328 break;
2331 /* Recursively scan the operands of this expression. */
2334 const char *const fmt = GET_RTX_FORMAT (code);
2335 int i;
2337 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2339 if (fmt[i] == 'e')
2341 if (volatile_refs_p (XEXP (x, i)))
2342 return 1;
2344 else if (fmt[i] == 'E')
2346 int j;
2347 for (j = 0; j < XVECLEN (x, i); j++)
2348 if (volatile_refs_p (XVECEXP (x, i, j)))
2349 return 1;
2353 return 0;
2356 /* Similar to above, except that it also rejects register pre- and post-
2357 incrementing. */
2360 side_effects_p (const_rtx x)
2362 const RTX_CODE code = GET_CODE (x);
2363 switch (code)
2365 case LABEL_REF:
2366 case SYMBOL_REF:
2367 case CONST:
2368 CASE_CONST_ANY:
2369 case CC0:
2370 case PC:
2371 case REG:
2372 case SCRATCH:
2373 case ADDR_VEC:
2374 case ADDR_DIFF_VEC:
2375 case VAR_LOCATION:
2376 return 0;
2378 case CLOBBER:
2379 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2380 when some combination can't be done. If we see one, don't think
2381 that we can simplify the expression. */
2382 return (GET_MODE (x) != VOIDmode);
2384 case PRE_INC:
2385 case PRE_DEC:
2386 case POST_INC:
2387 case POST_DEC:
2388 case PRE_MODIFY:
2389 case POST_MODIFY:
2390 case CALL:
2391 case UNSPEC_VOLATILE:
2392 return 1;
2394 case MEM:
2395 case ASM_INPUT:
2396 case ASM_OPERANDS:
2397 if (MEM_VOLATILE_P (x))
2398 return 1;
2400 default:
2401 break;
2404 /* Recursively scan the operands of this expression. */
2407 const char *fmt = GET_RTX_FORMAT (code);
2408 int i;
2410 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2412 if (fmt[i] == 'e')
2414 if (side_effects_p (XEXP (x, i)))
2415 return 1;
2417 else if (fmt[i] == 'E')
2419 int j;
2420 for (j = 0; j < XVECLEN (x, i); j++)
2421 if (side_effects_p (XVECEXP (x, i, j)))
2422 return 1;
2426 return 0;
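/* Illustrative sketch (editorial addition): how the predicates above are
   typically used.  side_effects_p rejects everything volatile_insn_p and
   volatile_refs_p reject, plus auto-increments, calls and non-VOID clobbers,
   so it is the usual single test before duplicating an expression.  The
   helper name is made up for the example.  */

static bool
safe_to_copy_expr_p (const_rtx x)
{
  return !side_effects_p (x);
}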
2429 /* Return nonzero if evaluating rtx X might cause a trap.
2430 FLAGS controls how to consider MEMs. A nonzero value means the context
2431 of the access may have changed from the original, such that the
2432 address may have become invalid. */
2435 may_trap_p_1 (const_rtx x, unsigned flags)
2437 int i;
2438 enum rtx_code code;
2439 const char *fmt;
2441 /* We make no distinction currently, but this function is part of
2442 the internal target-hooks ABI so we keep the parameter as
2443 "unsigned flags". */
2444 bool code_changed = flags != 0;
2446 if (x == 0)
2447 return 0;
2448 code = GET_CODE (x);
2449 switch (code)
2451 /* Handle these cases quickly. */
2452 CASE_CONST_ANY:
2453 case SYMBOL_REF:
2454 case LABEL_REF:
2455 case CONST:
2456 case PC:
2457 case CC0:
2458 case REG:
2459 case SCRATCH:
2460 return 0;
2462 case UNSPEC:
2463 return targetm.unspec_may_trap_p (x, flags);
2465 case UNSPEC_VOLATILE:
2466 case ASM_INPUT:
2467 case TRAP_IF:
2468 return 1;
2470 case ASM_OPERANDS:
2471 return MEM_VOLATILE_P (x);
2473 /* Memory ref can trap unless it's a static var or a stack slot. */
2474 case MEM:
2475 /* Recognize specific pattern of stack checking probes. */
2476 if (flag_stack_check
2477 && MEM_VOLATILE_P (x)
2478 && XEXP (x, 0) == stack_pointer_rtx)
2479 return 1;
2480 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2481 reference; moving it out of context (such as when moving code
2482 during optimization) might cause its address to become invalid. */
2483 code_changed
2484 || !MEM_NOTRAP_P (x))
2486 HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
2487 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2488 GET_MODE (x), code_changed);
2491 return 0;
2493 /* Division by a non-constant might trap. */
2494 case DIV:
2495 case MOD:
2496 case UDIV:
2497 case UMOD:
2498 if (HONOR_SNANS (GET_MODE (x)))
2499 return 1;
2500 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2501 return flag_trapping_math;
2502 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2503 return 1;
2504 break;
2506 case EXPR_LIST:
2507 /* An EXPR_LIST is used to represent a function call. This
2508 certainly may trap. */
2509 return 1;
2511 case GE:
2512 case GT:
2513 case LE:
2514 case LT:
2515 case LTGT:
2516 case COMPARE:
2517 /* Some floating point comparisons may trap. */
2518 if (!flag_trapping_math)
2519 break;
2520 /* ??? There is no machine independent way to check for tests that trap
2521 when COMPARE is used, though many targets do make this distinction.
2522 For instance, sparc uses CCFPE for compares which generate exceptions
2523 and CCFP for compares which do not generate exceptions. */
2524 if (HONOR_NANS (GET_MODE (x)))
2525 return 1;
2526 /* But often the compare has some CC mode, so check operand
2527 modes as well. */
2528 if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
2529 || HONOR_NANS (GET_MODE (XEXP (x, 1))))
2530 return 1;
2531 break;
2533 case EQ:
2534 case NE:
2535 if (HONOR_SNANS (GET_MODE (x)))
2536 return 1;
2537 /* Often comparison is CC mode, so check operand modes. */
2538 if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
2539 || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
2540 return 1;
2541 break;
2543 case FIX:
2544 /* Conversion of floating point might trap. */
2545 if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
2546 return 1;
2547 break;
2549 case NEG:
2550 case ABS:
2551 case SUBREG:
2552 /* These operations don't trap even with floating point. */
2553 break;
2555 default:
2556 /* Any floating arithmetic may trap. */
2557 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2558 return 1;
2561 fmt = GET_RTX_FORMAT (code);
2562 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2564 if (fmt[i] == 'e')
2566 if (may_trap_p_1 (XEXP (x, i), flags))
2567 return 1;
2569 else if (fmt[i] == 'E')
2571 int j;
2572 for (j = 0; j < XVECLEN (x, i); j++)
2573 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2574 return 1;
2577 return 0;
2580 /* Return nonzero if evaluating rtx X might cause a trap. */
2583 may_trap_p (const_rtx x)
2585 return may_trap_p_1 (x, 0);
2588 /* Same as above, but additionally return nonzero if evaluating rtx X might
2589 cause a fault. We define a fault for the purpose of this function as an
2590 erroneous execution condition that cannot be encountered during the normal
2591 execution of a valid program; the typical example is an unaligned memory
2592 access on a strict alignment machine. The compiler guarantees that it
2593 doesn't generate code that will fault from a valid program, but this
2594 guarantee doesn't mean anything for individual instructions. Consider
2595 the following example:
2597 struct S { int d; union { char *cp; int *ip; }; };
2599 int foo(struct S *s)
2601 if (s->d == 1)
2602 return *s->ip;
2603 else
2604 return *s->cp;
2607 on a strict alignment machine. In a valid program, foo will never be
2608 invoked on a structure for which d is equal to 1 and the underlying
2609 unique field of the union not aligned on a 4-byte boundary, but the
2610 expression *s->ip might cause a fault if considered individually.
2612 At the RTL level, potentially problematic expressions will almost always
2613 verify may_trap_p; for example, the above dereference can be emitted as
2614 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2615 However, suppose that foo is inlined in a caller that causes s->cp to
2616 point to a local character variable and guarantees that s->d is not set
2617 to 1; foo may have been effectively translated into pseudo-RTL as:
2619 if ((reg:SI) == 1)
2620 (set (reg:SI) (mem:SI (%fp - 7)))
2621 else
2622 (set (reg:QI) (mem:QI (%fp - 7)))
2624 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2625 memory reference to a stack slot, but it will certainly cause a fault
2626 on a strict alignment machine. */
2629 may_trap_or_fault_p (const_rtx x)
2631 return may_trap_p_1 (x, 1);
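/* Illustrative sketch (editorial addition): the usual division of labor
   between the two entry points above.  may_trap_p is the right test while an
   expression stays under its original guards; may_trap_or_fault_p is the
   conservative test for speculative motion.  The helper below is
   hypothetical.  */

static bool
can_speculate_set_p (rtx_insn *insn)
{
  rtx set = single_set (insn);
  return (set != NULL_RTX
	  && !side_effects_p (SET_SRC (set))
	  && !may_trap_or_fault_p (SET_SRC (set)));
}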
2634 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2635 i.e., an inequality. */
2638 inequality_comparisons_p (const_rtx x)
2640 const char *fmt;
2641 int len, i;
2642 const enum rtx_code code = GET_CODE (x);
2644 switch (code)
2646 case REG:
2647 case SCRATCH:
2648 case PC:
2649 case CC0:
2650 CASE_CONST_ANY:
2651 case CONST:
2652 case LABEL_REF:
2653 case SYMBOL_REF:
2654 return 0;
2656 case LT:
2657 case LTU:
2658 case GT:
2659 case GTU:
2660 case LE:
2661 case LEU:
2662 case GE:
2663 case GEU:
2664 return 1;
2666 default:
2667 break;
2670 len = GET_RTX_LENGTH (code);
2671 fmt = GET_RTX_FORMAT (code);
2673 for (i = 0; i < len; i++)
2675 if (fmt[i] == 'e')
2677 if (inequality_comparisons_p (XEXP (x, i)))
2678 return 1;
2680 else if (fmt[i] == 'E')
2682 int j;
2683 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2684 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2685 return 1;
2689 return 0;
2692 /* Replace any occurrence of FROM in X with TO. The function does
2693 not descend into CONST_DOUBLEs when doing the replacement.
2695 Note that copying is not done so X must not be shared unless all copies
2696 are to be modified. */
2699 replace_rtx (rtx x, rtx from, rtx to)
2701 int i, j;
2702 const char *fmt;
2704 if (x == from)
2705 return to;
2707 /* Allow this function to make replacements in EXPR_LISTs. */
2708 if (x == 0)
2709 return 0;
2711 if (GET_CODE (x) == SUBREG)
2713 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);
2715 if (CONST_INT_P (new_rtx))
2717 x = simplify_subreg (GET_MODE (x), new_rtx,
2718 GET_MODE (SUBREG_REG (x)),
2719 SUBREG_BYTE (x));
2720 gcc_assert (x);
2722 else
2723 SUBREG_REG (x) = new_rtx;
2725 return x;
2727 else if (GET_CODE (x) == ZERO_EXTEND)
2729 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);
2731 if (CONST_INT_P (new_rtx))
2733 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2734 new_rtx, GET_MODE (XEXP (x, 0)));
2735 gcc_assert (x);
2737 else
2738 XEXP (x, 0) = new_rtx;
2740 return x;
2743 fmt = GET_RTX_FORMAT (GET_CODE (x));
2744 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2746 if (fmt[i] == 'e')
2747 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
2748 else if (fmt[i] == 'E')
2749 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2750 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
2753 return x;
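/* Illustrative sketch (editorial addition): because replace_rtx modifies X in
   place, callers that substitute into a possibly shared expression usually
   copy it first and let replace_rtx fold the SUBREG / ZERO_EXTEND cases shown
   above.  REG and VAL are hypothetical placeholders.  */

static rtx
substitute_reg_with_value (rtx x, rtx reg, rtx val)
{
  return replace_rtx (copy_rtx (x), reg, val);
}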
2756 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL. Also track
2757 the change in LABEL_NUSES if UPDATE_LABEL_NUSES. */
2759 void
2760 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
2762 /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long. */
2763 rtx x = *loc;
2764 if (JUMP_TABLE_DATA_P (x))
2766 x = PATTERN (x);
2767 rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
2768 int len = GET_NUM_ELEM (vec);
2769 for (int i = 0; i < len; ++i)
2771 rtx ref = RTVEC_ELT (vec, i);
2772 if (XEXP (ref, 0) == old_label)
2774 XEXP (ref, 0) = new_label;
2775 if (update_label_nuses)
2777 ++LABEL_NUSES (new_label);
2778 --LABEL_NUSES (old_label);
2782 return;
2785 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
2786 field. This is not handled by the iterator because it doesn't
2787 handle unprinted ('0') fields. */
2788 if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
2789 JUMP_LABEL (x) = new_label;
2791 subrtx_ptr_iterator::array_type array;
2792 FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
2794 rtx *loc = *iter;
2795 if (rtx x = *loc)
2797 if (GET_CODE (x) == SYMBOL_REF
2798 && CONSTANT_POOL_ADDRESS_P (x))
2800 rtx c = get_pool_constant (x);
2801 if (rtx_referenced_p (old_label, c))
2803 /* Create a copy of constant C; replace the label inside
2804 but do not update LABEL_NUSES because uses in constant pool
2805 are not counted. */
2806 rtx new_c = copy_rtx (c);
2807 replace_label (&new_c, old_label, new_label, false);
2809 /* Add the new constant NEW_C to constant pool and replace
2810 the old reference to constant by new reference. */
2811 rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
2812 *loc = replace_rtx (x, x, XEXP (new_mem, 0));
2816 if ((GET_CODE (x) == LABEL_REF
2817 || GET_CODE (x) == INSN_LIST)
2818 && XEXP (x, 0) == old_label)
2820 XEXP (x, 0) = new_label;
2821 if (update_label_nuses)
2823 ++LABEL_NUSES (new_label);
2824 --LABEL_NUSES (old_label);
2831 void
2832 replace_label_in_insn (rtx_insn *insn, rtx old_label, rtx new_label,
2833 bool update_label_nuses)
2835 rtx insn_as_rtx = insn;
2836 replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
2837 gcc_checking_assert (insn_as_rtx == insn);
2840 /* Return true if X is referenced in BODY. */
2842 bool
2843 rtx_referenced_p (const_rtx x, const_rtx body)
2845 subrtx_iterator::array_type array;
2846 FOR_EACH_SUBRTX (iter, array, body, ALL)
2847 if (const_rtx y = *iter)
2849 /* Check if a label_ref Y refers to label X. */
2850 if (GET_CODE (y) == LABEL_REF
2851 && LABEL_P (x)
2852 && LABEL_REF_LABEL (y) == x)
2853 return true;
2855 if (rtx_equal_p (x, y))
2856 return true;
2858 /* If Y is a reference to pool constant traverse the constant. */
2859 if (GET_CODE (y) == SYMBOL_REF
2860 && CONSTANT_POOL_ADDRESS_P (y))
2861 iter.substitute (get_pool_constant (y));
2863 return false;
2866 /* If INSN is a tablejump, return true and store the label (before the jump
2867 table) in *LABELP and the jump table in *TABLEP. LABELP and TABLEP may be NULL. */
2869 bool
2870 tablejump_p (const rtx_insn *insn, rtx *labelp, rtx_jump_table_data **tablep)
2872 rtx label, table;
2874 if (!JUMP_P (insn))
2875 return false;
2877 label = JUMP_LABEL (insn);
2878 if (label != NULL_RTX && !ANY_RETURN_P (label)
2879 && (table = NEXT_INSN (as_a <rtx_insn *> (label))) != NULL_RTX
2880 && JUMP_TABLE_DATA_P (table))
2882 if (labelp)
2883 *labelp = label;
2884 if (tablep)
2885 *tablep = as_a <rtx_jump_table_data *> (table);
2886 return true;
2888 return false;
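/* Illustrative sketch (editorial addition): enumerating the targets of a
   tablejump found by the routine above, using the same accessors that
   label_is_jump_target_p uses further down.  The callback FN is
   hypothetical.  */

static void
note_tablejump_targets (const rtx_insn *insn, void (*fn) (rtx label))
{
  rtx_jump_table_data *table;
  if (tablejump_p (insn, NULL, &table))
    {
      rtvec vec = table->get_labels ();
      int len = GET_NUM_ELEM (vec);
      for (int i = 0; i < len; ++i)
	fn (XEXP (RTVEC_ELT (vec, i), 0));
    }
}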
2891 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
2892 constant that is not in the constant pool and not in the condition
2893 of an IF_THEN_ELSE. */
2895 static int
2896 computed_jump_p_1 (const_rtx x)
2898 const enum rtx_code code = GET_CODE (x);
2899 int i, j;
2900 const char *fmt;
2902 switch (code)
2904 case LABEL_REF:
2905 case PC:
2906 return 0;
2908 case CONST:
2909 CASE_CONST_ANY:
2910 case SYMBOL_REF:
2911 case REG:
2912 return 1;
2914 case MEM:
2915 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
2916 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
2918 case IF_THEN_ELSE:
2919 return (computed_jump_p_1 (XEXP (x, 1))
2920 || computed_jump_p_1 (XEXP (x, 2)));
2922 default:
2923 break;
2926 fmt = GET_RTX_FORMAT (code);
2927 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2929 if (fmt[i] == 'e'
2930 && computed_jump_p_1 (XEXP (x, i)))
2931 return 1;
2933 else if (fmt[i] == 'E')
2934 for (j = 0; j < XVECLEN (x, i); j++)
2935 if (computed_jump_p_1 (XVECEXP (x, i, j)))
2936 return 1;
2939 return 0;
2942 /* Return nonzero if INSN is an indirect jump (aka computed jump).
2944 Tablejumps and casesi insns are not considered indirect jumps;
2945 we can recognize them by a (use (label_ref)). */
2948 computed_jump_p (const_rtx insn)
2950 int i;
2951 if (JUMP_P (insn))
2953 rtx pat = PATTERN (insn);
2955 /* If we have a JUMP_LABEL set, we're not a computed jump. */
2956 if (JUMP_LABEL (insn) != NULL)
2957 return 0;
2959 if (GET_CODE (pat) == PARALLEL)
2961 int len = XVECLEN (pat, 0);
2962 int has_use_labelref = 0;
2964 for (i = len - 1; i >= 0; i--)
2965 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
2966 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
2967 == LABEL_REF))
2969 has_use_labelref = 1;
2970 break;
2973 if (! has_use_labelref)
2974 for (i = len - 1; i >= 0; i--)
2975 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
2976 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
2977 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
2978 return 1;
2980 else if (GET_CODE (pat) == SET
2981 && SET_DEST (pat) == pc_rtx
2982 && computed_jump_p_1 (SET_SRC (pat)))
2983 return 1;
2985 return 0;
2988 /* Optimized loop of for_each_rtx, trying to avoid useless recursive
2989 calls. Processes the subexpressions of EXP and passes them to F. */
2990 static int
2991 for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
2993 int result, i, j;
2994 const char *format = GET_RTX_FORMAT (GET_CODE (exp));
2995 rtx *x;
2997 for (; format[n] != '\0'; n++)
2999 switch (format[n])
3001 case 'e':
3002 /* Call F on X. */
3003 x = &XEXP (exp, n);
3004 result = (*f) (x, data);
3005 if (result == -1)
3006 /* Do not traverse sub-expressions. */
3007 continue;
3008 else if (result != 0)
3009 /* Stop the traversal. */
3010 return result;
3012 if (*x == NULL_RTX)
3013 /* There are no sub-expressions. */
3014 continue;
3016 i = non_rtx_starting_operands[GET_CODE (*x)];
3017 if (i >= 0)
3019 result = for_each_rtx_1 (*x, i, f, data);
3020 if (result != 0)
3021 return result;
3023 break;
3025 case 'V':
3026 case 'E':
3027 if (XVEC (exp, n) == 0)
3028 continue;
3029 for (j = 0; j < XVECLEN (exp, n); ++j)
3031 /* Call F on X. */
3032 x = &XVECEXP (exp, n, j);
3033 result = (*f) (x, data);
3034 if (result == -1)
3035 /* Do not traverse sub-expressions. */
3036 continue;
3037 else if (result != 0)
3038 /* Stop the traversal. */
3039 return result;
3041 if (*x == NULL_RTX)
3042 /* There are no sub-expressions. */
3043 continue;
3045 i = non_rtx_starting_operands[GET_CODE (*x)];
3046 if (i >= 0)
3048 result = for_each_rtx_1 (*x, i, f, data);
3049 if (result != 0)
3050 return result;
3053 break;
3055 default:
3056 /* Nothing to do. */
3057 break;
3061 return 0;
3064 /* Traverse X via depth-first search, calling F for each
3065 sub-expression (including X itself). F is also passed the DATA.
3066 If F returns -1, do not traverse sub-expressions, but continue
3067 traversing the rest of the tree. If F ever returns any other
3068 nonzero value, stop the traversal, and return the value returned
3069 by F. Otherwise, return 0. This function does not traverse inside
3070 tree structure that contains RTX_EXPRs, or into sub-expressions
3071 whose format code is `0' since it is not known whether or not those
3072 codes are actually RTL.
3074 This routine is very general, and could (should?) be used to
3075 implement many of the other routines in this file. */
3078 for_each_rtx (rtx *x, rtx_function f, void *data)
3080 int result;
3081 int i;
3083 /* Call F on X. */
3084 result = (*f) (x, data);
3085 if (result == -1)
3086 /* Do not traverse sub-expressions. */
3087 return 0;
3088 else if (result != 0)
3089 /* Stop the traversal. */
3090 return result;
3092 if (*x == NULL_RTX)
3093 /* There are no sub-expressions. */
3094 return 0;
3096 i = non_rtx_starting_operands[GET_CODE (*x)];
3097 if (i < 0)
3098 return 0;
3100 return for_each_rtx_1 (*x, i, f, data);
3103 /* Like "for_each_rtx", but for calling on an rtx_insn **. */
3106 for_each_rtx_in_insn (rtx_insn **insn, rtx_function f, void *data)
3108 rtx insn_as_rtx = *insn;
3109 int result;
3111 result = for_each_rtx (&insn_as_rtx, f, data);
3113 if (insn_as_rtx != *insn)
3114 *insn = safe_as_a <rtx_insn *> (insn_as_rtx);
3116 return result;
3121 /* MEM has a PRE/POST-INC/DEC/MODIFY address X. Extract the operands of
3122 the equivalent add insn and pass the result to FN, using DATA as the
3123 final argument. */
3125 static int
3126 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3128 rtx x = XEXP (mem, 0);
3129 switch (GET_CODE (x))
3131 case PRE_INC:
3132 case POST_INC:
3134 int size = GET_MODE_SIZE (GET_MODE (mem));
3135 rtx r1 = XEXP (x, 0);
3136 rtx c = gen_int_mode (size, GET_MODE (r1));
3137 return fn (mem, x, r1, r1, c, data);
3140 case PRE_DEC:
3141 case POST_DEC:
3143 int size = GET_MODE_SIZE (GET_MODE (mem));
3144 rtx r1 = XEXP (x, 0);
3145 rtx c = gen_int_mode (-size, GET_MODE (r1));
3146 return fn (mem, x, r1, r1, c, data);
3149 case PRE_MODIFY:
3150 case POST_MODIFY:
3152 rtx r1 = XEXP (x, 0);
3153 rtx add = XEXP (x, 1);
3154 return fn (mem, x, r1, add, NULL, data);
3157 default:
3158 gcc_unreachable ();
3162 /* Traverse X looking for MEMs that have autoinc addresses.
3163 For each such autoinc operation found, call FN, passing it
3164 the innermost enclosing MEM, the operation itself, the RTX modified
3165 by the operation, two RTXs (the second may be NULL) that, once
3166 added, represent the value to be held by the modified RTX
3167 afterwards, and DATA. FN is to return 0 to continue the
3168 traversal or any other value to have it returned to the caller of
3169 for_each_inc_dec. */
3172 for_each_inc_dec (rtx x,
3173 for_each_inc_dec_fn fn,
3174 void *data)
3176 subrtx_var_iterator::array_type array;
3177 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3179 rtx mem = *iter;
3180 if (mem
3181 && MEM_P (mem)
3182 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3184 int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3185 if (res != 0)
3186 return res;
3187 iter.skip_subrtxes ();
3190 return 0;
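/* Illustrative sketch (editorial addition): a minimal for_each_inc_dec
   callback.  It receives the enclosing MEM, the autoinc rtx, the register
   being modified, the two addends described above and the DATA pointer;
   here it only counts the operations.  Parameter names are illustrative.  */

static int
count_inc_dec (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
	       rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
	       rtx srcoff ATTRIBUTE_UNUSED, void *data)
{
  ++*(int *) data;
  return 0;	/* Zero keeps the traversal going.  */
}

/* Usage: int n = 0; for_each_inc_dec (PATTERN (insn), count_inc_dec, &n);  */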
3194 /* Searches X for any reference to REGNO, returning the rtx of the
3195 reference found if any. Otherwise, returns NULL_RTX. */
3198 regno_use_in (unsigned int regno, rtx x)
3200 const char *fmt;
3201 int i, j;
3202 rtx tem;
3204 if (REG_P (x) && REGNO (x) == regno)
3205 return x;
3207 fmt = GET_RTX_FORMAT (GET_CODE (x));
3208 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3210 if (fmt[i] == 'e')
3212 if ((tem = regno_use_in (regno, XEXP (x, i))))
3213 return tem;
3215 else if (fmt[i] == 'E')
3216 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3217 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3218 return tem;
3221 return NULL_RTX;
3224 /* Return a value indicating whether OP, an operand of a commutative
3225 operation, is preferred as the first or second operand. The higher
3226 the value, the stronger the preference for being the first operand.
3227 We use negative values to indicate a preference for the second operand
3228 and positive values for the first operand. */
3231 commutative_operand_precedence (rtx op)
3233 enum rtx_code code = GET_CODE (op);
3235 /* Constants always become the second operand. Prefer "nice" constants. */
3236 if (code == CONST_INT)
3237 return -8;
3238 if (code == CONST_WIDE_INT)
3239 return -8;
3240 if (code == CONST_DOUBLE)
3241 return -7;
3242 if (code == CONST_FIXED)
3243 return -7;
3244 op = avoid_constant_pool_reference (op);
3245 code = GET_CODE (op);
3247 switch (GET_RTX_CLASS (code))
3249 case RTX_CONST_OBJ:
3250 if (code == CONST_INT)
3251 return -6;
3252 if (code == CONST_WIDE_INT)
3253 return -6;
3254 if (code == CONST_DOUBLE)
3255 return -5;
3256 if (code == CONST_FIXED)
3257 return -5;
3258 return -4;
3260 case RTX_EXTRA:
3261 /* SUBREGs of objects should come second. */
3262 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3263 return -3;
3264 return 0;
3266 case RTX_OBJ:
3267 /* Complex expressions should come first, so decrease the priority
3268 of objects. Prefer pointer objects over non-pointer objects. */
3269 if ((REG_P (op) && REG_POINTER (op))
3270 || (MEM_P (op) && MEM_POINTER (op)))
3271 return -1;
3272 return -2;
3274 case RTX_COMM_ARITH:
3275 /* Prefer operands that are themselves commutative to be first.
3276 This helps to make things linear. In particular,
3277 (and (and (reg) (reg)) (not (reg))) is canonical. */
3278 return 4;
3280 case RTX_BIN_ARITH:
3281 /* If only one operand is a binary expression, it will be the first
3282 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3283 is canonical, although it will usually be further simplified. */
3284 return 2;
3286 case RTX_UNARY:
3287 /* Then prefer NEG and NOT. */
3288 if (code == NEG || code == NOT)
3289 return 1;
3291 default:
3292 return 0;
3296 /* Return 1 iff it is necessary to swap the operands of a commutative
3297 operation in order to canonicalize the expression. */
3299 bool
3300 swap_commutative_operands_p (rtx x, rtx y)
3302 return (commutative_operand_precedence (x)
3303 < commutative_operand_precedence (y));
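/* Illustrative sketch (editorial addition): the standard way the predicate
   above is used when building a commutative operation; simplify-rtx.c and
   optabs.c contain the real callers.  The helper name is made up.  */

static void
canonicalize_comm_operands (rtx *op0, rtx *op1)
{
  if (swap_commutative_operands_p (*op0, *op1))
    {
      rtx tem = *op0;
      *op0 = *op1;
      *op1 = tem;
    }
}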
3306 /* Return 1 if X is an autoincrement side effect and the register is
3307 not the stack pointer. */
3309 auto_inc_p (const_rtx x)
3311 switch (GET_CODE (x))
3313 case PRE_INC:
3314 case POST_INC:
3315 case PRE_DEC:
3316 case POST_DEC:
3317 case PRE_MODIFY:
3318 case POST_MODIFY:
3319 /* There are no REG_INC notes for SP. */
3320 if (XEXP (x, 0) != stack_pointer_rtx)
3321 return 1;
3322 default:
3323 break;
3325 return 0;
3328 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3330 loc_mentioned_in_p (rtx *loc, const_rtx in)
3332 enum rtx_code code;
3333 const char *fmt;
3334 int i, j;
3336 if (!in)
3337 return 0;
3339 code = GET_CODE (in);
3340 fmt = GET_RTX_FORMAT (code);
3341 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3343 if (fmt[i] == 'e')
3345 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3346 return 1;
3348 else if (fmt[i] == 'E')
3349 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3350 if (loc == &XVECEXP (in, i, j)
3351 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3352 return 1;
3354 return 0;
3357 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3358 and SUBREG_BYTE, return the bit offset where the subreg begins
3359 (counting from the least significant bit of the operand). */
3361 unsigned int
3362 subreg_lsb_1 (enum machine_mode outer_mode,
3363 enum machine_mode inner_mode,
3364 unsigned int subreg_byte)
3366 unsigned int bitpos;
3367 unsigned int byte;
3368 unsigned int word;
3370 /* A paradoxical subreg begins at bit position 0. */
3371 if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
3372 return 0;
3374 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3375 /* If the subreg crosses a word boundary ensure that
3376 it also begins and ends on a word boundary. */
3377 gcc_assert (!((subreg_byte % UNITS_PER_WORD
3378 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3379 && (subreg_byte % UNITS_PER_WORD
3380 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3382 if (WORDS_BIG_ENDIAN)
3383 word = (GET_MODE_SIZE (inner_mode)
3384 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3385 else
3386 word = subreg_byte / UNITS_PER_WORD;
3387 bitpos = word * BITS_PER_WORD;
3389 if (BYTES_BIG_ENDIAN)
3390 byte = (GET_MODE_SIZE (inner_mode)
3391 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3392 else
3393 byte = subreg_byte % UNITS_PER_WORD;
3394 bitpos += byte * BITS_PER_UNIT;
3396 return bitpos;
3399 /* Given a subreg X, return the bit offset where the subreg begins
3400 (counting from the least significant bit of the reg). */
3402 unsigned int
3403 subreg_lsb (const_rtx x)
3405 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3406 SUBREG_BYTE (x));
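/* Illustrative worked example (editorial addition), assuming a little-endian
   target: for (subreg:QI (reg:SI R) 3), subreg_lsb_1 (QImode, SImode, 3)
   selects word 0 and byte 3, so the subreg starts at bit 24.  On a fully
   big-endian target the same SUBREG_BYTE selects bit 0.  */

static void
subreg_lsb_example (void)
{
  if (!BYTES_BIG_ENDIAN && !WORDS_BIG_ENDIAN)
    gcc_checking_assert (subreg_lsb_1 (QImode, SImode, 3) == 24);
}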
3409 /* Fill in information about a subreg of a hard register.
3410 xregno - A regno of an inner hard subreg_reg (or what will become one).
3411 xmode - The mode of xregno.
3412 offset - The byte offset.
3413 ymode - The mode of a top level SUBREG (or what may become one).
3414 info - Pointer to structure to fill in. */
3415 void
3416 subreg_get_info (unsigned int xregno, enum machine_mode xmode,
3417 unsigned int offset, enum machine_mode ymode,
3418 struct subreg_info *info)
3420 int nregs_xmode, nregs_ymode;
3421 int mode_multiple, nregs_multiple;
3422 int offset_adj, y_offset, y_offset_adj;
3423 int regsize_xmode, regsize_ymode;
3424 bool rknown;
3426 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3428 rknown = false;
3430 /* If there are holes in a non-scalar mode in registers, we expect
3431 that it is made up of its units concatenated together. */
3432 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3434 enum machine_mode xmode_unit;
3436 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3437 if (GET_MODE_INNER (xmode) == VOIDmode)
3438 xmode_unit = xmode;
3439 else
3440 xmode_unit = GET_MODE_INNER (xmode);
3441 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3442 gcc_assert (nregs_xmode
3443 == (GET_MODE_NUNITS (xmode)
3444 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3445 gcc_assert (hard_regno_nregs[xregno][xmode]
3446 == (hard_regno_nregs[xregno][xmode_unit]
3447 * GET_MODE_NUNITS (xmode)));
3449 /* You can only ask for a SUBREG of a value with holes in the middle
3450 if you don't cross the holes. (Such a SUBREG should be done by
3451 picking a different register class, or doing it in memory if
3452 necessary.) An example of a value with holes is XCmode on 32-bit
3453 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3454 3 for each part, but in memory it's two 128-bit parts.
3455 Padding is assumed to be at the end (not necessarily the 'high part')
3456 of each unit. */
3457 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3458 < GET_MODE_NUNITS (xmode))
3459 && (offset / GET_MODE_SIZE (xmode_unit)
3460 != ((offset + GET_MODE_SIZE (ymode) - 1)
3461 / GET_MODE_SIZE (xmode_unit))))
3463 info->representable_p = false;
3464 rknown = true;
3467 else
3468 nregs_xmode = hard_regno_nregs[xregno][xmode];
3470 nregs_ymode = hard_regno_nregs[xregno][ymode];
3472 /* Paradoxical subregs are otherwise valid. */
3473 if (!rknown
3474 && offset == 0
3475 && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
3477 info->representable_p = true;
3478 /* If this is a big endian paradoxical subreg, which uses more
3479 actual hard registers than the original register, we must
3480 return a negative offset so that we find the proper highpart
3481 of the register. */
3482 if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3483 ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
3484 info->offset = nregs_xmode - nregs_ymode;
3485 else
3486 info->offset = 0;
3487 info->nregs = nregs_ymode;
3488 return;
3491 /* If registers store different numbers of bits in the different
3492 modes, we cannot generally form this subreg. */
3493 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3494 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3495 && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
3496 && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
3498 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3499 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3500 if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
3502 info->representable_p = false;
3503 info->nregs
3504 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3505 info->offset = offset / regsize_xmode;
3506 return;
3508 if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
3510 info->representable_p = false;
3511 info->nregs
3512 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3513 info->offset = offset / regsize_xmode;
3514 return;
3518 /* Lowpart subregs are otherwise valid. */
3519 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3521 info->representable_p = true;
3522 rknown = true;
3524 if (offset == 0 || nregs_xmode == nregs_ymode)
3526 info->offset = 0;
3527 info->nregs = nregs_ymode;
3528 return;
3532 /* This should always pass, otherwise we don't know how to verify
3533 the constraint. These conditions may be relaxed but
3534 subreg_regno_offset would need to be redesigned. */
3535 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3536 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3538 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
3539 && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
3541 HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
3542 HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
3543 HOST_WIDE_INT off_low = offset & (ysize - 1);
3544 HOST_WIDE_INT off_high = offset & ~(ysize - 1);
3545 offset = (xsize - ysize - off_high) | off_low;
3547 /* The XMODE value can be seen as a vector of NREGS_XMODE
3548 values. The subreg must represent a lowpart of a given field.
3549 Compute what field it is. */
3550 offset_adj = offset;
3551 offset_adj -= subreg_lowpart_offset (ymode,
3552 mode_for_size (GET_MODE_BITSIZE (xmode)
3553 / nregs_xmode,
3554 MODE_INT, 0));
3556 /* Size of ymode must not be greater than the size of xmode. */
3557 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3558 gcc_assert (mode_multiple != 0);
3560 y_offset = offset / GET_MODE_SIZE (ymode);
3561 y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
3562 nregs_multiple = nregs_xmode / nregs_ymode;
3564 gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
3565 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3567 if (!rknown)
3569 info->representable_p = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
3570 rknown = true;
3572 info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
3573 info->nregs = nregs_ymode;
3576 /* This function returns the regno offset of a subreg expression.
3577 xregno - A regno of an inner hard subreg_reg (or what will become one).
3578 xmode - The mode of xregno.
3579 offset - The byte offset.
3580 ymode - The mode of a top level SUBREG (or what may become one).
3581 RETURN - The regno offset which would be used. */
3582 unsigned int
3583 subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
3584 unsigned int offset, enum machine_mode ymode)
3586 struct subreg_info info;
3587 subreg_get_info (xregno, xmode, offset, ymode, &info);
3588 return info.offset;
3591 /* This function returns true when the offset is representable via
3592 subreg_offset in the given regno.
3593 xregno - A regno of an inner hard subreg_reg (or what will become one).
3594 xmode - The mode of xregno.
3595 offset - The byte offset.
3596 ymode - The mode of a top level SUBREG (or what may become one).
3597 RETURN - Whether the offset is representable. */
3598 bool
3599 subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
3600 unsigned int offset, enum machine_mode ymode)
3602 struct subreg_info info;
3603 subreg_get_info (xregno, xmode, offset, ymode, &info);
3604 return info.representable_p;
3607 /* Return the number of a YMODE register to which
3609 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3611 can be simplified. Return -1 if the subreg can't be simplified.
3613 XREGNO is a hard register number. */
3616 simplify_subreg_regno (unsigned int xregno, enum machine_mode xmode,
3617 unsigned int offset, enum machine_mode ymode)
3619 struct subreg_info info;
3620 unsigned int yregno;
3622 #ifdef CANNOT_CHANGE_MODE_CLASS
3623 /* Give the backend a chance to disallow the mode change. */
3624 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3625 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3626 && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
3627 /* We can use mode change in LRA for some transformations. */
3628 && ! lra_in_progress)
3629 return -1;
3630 #endif
3632 /* We shouldn't simplify stack-related registers. */
3633 if ((!reload_completed || frame_pointer_needed)
3634 && xregno == FRAME_POINTER_REGNUM)
3635 return -1;
3637 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3638 && xregno == ARG_POINTER_REGNUM)
3639 return -1;
3641 if (xregno == STACK_POINTER_REGNUM
3642 /* We should convert the hard stack register in LRA if it is
3643 possible. */
3644 && ! lra_in_progress)
3645 return -1;
3647 /* Try to get the register offset. */
3648 subreg_get_info (xregno, xmode, offset, ymode, &info);
3649 if (!info.representable_p)
3650 return -1;
3652 /* Make sure that the offsetted register value is in range. */
3653 yregno = xregno + info.offset;
3654 if (!HARD_REGISTER_NUM_P (yregno))
3655 return -1;
3657 /* See whether (reg:YMODE YREGNO) is valid.
3659 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3660 This is a kludge to work around how complex FP arguments are passed
3661 on IA-64 and should be fixed. See PR target/49226. */
3662 if (!HARD_REGNO_MODE_OK (yregno, ymode)
3663 && HARD_REGNO_MODE_OK (xregno, xmode))
3664 return -1;
3666 return (int) yregno;
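/* Illustrative sketch (editorial addition): how callers typically fold a
   hard-register SUBREG into a plain REG using the routine above.  The helper
   is hypothetical; the real users live in places such as simplify-rtx.c.  */

static rtx
try_fold_hard_subreg (enum machine_mode outer_mode, unsigned int regno,
		      enum machine_mode inner_mode, unsigned int byte)
{
  int new_regno = simplify_subreg_regno (regno, inner_mode, byte, outer_mode);
  return new_regno >= 0 ? gen_rtx_REG (outer_mode, new_regno) : NULL_RTX;
}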
3669 /* Return the final regno that a subreg expression refers to. */
3670 unsigned int
3671 subreg_regno (const_rtx x)
3673 unsigned int ret;
3674 rtx subreg = SUBREG_REG (x);
3675 int regno = REGNO (subreg);
3677 ret = regno + subreg_regno_offset (regno,
3678 GET_MODE (subreg),
3679 SUBREG_BYTE (x),
3680 GET_MODE (x));
3681 return ret;
3685 /* Return the number of registers that a subreg expression refers
3686 to. */
3687 unsigned int
3688 subreg_nregs (const_rtx x)
3690 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3693 /* Return the number of registers that subreg expression X, whose inner
3694 register has number REGNO, refers to. This is a copy of
3695 rtlanal.c:subreg_nregs changed so that the regno can be passed in. */
3697 unsigned int
3698 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3700 struct subreg_info info;
3701 rtx subreg = SUBREG_REG (x);
3703 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3704 &info);
3705 return info.nregs;
3709 struct parms_set_data
3711 int nregs;
3712 HARD_REG_SET regs;
3715 /* Helper function for noticing stores to parameter registers. */
3716 static void
3717 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3719 struct parms_set_data *const d = (struct parms_set_data *) data;
3720 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3721 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3723 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3724 d->nregs--;
3728 /* Look backward for first parameter to be loaded.
3729 Note that loads of all parameters will not necessarily be
3730 found if CSE has eliminated some of them (e.g., an argument
3731 to the outer function is passed down as a parameter).
3732 Do not skip BOUNDARY. */
3733 rtx_insn *
3734 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
3736 struct parms_set_data parm;
3737 rtx p;
3738 rtx_insn *before, *first_set;
3740 /* Since different machines initialize their parameter registers
3741 in different orders, assume nothing. Collect the set of all
3742 parameter registers. */
3743 CLEAR_HARD_REG_SET (parm.regs);
3744 parm.nregs = 0;
3745 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3746 if (GET_CODE (XEXP (p, 0)) == USE
3747 && REG_P (XEXP (XEXP (p, 0), 0)))
3749 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3751 /* We only care about registers which can hold function
3752 arguments. */
3753 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3754 continue;
3756 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3757 parm.nregs++;
3759 before = call_insn;
3760 first_set = call_insn;
3762 /* Search backward for the first set of a register in this set. */
3763 while (parm.nregs && before != boundary)
3765 before = PREV_INSN (before);
3767 /* It is possible that some loads got CSEed from one call to
3768 another. Stop in that case. */
3769 if (CALL_P (before))
3770 break;
3772 /* Our caller must either ensure that we will find all sets
3773 (in case the code has not been optimized yet), or take care
3774 of possible labels by setting BOUNDARY to the preceding
3775 CODE_LABEL. */
3776 if (LABEL_P (before))
3778 gcc_assert (before == boundary);
3779 break;
3782 if (INSN_P (before))
3784 int nregs_old = parm.nregs;
3785 note_stores (PATTERN (before), parms_set, &parm);
3786 /* If we found something that did not set a parameter reg,
3787 we're done. Do not keep going, as that might result
3788 in hoisting an insn before the setting of a pseudo
3789 that is used by the hoisted insn. */
3790 if (nregs_old != parm.nregs)
3791 first_set = before;
3792 else
3793 break;
3796 return first_set;
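/* Illustrative sketch (editorial addition): the typical reason to call the
   routine above is to find a safe place to emit code before a call, ahead of
   the insns that load its argument registers.  The helper is hypothetical and
   assumes the ordinary emit_insn_before entry point from emit-rtl.  */

static rtx_insn *
emit_before_call_args (rtx pat, rtx_insn *call_insn, rtx_insn *boundary)
{
  rtx_insn *first = find_first_parameter_load (call_insn, boundary);
  return emit_insn_before (pat, first);
}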
3799 /* Return true if we should avoid inserting code between INSN and preceding
3800 call instruction. */
3802 bool
3803 keep_with_call_p (const rtx_insn *insn)
3805 rtx set;
3807 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3809 if (REG_P (SET_DEST (set))
3810 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3811 && fixed_regs[REGNO (SET_DEST (set))]
3812 && general_operand (SET_SRC (set), VOIDmode))
3813 return true;
3814 if (REG_P (SET_SRC (set))
3815 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
3816 && REG_P (SET_DEST (set))
3817 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3818 return true;
3819 /* There may be a stack pop just after the call and before the store
3820 of the return register. Search for the actual store when deciding
3821 if we can break or not. */
3822 if (SET_DEST (set) == stack_pointer_rtx)
3824 /* This CONST_CAST is okay because next_nonnote_insn just
3825 returns its argument and we assign it to a const_rtx
3826 variable. */
3827 const rtx_insn *i2
3828 = next_nonnote_insn (const_cast<rtx_insn *> (insn));
3829 if (i2 && keep_with_call_p (i2))
3830 return true;
3833 return false;
3836 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3837 to non-complex jumps. That is, direct unconditional, conditional,
3838 and tablejumps, but not computed jumps or returns. It also does
3839 not apply to the fallthru case of a conditional jump. */
3841 bool
3842 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
3844 rtx tmp = JUMP_LABEL (jump_insn);
3845 rtx_jump_table_data *table;
3847 if (label == tmp)
3848 return true;
3850 if (tablejump_p (jump_insn, NULL, &table))
3852 rtvec vec = table->get_labels ();
3853 int i, veclen = GET_NUM_ELEM (vec);
3855 for (i = 0; i < veclen; ++i)
3856 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
3857 return true;
3860 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
3861 return true;
3863 return false;
3867 /* Return an estimate of the cost of computing rtx X.
3868 One use is in cse, to decide which expression to keep in the hash table.
3869 Another is in rtl generation, to pick the cheapest way to multiply.
3870 Other uses like the latter are expected in the future.
3872 X appears as operand OPNO in an expression with code OUTER_CODE.
3873 SPEED specifies whether costs optimized for speed or size should
3874 be returned. */
3877 rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
3879 int i, j;
3880 enum rtx_code code;
3881 const char *fmt;
3882 int total;
3883 int factor;
3885 if (x == 0)
3886 return 0;
3888 /* A size N times larger than UNITS_PER_WORD likely needs N times as
3889 many insns, taking N times as long. */
3890 factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
3891 if (factor == 0)
3892 factor = 1;
3894 /* Compute the default costs of certain things.
3895 Note that targetm.rtx_costs can override the defaults. */
3897 code = GET_CODE (x);
3898 switch (code)
3900 case MULT:
3901 /* Multiplication has time-complexity O(N*N), where N is the
3902 number of units (translated from digits) when using
3903 schoolbook long multiplication. */
3904 total = factor * factor * COSTS_N_INSNS (5);
3905 break;
3906 case DIV:
3907 case UDIV:
3908 case MOD:
3909 case UMOD:
3910 /* Similarly, complexity for schoolbook long division. */
3911 total = factor * factor * COSTS_N_INSNS (7);
3912 break;
3913 case USE:
3914 /* Used in combine.c as a marker. */
3915 total = 0;
3916 break;
3917 case SET:
3918 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
3919 the mode for the factor. */
3920 factor = GET_MODE_SIZE (GET_MODE (SET_DEST (x))) / UNITS_PER_WORD;
3921 if (factor == 0)
3922 factor = 1;
3923 /* Pass through. */
3924 default:
3925 total = factor * COSTS_N_INSNS (1);
3928 switch (code)
3930 case REG:
3931 return 0;
3933 case SUBREG:
3934 total = 0;
3935 /* If we can't tie these modes, make this expensive. The larger
3936 the mode, the more expensive it is. */
3937 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
3938 return COSTS_N_INSNS (2 + factor);
3939 break;
3941 default:
3942 if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
3943 return total;
3944 break;
3947 /* Sum the costs of the sub-rtx's, plus cost of this operation,
3948 which is already in total. */
3950 fmt = GET_RTX_FORMAT (code);
3951 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3952 if (fmt[i] == 'e')
3953 total += rtx_cost (XEXP (x, i), code, i, speed);
3954 else if (fmt[i] == 'E')
3955 for (j = 0; j < XVECLEN (x, i); j++)
3956 total += rtx_cost (XVECEXP (x, i, j), code, i, speed);
3958 return total;
3961 /* Fill in the structure C with information about both speed and size rtx
3962 costs for X, which is operand OPNO in an expression with code OUTER. */
3964 void
3965 get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
3966 struct full_rtx_costs *c)
3968 c->speed = rtx_cost (x, outer, opno, true);
3969 c->size = rtx_cost (x, outer, opno, false);
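/* Illustrative sketch (editorial addition): a typical way rtx_cost is used to
   pick between two equivalent expressions, as cse.c and expmed.c do.  OUTER
   and OPNO describe where the expression will sit; SPEED selects the speed or
   size tables.  The helper is hypothetical.  */

static rtx
cheaper_rtx (rtx a, rtx b, enum rtx_code outer, int opno, bool speed)
{
  return (rtx_cost (a, outer, opno, speed)
	  <= rtx_cost (b, outer, opno, speed)) ? a : b;
}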
3973 /* Return the cost of address expression X.
3974 Expect that X is a properly formed address reference.
3976 The SPEED parameter specifies whether costs optimized for speed or size
3977 should be returned. */
3980 address_cost (rtx x, enum machine_mode mode, addr_space_t as, bool speed)
3982 /* We may be asked for the cost of various unusual addresses, such as the
3983 operands of a push instruction. It is not worthwhile to complicate the
3984 target hook to handle such cases. */
3986 if (!memory_address_addr_space_p (mode, x, as))
3987 return 1000;
3989 return targetm.address_cost (x, mode, as, speed);
3992 /* If the target doesn't override, compute the cost as with arithmetic. */
3995 default_address_cost (rtx x, enum machine_mode, addr_space_t, bool speed)
3997 return rtx_cost (x, MEM, 0, speed);
4001 unsigned HOST_WIDE_INT
4002 nonzero_bits (const_rtx x, enum machine_mode mode)
4004 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
4007 unsigned int
4008 num_sign_bit_copies (const_rtx x, enum machine_mode mode)
4010 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
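/* Illustrative sketch (editorial addition): consumers of the two wrappers
   above usually ask questions such as "is X known to fit in N bits?".  The
   helper below is hypothetical and mirrors tests found in combine.c; N must
   be smaller than the host-wide-int width.  */

static bool
known_to_fit_in_bits_p (const_rtx x, enum machine_mode mode, unsigned int n)
{
  gcc_checking_assert (n < HOST_BITS_PER_WIDE_INT);
  return (nonzero_bits (x, mode)
	  & ~((((unsigned HOST_WIDE_INT) 1) << n) - 1)) == 0;
}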
4013 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4014 It avoids exponential behavior in nonzero_bits1 when X has
4015 identical subexpressions on the first or the second level. */
4017 static unsigned HOST_WIDE_INT
4018 cached_nonzero_bits (const_rtx x, enum machine_mode mode, const_rtx known_x,
4019 enum machine_mode known_mode,
4020 unsigned HOST_WIDE_INT known_ret)
4022 if (x == known_x && mode == known_mode)
4023 return known_ret;
4025 /* Try to find identical subexpressions. If found call
4026 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4027 precomputed value for the subexpression as KNOWN_RET. */
4029 if (ARITHMETIC_P (x))
4031 rtx x0 = XEXP (x, 0);
4032 rtx x1 = XEXP (x, 1);
4034 /* Check the first level. */
4035 if (x0 == x1)
4036 return nonzero_bits1 (x, mode, x0, mode,
4037 cached_nonzero_bits (x0, mode, known_x,
4038 known_mode, known_ret));
4040 /* Check the second level. */
4041 if (ARITHMETIC_P (x0)
4042 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4043 return nonzero_bits1 (x, mode, x1, mode,
4044 cached_nonzero_bits (x1, mode, known_x,
4045 known_mode, known_ret));
4047 if (ARITHMETIC_P (x1)
4048 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4049 return nonzero_bits1 (x, mode, x0, mode,
4050 cached_nonzero_bits (x0, mode, known_x,
4051 known_mode, known_ret));
4054 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4057 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4058 We don't let nonzero_bits recur into num_sign_bit_copies, because that
4059 is less useful. We can't allow both, because that results in exponential
4060 run time recursion. There is a nullstone testcase that triggered
4061 this. This macro avoids accidental uses of num_sign_bit_copies. */
4062 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4064 /* Given an expression, X, compute which bits in X can be nonzero.
4065 We don't care about bits outside of those defined in MODE.
4067 For most X this is simply GET_MODE_MASK (MODE), but if X is
4068 an arithmetic operation, we can do better. */
4070 static unsigned HOST_WIDE_INT
4071 nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
4072 enum machine_mode known_mode,
4073 unsigned HOST_WIDE_INT known_ret)
4075 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4076 unsigned HOST_WIDE_INT inner_nz;
4077 enum rtx_code code;
4078 enum machine_mode inner_mode;
4079 unsigned int mode_width = GET_MODE_PRECISION (mode);
4081 /* For floating-point and vector values, assume all bits are needed. */
4082 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
4083 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4084 return nonzero;
4086 /* If X is wider than MODE, use its mode instead. */
4087 if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
4089 mode = GET_MODE (x);
4090 nonzero = GET_MODE_MASK (mode);
4091 mode_width = GET_MODE_PRECISION (mode);
4094 if (mode_width > HOST_BITS_PER_WIDE_INT)
4095 /* Our only callers in this case look for single bit values. So
4096 just return the mode mask. Those tests will then be false. */
4097 return nonzero;
4099 #ifndef WORD_REGISTER_OPERATIONS
4100 /* If MODE is wider than X, but both are a single word for both the host
4101 and target machines, we can compute this from which bits of the
4102 object might be nonzero in its own mode, taking into account the fact
4103 that on many CISC machines, accessing an object in a wider mode
4104 causes the high-order bits to become undefined. So they are
4105 not known to be zero. */
4107 if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
4108 && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
4109 && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
4110 && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
4112 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
4113 known_x, known_mode, known_ret);
4114 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
4115 return nonzero;
4117 #endif
4119 code = GET_CODE (x);
4120 switch (code)
4122 case REG:
4123 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4124 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4125 all the bits above ptr_mode are known to be zero. */
4126 /* As we do not know which address space the pointer is referring to,
4127 we can do this only if the target does not support different pointer
4128 or address modes depending on the address space. */
4129 if (target_default_pointer_address_modes_p ()
4130 && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4131 && REG_POINTER (x))
4132 nonzero &= GET_MODE_MASK (ptr_mode);
4133 #endif
4135 /* Include declared information about alignment of pointers. */
4136 /* ??? We don't properly preserve REG_POINTER changes across
4137 pointer-to-integer casts, so we can't trust it except for
4138 things that we know must be pointers. See execute/960116-1.c. */
4139 if ((x == stack_pointer_rtx
4140 || x == frame_pointer_rtx
4141 || x == arg_pointer_rtx)
4142 && REGNO_POINTER_ALIGN (REGNO (x)))
4144 unsigned HOST_WIDE_INT alignment
4145 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4147 #ifdef PUSH_ROUNDING
4148 /* If PUSH_ROUNDING is defined, it is possible for the
4149 stack to be momentarily aligned only to that amount,
4150 so we pick the least alignment. */
4151 if (x == stack_pointer_rtx && PUSH_ARGS)
4152 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
4153 alignment);
4154 #endif
4156 nonzero &= ~(alignment - 1);
4160 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4161 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
4162 known_mode, known_ret,
4163 &nonzero_for_hook);
4165 if (new_rtx)
4166 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4167 known_mode, known_ret);
4169 return nonzero_for_hook;
4172 case CONST_INT:
4173 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
4174 /* If X is negative in MODE, sign-extend the value. */
4175 if (INTVAL (x) > 0
4176 && mode_width < BITS_PER_WORD
4177 && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
4178 != 0)
4179 return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4180 #endif
4182 return UINTVAL (x);
4184 case MEM:
4185 #ifdef LOAD_EXTEND_OP
4186 /* On many, if not most, RISC machines, reading a byte from memory
4187 zeros the rest of the register. Noticing that fact saves a lot
4188 of extra zero-extends. */
4189 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
4190 nonzero &= GET_MODE_MASK (GET_MODE (x));
4191 #endif
4192 break;
4194 case EQ: case NE:
4195 case UNEQ: case LTGT:
4196 case GT: case GTU: case UNGT:
4197 case LT: case LTU: case UNLT:
4198 case GE: case GEU: case UNGE:
4199 case LE: case LEU: case UNLE:
4200 case UNORDERED: case ORDERED:
4201 /* If this produces an integer result, we know which bits are set.
4202 Code here used to clear bits outside the mode of X, but that is
4203 now done above. */
4204 /* Mind that MODE is the mode the caller wants to look at this
4205 operation in, and not the actual operation mode. We can wind
4206 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4207 that describes the results of a vector compare. */
4208 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
4209 && mode_width <= HOST_BITS_PER_WIDE_INT)
4210 nonzero = STORE_FLAG_VALUE;
4211 break;
4213 case NEG:
4214 #if 0
4215 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4216 and num_sign_bit_copies. */
4217 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4218 == GET_MODE_PRECISION (GET_MODE (x)))
4219 nonzero = 1;
4220 #endif
4222 if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
4223 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
4224 break;
4226 case ABS:
4227 #if 0
4228 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4229 and num_sign_bit_copies. */
4230 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4231 == GET_MODE_PRECISION (GET_MODE (x)))
4232 nonzero = 1;
4233 #endif
4234 break;
4236 case TRUNCATE:
4237 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4238 known_x, known_mode, known_ret)
4239 & GET_MODE_MASK (mode));
4240 break;
4242 case ZERO_EXTEND:
4243 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4244 known_x, known_mode, known_ret);
4245 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4246 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4247 break;
4249 case SIGN_EXTEND:
4250 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4251 Otherwise, show all the bits in the outer mode but not the inner
4252 may be nonzero. */
4253 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4254 known_x, known_mode, known_ret);
4255 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4257 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4258 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4259 inner_nz |= (GET_MODE_MASK (mode)
4260 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4263 nonzero &= inner_nz;
4264 break;
4266 case AND:
4267 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4268 known_x, known_mode, known_ret)
4269 & cached_nonzero_bits (XEXP (x, 1), mode,
4270 known_x, known_mode, known_ret);
4271 break;
4273 case XOR: case IOR:
4274 case UMIN: case UMAX: case SMIN: case SMAX:
4276 unsigned HOST_WIDE_INT nonzero0
4277 = cached_nonzero_bits (XEXP (x, 0), mode,
4278 known_x, known_mode, known_ret);
4280 /* Don't call nonzero_bits for the second time if it cannot change
4281 anything. */
4282 if ((nonzero & nonzero0) != nonzero)
4283 nonzero &= nonzero0
4284 | cached_nonzero_bits (XEXP (x, 1), mode,
4285 known_x, known_mode, known_ret);
4287 break;
4289 case PLUS: case MINUS:
4290 case MULT:
4291 case DIV: case UDIV:
4292 case MOD: case UMOD:
4293 /* We can apply the rules of arithmetic to compute the number of
4294 high- and low-order zero bits of these operations. We start by
4295 computing the width (position of the highest-order nonzero bit)
4296 and the number of low-order zero bits for each value. */
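/* A worked example (illustrative values, not from the original source):
   if nz0 == 0xf0 and nz1 == 0x0c, then width0 = 8, width1 = 4, low0 = 4
   and low1 = 2.  For PLUS the result fits in MAX (8, 4) + 1 = 9 bits and
   has at least MIN (4, 2) = 2 low-order zero bits, so nonzero is narrowed
   to at most 0x1fc; for MULT the width is 8 + 4 = 12 and the low-order
   zero count 4 + 2 = 6, giving at most 0xfc0.  */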
4298 unsigned HOST_WIDE_INT nz0
4299 = cached_nonzero_bits (XEXP (x, 0), mode,
4300 known_x, known_mode, known_ret);
4301 unsigned HOST_WIDE_INT nz1
4302 = cached_nonzero_bits (XEXP (x, 1), mode,
4303 known_x, known_mode, known_ret);
4304 int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
4305 int width0 = floor_log2 (nz0) + 1;
4306 int width1 = floor_log2 (nz1) + 1;
4307 int low0 = floor_log2 (nz0 & -nz0);
4308 int low1 = floor_log2 (nz1 & -nz1);
4309 unsigned HOST_WIDE_INT op0_maybe_minusp
4310 = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4311 unsigned HOST_WIDE_INT op1_maybe_minusp
4312 = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4313 unsigned int result_width = mode_width;
4314 int result_low = 0;
4316 switch (code)
4318 case PLUS:
4319 result_width = MAX (width0, width1) + 1;
4320 result_low = MIN (low0, low1);
4321 break;
4322 case MINUS:
4323 result_low = MIN (low0, low1);
4324 break;
4325 case MULT:
4326 result_width = width0 + width1;
4327 result_low = low0 + low1;
4328 break;
4329 case DIV:
4330 if (width1 == 0)
4331 break;
4332 if (!op0_maybe_minusp && !op1_maybe_minusp)
4333 result_width = width0;
4334 break;
4335 case UDIV:
4336 if (width1 == 0)
4337 break;
4338 result_width = width0;
4339 break;
4340 case MOD:
4341 if (width1 == 0)
4342 break;
4343 if (!op0_maybe_minusp && !op1_maybe_minusp)
4344 result_width = MIN (width0, width1);
4345 result_low = MIN (low0, low1);
4346 break;
4347 case UMOD:
4348 if (width1 == 0)
4349 break;
4350 result_width = MIN (width0, width1);
4351 result_low = MIN (low0, low1);
4352 break;
4353 default:
4354 gcc_unreachable ();
4357 if (result_width < mode_width)
4358 nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;
4360 if (result_low > 0)
4361 nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
4363 break;
4365 case ZERO_EXTRACT:
4366 if (CONST_INT_P (XEXP (x, 1))
4367 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4368 nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
4369 break;
4371 case SUBREG:
4372 /* If this is a SUBREG formed for a promoted variable that has
4373 been zero-extended, we know that at least the high-order bits
4374 are zero, though others might be too. */
4376 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
4377 nonzero = GET_MODE_MASK (GET_MODE (x))
4378 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
4379 known_x, known_mode, known_ret);
4381 inner_mode = GET_MODE (SUBREG_REG (x));
4382 /* If the inner mode is a single word for both the host and target
4383 machines, we can compute this from which bits of the inner
4384 object might be nonzero. */
4385 if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
4386 && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
4388 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4389 known_x, known_mode, known_ret);
4391 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
4392 /* If this is a typical RISC machine, we only have to worry
4393 about the way loads are extended. */
4394 if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
4395 ? val_signbit_known_set_p (inner_mode, nonzero)
4396 : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
4397 || !MEM_P (SUBREG_REG (x)))
4398 #endif
4400 /* On many CISC machines, accessing an object in a wider mode
4401 causes the high-order bits to become undefined. So they are
4402 not known to be zero. */
4403 if (GET_MODE_PRECISION (GET_MODE (x))
4404 > GET_MODE_PRECISION (inner_mode))
4405 nonzero |= (GET_MODE_MASK (GET_MODE (x))
4406 & ~GET_MODE_MASK (inner_mode));
4409 break;
4411 case ASHIFTRT:
4412 case LSHIFTRT:
4413 case ASHIFT:
4414 case ROTATE:
4415 /* The nonzero bits are in two classes: any bits within MODE
4416 that aren't in GET_MODE (x) are always significant. The rest of the
4417 nonzero bits are those that are significant in the operand of
4418 the shift when shifted the appropriate number of bits. This
4419 shows that high-order bits are cleared by the right shift and
4420 low-order bits by left shifts. */
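/* An illustrative example (hypothetical values): if the nonzero bits of
   the shifted operand are 0x0000ff00 in SImode, then for
   (lshiftrt:SI X (const_int 4)) the nonzero bits are within 0x00000ff0,
   and for (ashift:SI X (const_int 8)) they are within 0x00ff0000.  For
   ASHIFTRT, if the sign bit may have been set, its copies are also added
   back into the vacated high-order positions.  */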
4421 if (CONST_INT_P (XEXP (x, 1))
4422 && INTVAL (XEXP (x, 1)) >= 0
4423 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4424 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4426 enum machine_mode inner_mode = GET_MODE (x);
4427 unsigned int width = GET_MODE_PRECISION (inner_mode);
4428 int count = INTVAL (XEXP (x, 1));
4429 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
4430 unsigned HOST_WIDE_INT op_nonzero
4431 = cached_nonzero_bits (XEXP (x, 0), mode,
4432 known_x, known_mode, known_ret);
4433 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4434 unsigned HOST_WIDE_INT outer = 0;
4436 if (mode_width > width)
4437 outer = (op_nonzero & nonzero & ~mode_mask);
4439 if (code == LSHIFTRT)
4440 inner >>= count;
4441 else if (code == ASHIFTRT)
4443 inner >>= count;
4445 /* If the sign bit may have been nonzero before the shift, we
4446 need to mark all the places it could have been copied to
4447 by the shift as possibly nonzero. */
4448 if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
4449 inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
4450 << (width - count);
4452 else if (code == ASHIFT)
4453 inner <<= count;
4454 else
4455 inner = ((inner << (count % width)
4456 | (inner >> (width - (count % width)))) & mode_mask);
4458 nonzero &= (outer | inner);
4460 break;
4462 case FFS:
4463 case POPCOUNT:
4464 /* This is at most the number of bits in the mode. */
4465 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4466 break;
4468 case CLZ:
4469 /* If CLZ has a known value at zero, then the nonzero bits are
4470 that value, plus the number of bits in the mode minus one. */
4471 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4472 nonzero
4473 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4474 else
4475 nonzero = -1;
4476 break;
4478 case CTZ:
4479 /* If CTZ has a known value at zero, then the nonzero bits are
4480 that value, plus the number of bits in the mode minus one. */
4481 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4482 nonzero
4483 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4484 else
4485 nonzero = -1;
4486 break;
4488 case CLRSB:
4489 /* This is at most the number of bits in the mode minus 1. */
4490 nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4491 break;
4493 case PARITY:
4494 nonzero = 1;
4495 break;
4497 case IF_THEN_ELSE:
4499 unsigned HOST_WIDE_INT nonzero_true
4500 = cached_nonzero_bits (XEXP (x, 1), mode,
4501 known_x, known_mode, known_ret);
4503 /* Don't call nonzero_bits for the second time if it cannot change
4504 anything. */
4505 if ((nonzero & nonzero_true) != nonzero)
4506 nonzero &= nonzero_true
4507 | cached_nonzero_bits (XEXP (x, 2), mode,
4508 known_x, known_mode, known_ret);
4510 break;
4512 default:
4513 break;
4516 return nonzero;
4519 /* See the macro definition above. */
4520 #undef cached_num_sign_bit_copies
4523 /* The function cached_num_sign_bit_copies is a wrapper around
4524 num_sign_bit_copies1. It avoids exponential behavior in
4525 num_sign_bit_copies1 when X has identical subexpressions on the
4526 first or the second level. */
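/* For example (an illustrative sketch, not from the original source): for
   (plus:SI (reg:SI R) (reg:SI R)) both operands are the same rtx, so the
   sign-bit-copy count of R is computed once here and handed to
   num_sign_bit_copies1 as KNOWN_X/KNOWN_RET rather than being recomputed
   for each occurrence of R.  */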
4528 static unsigned int
4529 cached_num_sign_bit_copies (const_rtx x, enum machine_mode mode, const_rtx known_x,
4530 enum machine_mode known_mode,
4531 unsigned int known_ret)
4533 if (x == known_x && mode == known_mode)
4534 return known_ret;
4536 /* Try to find identical subexpressions. If found call
4537 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4538 the precomputed value for the subexpression as KNOWN_RET. */
4540 if (ARITHMETIC_P (x))
4542 rtx x0 = XEXP (x, 0);
4543 rtx x1 = XEXP (x, 1);
4545 /* Check the first level. */
4546 if (x0 == x1)
4547 return
4548 num_sign_bit_copies1 (x, mode, x0, mode,
4549 cached_num_sign_bit_copies (x0, mode, known_x,
4550 known_mode,
4551 known_ret));
4553 /* Check the second level. */
4554 if (ARITHMETIC_P (x0)
4555 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4556 return
4557 num_sign_bit_copies1 (x, mode, x1, mode,
4558 cached_num_sign_bit_copies (x1, mode, known_x,
4559 known_mode,
4560 known_ret));
4562 if (ARITHMETIC_P (x1)
4563 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4564 return
4565 num_sign_bit_copies1 (x, mode, x0, mode,
4566 cached_num_sign_bit_copies (x0, mode, known_x,
4567 known_mode,
4568 known_ret));
4571 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4574 /* Return the number of bits at the high-order end of X that are known to
4575 be equal to the sign bit. X will be used in mode MODE; if MODE is
4576 VOIDmode, X will be used in its own mode. The returned value will always
4577 be between 1 and the number of bits in MODE. */
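/* Worked examples for a 32-bit MODE (illustrative only): (const_int -1)
   has all 32 bits equal to the sign bit, so the result is 32;
   (const_int 3) gives 30, because only the two low-order bits can differ
   from the (zero) sign bit; (const_int -4) likewise gives 30.  */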
4579 static unsigned int
4580 num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
4581 enum machine_mode known_mode,
4582 unsigned int known_ret)
4584 enum rtx_code code = GET_CODE (x);
4585 unsigned int bitwidth = GET_MODE_PRECISION (mode);
4586 int num0, num1, result;
4587 unsigned HOST_WIDE_INT nonzero;
4589 /* If we weren't given a mode, use the mode of X. If the mode is still
4590 VOIDmode, we don't know anything. Likewise if one of the modes is
4591 floating-point. */
4593 if (mode == VOIDmode)
4594 mode = GET_MODE (x);
4596 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
4597 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4598 return 1;
4600 /* For a smaller object, just ignore the high bits. */
4601 if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
4603 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4604 known_x, known_mode, known_ret);
4605 return MAX (1,
4606 num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
4609 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
4611 #ifndef WORD_REGISTER_OPERATIONS
4612 /* If this machine does not do all register operations on the entire
4613 register and MODE is wider than the mode of X, we can say nothing
4614 at all about the high-order bits. */
4615 return 1;
4616 #else
4617 /* Likewise on machines that do, if the mode of the object is smaller
4618 than a word and loads of that size don't sign extend, we can say
4619 nothing about the high order bits. */
4620 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
4621 #ifdef LOAD_EXTEND_OP
4622 && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
4623 #endif
4625 return 1;
4626 #endif
4629 switch (code)
4631 case REG:
4633 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4634 /* If pointers extend signed and this is a pointer in Pmode, say that
4635 all the bits above ptr_mode are known to be sign bit copies. */
4636 /* As we do not know which address space the pointer is referring to,
4637 we can do this only if the target does not support different pointer
4638 or address modes depending on the address space. */
4639 if (target_default_pointer_address_modes_p ()
4640 && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4641 && mode == Pmode && REG_POINTER (x))
4642 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
4643 #endif
4646 unsigned int copies_for_hook = 1, copies = 1;
4647 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4648 known_mode, known_ret,
4649 &copies_for_hook);
4651 if (new_rtx)
4652 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
4653 known_mode, known_ret);
4655 if (copies > 1 || copies_for_hook > 1)
4656 return MAX (copies, copies_for_hook);
4658 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4660 break;
4662 case MEM:
4663 #ifdef LOAD_EXTEND_OP
4664 /* Some RISC machines sign-extend all loads of smaller than a word. */
4665 if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
4666 return MAX (1, ((int) bitwidth
4667 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
4668 #endif
4669 break;
4671 case CONST_INT:
4672 /* If the constant is negative, take its 1's complement and remask.
4673 Then see how many zero bits we have. */
4674 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
4675 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4676 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4677 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4679 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4681 case SUBREG:
4682 /* If this is a SUBREG for a promoted object that is sign-extended
4683 and we are looking at it in a wider mode, we know that at least the
4684 high-order bits are known to be sign bit copies. */
4686 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
4688 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4689 known_x, known_mode, known_ret);
4690 return MAX ((int) bitwidth
4691 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
4692 num0);
4695 /* For a smaller object, just ignore the high bits. */
4696 if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
4698 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4699 known_x, known_mode, known_ret);
4700 return MAX (1, (num0
4701 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
4702 - bitwidth)));
4705 #ifdef WORD_REGISTER_OPERATIONS
4706 #ifdef LOAD_EXTEND_OP
4707 /* For paradoxical SUBREGs on machines where all register operations
4708 affect the entire register, just look inside. Note that we are
4709 passing MODE to the recursive call, so the number of sign bit copies
4710 will remain relative to that mode, not the inner mode. */
4712 /* This works only if loads sign extend. Otherwise, if we get a
4713 reload for the inner part, it may be loaded from the stack, and
4714 then we lose all sign bit copies that existed before the store
4715 to the stack. */
4717 if (paradoxical_subreg_p (x)
4718 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
4719 && MEM_P (SUBREG_REG (x)))
4720 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4721 known_x, known_mode, known_ret);
4722 #endif
4723 #endif
4724 break;
4726 case SIGN_EXTRACT:
4727 if (CONST_INT_P (XEXP (x, 1)))
4728 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4729 break;
4731 case SIGN_EXTEND:
4732 return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4733 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4734 known_x, known_mode, known_ret));
4736 case TRUNCATE:
4737 /* For a smaller object, just ignore the high bits. */
4738 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4739 known_x, known_mode, known_ret);
4740 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4741 - bitwidth)));
4743 case NOT:
4744 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4745 known_x, known_mode, known_ret);
4747 case ROTATE: case ROTATERT:
4748 /* If we are rotating left by a number of bits less than the number
4749 of sign bit copies, we can just subtract that amount from the
4750 number. */
4751 if (CONST_INT_P (XEXP (x, 1))
4752 && INTVAL (XEXP (x, 1)) >= 0
4753 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4755 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4756 known_x, known_mode, known_ret);
4757 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
4758 : (int) bitwidth - INTVAL (XEXP (x, 1))));
4760 break;
4762 case NEG:
4763 /* In general, this subtracts one sign bit copy. But if the value
4764 is known to be positive, the number of sign bit copies is the
4765 same as that of the input. Finally, if the input has just one bit
4766 that might be nonzero, all the bits are copies of the sign bit. */
4767 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4768 known_x, known_mode, known_ret);
4769 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4770 return num0 > 1 ? num0 - 1 : 1;
4772 nonzero = nonzero_bits (XEXP (x, 0), mode);
4773 if (nonzero == 1)
4774 return bitwidth;
4776 if (num0 > 1
4777 && (((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
4778 num0--;
4780 return num0;
4782 case IOR: case AND: case XOR:
4783 case SMIN: case SMAX: case UMIN: case UMAX:
4784 /* Logical operations will preserve the number of sign-bit copies.
4785 MIN and MAX operations always return one of the operands. */
4786 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4787 known_x, known_mode, known_ret);
4788 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4789 known_x, known_mode, known_ret);
4791 /* If num1 is clearing some of the top bits then regardless of
4792 the other term, we are guaranteed to have at least that many
4793 high-order zero bits. */
4794 if (code == AND
4795 && num1 > 1
4796 && bitwidth <= HOST_BITS_PER_WIDE_INT
4797 && CONST_INT_P (XEXP (x, 1))
4798 && (UINTVAL (XEXP (x, 1))
4799 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0)
4800 return num1;
4802 /* Similarly for IOR when setting high-order bits. */
4803 if (code == IOR
4804 && num1 > 1
4805 && bitwidth <= HOST_BITS_PER_WIDE_INT
4806 && CONST_INT_P (XEXP (x, 1))
4807 && (UINTVAL (XEXP (x, 1))
4808 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4809 return num1;
4811 return MIN (num0, num1);
4813 case PLUS: case MINUS:
4814 /* For addition and subtraction, we can have a 1-bit carry. However,
4815 if we are subtracting 1 from a positive number, there will not
4816 be such a carry. Furthermore, if the positive number is known to
4817 be 0 or 1, we know the result is either -1 or 0. */
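/* An illustrative example: if both operands of a PLUS have at least 8
   sign-bit copies in SImode, the sum still has at least 7, since the
   carry can disturb at most one of them.  In the special case
   (plus X (const_int -1)) with X known to be 0 or 1, the result is -1
   or 0 and all 32 bits are copies of the sign bit.  */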
4819 if (code == PLUS && XEXP (x, 1) == constm1_rtx
4820 && bitwidth <= HOST_BITS_PER_WIDE_INT)
4822 nonzero = nonzero_bits (XEXP (x, 0), mode);
4823 if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
4824 return (nonzero == 1 || nonzero == 0 ? bitwidth
4825 : bitwidth - floor_log2 (nonzero) - 1);
4828 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4829 known_x, known_mode, known_ret);
4830 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4831 known_x, known_mode, known_ret);
4832 result = MAX (1, MIN (num0, num1) - 1);
4834 return result;
4836 case MULT:
4837 /* The number of bits of the product is the sum of the number of
4838 bits of both terms. However, unless one of the terms if known
4839 to be positive, we must allow for an additional bit since negating
4840 a negative number can remove one sign bit copy. */
4842 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4843 known_x, known_mode, known_ret);
4844 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4845 known_x, known_mode, known_ret);
4847 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
4848 if (result > 0
4849 && (bitwidth > HOST_BITS_PER_WIDE_INT
4850 || (((nonzero_bits (XEXP (x, 0), mode)
4851 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4852 && ((nonzero_bits (XEXP (x, 1), mode)
4853 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)))
4854 != 0))))
4855 result--;
4857 return MAX (1, result);
4859 case UDIV:
4860 /* The result must be <= the first operand. If the first operand
4861 has the high bit set, we know nothing about the number of sign
4862 bit copies. */
4863 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4864 return 1;
4865 else if ((nonzero_bits (XEXP (x, 0), mode)
4866 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4867 return 1;
4868 else
4869 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4870 known_x, known_mode, known_ret);
4872 case UMOD:
4873 /* The result must be <= the second operand. If the second operand
4874 has (or just might have) the high bit set, we know nothing about
4875 the number of sign bit copies. */
4876 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4877 return 1;
4878 else if ((nonzero_bits (XEXP (x, 1), mode)
4879 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4880 return 1;
4881 else
4882 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
4883 known_x, known_mode, known_ret);
4885 case DIV:
4886 /* Similar to unsigned division, except that we have to worry about
4887 the case where the divisor is negative, in which case we have
4888 to add 1. */
4889 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4890 known_x, known_mode, known_ret);
4891 if (result > 1
4892 && (bitwidth > HOST_BITS_PER_WIDE_INT
4893 || (nonzero_bits (XEXP (x, 1), mode)
4894 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4895 result--;
4897 return result;
4899 case MOD:
4900 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4901 known_x, known_mode, known_ret);
4902 if (result > 1
4903 && (bitwidth > HOST_BITS_PER_WIDE_INT
4904 || (nonzero_bits (XEXP (x, 1), mode)
4905 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4906 result--;
4908 return result;
4910 case ASHIFTRT:
4911 /* An arithmetic right shift by a constant adds that many bits equal
4912 to the sign bit. */
4913 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4914 known_x, known_mode, known_ret);
4915 if (CONST_INT_P (XEXP (x, 1))
4916 && INTVAL (XEXP (x, 1)) > 0
4917 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4918 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
4920 return num0;
4922 case ASHIFT:
4923 /* Left shifts destroy copies. */
4924 if (!CONST_INT_P (XEXP (x, 1))
4925 || INTVAL (XEXP (x, 1)) < 0
4926 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
4927 || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
4928 return 1;
4930 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4931 known_x, known_mode, known_ret);
4932 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
4934 case IF_THEN_ELSE:
4935 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4936 known_x, known_mode, known_ret);
4937 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
4938 known_x, known_mode, known_ret);
4939 return MIN (num0, num1);
4941 case EQ: case NE: case GE: case GT: case LE: case LT:
4942 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
4943 case GEU: case GTU: case LEU: case LTU:
4944 case UNORDERED: case ORDERED:
4945 /* If the constant is negative, take its 1's complement and remask.
4946 Then see how many zero bits we have. */
4947 nonzero = STORE_FLAG_VALUE;
4948 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4949 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4950 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4952 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4954 default:
4955 break;
4958 /* If we haven't been able to figure it out by one of the above rules,
4959 see if some of the high-order bits are known to be zero. If so,
4960 count those bits and return one less than that amount. If we can't
4961 safely compute the mask for this mode, always return BITWIDTH. */
4963 bitwidth = GET_MODE_PRECISION (mode);
4964 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4965 return 1;
4967 nonzero = nonzero_bits (x, mode);
4968 return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
4969 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
4972 /* Calculate the rtx_cost of a single instruction. A return value of
4973 zero indicates an instruction pattern without a known cost. */
4975 int
4976 insn_rtx_cost (rtx pat, bool speed)
4978 int i, cost;
4979 rtx set;
4981 /* Extract the single set rtx from the instruction pattern.
4982 We can't use single_set since we only have the pattern. */
4983 if (GET_CODE (pat) == SET)
4984 set = pat;
4985 else if (GET_CODE (pat) == PARALLEL)
4987 set = NULL_RTX;
4988 for (i = 0; i < XVECLEN (pat, 0); i++)
4990 rtx x = XVECEXP (pat, 0, i);
4991 if (GET_CODE (x) == SET)
4993 if (set)
4994 return 0;
4995 set = x;
4998 if (!set)
4999 return 0;
5001 else
5002 return 0;
5004 cost = set_src_cost (SET_SRC (set), speed);
5005 return cost > 0 ? cost : COSTS_N_INSNS (1);
5008 /* Given an insn INSN and condition COND, return the condition in a
5009 canonical form to simplify testing by callers. Specifically:
5011 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
5012 (2) Both operands will be machine operands; (cc0) will have been replaced.
5013 (3) If an operand is a constant, it will be the second operand.
5014 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
5015 for GE, GEU, and LEU.
5017 If the condition cannot be understood, or is an inequality floating-point
5018 comparison which needs to be reversed, 0 will be returned.
5020 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
5022 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5023 insn used in locating the condition was found. If a replacement test
5024 of the condition is desired, it should be placed in front of that
5025 insn and we will be sure that the inputs are still valid.
5027 If WANT_REG is nonzero, we wish the condition to be relative to that
5028 register, if possible. Therefore, do not canonicalize the condition
5029 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
5030 to be a compare to a CC mode register.
5032 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
5033 and at INSN. */
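/* An illustrative sketch (hypothetical operands, not from the original
   source): given COND = (le (reg:SI R) (const_int 4)) and REVERSE zero,
   rule (4) yields (lt (reg:SI R) (const_int 5)); with REVERSE nonzero the
   code is first reversed to GT, giving (gt (reg:SI R) (const_int 4)).
   A constant first operand is moved last, so (lt (const_int 0) (reg:SI R))
   becomes (gt (reg:SI R) (const_int 0)).  */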
5035 rtx
5036 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
5037 rtx_insn **earliest,
5038 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5040 enum rtx_code code;
5041 rtx_insn *prev = insn;
5042 const_rtx set;
5043 rtx tem;
5044 rtx op0, op1;
5045 int reverse_code = 0;
5046 enum machine_mode mode;
5047 basic_block bb = BLOCK_FOR_INSN (insn);
5049 code = GET_CODE (cond);
5050 mode = GET_MODE (cond);
5051 op0 = XEXP (cond, 0);
5052 op1 = XEXP (cond, 1);
5054 if (reverse)
5055 code = reversed_comparison_code (cond, insn);
5056 if (code == UNKNOWN)
5057 return 0;
5059 if (earliest)
5060 *earliest = insn;
5062 /* If we are comparing a register with zero, see if the register is set
5063 in the previous insn to a COMPARE or a comparison operation. Perform
5064 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
5065 in cse.c */
5067 while ((GET_RTX_CLASS (code) == RTX_COMPARE
5068 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5069 && op1 == CONST0_RTX (GET_MODE (op0))
5070 && op0 != want_reg)
5072 /* Set nonzero when we find something of interest. */
5073 rtx x = 0;
5075 #ifdef HAVE_cc0
5076 /* If comparison with cc0, import actual comparison from compare
5077 insn. */
5078 if (op0 == cc0_rtx)
5080 if ((prev = prev_nonnote_insn (prev)) == 0
5081 || !NONJUMP_INSN_P (prev)
5082 || (set = single_set (prev)) == 0
5083 || SET_DEST (set) != cc0_rtx)
5084 return 0;
5086 op0 = SET_SRC (set);
5087 op1 = CONST0_RTX (GET_MODE (op0));
5088 if (earliest)
5089 *earliest = prev;
5091 #endif
5093 /* If this is a COMPARE, pick up the two things being compared. */
5094 if (GET_CODE (op0) == COMPARE)
5096 op1 = XEXP (op0, 1);
5097 op0 = XEXP (op0, 0);
5098 continue;
5100 else if (!REG_P (op0))
5101 break;
5103 /* Go back to the previous insn. Stop if it is not an INSN. We also
5104 stop if it isn't a single set or if it has a REG_INC note because
5105 we don't want to bother dealing with it. */
5107 prev = prev_nonnote_nondebug_insn (prev);
5109 if (prev == 0
5110 || !NONJUMP_INSN_P (prev)
5111 || FIND_REG_INC_NOTE (prev, NULL_RTX)
5112 /* In cfglayout mode, there do not have to be labels at the
5113 beginning of a block, or jumps at the end, so the previous
5114 conditions would not stop us when we reach the bb boundary. */
5115 || BLOCK_FOR_INSN (prev) != bb)
5116 break;
5118 set = set_of (op0, prev);
5120 if (set
5121 && (GET_CODE (set) != SET
5122 || !rtx_equal_p (SET_DEST (set), op0)))
5123 break;
5125 /* If this is setting OP0, get what it sets it to if it looks
5126 relevant. */
5127 if (set)
5129 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
5130 #ifdef FLOAT_STORE_FLAG_VALUE
5131 REAL_VALUE_TYPE fsfv;
5132 #endif
5134 /* ??? We may not combine comparisons done in a CCmode with
5135 comparisons not done in a CCmode. This is to aid targets
5136 like Alpha that have an IEEE compliant EQ instruction, and
5137 a non-IEEE compliant BEQ instruction. The use of CCmode is
5138 actually artificial, simply to prevent the combination, but
5139 should not affect other platforms.
5141 However, we must allow VOIDmode comparisons to match either
5142 CCmode or non-CCmode comparison, because some ports have
5143 modeless comparisons inside branch patterns.
5145 ??? This mode check should perhaps look more like the mode check
5146 in simplify_comparison in combine. */
5147 if (((GET_MODE_CLASS (mode) == MODE_CC)
5148 != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5149 && mode != VOIDmode
5150 && inner_mode != VOIDmode)
5151 break;
5152 if (GET_CODE (SET_SRC (set)) == COMPARE
5153 || (((code == NE
5154 || (code == LT
5155 && val_signbit_known_set_p (inner_mode,
5156 STORE_FLAG_VALUE))
5157 #ifdef FLOAT_STORE_FLAG_VALUE
5158 || (code == LT
5159 && SCALAR_FLOAT_MODE_P (inner_mode)
5160 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5161 REAL_VALUE_NEGATIVE (fsfv)))
5162 #endif
5164 && COMPARISON_P (SET_SRC (set))))
5165 x = SET_SRC (set);
5166 else if (((code == EQ
5167 || (code == GE
5168 && val_signbit_known_set_p (inner_mode,
5169 STORE_FLAG_VALUE))
5170 #ifdef FLOAT_STORE_FLAG_VALUE
5171 || (code == GE
5172 && SCALAR_FLOAT_MODE_P (inner_mode)
5173 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5174 REAL_VALUE_NEGATIVE (fsfv)))
5175 #endif
5177 && COMPARISON_P (SET_SRC (set)))
5179 reverse_code = 1;
5180 x = SET_SRC (set);
5182 else if ((code == EQ || code == NE)
5183 && GET_CODE (SET_SRC (set)) == XOR)
5184 /* Handle sequences like:
5186 (set op0 (xor X Y))
5187 ...(eq|ne op0 (const_int 0))...
5189 in which case:
5191 (eq op0 (const_int 0)) reduces to (eq X Y)
5192 (ne op0 (const_int 0)) reduces to (ne X Y)
5194 This is the form used by MIPS16, for example. */
5195 x = SET_SRC (set);
5196 else
5197 break;
5200 else if (reg_set_p (op0, prev))
5201 /* If this sets OP0, but not directly, we have to give up. */
5202 break;
5204 if (x)
5206 /* If the caller is expecting the condition to be valid at INSN,
5207 make sure X doesn't change before INSN. */
5208 if (valid_at_insn_p)
5209 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5210 break;
5211 if (COMPARISON_P (x))
5212 code = GET_CODE (x);
5213 if (reverse_code)
5215 code = reversed_comparison_code (x, prev);
5216 if (code == UNKNOWN)
5217 return 0;
5218 reverse_code = 0;
5221 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5222 if (earliest)
5223 *earliest = prev;
5227 /* If constant is first, put it last. */
5228 if (CONSTANT_P (op0))
5229 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5231 /* If OP0 is the result of a comparison, we weren't able to find what
5232 was really being compared, so fail. */
5233 if (!allow_cc_mode
5234 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5235 return 0;
5237 /* Canonicalize any ordered comparison with integers involving equality
5238 if we can do computations in the relevant mode and we do not
5239 overflow. */
5241 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
5242 && CONST_INT_P (op1)
5243 && GET_MODE (op0) != VOIDmode
5244 && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
5246 HOST_WIDE_INT const_val = INTVAL (op1);
5247 unsigned HOST_WIDE_INT uconst_val = const_val;
5248 unsigned HOST_WIDE_INT max_val
5249 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
5251 switch (code)
5253 case LE:
5254 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5255 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
5256 break;
5258 /* When cross-compiling, const_val might be sign-extended from
5259 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
5260 case GE:
5261 if ((const_val & max_val)
5262 != ((unsigned HOST_WIDE_INT) 1
5263 << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
5264 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
5265 break;
5267 case LEU:
5268 if (uconst_val < max_val)
5269 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
5270 break;
5272 case GEU:
5273 if (uconst_val != 0)
5274 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
5275 break;
5277 default:
5278 break;
5282 /* Never return CC0; return zero instead. */
5283 if (CC0_P (op0))
5284 return 0;
5286 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5289 /* Given a jump insn JUMP, return the condition that will cause it to branch
5290 to its JUMP_LABEL. If the condition cannot be understood, or is an
5291 inequality floating-point comparison which needs to be reversed, 0 will
5292 be returned.
5294 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5295 insn used in locating the condition was found. If a replacement test
5296 of the condition is desired, it should be placed in front of that
5297 insn and we will be sure that the inputs are still valid. If EARLIEST
5298 is null, the returned condition will be valid at INSN.
5300 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5301 compare to a CC mode register.
5303 VALID_AT_INSN_P is the same as for canonicalize_condition. */
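/* An illustrative sketch (hypothetical jump, not from the original source):
   for a branch whose pattern is
   (set (pc) (if_then_else (eq (reg:SI R) (const_int 0)) (label_ref L) (pc)))
   the branch is taken when the condition holds, so the canonicalized
   (eq (reg:SI R) (const_int 0)) is returned.  If the label instead appears
   in the else arm, REVERSE is set and the (ne ...) form is returned.  */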
5305 rtx
5306 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
5307 int valid_at_insn_p)
5309 rtx cond;
5310 int reverse;
5311 rtx set;
5313 /* If this is not a standard conditional jump, we can't parse it. */
5314 if (!JUMP_P (jump)
5315 || ! any_condjump_p (jump))
5316 return 0;
5317 set = pc_set (jump);
5319 cond = XEXP (SET_SRC (set), 0);
5321 /* If this branches to JUMP_LABEL when the condition is false, reverse
5322 the condition. */
5323 reverse
5324 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5325 && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
5327 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5328 allow_cc_mode, valid_at_insn_p);
5331 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5332 TARGET_MODE_REP_EXTENDED.
5334 Note that we assume that the property of
5335 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5336 narrower than mode B. I.e., if A is a mode narrower than B then in
5337 order to be able to operate on it in mode B, mode A needs to
5338 satisfy the requirements set by the representation of mode B. */
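/* An illustrative example (hypothetical target): if the mode_rep_extended
   hook reports SIGN_EXTEND for the QImode-to-HImode and HImode-to-SImode
   steps, then num_sign_bit_copies_in_rep[SImode][QImode] becomes
   (16 - 8) + (32 - 16) = 24: a QImode value held in an SImode register
   must have its top 24 bits equal to the QImode sign bit.  */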
5340 static void
5341 init_num_sign_bit_copies_in_rep (void)
5343 enum machine_mode mode, in_mode;
5345 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
5346 in_mode = GET_MODE_WIDER_MODE (mode))
5347 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
5348 mode = GET_MODE_WIDER_MODE (mode))
5350 enum machine_mode i;
5352 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5353 extends to the next widest mode. */
5354 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5355 || GET_MODE_WIDER_MODE (mode) == in_mode);
5357 /* We are in in_mode. Count how many bits outside of mode
5358 have to be copies of the sign-bit. */
5359 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
5361 enum machine_mode wider = GET_MODE_WIDER_MODE (i);
5363 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5364 /* We can only check sign-bit copies starting from the
5365 top-bit. In order to be able to check the bits we
5366 have already seen we pretend that subsequent bits
5367 have to be sign-bit copies too. */
5368 || num_sign_bit_copies_in_rep [in_mode][mode])
5369 num_sign_bit_copies_in_rep [in_mode][mode]
5370 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5375 /* Suppose that truncation from the machine mode of X to MODE is not a
5376 no-op. See if there is anything special about X so that we can
5377 assume it already contains a truncated value of MODE. */
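/* Continuing the hypothetical example above: with
   num_sign_bit_copies_in_rep[SImode][QImode] == 24, an SImode X already
   satisfies the QImode representation, and this function returns true,
   when num_sign_bit_copies (X, SImode) >= 25, i.e. at least the top 25
   bits of X are sign-bit copies.  */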
5379 bool
5380 truncated_to_mode (enum machine_mode mode, const_rtx x)
5382 /* This register has already been used in MODE without explicit
5383 truncation. */
5384 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5385 return true;
5387 /* See if we already satisfy the requirements of MODE. If yes we
5388 can just switch to MODE. */
5389 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5390 && (num_sign_bit_copies (x, GET_MODE (x))
5391 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5392 return true;
5394 return false;
5397 /* Return true if RTX code CODE has a single sequence of zero or more
5398 "e" operands and no rtvec operands. Initialize its rtx_all_subrtx_bounds
5399 entry in that case. */
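/* Illustrative examples (based on the rtx formats in rtl.def): PLUS has
   format "ee", so its bounds become start 0, count 2; PARALLEL has
   format "E" (an rtvec), so this function returns false and init_rtlanal
   below records UCHAR_MAX for its count instead.  */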
5401 static bool
5402 setup_reg_subrtx_bounds (unsigned int code)
5404 const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
5405 unsigned int i = 0;
5406 for (; format[i] != 'e'; ++i)
5408 if (!format[i])
5409 /* No subrtxes. Leave start and count as 0. */
5410 return true;
5411 if (format[i] == 'E' || format[i] == 'V')
5412 return false;
5415 /* Record the sequence of 'e's. */
5416 rtx_all_subrtx_bounds[code].start = i;
5418 ++i;
5419 while (format[i] == 'e');
5420 rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
5421 /* rtl-iter.h relies on this. */
5422 gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
5424 for (; format[i]; ++i)
5425 if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
5426 return false;
5428 return true;
5431 /* Initialize non_rtx_starting_operands, which is used to speed up
5432 for_each_rtx, and rtx_all_subrtx_bounds. */
5433 void
5434 init_rtlanal (void)
5436 int i;
5437 for (i = 0; i < NUM_RTX_CODE; i++)
5439 const char *format = GET_RTX_FORMAT (i);
5440 const char *first = strpbrk (format, "eEV");
5441 non_rtx_starting_operands[i] = first ? first - format : -1;
5442 if (!setup_reg_subrtx_bounds (i))
5443 rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
5444 if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
5445 rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
5448 init_num_sign_bit_copies_in_rep ();
5451 /* Check whether this is a constant pool constant. */
5452 bool
5453 constant_pool_constant_p (rtx x)
5455 x = avoid_constant_pool_reference (x);
5456 return CONST_DOUBLE_P (x);
5459 /* If M is a bitmask that selects a field of low-order bits within an item but
5460 not the entire word, return the length of the field. Return -1 otherwise.
5461 M is used in machine mode MODE. */
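/* Worked examples (illustrative only): low_bitmask_len (SImode, 0x3f)
   returns 6, since 0x3f + 1 is a power of two; low_bitmask_len (SImode,
   0x18) returns -1 because the set bits do not start at bit 0.  */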
5463 int
5464 low_bitmask_len (enum machine_mode mode, unsigned HOST_WIDE_INT m)
5466 if (mode != VOIDmode)
5468 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
5469 return -1;
5470 m &= GET_MODE_MASK (mode);
5473 return exact_log2 (m + 1);
5476 /* Return the mode of MEM's address. */
5478 enum machine_mode
5479 get_address_mode (rtx mem)
5481 enum machine_mode mode;
5483 gcc_assert (MEM_P (mem));
5484 mode = GET_MODE (XEXP (mem, 0));
5485 if (mode != VOIDmode)
5486 return mode;
5487 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5490 /* Split up a CONST_DOUBLE or integer constant rtx
5491 into two rtx's for single words,
5492 storing in *FIRST the word that comes first in memory in the target
5493 and in *SECOND the other.
5495 TODO: This function needs to be rewritten to work on any size
5496 integer. */
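/* An illustrative example (assuming BITS_PER_WORD == 32 and a 64-bit
   HOST_WIDE_INT): splitting (const_int 0x100000002) produces the words
   (const_int 2) and (const_int 1); when !WORDS_BIG_ENDIAN, *FIRST is
   (const_int 2) and *SECOND is (const_int 1), and the two are swapped
   on a WORDS_BIG_ENDIAN target.  */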
5498 void
5499 split_double (rtx value, rtx *first, rtx *second)
5501 if (CONST_INT_P (value))
5503 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5505 /* In this case the CONST_INT holds both target words.
5506 Extract the bits from it into two word-sized pieces.
5507 Sign extend each half to HOST_WIDE_INT. */
5508 unsigned HOST_WIDE_INT low, high;
5509 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5510 unsigned bits_per_word = BITS_PER_WORD;
5512 /* Set sign_bit to the most significant bit of a word. */
5513 sign_bit = 1;
5514 sign_bit <<= bits_per_word - 1;
5516 /* Set mask so that all bits of the word are set. We could
5517 have used 1 << BITS_PER_WORD instead of basing the
5518 calculation on sign_bit. However, on machines where
5519 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5520 compiler warning, even though the code would never be
5521 executed. */
5522 mask = sign_bit << 1;
5523 mask--;
5525 /* Set sign_extend as any remaining bits. */
5526 sign_extend = ~mask;
5528 /* Pick the lower word and sign-extend it. */
5529 low = INTVAL (value);
5530 low &= mask;
5531 if (low & sign_bit)
5532 low |= sign_extend;
5534 /* Pick the higher word, shifted to the least significant
5535 bits, and sign-extend it. */
5536 high = INTVAL (value);
5537 high >>= bits_per_word - 1;
5538 high >>= 1;
5539 high &= mask;
5540 if (high & sign_bit)
5541 high |= sign_extend;
5543 /* Store the words in the target machine order. */
5544 if (WORDS_BIG_ENDIAN)
5546 *first = GEN_INT (high);
5547 *second = GEN_INT (low);
5549 else
5551 *first = GEN_INT (low);
5552 *second = GEN_INT (high);
5555 else
5557 /* The rule for using CONST_INT for a wider mode
5558 is that we regard the value as signed.
5559 So sign-extend it. */
5560 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
5561 if (WORDS_BIG_ENDIAN)
5563 *first = high;
5564 *second = value;
5566 else
5568 *first = value;
5569 *second = high;
5573 else if (GET_CODE (value) == CONST_WIDE_INT)
5575 /* All of this is scary code and needs to be converted to
5576 properly work with any size integer. */
5577 gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
5578 if (WORDS_BIG_ENDIAN)
5580 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5581 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5583 else
5585 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5586 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5589 else if (!CONST_DOUBLE_P (value))
5591 if (WORDS_BIG_ENDIAN)
5593 *first = const0_rtx;
5594 *second = value;
5596 else
5598 *first = value;
5599 *second = const0_rtx;
5602 else if (GET_MODE (value) == VOIDmode
5603 /* This is the old way we did CONST_DOUBLE integers. */
5604 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
5606 /* In an integer, the words are defined as most and least significant.
5607 So order them by the target's convention. */
5608 if (WORDS_BIG_ENDIAN)
5610 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
5611 *second = GEN_INT (CONST_DOUBLE_LOW (value));
5613 else
5615 *first = GEN_INT (CONST_DOUBLE_LOW (value));
5616 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
5619 else
5621 REAL_VALUE_TYPE r;
5622 long l[2];
5623 REAL_VALUE_FROM_CONST_DOUBLE (r, value);
5625 /* Note, this converts the REAL_VALUE_TYPE to the target's
5626 format, splits up the floating point double and outputs
5627 exactly 32 bits of it into each of l[0] and l[1] --
5628 not necessarily BITS_PER_WORD bits. */
5629 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
5631 /* If 32 bits is an entire word for the target, but not for the host,
5632 then sign-extend on the host so that the number will look the same
5633 way on the host that it would on the target. See for instance
5634 simplify_unary_operation. The #if is needed to avoid compiler
5635 warnings. */
5637 #if HOST_BITS_PER_LONG > 32
5638 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
5640 if (l[0] & ((long) 1 << 31))
5641 l[0] |= ((long) (-1) << 32);
5642 if (l[1] & ((long) 1 << 31))
5643 l[1] |= ((long) (-1) << 32);
5645 #endif
5647 *first = GEN_INT (l[0]);
5648 *second = GEN_INT (l[1]);
5652 /* Return true if X is a sign_extract or zero_extract from the least
5653 significant bit. */
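/* An illustrative example: on a !BITS_BIG_ENDIAN target,
   (zero_extract:SI (reg:SI R) (const_int 8) (const_int 0)) extracts 8
   bits starting at bit 0, so this returns true; with BITS_BIG_ENDIAN the
   least-significant position would instead have to be
   GET_MODE_PRECISION (SImode) - 8 == 24.  */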
5655 static bool
5656 lsb_bitfield_op_p (rtx x)
5658 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
5660 enum machine_mode mode = GET_MODE (XEXP (x, 0));
5661 HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
5662 HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
5664 return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
5666 return false;
5669 /* Strip outer address "mutations" from LOC and return a pointer to the
5670 inner value. If OUTER_CODE is nonnull, store the code of the innermost
5671 stripped expression there.
5673 "Mutations" either convert between modes or apply some kind of
5674 extension, truncation or alignment. */
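/* An illustrative example (hypothetical address): for
   *LOC == (and:SI (zero_extend:SI (reg:HI R)) (const_int -4)), the AND
   (an alignment mask) and the ZERO_EXTEND (a pointer-size conversion)
   are both stripped, a pointer to (reg:HI R) is returned, and
   *OUTER_CODE, if requested, is set to ZERO_EXTEND, the innermost code
   stripped.  */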
5676 rtx *
5677 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
5679 for (;;)
5681 enum rtx_code code = GET_CODE (*loc);
5682 if (GET_RTX_CLASS (code) == RTX_UNARY)
5683 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
5684 used to convert between pointer sizes. */
5685 loc = &XEXP (*loc, 0);
5686 else if (lsb_bitfield_op_p (*loc))
5687 /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
5688 acts as a combined truncation and extension. */
5689 loc = &XEXP (*loc, 0);
5690 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
5691 /* (and ... (const_int -X)) is used to align to X bytes. */
5692 loc = &XEXP (*loc, 0);
5693 else if (code == SUBREG
5694 && !OBJECT_P (SUBREG_REG (*loc))
5695 && subreg_lowpart_p (*loc))
5696 /* A lowpart (subreg (operator ...) ...) is likewise used for mode
5697 conversion. */
5698 loc = &SUBREG_REG (*loc);
5699 else
5700 return loc;
5701 if (outer_code)
5702 *outer_code = code;
5706 /* Return true if CODE applies some kind of scale. The scaled value is
5707 the first operand and the scale is the second. */
5709 static bool
5710 binary_scale_code_p (enum rtx_code code)
5712 return (code == MULT
5713 || code == ASHIFT
5714 /* Needed by ARM targets. */
5715 || code == ASHIFTRT
5716 || code == LSHIFTRT
5717 || code == ROTATE
5718 || code == ROTATERT);
5721 /* If *INNER can be interpreted as a base, return a pointer to the inner term
5722 (see address_info). Return null otherwise. */
5724 static rtx *
5725 get_base_term (rtx *inner)
5727 if (GET_CODE (*inner) == LO_SUM)
5728 inner = strip_address_mutations (&XEXP (*inner, 0));
5729 if (REG_P (*inner)
5730 || MEM_P (*inner)
5731 || GET_CODE (*inner) == SUBREG)
5732 return inner;
5733 return 0;
5736 /* If *INNER can be interpreted as an index, return a pointer to the inner term
5737 (see address_info). Return null otherwise. */
5739 static rtx *
5740 get_index_term (rtx *inner)
5742 /* At present, only constant scales are allowed. */
5743 if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
5744 inner = strip_address_mutations (&XEXP (*inner, 0));
5745 if (REG_P (*inner)
5746 || MEM_P (*inner)
5747 || GET_CODE (*inner) == SUBREG)
5748 return inner;
5749 return 0;
5752 /* Set the segment part of address INFO to LOC, given that INNER is the
5753 unmutated value. */
5755 static void
5756 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
5758 gcc_assert (!info->segment);
5759 info->segment = loc;
5760 info->segment_term = inner;
5763 /* Set the base part of address INFO to LOC, given that INNER is the
5764 unmutated value. */
5766 static void
5767 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
5769 gcc_assert (!info->base);
5770 info->base = loc;
5771 info->base_term = inner;
5774 /* Set the index part of address INFO to LOC, given that INNER is the
5775 unmutated value. */
5777 static void
5778 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
5780 gcc_assert (!info->index);
5781 info->index = loc;
5782 info->index_term = inner;
5785 /* Set the displacement part of address INFO to LOC, given that INNER
5786 is the constant term. */
5788 static void
5789 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
5791 gcc_assert (!info->disp);
5792 info->disp = loc;
5793 info->disp_term = inner;
5796 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
5797 rest of INFO accordingly. */
5799 static void
5800 decompose_incdec_address (struct address_info *info)
5802 info->autoinc_p = true;
5804 rtx *base = &XEXP (*info->inner, 0);
5805 set_address_base (info, base, base);
5806 gcc_checking_assert (info->base == info->base_term);
5808 /* These addresses are only valid when the size of the addressed
5809 value is known. */
5810 gcc_checking_assert (info->mode != VOIDmode);
5813 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
5814 of INFO accordingly. */
5816 static void
5817 decompose_automod_address (struct address_info *info)
5819 info->autoinc_p = true;
5821 rtx *base = &XEXP (*info->inner, 0);
5822 set_address_base (info, base, base);
5823 gcc_checking_assert (info->base == info->base_term);
5825 rtx plus = XEXP (*info->inner, 1);
5826 gcc_assert (GET_CODE (plus) == PLUS);
5828 info->base_term2 = &XEXP (plus, 0);
5829 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
5831 rtx *step = &XEXP (plus, 1);
5832 rtx *inner_step = strip_address_mutations (step);
5833 if (CONSTANT_P (*inner_step))
5834 set_address_disp (info, step, inner_step);
5835 else
5836 set_address_index (info, step, inner_step);
5839 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
5840 values in [PTR, END). Return a pointer to the end of the used array. */
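/* An illustrative example: for
   *LOC == (plus (plus (reg B) (reg I)) (const_int 8)) the recursion
   stores pointers to (reg B), (reg I) and (const_int 8), in that order,
   and returns PTR advanced by three.  */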
5842 static rtx **
5843 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
5845 rtx x = *loc;
5846 if (GET_CODE (x) == PLUS)
5848 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
5849 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
5851 else
5853 gcc_assert (ptr != end);
5854 *ptr++ = loc;
5856 return ptr;
5859 /* Evaluate the likelihood of X being a base or index value, returning
5860 positive if it is likely to be a base, negative if it is likely to be
5861 an index, and 0 if we can't tell. Make the magnitude of the return
5862 value reflect the amount of confidence we have in the answer.
5864 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
5866 static int
5867 baseness (rtx x, enum machine_mode mode, addr_space_t as,
5868 enum rtx_code outer_code, enum rtx_code index_code)
5870 /* Believe *_POINTER unless the address shape requires otherwise. */
5871 if (REG_P (x) && REG_POINTER (x))
5872 return 2;
5873 if (MEM_P (x) && MEM_POINTER (x))
5874 return 2;
5876 if (REG_P (x) && HARD_REGISTER_P (x))
5878 /* X is a hard register. If it only fits one of the base
5879 or index classes, choose that interpretation. */
5880 int regno = REGNO (x);
5881 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
5882 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
5883 if (base_p != index_p)
5884 return base_p ? 1 : -1;
5886 return 0;
5889 /* INFO->INNER describes a normal, non-automodified address.
5890 Fill in the rest of INFO accordingly. */
5892 static void
5893 decompose_normal_address (struct address_info *info)
5895 /* Treat the address as the sum of up to four values. */
5896 rtx *ops[4];
5897 size_t n_ops = extract_plus_operands (info->inner, ops,
5898 ops + ARRAY_SIZE (ops)) - ops;
5900 /* If there is more than one component, any base component is in a PLUS. */
5901 if (n_ops > 1)
5902 info->base_outer_code = PLUS;
5904 /* Try to classify each sum operand now. Leave those that could be
5905 either a base or an index in OPS. */
5906 rtx *inner_ops[4];
5907 size_t out = 0;
5908 for (size_t in = 0; in < n_ops; ++in)
5910 rtx *loc = ops[in];
5911 rtx *inner = strip_address_mutations (loc);
5912 if (CONSTANT_P (*inner))
5913 set_address_disp (info, loc, inner);
5914 else if (GET_CODE (*inner) == UNSPEC)
5915 set_address_segment (info, loc, inner);
5916 else
5918 /* The only other possibilities are a base or an index. */
5919 rtx *base_term = get_base_term (inner);
5920 rtx *index_term = get_index_term (inner);
5921 gcc_assert (base_term || index_term);
5922 if (!base_term)
5923 set_address_index (info, loc, index_term);
5924 else if (!index_term)
5925 set_address_base (info, loc, base_term);
5926 else
5928 gcc_assert (base_term == index_term);
5929 ops[out] = loc;
5930 inner_ops[out] = base_term;
5931 ++out;
5936 /* Classify the remaining OPS members as bases and indexes. */
5937 if (out == 1)
5939 /* If we haven't seen a base or an index yet, assume that this is
5940 the base. If we were confident that another term was the base
5941 or index, treat the remaining operand as the other kind. */
5942 if (!info->base)
5943 set_address_base (info, ops[0], inner_ops[0]);
5944 else
5945 set_address_index (info, ops[0], inner_ops[0]);
5947 else if (out == 2)
5949 /* In the event of a tie, assume the base comes first. */
5950 if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
5951 GET_CODE (*ops[1]))
5952 >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
5953 GET_CODE (*ops[0])))
5955 set_address_base (info, ops[0], inner_ops[0]);
5956 set_address_index (info, ops[1], inner_ops[1]);
5958 else
5960 set_address_base (info, ops[1], inner_ops[1]);
5961 set_address_index (info, ops[0], inner_ops[0]);
5964 else
5965 gcc_assert (out == 0);
5968 /* Describe address *LOC in *INFO. MODE is the mode of the addressed value,
5969 or VOIDmode if not known. AS is the address space associated with LOC.
5970 OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise. */
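/* An illustrative example (hypothetical address): decomposing
   (plus:SI (reg:SI B) (const_int 16)) for an SImode memory reference
   records the register as the base (*INFO->base_term) and the constant
   as the displacement (*INFO->disp_term); no index or segment is found
   and INFO->autoinc_p stays false.  */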
5972 void
5973 decompose_address (struct address_info *info, rtx *loc, enum machine_mode mode,
5974 addr_space_t as, enum rtx_code outer_code)
5976 memset (info, 0, sizeof (*info));
5977 info->mode = mode;
5978 info->as = as;
5979 info->addr_outer_code = outer_code;
5980 info->outer = loc;
5981 info->inner = strip_address_mutations (loc, &outer_code);
5982 info->base_outer_code = outer_code;
5983 switch (GET_CODE (*info->inner))
5985 case PRE_DEC:
5986 case PRE_INC:
5987 case POST_DEC:
5988 case POST_INC:
5989 decompose_incdec_address (info);
5990 break;
5992 case PRE_MODIFY:
5993 case POST_MODIFY:
5994 decompose_automod_address (info);
5995 break;
5997 default:
5998 decompose_normal_address (info);
5999 break;
6003 /* Describe address operand LOC in INFO. */
6005 void
6006 decompose_lea_address (struct address_info *info, rtx *loc)
6008 decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
6011 /* Describe the address of MEM X in INFO. */
6013 void
6014 decompose_mem_address (struct address_info *info, rtx x)
6016 gcc_assert (MEM_P (x));
6017 decompose_address (info, &XEXP (x, 0), GET_MODE (x),
6018 MEM_ADDR_SPACE (x), MEM);
6021 /* Update INFO after a change to the address it describes. */
6023 void
6024 update_address (struct address_info *info)
6026 decompose_address (info, info->outer, info->mode, info->as,
6027 info->addr_outer_code);
6030 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
6031 more complicated than that. */
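/* Illustrative examples: if *INFO->INDEX is (mult (reg I) (const_int 4))
   with INDEX_TERM pointing at the register, the scale is 4; for
   (ashift (reg I) (const_int 3)) it is 1 << 3 == 8; when the index term
   is the whole index the scale is 1, and anything else yields 0.  */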
6033 HOST_WIDE_INT
6034 get_index_scale (const struct address_info *info)
6036 rtx index = *info->index;
6037 if (GET_CODE (index) == MULT
6038 && CONST_INT_P (XEXP (index, 1))
6039 && info->index_term == &XEXP (index, 0))
6040 return INTVAL (XEXP (index, 1));
6042 if (GET_CODE (index) == ASHIFT
6043 && CONST_INT_P (XEXP (index, 1))
6044 && info->index_term == &XEXP (index, 0))
6045 return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));
6047 if (info->index == info->index_term)
6048 return 1;
6050 return 0;
6053 /* Return the "index code" of INFO, in the form required by
6054 ok_for_base_p_1. */
6056 enum rtx_code
6057 get_index_code (const struct address_info *info)
6059 if (info->index)
6060 return GET_CODE (*info->index);
6062 if (info->disp)
6063 return GET_CODE (*info->disp);
6065 return SCRATCH;
6068 /* Return true if X contains a thread-local symbol. */
6070 bool
6071 tls_referenced_p (const_rtx x)
6073 if (!targetm.have_tls)
6074 return false;
6076 subrtx_iterator::array_type array;
6077 FOR_EACH_SUBRTX (iter, array, x, ALL)
6078 if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
6079 return true;
6080 return false;