/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "rtl.h"
#include "insn-config.h"
#include "recog.h"
#include "target.h"
#include "output.h"
#include "tm_p.h"
#include "flags.h"
#include "regs.h"
#include "function.h"
#include "predict.h"
#include "basic-block.h"
#include "df.h"
#include "symtab.h"
#include "tree.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "addresses.h"
#include "rtl-iter.h"

/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, machine_mode,
                                                   const_rtx, machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, machine_mode,
                                             const_rtx, machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, machine_mode,
                                                const_rtx, machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, machine_mode, const_rtx,
                                          machine_mode, unsigned int);

rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];

/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
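
/* An illustrative sketch: on a hypothetical target where
   TARGET_MODE_REP_EXTENDED (SImode, DImode) is SIGN_EXTEND, switching
   a DImode value to SImode is only a mode change when the 33
   high-order bits (bit 31 up to bit 63) are all copies of the sign
   bit, i.e. when at least num_sign_bit_copies_in_rep[DImode][SImode]
   high-order bits of the value agree with its sign.  */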

/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  value_type *base,
                                                  size_t i, value_type x)
{
  if (base == array.stack)
    {
      if (i < LOCAL_ELEMS)
        {
          base[i] = x;
          return base;
        }
      gcc_checking_assert (i == LOCAL_ELEMS);
      /* A previous iteration might also have moved from the stack to the
         heap, in which case the heap array will already be big enough.  */
      if (vec_safe_length (array.heap) <= i)
        vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }
  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}

/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    value_type *base,
                                                    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
         we're likely to want most.  It also allows for the SEQUENCE
         code below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
        if (format[i] == 'e')
          {
            value_type subx = T::get_value (x->u.fld[i].rt_rtx);
            if (__builtin_expect (end < LOCAL_ELEMS, true))
              base[end++] = subx;
            else
              base = add_single_to_queue (array, base, end++, subx);
          }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
        {
          value_type subx = T::get_value (x->u.fld[i].rt_rtx);
          if (__builtin_expect (end < LOCAL_ELEMS, true))
            base[end++] = subx;
          else
            base = add_single_to_queue (array, base, end++, subx);
        }
      else if (format[i] == 'E')
        {
          unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
          rtx *vec = x->u.fld[i].rt_rtvec->elem;
          if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
            for (unsigned int j = 0; j < length; j++)
              base[end++] = T::get_value (vec[j]);
          else
            for (unsigned int j = 0; j < length; j++)
              base = add_single_to_queue (array, base, end++,
                                          T::get_value (vec[j]));
          if (code == SEQUENCE && end == length)
            /* If the subrtxes of the sequence fill the entire array then
               we know that no other parts of a containing insn are queued.
               The caller is therefore iterating over the sequence as a
               PATTERN (...), so we also want the patterns of the
               subinstructions.  */
            for (unsigned int j = 0; j < length; j++)
              {
                typename T::rtx_type x = T::get_rtx (base[j]);
                if (INSN_P (x))
                  base[j] = T::get_value (PATTERN (x));
              }
        }
  return end - orig_end;
}

template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;
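
/* A sketch of how these instantiations are typically used (compare
   find_all_hard_regs further down in this file):

     subrtx_iterator::array_type array;
     FOR_EACH_SUBRTX (iter, array, x, NONCONST)
       if (MEM_P (*iter))
         ...

   rtl-iter.h builds FOR_EACH_SUBRTX, FOR_EACH_SUBRTX_VAR and
   FOR_EACH_SUBRTX_PTR on top of these three accessor variants.  */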

/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}

/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}

/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
                         ? tree_to_shwi (DECL_SIZE_UNIT (decl))
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx)
        {
          HOST_WIDE_INT adj_offset = offset - STARTING_FRAME_OFFSET;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (FRAME_GROWS_DOWNWARD)
            {
              if (adj_offset < frame_offset || adj_offset + size - 1 >= 0)
                return 1;
            }
          else
            {
              if (adj_offset < 0 || adj_offset + size - 1 >= frame_offset)
                return 1;
            }
          return 0;
        }
      /* ??? Need to add a similar guard for nonsensical offsets.  */
      if (x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
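
/* For example, an address formed from a SYMBOL_REF of a weak symbol
   can trap, since a weak symbol may resolve to address zero, whereas
   an address based on stack_pointer_rtx is assumed never to trap
   (an illustrative summary of the cases handled above).  */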

/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}

/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}

/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
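
/* So for a call insn whose pattern looks like, say,
     (set (reg:SI 0) (call (mem:QI (symbol_ref "foo")) (const_int 0)))
   the (call ...) subexpression is returned.  (An illustrative
   example; the exact mode and operands are target-dependent.)  */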

/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
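
/* E.g. (const (plus (symbol_ref "x") (const_int 4))) yields 4 and
   (const (minus (symbol_ref "x") (const_int 4))) yields -4, while
   anything less obvious yields 0 (illustrative examples).  */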

/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}

/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}

/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
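
/* Thus (const (plus (symbol_ref "x") (const_int 8))) splits into base
   (symbol_ref "x") and offset (const_int 8), while any other X splits
   into base X and offset (const_int 0) (illustrative examples).  */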

/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}

/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}
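
/* A pseudo created for, say, an `unsigned int' source variable
   satisfies the first test via its REG_EXPR; a SUBREG marked as
   promoted from an unsigned value satisfies the second (a
   descriptive note, not part of the interface contract).  */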

/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == LABEL_REF_LABEL (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

    CASE_CONST_ANY:
      /* These are kept unique for a given value.  */
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}

/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}

/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
                    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}

/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}

/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
                   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}

/* Internals of reg_set_between_p.  */

int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
        if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
          return true;

      return false;
    }

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return true;

  return set_of (reg, insn) != NULL_RTX;
}

/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;
      break;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}

/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;
      break;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}

/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}

/* Add all hard registers in X to *PSET.  */
void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
        add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}

/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *) data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}

/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
        IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}

/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}

/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose outputs
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead, as single set insns.  In the common
                 case only a single set is present in the pattern, so we
                 want to avoid checking for REG_UNUSED notes unless
                 necessary.

                 When we reach a set the first time, we just expect it is
                 the single set we are looking for; only when more sets
                 are found in the insn do we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL_RTX;
              break;

            default:
              return NULL_RTX;
            }
        }
    }
  return set;
}
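
/* Most callers reach this through the single_set inline in rtl.h,
   which handles a plain SET pattern itself and defers to
   single_set_2 only for the more expensive PARALLEL examination
   (a descriptive note about the wrapper of this era).  */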

/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}

/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      int c0 = INTVAL (XVECEXP (par, 0, 0));
      HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
          return 0;
      return
        simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                               offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
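
/* The classic no-op is (set (reg:SI 1) (reg:SI 1)); the VEC_SELECT
   case above additionally recognizes a move that extracts consecutive
   vector elements into the hard register that already holds them
   (an illustrative summary).  */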

/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const rtx_insn *insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}

/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

bool
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note are always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return false;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
           || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
               && x_regno == ARG_POINTER_REGNUM)
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return true;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return true;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return false;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return true;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return true;
        }
    }
  return false;
}

/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case.)  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X cannot
     affect IN.  Here we look at IN; we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx*) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}

/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }
  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
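
/* A sketch of typical usage, modeled on find_all_hard_reg_sets above:

     HARD_REG_SET set;
     CLEAR_HARD_REG_SET (set);
     note_stores (PATTERN (insn), record_hard_reg_sets, &set);

   after which SET contains every hard register the insn's pattern
   writes (a usage example taken from this file's own callers).  */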

/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}

/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}

/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}

/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}

/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}

/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}

/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}

/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}

/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const rtx_insn *insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}

/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}

/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}

/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}

/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));
  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
    case REG_TM:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}

/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}

/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
                                       datum, REG_NOTES (insn));
}

/* Add a register note like NOTE to INSN.  */

void
add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
{
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
  else
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
}

/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (as_a <rtx_insn *> (insn));
      break;
    default:
      break;
    }
}

/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.  */

void
remove_reg_equal_equiv_notes (rtx_insn *insn)
{
  rtx *loc;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        *loc = XEXP (*loc, 1);
      else
        loc = &XEXP (*loc, 1);
    }
}

/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  if (!df)
    return;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx_insn *insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}
2199 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2200 return true if it is found. A simple equality test is used to determine if
2201 NODE matches. */
2203 bool
2204 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2206 const_rtx x;
2208 for (x = listp; x; x = XEXP (x, 1))
2209 if (node == XEXP (x, 0))
2210 return true;
2212 return false;
2215 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2216 remove that entry from the list if it is found.
2218 A simple equality test is used to determine if NODE matches. */
2220 void
2221 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2223 rtx_expr_list *temp = *listp;
2224 rtx_expr_list *prev = NULL;
2226 while (temp)
2228 if (node == temp->element ())
2230 /* Splice the node out of the list. */
2231 if (prev)
2232 XEXP (prev, 1) = temp->next ();
2233 else
2234 *listp = temp->next ();
2236 return;
2239 prev = temp;
2240 temp = temp->next ();
2244 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2245 remove that entry from the list if it is found.
2247 A simple equality test is used to determine if NODE matches. */
2249 void
2250 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2252 rtx_insn_list *temp = *listp;
2253 rtx_insn_list *prev = NULL;
2255 while (temp)
2257 if (node == temp->insn ())
2259 /* Splice the node out of the list. */
2260 if (prev)
2261 XEXP (prev, 1) = temp->next ();
2262 else
2263 *listp = temp->next ();
2265 return;
2268 prev = temp;
2269 temp = temp->next ();
2273 /* Nonzero if X contains any volatile instructions. These are instructions
2274 which may cause the machine state to become unpredictable, and thus no
2275 instructions or register uses should be moved or combined across them.
2276 This includes only volatile asms and UNSPEC_VOLATILE instructions. */
2279 volatile_insn_p (const_rtx x)
2281 const RTX_CODE code = GET_CODE (x);
2282 switch (code)
2284 case LABEL_REF:
2285 case SYMBOL_REF:
2286 case CONST:
2287 CASE_CONST_ANY:
2288 case CC0:
2289 case PC:
2290 case REG:
2291 case SCRATCH:
2292 case CLOBBER:
2293 case ADDR_VEC:
2294 case ADDR_DIFF_VEC:
2295 case CALL:
2296 case MEM:
2297 return 0;
2299 case UNSPEC_VOLATILE:
2300 return 1;
2302 case ASM_INPUT:
2303 case ASM_OPERANDS:
2304 if (MEM_VOLATILE_P (x))
2305 return 1;
2307 default:
2308 break;
2311 /* Recursively scan the operands of this expression. */
2314 const char *const fmt = GET_RTX_FORMAT (code);
2315 int i;
2317 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2319 if (fmt[i] == 'e')
2321 if (volatile_insn_p (XEXP (x, i)))
2322 return 1;
2324 else if (fmt[i] == 'E')
2326 int j;
2327 for (j = 0; j < XVECLEN (x, i); j++)
2328 if (volatile_insn_p (XVECEXP (x, i, j)))
2329 return 1;
2333 return 0;
2336 /* Nonzero if X contains any volatile memory references,
2337 UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions. */
2340 volatile_refs_p (const_rtx x)
2342 const RTX_CODE code = GET_CODE (x);
2343 switch (code)
2345 case LABEL_REF:
2346 case SYMBOL_REF:
2347 case CONST:
2348 CASE_CONST_ANY:
2349 case CC0:
2350 case PC:
2351 case REG:
2352 case SCRATCH:
2353 case CLOBBER:
2354 case ADDR_VEC:
2355 case ADDR_DIFF_VEC:
2356 return 0;
2358 case UNSPEC_VOLATILE:
2359 return 1;
2361 case MEM:
2362 case ASM_INPUT:
2363 case ASM_OPERANDS:
2364 if (MEM_VOLATILE_P (x))
2365 return 1;
2367 default:
2368 break;
2371 /* Recursively scan the operands of this expression. */
2374 const char *const fmt = GET_RTX_FORMAT (code);
2375 int i;
2377 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2379 if (fmt[i] == 'e')
2381 if (volatile_refs_p (XEXP (x, i)))
2382 return 1;
2384 else if (fmt[i] == 'E')
2386 int j;
2387 for (j = 0; j < XVECLEN (x, i); j++)
2388 if (volatile_refs_p (XVECEXP (x, i, j)))
2389 return 1;
2393 return 0;
2396 /* Similar to above, except that it also rejects register pre- and post-
2397 incrementing. */
2400 side_effects_p (const_rtx x)
2402 const RTX_CODE code = GET_CODE (x);
2403 switch (code)
2405 case LABEL_REF:
2406 case SYMBOL_REF:
2407 case CONST:
2408 CASE_CONST_ANY:
2409 case CC0:
2410 case PC:
2411 case REG:
2412 case SCRATCH:
2413 case ADDR_VEC:
2414 case ADDR_DIFF_VEC:
2415 case VAR_LOCATION:
2416 return 0;
2418 case CLOBBER:
2419 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2420 when some combination can't be done. If we see one, don't think
2421 that we can simplify the expression. */
2422 return (GET_MODE (x) != VOIDmode);
2424 case PRE_INC:
2425 case PRE_DEC:
2426 case POST_INC:
2427 case POST_DEC:
2428 case PRE_MODIFY:
2429 case POST_MODIFY:
2430 case CALL:
2431 case UNSPEC_VOLATILE:
2432 return 1;
2434 case MEM:
2435 case ASM_INPUT:
2436 case ASM_OPERANDS:
2437 if (MEM_VOLATILE_P (x))
2438 return 1;
2440 default:
2441 break;
2444 /* Recursively scan the operands of this expression. */
2447 const char *fmt = GET_RTX_FORMAT (code);
2448 int i;
2450 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2452 if (fmt[i] == 'e')
2454 if (side_effects_p (XEXP (x, i)))
2455 return 1;
2457 else if (fmt[i] == 'E')
2459 int j;
2460 for (j = 0; j < XVECLEN (x, i); j++)
2461 if (side_effects_p (XVECEXP (x, i, j)))
2462 return 1;
2466 return 0;
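/* These three predicates nest, from strongest to weakest claim:
   volatile_insn_p (x) implies volatile_refs_p (x), which in turn
   implies side_effects_p (x).  A sketch of the typical dead-code test,
   assuming SET is the single_set of an insn whose result is unused:  */
#if 0
  if (!side_effects_p (SET_SRC (set)))
    delete_insn (insn);	/* Evaluation of the source has no side effects.  */
#endif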
2469 /* Return nonzero if evaluating rtx X might cause a trap.
2470 FLAGS controls how to consider MEMs. A nonzero value means the context
2471 of the access may have changed from the original, such that the
2472 address may have become invalid. */
2475 may_trap_p_1 (const_rtx x, unsigned flags)
2477 int i;
2478 enum rtx_code code;
2479 const char *fmt;
2481 /* We make no distinction currently, but this function is part of
2482 the internal target-hooks ABI so we keep the parameter as
2483 "unsigned flags". */
2484 bool code_changed = flags != 0;
2486 if (x == 0)
2487 return 0;
2488 code = GET_CODE (x);
2489 switch (code)
2491 /* Handle these cases quickly. */
2492 CASE_CONST_ANY:
2493 case SYMBOL_REF:
2494 case LABEL_REF:
2495 case CONST:
2496 case PC:
2497 case CC0:
2498 case REG:
2499 case SCRATCH:
2500 return 0;
2502 case UNSPEC:
2503 return targetm.unspec_may_trap_p (x, flags);
2505 case UNSPEC_VOLATILE:
2506 case ASM_INPUT:
2507 case TRAP_IF:
2508 return 1;
2510 case ASM_OPERANDS:
2511 return MEM_VOLATILE_P (x);
2513 /* Memory ref can trap unless it's a static var or a stack slot. */
2514 case MEM:
2515 /* Recognize specific pattern of stack checking probes. */
2516 if (flag_stack_check
2517 && MEM_VOLATILE_P (x)
2518 && XEXP (x, 0) == stack_pointer_rtx)
2519 return 1;
2520 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2521 reference; moving it out of its original context, such as when moving
2522 code during optimization, might cause its address to become invalid. */
2523 code_changed
2524 || !MEM_NOTRAP_P (x))
2526 HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
2527 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2528 GET_MODE (x), code_changed);
2531 return 0;
2533 /* Division by a non-constant might trap. */
2534 case DIV:
2535 case MOD:
2536 case UDIV:
2537 case UMOD:
2538 if (HONOR_SNANS (x))
2539 return 1;
2540 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2541 return flag_trapping_math;
2542 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2543 return 1;
2544 break;
2546 case EXPR_LIST:
2547 /* An EXPR_LIST is used to represent a function call. This
2548 certainly may trap. */
2549 return 1;
2551 case GE:
2552 case GT:
2553 case LE:
2554 case LT:
2555 case LTGT:
2556 case COMPARE:
2557 /* Some floating point comparisons may trap. */
2558 if (!flag_trapping_math)
2559 break;
2560 /* ??? There is no machine independent way to check for tests that trap
2561 when COMPARE is used, though many targets do make this distinction.
2562 For instance, sparc uses CCFPE for compares which generate exceptions
2563 and CCFP for compares which do not generate exceptions. */
2564 if (HONOR_NANS (x))
2565 return 1;
2566 /* But often the compare has some CC mode, so check operand
2567 modes as well. */
2568 if (HONOR_NANS (XEXP (x, 0))
2569 || HONOR_NANS (XEXP (x, 1)))
2570 return 1;
2571 break;
2573 case EQ:
2574 case NE:
2575 if (HONOR_SNANS (x))
2576 return 1;
2577 /* Often comparison is CC mode, so check operand modes. */
2578 if (HONOR_SNANS (XEXP (x, 0))
2579 || HONOR_SNANS (XEXP (x, 1)))
2580 return 1;
2581 break;
2583 case FIX:
2584 /* Conversion of floating point might trap. */
2585 if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
2586 return 1;
2587 break;
2589 case NEG:
2590 case ABS:
2591 case SUBREG:
2592 /* These operations don't trap even with floating point. */
2593 break;
2595 default:
2596 /* Any floating arithmetic may trap. */
2597 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2598 return 1;
2601 fmt = GET_RTX_FORMAT (code);
2602 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2604 if (fmt[i] == 'e')
2606 if (may_trap_p_1 (XEXP (x, i), flags))
2607 return 1;
2609 else if (fmt[i] == 'E')
2611 int j;
2612 for (j = 0; j < XVECLEN (x, i); j++)
2613 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2614 return 1;
2617 return 0;
2620 /* Return nonzero if evaluating rtx X might cause a trap. */
2623 may_trap_p (const_rtx x)
2625 return may_trap_p_1 (x, 0);
2628 /* Same as above, but additionally return nonzero if evaluating rtx X might
2629 cause a fault. We define a fault for the purpose of this function as an
2630 erroneous execution condition that cannot be encountered during the normal
2631 execution of a valid program; the typical example is an unaligned memory
2632 access on a strict alignment machine. The compiler guarantees that it
2633 doesn't generate code that will fault from a valid program, but this
2634 guarantee doesn't mean anything for individual instructions. Consider
2635 the following example:
2637 struct S { int d; union { char *cp; int *ip; }; };
2639 int foo(struct S *s)
2641 if (s->d == 1)
2642 return *s->ip;
2643 else
2644 return *s->cp;
2647 on a strict alignment machine. In a valid program, foo will never be
2648 invoked on a structure for which d is equal to 1 and the underlying
2649 unique field of the union not aligned on a 4-byte boundary, but the
2650 expression *s->ip might cause a fault if considered individually.
2652 At the RTL level, potentially problematic expressions will almost always
2653 satisfy may_trap_p; for example, the above dereference can be emitted as
2654 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2655 However, suppose that foo is inlined in a caller that causes s->cp to
2656 point to a local character variable and guarantees that s->d is not set
2657 to 1; foo may have been effectively translated into pseudo-RTL as:
2659 if ((reg:SI) == 1)
2660 (set (reg:SI) (mem:SI (%fp - 7)))
2661 else
2662 (set (reg:QI) (mem:QI (%fp - 7)))
2664 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2665 memory reference to a stack slot, but it will certainly cause a fault
2666 on a strict alignment machine. */
2669 may_trap_or_fault_p (const_rtx x)
2671 return may_trap_p_1 (x, 1);
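/* A sketch of the intended division of labor: may_trap_p guards
   transformations that keep an expression in an equivalent context,
   while may_trap_or_fault_p must be used when the expression would be
   evaluated speculatively, e.g. when hoisting a load out of a loop
   (hoist_to_preheader below is hypothetical).  */
#if 0
  if (!may_trap_or_fault_p (SET_SRC (set)))
    hoist_to_preheader (loop, insn);
#endif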
2674 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2675 i.e., an inequality. */
2678 inequality_comparisons_p (const_rtx x)
2680 const char *fmt;
2681 int len, i;
2682 const enum rtx_code code = GET_CODE (x);
2684 switch (code)
2686 case REG:
2687 case SCRATCH:
2688 case PC:
2689 case CC0:
2690 CASE_CONST_ANY:
2691 case CONST:
2692 case LABEL_REF:
2693 case SYMBOL_REF:
2694 return 0;
2696 case LT:
2697 case LTU:
2698 case GT:
2699 case GTU:
2700 case LE:
2701 case LEU:
2702 case GE:
2703 case GEU:
2704 return 1;
2706 default:
2707 break;
2710 len = GET_RTX_LENGTH (code);
2711 fmt = GET_RTX_FORMAT (code);
2713 for (i = 0; i < len; i++)
2715 if (fmt[i] == 'e')
2717 if (inequality_comparisons_p (XEXP (x, i)))
2718 return 1;
2720 else if (fmt[i] == 'E')
2722 int j;
2723 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2724 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2725 return 1;
2729 return 0;
2732 /* Replace any occurrence of FROM in X with TO. The function does
2733 not recurse into CONST_DOUBLE for the replacement.
2735 Note that no copying is done, so X must not be shared unless all copies
2736 are to be modified. */
2739 replace_rtx (rtx x, rtx from, rtx to)
2741 int i, j;
2742 const char *fmt;
2744 if (x == from)
2745 return to;
2747 /* Allow this function to make replacements in EXPR_LISTs. */
2748 if (x == 0)
2749 return 0;
2751 if (GET_CODE (x) == SUBREG)
2753 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);
2755 if (CONST_INT_P (new_rtx))
2757 x = simplify_subreg (GET_MODE (x), new_rtx,
2758 GET_MODE (SUBREG_REG (x)),
2759 SUBREG_BYTE (x));
2760 gcc_assert (x);
2762 else
2763 SUBREG_REG (x) = new_rtx;
2765 return x;
2767 else if (GET_CODE (x) == ZERO_EXTEND)
2769 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);
2771 if (CONST_INT_P (new_rtx))
2773 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2774 new_rtx, GET_MODE (XEXP (x, 0)));
2775 gcc_assert (x);
2777 else
2778 XEXP (x, 0) = new_rtx;
2780 return x;
2783 fmt = GET_RTX_FORMAT (GET_CODE (x));
2784 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2786 if (fmt[i] == 'e')
2787 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
2788 else if (fmt[i] == 'E')
2789 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2790 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
2793 return x;
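/* A minimal usage sketch.  Because no copying is done, the pattern
   should be unshared first (e.g. via copy_rtx) if other references to
   it may exist.  */
#if 0
  PATTERN (insn) = replace_rtx (copy_rtx (PATTERN (insn)), from, to);
#endif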
2796 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL. Also track
2797 the change in LABEL_NUSES if UPDATE_LABEL_NUSES. */
2799 void
2800 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
2802 /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long. */
2803 rtx x = *loc;
2804 if (JUMP_TABLE_DATA_P (x))
2806 x = PATTERN (x);
2807 rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
2808 int len = GET_NUM_ELEM (vec);
2809 for (int i = 0; i < len; ++i)
2811 rtx ref = RTVEC_ELT (vec, i);
2812 if (XEXP (ref, 0) == old_label)
2814 XEXP (ref, 0) = new_label;
2815 if (update_label_nuses)
2817 ++LABEL_NUSES (new_label);
2818 --LABEL_NUSES (old_label);
2822 return;
2825 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
2826 field. This is not handled by the iterator because it doesn't
2827 handle unprinted ('0') fields. */
2828 if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
2829 JUMP_LABEL (x) = new_label;
2831 subrtx_ptr_iterator::array_type array;
2832 FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
2834 rtx *loc = *iter;
2835 if (rtx x = *loc)
2837 if (GET_CODE (x) == SYMBOL_REF
2838 && CONSTANT_POOL_ADDRESS_P (x))
2840 rtx c = get_pool_constant (x);
2841 if (rtx_referenced_p (old_label, c))
2843 /* Create a copy of constant C; replace the label inside
2844 but do not update LABEL_NUSES because uses in constant pool
2845 are not counted. */
2846 rtx new_c = copy_rtx (c);
2847 replace_label (&new_c, old_label, new_label, false);
2849 /* Add the new constant NEW_C to constant pool and replace
2850 the old reference to constant by new reference. */
2851 rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
2852 *loc = replace_rtx (x, x, XEXP (new_mem, 0));
2856 if ((GET_CODE (x) == LABEL_REF
2857 || GET_CODE (x) == INSN_LIST)
2858 && XEXP (x, 0) == old_label)
2860 XEXP (x, 0) = new_label;
2861 if (update_label_nuses)
2863 ++LABEL_NUSES (new_label);
2864 --LABEL_NUSES (old_label);
2871 void
2872 replace_label_in_insn (rtx_insn *insn, rtx old_label, rtx new_label,
2873 bool update_label_nuses)
2875 rtx insn_as_rtx = insn;
2876 replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
2877 gcc_checking_assert (insn_as_rtx == insn);
2880 /* Return true if X is referenced in BODY. */
2882 bool
2883 rtx_referenced_p (const_rtx x, const_rtx body)
2885 subrtx_iterator::array_type array;
2886 FOR_EACH_SUBRTX (iter, array, body, ALL)
2887 if (const_rtx y = *iter)
2889 /* Check if a label_ref Y refers to label X. */
2890 if (GET_CODE (y) == LABEL_REF
2891 && LABEL_P (x)
2892 && LABEL_REF_LABEL (y) == x)
2893 return true;
2895 if (rtx_equal_p (x, y))
2896 return true;
2898 /* If Y is a reference to a pool constant, traverse the constant. */
2899 if (GET_CODE (y) == SYMBOL_REF
2900 && CONSTANT_POOL_ADDRESS_P (y))
2901 iter.substitute (get_pool_constant (y));
2903 return false;
2906 /* If INSN is a tablejump, return true and store the label (preceding the
2907 jump table) in *LABELP and the jump table in *TABLEP. LABELP and TABLEP may be NULL. */
2909 bool
2910 tablejump_p (const rtx_insn *insn, rtx *labelp, rtx_jump_table_data **tablep)
2912 rtx label;
2913 rtx_insn *table;
2915 if (!JUMP_P (insn))
2916 return false;
2918 label = JUMP_LABEL (insn);
2919 if (label != NULL_RTX && !ANY_RETURN_P (label)
2920 && (table = NEXT_INSN (as_a <rtx_insn *> (label))) != NULL_RTX
2921 && JUMP_TABLE_DATA_P (table))
2923 if (labelp)
2924 *labelp = label;
2925 if (tablep)
2926 *tablep = as_a <rtx_jump_table_data *> (table);
2927 return true;
2929 return false;
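/* A usage sketch: walking the labels of a tablejump.  The consumer
   mark_label is hypothetical; get_labels is the same accessor used by
   label_is_jump_target_p below.  */
#if 0
  rtx label;
  rtx_jump_table_data *table;
  if (tablejump_p (insn, &label, &table))
    {
      rtvec vec = table->get_labels ();
      for (int i = 0; i < GET_NUM_ELEM (vec); i++)
	mark_label (XEXP (RTVEC_ELT (vec, i), 0));
    }
#endif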
2932 /* A subroutine of computed_jump_p, return 1 if X contains a REG, MEM or
2933 constant that is not in the constant pool and not in the condition
2934 of an IF_THEN_ELSE. */
2936 static int
2937 computed_jump_p_1 (const_rtx x)
2939 const enum rtx_code code = GET_CODE (x);
2940 int i, j;
2941 const char *fmt;
2943 switch (code)
2945 case LABEL_REF:
2946 case PC:
2947 return 0;
2949 case CONST:
2950 CASE_CONST_ANY:
2951 case SYMBOL_REF:
2952 case REG:
2953 return 1;
2955 case MEM:
2956 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
2957 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
2959 case IF_THEN_ELSE:
2960 return (computed_jump_p_1 (XEXP (x, 1))
2961 || computed_jump_p_1 (XEXP (x, 2)));
2963 default:
2964 break;
2967 fmt = GET_RTX_FORMAT (code);
2968 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2970 if (fmt[i] == 'e'
2971 && computed_jump_p_1 (XEXP (x, i)))
2972 return 1;
2974 else if (fmt[i] == 'E')
2975 for (j = 0; j < XVECLEN (x, i); j++)
2976 if (computed_jump_p_1 (XVECEXP (x, i, j)))
2977 return 1;
2980 return 0;
2983 /* Return nonzero if INSN is an indirect jump (aka computed jump).
2985 Tablejumps and casesi insns are not considered indirect jumps;
2986 we can recognize them by a (use (label_ref)). */
2989 computed_jump_p (const rtx_insn *insn)
2991 int i;
2992 if (JUMP_P (insn))
2994 rtx pat = PATTERN (insn);
2996 /* If we have a JUMP_LABEL set, we're not a computed jump. */
2997 if (JUMP_LABEL (insn) != NULL)
2998 return 0;
3000 if (GET_CODE (pat) == PARALLEL)
3002 int len = XVECLEN (pat, 0);
3003 int has_use_labelref = 0;
3005 for (i = len - 1; i >= 0; i--)
3006 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3007 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3008 == LABEL_REF))
3010 has_use_labelref = 1;
3011 break;
3014 if (! has_use_labelref)
3015 for (i = len - 1; i >= 0; i--)
3016 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3017 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3018 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3019 return 1;
3021 else if (GET_CODE (pat) == SET
3022 && SET_DEST (pat) == pc_rtx
3023 && computed_jump_p_1 (SET_SRC (pat)))
3024 return 1;
3026 return 0;
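/* A caller-side sketch of the distinction: a tablejump answers true to
   tablejump_p, a computed jump answers nonzero here, and a direct jump
   answers neither.  */
#if 0
  if (JUMP_P (insn) && computed_jump_p (insn))
    n_indirect_jumps++;	/* e.g. a jump through a function pointer.  */
#endif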
3031 /* MEM has a PRE/POST-INC/DEC/MODIFY address X. Extract the operands of
3032 the equivalent add insn and pass the result to FN, using DATA as the
3033 final argument. */
3035 static int
3036 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3038 rtx x = XEXP (mem, 0);
3039 switch (GET_CODE (x))
3041 case PRE_INC:
3042 case POST_INC:
3044 int size = GET_MODE_SIZE (GET_MODE (mem));
3045 rtx r1 = XEXP (x, 0);
3046 rtx c = gen_int_mode (size, GET_MODE (r1));
3047 return fn (mem, x, r1, r1, c, data);
3050 case PRE_DEC:
3051 case POST_DEC:
3053 int size = GET_MODE_SIZE (GET_MODE (mem));
3054 rtx r1 = XEXP (x, 0);
3055 rtx c = gen_int_mode (-size, GET_MODE (r1));
3056 return fn (mem, x, r1, r1, c, data);
3059 case PRE_MODIFY:
3060 case POST_MODIFY:
3062 rtx r1 = XEXP (x, 0);
3063 rtx add = XEXP (x, 1);
3064 return fn (mem, x, r1, add, NULL, data);
3067 default:
3068 gcc_unreachable ();
3072 /* Traverse *LOC looking for MEMs that have autoinc addresses.
3073 For each such autoinc operation found, call FN, passing it
3074 the innermost enclosing MEM, the operation itself, the RTX modified
3075 by the operation, two RTXs (the second may be NULL) that, once
3076 added, represent the value to be held by the modified RTX
3077 afterwards, and DATA. FN is to return 0 to continue the
3078 traversal or any other value to have it returned to the caller of
3079 for_each_inc_dec. */
3082 for_each_inc_dec (rtx x,
3083 for_each_inc_dec_fn fn,
3084 void *data)
3086 subrtx_var_iterator::array_type array;
3087 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3089 rtx mem = *iter;
3090 if (mem
3091 && MEM_P (mem)
3092 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3094 int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3095 if (res != 0)
3096 return res;
3097 iter.skip_subrtxes ();
3100 return 0;
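/* A sketch of a conforming callback, here counting autoinc operations;
   it would be invoked as for_each_inc_dec (PATTERN (insn),
   count_autoinc, &count).  Per the contract above, SRCOFF is the
   constant addend for {PRE,POST}_{INC,DEC} and NULL for
   {PRE,POST}_MODIFY.  */
#if 0
static int
count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
	       rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
	       rtx srcoff ATTRIBUTE_UNUSED, void *data)
{
  ++*(int *) data;
  return 0;	/* Nonzero would stop the traversal and be returned.  */
}
#endif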
3104 /* Searches X for any reference to REGNO, returning the rtx of the
3105 reference found if any. Otherwise, returns NULL_RTX. */
3108 regno_use_in (unsigned int regno, rtx x)
3110 const char *fmt;
3111 int i, j;
3112 rtx tem;
3114 if (REG_P (x) && REGNO (x) == regno)
3115 return x;
3117 fmt = GET_RTX_FORMAT (GET_CODE (x));
3118 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3120 if (fmt[i] == 'e')
3122 if ((tem = regno_use_in (regno, XEXP (x, i))))
3123 return tem;
3125 else if (fmt[i] == 'E')
3126 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3127 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3128 return tem;
3131 return NULL_RTX;
3134 /* Return a value indicating whether OP, an operand of a commutative
3135 operation, is preferred as the first or second operand. The higher
3136 the value, the stronger the preference for being the first operand.
3137 Negative values thus indicate a preference for being the second
3138 operand and positive values for being the first. */
3141 commutative_operand_precedence (rtx op)
3143 enum rtx_code code = GET_CODE (op);
3145 /* Constants always become the second operand. Prefer "nice" constants. */
3146 if (code == CONST_INT)
3147 return -8;
3148 if (code == CONST_WIDE_INT)
3149 return -8;
3150 if (code == CONST_DOUBLE)
3151 return -7;
3152 if (code == CONST_FIXED)
3153 return -7;
3154 op = avoid_constant_pool_reference (op);
3155 code = GET_CODE (op);
3157 switch (GET_RTX_CLASS (code))
3159 case RTX_CONST_OBJ:
3160 if (code == CONST_INT)
3161 return -6;
3162 if (code == CONST_WIDE_INT)
3163 return -6;
3164 if (code == CONST_DOUBLE)
3165 return -5;
3166 if (code == CONST_FIXED)
3167 return -5;
3168 return -4;
3170 case RTX_EXTRA:
3171 /* SUBREGs of objects should come second. */
3172 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3173 return -3;
3174 return 0;
3176 case RTX_OBJ:
3177 /* Complex expressions should come first, so decrease the priority
3178 of objects. Prefer pointer objects over non-pointer objects. */
3179 if ((REG_P (op) && REG_POINTER (op))
3180 || (MEM_P (op) && MEM_POINTER (op)))
3181 return -1;
3182 return -2;
3184 case RTX_COMM_ARITH:
3185 /* Prefer operands that are themselves commutative to be first.
3186 This helps to make things linear. In particular,
3187 (and (and (reg) (reg)) (not (reg))) is canonical. */
3188 return 4;
3190 case RTX_BIN_ARITH:
3191 /* If only one operand is a binary expression, it will be the first
3192 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3193 is canonical, although it will usually be further simplified. */
3194 return 2;
3196 case RTX_UNARY:
3197 /* Then prefer NEG and NOT. */
3198 if (code == NEG || code == NOT)
3199 return 1;
3201 default:
3202 return 0;
3206 /* Return 1 iff it is necessary to swap the operands of a commutative
3207 operation in order to canonicalize the expression. */
3209 bool
3210 swap_commutative_operands_p (rtx x, rtx y)
3212 return (commutative_operand_precedence (x)
3213 < commutative_operand_precedence (y));
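/* The canonicalization idiom built on this predicate; after the swap,
   the operand with the higher precedence comes first.  */
#if 0
  if (swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);
#endif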
3216 /* Return 1 if X is an autoincrement side effect and the register is
3217 not the stack pointer. */
3219 auto_inc_p (const_rtx x)
3221 switch (GET_CODE (x))
3223 case PRE_INC:
3224 case POST_INC:
3225 case PRE_DEC:
3226 case POST_DEC:
3227 case PRE_MODIFY:
3228 case POST_MODIFY:
3229 /* There are no REG_INC notes for SP. */
3230 if (XEXP (x, 0) != stack_pointer_rtx)
3231 return 1;
3232 default:
3233 break;
3235 return 0;
3238 /* Return nonzero if IN contains a piece of rtl whose address is LOC. */
3240 loc_mentioned_in_p (rtx *loc, const_rtx in)
3242 enum rtx_code code;
3243 const char *fmt;
3244 int i, j;
3246 if (!in)
3247 return 0;
3249 code = GET_CODE (in);
3250 fmt = GET_RTX_FORMAT (code);
3251 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3253 if (fmt[i] == 'e')
3255 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3256 return 1;
3258 else if (fmt[i] == 'E')
3259 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3260 if (loc == &XVECEXP (in, i, j)
3261 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3262 return 1;
3264 return 0;
3267 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3268 and SUBREG_BYTE, return the bit offset where the subreg begins
3269 (counting from the least significant bit of the operand). */
3271 unsigned int
3272 subreg_lsb_1 (machine_mode outer_mode,
3273 machine_mode inner_mode,
3274 unsigned int subreg_byte)
3276 unsigned int bitpos;
3277 unsigned int byte;
3278 unsigned int word;
3280 /* A paradoxical subreg begins at bit position 0. */
3281 if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
3282 return 0;
3284 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3285 /* If the subreg crosses a word boundary ensure that
3286 it also begins and ends on a word boundary. */
3287 gcc_assert (!((subreg_byte % UNITS_PER_WORD
3288 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3289 && (subreg_byte % UNITS_PER_WORD
3290 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3292 if (WORDS_BIG_ENDIAN)
3293 word = (GET_MODE_SIZE (inner_mode)
3294 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3295 else
3296 word = subreg_byte / UNITS_PER_WORD;
3297 bitpos = word * BITS_PER_WORD;
3299 if (BYTES_BIG_ENDIAN)
3300 byte = (GET_MODE_SIZE (inner_mode)
3301 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3302 else
3303 byte = subreg_byte % UNITS_PER_WORD;
3304 bitpos += byte * BITS_PER_UNIT;
3306 return bitpos;
3309 /* Given a subreg X, return the bit offset where the subreg begins
3310 (counting from the least significant bit of the reg). */
3312 unsigned int
3313 subreg_lsb (const_rtx x)
3315 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3316 SUBREG_BYTE (x));
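/* A worked example under the rules above, assuming a little-endian
   target (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN == 0): for
   (subreg:SI (reg:DI r) 4), the subreg starts at bit 32, i.e. the high
   half of the DImode value, whether UNITS_PER_WORD is 4 or 8.  */
#if 0
  unsigned int lsb = subreg_lsb_1 (SImode, DImode, 4);	/* 32 on LE.  */
#endif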
3319 /* Fill in information about a subreg of a hard register.
3320 xregno - A regno of an inner hard subreg_reg (or what will become one).
3321 xmode - The mode of xregno.
3322 offset - The byte offset.
3323 ymode - The mode of a top level SUBREG (or what may become one).
3324 info - Pointer to structure to fill in.
3326 Rather than considering one particular inner register (and thus one
3327 particular "outer" register) in isolation, this function really uses
3328 XREGNO as a model for a sequence of isomorphic hard registers. Thus the
3329 function does not check whether adding INFO->offset to XREGNO gives
3330 a valid hard register; even if INFO->offset + XREGNO is out of range,
3331 there might be another register of the same type that is in range.
3332 Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
3333 register, since that can depend on things like whether the final
3334 register number is even or odd. Callers that want to check whether
3335 this particular subreg can be replaced by a simple (reg ...) should
3336 use simplify_subreg_regno. */
3338 void
3339 subreg_get_info (unsigned int xregno, machine_mode xmode,
3340 unsigned int offset, machine_mode ymode,
3341 struct subreg_info *info)
3343 int nregs_xmode, nregs_ymode;
3344 int mode_multiple, nregs_multiple;
3345 int offset_adj, y_offset, y_offset_adj;
3346 int regsize_xmode, regsize_ymode;
3347 bool rknown;
3349 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3351 rknown = false;
3353 /* If there are holes in a non-scalar mode in registers, we expect
3354 that it is made up of its units concatenated together. */
3355 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3357 machine_mode xmode_unit;
3359 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3360 if (GET_MODE_INNER (xmode) == VOIDmode)
3361 xmode_unit = xmode;
3362 else
3363 xmode_unit = GET_MODE_INNER (xmode);
3364 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3365 gcc_assert (nregs_xmode
3366 == (GET_MODE_NUNITS (xmode)
3367 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3368 gcc_assert (hard_regno_nregs[xregno][xmode]
3369 == (hard_regno_nregs[xregno][xmode_unit]
3370 * GET_MODE_NUNITS (xmode)));
3372 /* You can only ask for a SUBREG of a value with holes in the middle
3373 if you don't cross the holes. (Such a SUBREG should be done by
3374 picking a different register class, or doing it in memory if
3375 necessary.) An example of a value with holes is XCmode on 32-bit
3376 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3377 3 for each part, but in memory it's two 128-bit parts.
3378 Padding is assumed to be at the end (not necessarily the 'high part')
3379 of each unit. */
3380 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3381 < GET_MODE_NUNITS (xmode))
3382 && (offset / GET_MODE_SIZE (xmode_unit)
3383 != ((offset + GET_MODE_SIZE (ymode) - 1)
3384 / GET_MODE_SIZE (xmode_unit))))
3386 info->representable_p = false;
3387 rknown = true;
3390 else
3391 nregs_xmode = hard_regno_nregs[xregno][xmode];
3393 nregs_ymode = hard_regno_nregs[xregno][ymode];
3395 /* Paradoxical subregs are otherwise valid. */
3396 if (!rknown
3397 && offset == 0
3398 && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
3400 info->representable_p = true;
3401 /* If this is a big endian paradoxical subreg, which uses more
3402 actual hard registers than the original register, we must
3403 return a negative offset so that we find the proper highpart
3404 of the register. */
3405 if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3406 ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
3407 info->offset = nregs_xmode - nregs_ymode;
3408 else
3409 info->offset = 0;
3410 info->nregs = nregs_ymode;
3411 return;
3414 /* If registers store different numbers of bits in the different
3415 modes, we cannot generally form this subreg. */
3416 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3417 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3418 && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
3419 && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
3421 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3422 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3423 if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
3425 info->representable_p = false;
3426 info->nregs
3427 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3428 info->offset = offset / regsize_xmode;
3429 return;
3431 if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
3433 info->representable_p = false;
3434 info->nregs
3435 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3436 info->offset = offset / regsize_xmode;
3437 return;
3439 /* Quick exit for the simple and common case of extracting whole
3440 subregisters from a multiregister value. */
3441 /* ??? It would be better to integrate this into the code below,
3442 if we can generalize the concept enough and figure out how
3443 odd-sized modes can coexist with the other weird cases we support. */
3444 if (!rknown
3445 && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
3446 && regsize_xmode == regsize_ymode
3447 && (offset % regsize_ymode) == 0)
3449 info->representable_p = true;
3450 info->nregs = nregs_ymode;
3451 info->offset = offset / regsize_ymode;
3452 gcc_assert (info->offset + info->nregs <= nregs_xmode);
3453 return;
3457 /* Lowpart subregs are otherwise valid. */
3458 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3460 info->representable_p = true;
3461 rknown = true;
3463 if (offset == 0 || nregs_xmode == nregs_ymode)
3465 info->offset = 0;
3466 info->nregs = nregs_ymode;
3467 return;
3471 /* This should always pass, otherwise we don't know how to verify
3472 the constraint. These conditions may be relaxed but
3473 subreg_regno_offset would need to be redesigned. */
3474 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3475 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3477 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
3478 && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
3480 HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
3481 HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
3482 HOST_WIDE_INT off_low = offset & (ysize - 1);
3483 HOST_WIDE_INT off_high = offset & ~(ysize - 1);
3484 offset = (xsize - ysize - off_high) | off_low;
3486 /* The XMODE value can be seen as a vector of NREGS_XMODE
3487 values. The subreg must represent a lowpart of a given field.
3488 Compute what field it is. */
3489 offset_adj = offset;
3490 offset_adj -= subreg_lowpart_offset (ymode,
3491 mode_for_size (GET_MODE_BITSIZE (xmode)
3492 / nregs_xmode,
3493 MODE_INT, 0));
3495 /* Size of ymode must not be greater than the size of xmode. */
3496 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3497 gcc_assert (mode_multiple != 0);
3499 y_offset = offset / GET_MODE_SIZE (ymode);
3500 y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
3501 nregs_multiple = nregs_xmode / nregs_ymode;
3503 gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
3504 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3506 if (!rknown)
3508 info->representable_p = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
3509 rknown = true;
3511 info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
3512 info->nregs = nregs_ymode;
3515 /* This function returns the regno offset of a subreg expression.
3516 xregno - A regno of an inner hard subreg_reg (or what will become one).
3517 xmode - The mode of xregno.
3518 offset - The byte offset.
3519 ymode - The mode of a top level SUBREG (or what may become one).
3520 RETURN - The regno offset which would be used. */
3521 unsigned int
3522 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
3523 unsigned int offset, machine_mode ymode)
3525 struct subreg_info info;
3526 subreg_get_info (xregno, xmode, offset, ymode, &info);
3527 return info.offset;
3530 /* This function returns true when the offset is representable via
3531 subreg_offset in the given regno.
3532 xregno - A regno of an inner hard subreg_reg (or what will become one).
3533 xmode - The mode of xregno.
3534 offset - The byte offset.
3535 ymode - The mode of a top level SUBREG (or what may become one).
3536 RETURN - Whether the offset is representable. */
3537 bool
3538 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
3539 unsigned int offset, machine_mode ymode)
3541 struct subreg_info info;
3542 subreg_get_info (xregno, xmode, offset, ymode, &info);
3543 return info.representable_p;
3546 /* Return the number of a YMODE register to which
3548 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3550 can be simplified. Return -1 if the subreg can't be simplified.
3552 XREGNO is a hard register number. */
3555 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
3556 unsigned int offset, machine_mode ymode)
3558 struct subreg_info info;
3559 unsigned int yregno;
3561 #ifdef CANNOT_CHANGE_MODE_CLASS
3562 /* Give the backend a chance to disallow the mode change. */
3563 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3564 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3565 && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
3566 /* We can use mode change in LRA for some transformations. */
3567 && ! lra_in_progress)
3568 return -1;
3569 #endif
3571 /* We shouldn't simplify stack-related registers. */
3572 if ((!reload_completed || frame_pointer_needed)
3573 && xregno == FRAME_POINTER_REGNUM)
3574 return -1;
3576 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3577 && xregno == ARG_POINTER_REGNUM)
3578 return -1;
3580 if (xregno == STACK_POINTER_REGNUM
3581 /* We should convert hard stack register in LRA if it is
3582 possible. */
3583 && ! lra_in_progress)
3584 return -1;
3586 /* Try to get the register offset. */
3587 subreg_get_info (xregno, xmode, offset, ymode, &info);
3588 if (!info.representable_p)
3589 return -1;
3591 /* Make sure that the offsetted register value is in range. */
3592 yregno = xregno + info.offset;
3593 if (!HARD_REGISTER_NUM_P (yregno))
3594 return -1;
3596 /* See whether (reg:YMODE YREGNO) is valid.
3598 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3599 This is a kludge to work around how complex FP arguments are passed
3600 on IA-64 and should be fixed. See PR target/49226. */
3601 if (!HARD_REGNO_MODE_OK (yregno, ymode)
3602 && HARD_REGNO_MODE_OK (xregno, xmode))
3603 return -1;
3605 return (int) yregno;
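/* A usage sketch: rewriting a hard-register SUBREG X as a plain REG
   when the mapping is representable.  */
#if 0
  int new_regno = simplify_subreg_regno (REGNO (SUBREG_REG (x)),
					 GET_MODE (SUBREG_REG (x)),
					 SUBREG_BYTE (x), GET_MODE (x));
  if (new_regno >= 0)
    x = gen_rtx_REG (GET_MODE (x), new_regno);
#endif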
3608 /* Return the final regno that a subreg expression refers to. */
3609 unsigned int
3610 subreg_regno (const_rtx x)
3612 unsigned int ret;
3613 rtx subreg = SUBREG_REG (x);
3614 int regno = REGNO (subreg);
3616 ret = regno + subreg_regno_offset (regno,
3617 GET_MODE (subreg),
3618 SUBREG_BYTE (x),
3619 GET_MODE (x));
3620 return ret;
3624 /* Return the number of registers that a subreg expression refers
3625 to. */
3626 unsigned int
3627 subreg_nregs (const_rtx x)
3629 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3632 /* Return the number of registers that the subreg expression X, whose
3633 inner register has register number REGNO, refers to. This is a copy of
3634 subreg_nregs changed so that the regno can be passed in. */
3636 unsigned int
3637 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3639 struct subreg_info info;
3640 rtx subreg = SUBREG_REG (x);
3642 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3643 &info);
3644 return info.nregs;
3648 struct parms_set_data
3650 int nregs;
3651 HARD_REG_SET regs;
3654 /* Helper function for noticing stores to parameter registers. */
3655 static void
3656 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3658 struct parms_set_data *const d = (struct parms_set_data *) data;
3659 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3660 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3662 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3663 d->nregs--;
3667 /* Look backward for the first parameter to be loaded.
3668 Note that loads of all parameters will not necessarily be
3669 found if CSE has eliminated some of them (e.g., an argument
3670 to the outer function is passed down as a parameter).
3671 Do not skip BOUNDARY. */
3672 rtx_insn *
3673 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
3675 struct parms_set_data parm;
3676 rtx p;
3677 rtx_insn *before, *first_set;
3679 /* Since different machines initialize their parameter registers
3680 in different orders, assume nothing. Collect the set of all
3681 parameter registers. */
3682 CLEAR_HARD_REG_SET (parm.regs);
3683 parm.nregs = 0;
3684 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3685 if (GET_CODE (XEXP (p, 0)) == USE
3686 && REG_P (XEXP (XEXP (p, 0), 0)))
3688 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3690 /* We only care about registers which can hold function
3691 arguments. */
3692 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3693 continue;
3695 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3696 parm.nregs++;
3698 before = call_insn;
3699 first_set = call_insn;
3701 /* Search backward for the first set of a register in this set. */
3702 while (parm.nregs && before != boundary)
3704 before = PREV_INSN (before);
3706 /* It is possible that some loads got CSEed from one call to
3707 another. Stop in that case. */
3708 if (CALL_P (before))
3709 break;
3711 /* Our caller must either ensure that we will find all sets
3712 (in case the code has not been optimized yet), or take care
3713 of possible labels by setting BOUNDARY to the preceding
3714 CODE_LABEL. */
3715 if (LABEL_P (before))
3717 gcc_assert (before == boundary);
3718 break;
3721 if (INSN_P (before))
3723 int nregs_old = parm.nregs;
3724 note_stores (PATTERN (before), parms_set, &parm);
3725 /* If we found something that did not set a parameter reg,
3726 we're done. Do not keep going, as that might result
3727 in hoisting an insn before the setting of a pseudo
3728 that is used by the hoisted insn. */
3729 if (nregs_old != parm.nregs)
3730 first_set = before;
3731 else
3732 break;
3735 return first_set;
3738 /* Return true if we should avoid inserting code between INSN and the
3739 preceding call instruction. */
3741 bool
3742 keep_with_call_p (const rtx_insn *insn)
3744 rtx set;
3746 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3748 if (REG_P (SET_DEST (set))
3749 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3750 && fixed_regs[REGNO (SET_DEST (set))]
3751 && general_operand (SET_SRC (set), VOIDmode))
3752 return true;
3753 if (REG_P (SET_SRC (set))
3754 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
3755 && REG_P (SET_DEST (set))
3756 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3757 return true;
3758 /* There may be a stack pop just after the call and before the store
3759 of the return register. Search for the actual store when deciding
3760 if we can break or not. */
3761 if (SET_DEST (set) == stack_pointer_rtx)
3763 /* This CONST_CAST is okay because next_nonnote_insn just
3764 returns its argument and we assign it to a const_rtx
3765 variable. */
3766 const rtx_insn *i2
3767 = next_nonnote_insn (const_cast<rtx_insn *> (insn));
3768 if (i2 && keep_with_call_p (i2))
3769 return true;
3772 return false;
3775 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3776 to non-complex jumps. That is, direct unconditional, conditional,
3777 and tablejumps, but not computed jumps or returns. It also does
3778 not apply to the fallthru case of a conditional jump. */
3780 bool
3781 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
3783 rtx tmp = JUMP_LABEL (jump_insn);
3784 rtx_jump_table_data *table;
3786 if (label == tmp)
3787 return true;
3789 if (tablejump_p (jump_insn, NULL, &table))
3791 rtvec vec = table->get_labels ();
3792 int i, veclen = GET_NUM_ELEM (vec);
3794 for (i = 0; i < veclen; ++i)
3795 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
3796 return true;
3799 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
3800 return true;
3802 return false;
3806 /* Return an estimate of the cost of computing rtx X.
3807 One use is in cse, to decide which expression to keep in the hash table.
3808 Another is in rtl generation, to pick the cheapest way to multiply.
3809 Other uses like the latter are expected in the future.
3811 X appears as operand OPNO in an expression with code OUTER_CODE.
3812 SPEED specifies whether costs optimized for speed or size should
3813 be returned. */
3816 rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
3818 int i, j;
3819 enum rtx_code code;
3820 const char *fmt;
3821 int total;
3822 int factor;
3824 if (x == 0)
3825 return 0;
3827 /* A size N times larger than UNITS_PER_WORD likely needs N times as
3828 many insns, taking N times as long. */
3829 factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
3830 if (factor == 0)
3831 factor = 1;
3833 /* Compute the default costs of certain things.
3834 Note that targetm.rtx_costs can override the defaults. */
3836 code = GET_CODE (x);
3837 switch (code)
3839 case MULT:
3840 /* Multiplication has time-complexity O(N*N), where N is the
3841 number of units (translated from digits) when using
3842 schoolbook long multiplication. */
3843 total = factor * factor * COSTS_N_INSNS (5);
3844 break;
3845 case DIV:
3846 case UDIV:
3847 case MOD:
3848 case UMOD:
3849 /* Similarly, complexity for schoolbook long division. */
3850 total = factor * factor * COSTS_N_INSNS (7);
3851 break;
3852 case USE:
3853 /* Used in combine.c as a marker. */
3854 total = 0;
3855 break;
3856 case SET:
3857 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
3858 the mode for the factor. */
3859 factor = GET_MODE_SIZE (GET_MODE (SET_DEST (x))) / UNITS_PER_WORD;
3860 if (factor == 0)
3861 factor = 1;
3862 /* Fall through. */
3863 default:
3864 total = factor * COSTS_N_INSNS (1);
3867 switch (code)
3869 case REG:
3870 return 0;
3872 case SUBREG:
3873 total = 0;
3874 /* If we can't tie these modes, make this expensive. The larger
3875 the mode, the more expensive it is. */
3876 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
3877 return COSTS_N_INSNS (2 + factor);
3878 break;
3880 default:
3881 if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
3882 return total;
3883 break;
3886 /* Sum the costs of the sub-rtx's, plus cost of this operation,
3887 which is already in total. */
3889 fmt = GET_RTX_FORMAT (code);
3890 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3891 if (fmt[i] == 'e')
3892 total += rtx_cost (XEXP (x, i), code, i, speed);
3893 else if (fmt[i] == 'E')
3894 for (j = 0; j < XVECLEN (x, i); j++)
3895 total += rtx_cost (XVECEXP (x, i, j), code, i, speed);
3897 return total;
3900 /* Fill in the structure C with information about both speed and size rtx
3901 costs for X, which is operand OPNO in an expression with code OUTER. */
3903 void
3904 get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
3905 struct full_rtx_costs *c)
3907 c->speed = rtx_cost (x, outer, opno, true);
3908 c->size = rtx_cost (x, outer, opno, false);
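/* A sketch of the intended use: compute both costs once, then compare
   with costs_lt_p from rtl.h, letting the current speed/size preference
   pick the deciding metric.  OLD_RTX and NEW_RTX are assumed to come
   from the caller.  */
#if 0
  struct full_rtx_costs old_cost, new_cost;
  get_full_rtx_cost (old_rtx, SET, 1, &old_cost);
  get_full_rtx_cost (new_rtx, SET, 1, &new_cost);
  bool prefer_new = costs_lt_p (&new_cost, &old_cost,
				optimize_insn_for_speed_p ());
#endif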
3912 /* Return the cost of address expression X.
3913 Expect that X is a properly formed address reference.
3915 The SPEED parameter specifies whether costs optimized for speed or size
3916 should be returned. */
3919 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
3921 /* We may be asked for the cost of various unusual addresses, such as
3922 operands of a push instruction. It is not worthwhile to complicate
3923 the writing of the target hook for such cases. */
3925 if (!memory_address_addr_space_p (mode, x, as))
3926 return 1000;
3928 return targetm.address_cost (x, mode, as, speed);
3931 /* If the target doesn't override, compute the cost as with arithmetic. */
3934 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
3936 return rtx_cost (x, MEM, 0, speed);
3940 unsigned HOST_WIDE_INT
3941 nonzero_bits (const_rtx x, machine_mode mode)
3943 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
3946 unsigned int
3947 num_sign_bit_copies (const_rtx x, machine_mode mode)
3949 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
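/* A sketch of the classic consumer pattern for nonzero_bits: prove that
   a value already fits in a narrow range so that a masking operation
   can be dropped.  */
#if 0
  if ((nonzero_bits (op, GET_MODE (op))
       & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
    /* OP is known to fit in 8 bits; an AND with 0xff is redundant.  */
    replacement = op;
#endif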
3952 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
3953 It avoids exponential behavior in nonzero_bits1 when X has
3954 identical subexpressions on the first or the second level. */
3956 static unsigned HOST_WIDE_INT
3957 cached_nonzero_bits (const_rtx x, machine_mode mode, const_rtx known_x,
3958 machine_mode known_mode,
3959 unsigned HOST_WIDE_INT known_ret)
3961 if (x == known_x && mode == known_mode)
3962 return known_ret;
3964 /* Try to find identical subexpressions. If found call
3965 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
3966 precomputed value for the subexpression as KNOWN_RET. */
3968 if (ARITHMETIC_P (x))
3970 rtx x0 = XEXP (x, 0);
3971 rtx x1 = XEXP (x, 1);
3973 /* Check the first level. */
3974 if (x0 == x1)
3975 return nonzero_bits1 (x, mode, x0, mode,
3976 cached_nonzero_bits (x0, mode, known_x,
3977 known_mode, known_ret));
3979 /* Check the second level. */
3980 if (ARITHMETIC_P (x0)
3981 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
3982 return nonzero_bits1 (x, mode, x1, mode,
3983 cached_nonzero_bits (x1, mode, known_x,
3984 known_mode, known_ret));
3986 if (ARITHMETIC_P (x1)
3987 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
3988 return nonzero_bits1 (x, mode, x0, mode,
3989 cached_nonzero_bits (x0, mode, known_x,
3990 known_mode, known_ret));
3993 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
3996 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
3997 We don't let nonzero_bits recur into num_sign_bit_copies, because that
3998 is less useful. We can't allow both, because that results in exponential
3999 run time recursion. There is a nullstone testcase that triggered
4000 this. This macro avoids accidental uses of num_sign_bit_copies. */
4001 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4003 /* Given an expression, X, compute which bits in X can be nonzero.
4004 We don't care about bits outside of those defined in MODE.
4006 For most X this is simply GET_MODE_MASK (MODE), but if X is
4007 an arithmetic operation, we can do better. */
4009 static unsigned HOST_WIDE_INT
4010 nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
4011 machine_mode known_mode,
4012 unsigned HOST_WIDE_INT known_ret)
4014 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4015 unsigned HOST_WIDE_INT inner_nz;
4016 enum rtx_code code;
4017 machine_mode inner_mode;
4018 unsigned int mode_width = GET_MODE_PRECISION (mode);
4020 /* For floating-point and vector values, assume all bits are needed. */
4021 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
4022 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4023 return nonzero;
4025 /* If X is wider than MODE, use its mode instead. */
4026 if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
4028 mode = GET_MODE (x);
4029 nonzero = GET_MODE_MASK (mode);
4030 mode_width = GET_MODE_PRECISION (mode);
4033 if (mode_width > HOST_BITS_PER_WIDE_INT)
4034 /* Our only callers in this case look for single bit values. So
4035 just return the mode mask. Those tests will then be false. */
4036 return nonzero;
4038 #ifndef WORD_REGISTER_OPERATIONS
4039 /* If MODE is wider than X, but both are a single word for both the host
4040 and target machines, we can compute this from which bits of the
4041 object might be nonzero in its own mode, taking into account the fact
4042 that on many CISC machines, accessing an object in a wider mode
4043 causes the high-order bits to become undefined. So they are
4044 not known to be zero. */
4046 if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
4047 && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
4048 && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
4049 && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
4051 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
4052 known_x, known_mode, known_ret);
4053 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
4054 return nonzero;
4056 #endif
4058 code = GET_CODE (x);
4059 switch (code)
4061 case REG:
4062 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4063 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4064 all the bits above ptr_mode are known to be zero. */
4065 /* As we do not know which address space the pointer is referring to,
4066 we can do this only if the target does not support different pointer
4067 or address modes depending on the address space. */
4068 if (target_default_pointer_address_modes_p ()
4069 && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4070 && REG_POINTER (x))
4071 nonzero &= GET_MODE_MASK (ptr_mode);
4072 #endif
4074 /* Include declared information about alignment of pointers. */
4075 /* ??? We don't properly preserve REG_POINTER changes across
4076 pointer-to-integer casts, so we can't trust it except for
4077 things that we know must be pointers. See execute/960116-1.c. */
4078 if ((x == stack_pointer_rtx
4079 || x == frame_pointer_rtx
4080 || x == arg_pointer_rtx)
4081 && REGNO_POINTER_ALIGN (REGNO (x)))
4083 unsigned HOST_WIDE_INT alignment
4084 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4086 #ifdef PUSH_ROUNDING
4087 /* If PUSH_ROUNDING is defined, it is possible for the
4088 stack to be momentarily aligned only to that amount,
4089 so we pick the least alignment. */
4090 if (x == stack_pointer_rtx && PUSH_ARGS)
4091 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
4092 alignment);
4093 #endif
4095 nonzero &= ~(alignment - 1);
4099 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4100 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
4101 known_mode, known_ret,
4102 &nonzero_for_hook);
4104 if (new_rtx)
4105 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4106 known_mode, known_ret);
4108 return nonzero_for_hook;
4111 case CONST_INT:
4112 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
4113 /* If X is negative in MODE, sign-extend the value. */
4114 if (INTVAL (x) > 0
4115 && mode_width < BITS_PER_WORD
4116 && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
4117 != 0)
4118 return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4119 #endif
4121 return UINTVAL (x);
4123 case MEM:
4124 #ifdef LOAD_EXTEND_OP
4125 /* On many, if not most, RISC machines, reading a byte from memory
4126 zeros the rest of the register. Noticing that fact saves a lot
4127 of extra zero-extends. */
4128 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
4129 nonzero &= GET_MODE_MASK (GET_MODE (x));
4130 #endif
4131 break;
4133 case EQ: case NE:
4134 case UNEQ: case LTGT:
4135 case GT: case GTU: case UNGT:
4136 case LT: case LTU: case UNLT:
4137 case GE: case GEU: case UNGE:
4138 case LE: case LEU: case UNLE:
4139 case UNORDERED: case ORDERED:
4140 /* If this produces an integer result, we know which bits are set.
4141 Code here used to clear bits outside the mode of X, but that is
4142 now done above. */
4143 /* Mind that MODE is the mode the caller wants to look at this
4144 operation in, and not the actual operation mode. We can wind
4145 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4146 that describes the results of a vector compare. */
4147 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
4148 && mode_width <= HOST_BITS_PER_WIDE_INT)
4149 nonzero = STORE_FLAG_VALUE;
4150 break;
4152 case NEG:
4153 #if 0
4154 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4155 and num_sign_bit_copies. */
4156 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4157 == GET_MODE_PRECISION (GET_MODE (x)))
4158 nonzero = 1;
4159 #endif
4161 if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
4162 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
4163 break;
4165 case ABS:
4166 #if 0
4167 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4168 and num_sign_bit_copies. */
4169 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4170 == GET_MODE_PRECISION (GET_MODE (x)))
4171 nonzero = 1;
4172 #endif
4173 break;
4175 case TRUNCATE:
4176 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4177 known_x, known_mode, known_ret)
4178 & GET_MODE_MASK (mode));
4179 break;
4181 case ZERO_EXTEND:
4182 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4183 known_x, known_mode, known_ret);
4184 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4185 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4186 break;
4188 case SIGN_EXTEND:
4189 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4190 Otherwise, show that all the bits in the outer mode but not in the
4191 inner mode may be nonzero. */
4192 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4193 known_x, known_mode, known_ret);
4194 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4196 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4197 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4198 inner_nz |= (GET_MODE_MASK (mode)
4199 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4202 nonzero &= inner_nz;
4203 break;
4205 case AND:
4206 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4207 known_x, known_mode, known_ret)
4208 & cached_nonzero_bits (XEXP (x, 1), mode,
4209 known_x, known_mode, known_ret);
4210 break;
4212 case XOR: case IOR:
4213 case UMIN: case UMAX: case SMIN: case SMAX:
4215 unsigned HOST_WIDE_INT nonzero0
4216 = cached_nonzero_bits (XEXP (x, 0), mode,
4217 known_x, known_mode, known_ret);
4219 /* Don't call nonzero_bits for the second time if it cannot change
4220 anything. */
4221 if ((nonzero & nonzero0) != nonzero)
4222 nonzero &= nonzero0
4223 | cached_nonzero_bits (XEXP (x, 1), mode,
4224 known_x, known_mode, known_ret);
4226 break;
4228 case PLUS: case MINUS:
4229 case MULT:
4230 case DIV: case UDIV:
4231 case MOD: case UMOD:
4232 /* We can apply the rules of arithmetic to compute the number of
4233 high- and low-order zero bits of these operations. We start by
4234 computing the width (position of the highest-order nonzero bit)
4235 and the number of low-order zero bits for each value. */
4237 unsigned HOST_WIDE_INT nz0
4238 = cached_nonzero_bits (XEXP (x, 0), mode,
4239 known_x, known_mode, known_ret);
4240 unsigned HOST_WIDE_INT nz1
4241 = cached_nonzero_bits (XEXP (x, 1), mode,
4242 known_x, known_mode, known_ret);
4243 int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
4244 int width0 = floor_log2 (nz0) + 1;
4245 int width1 = floor_log2 (nz1) + 1;
4246 int low0 = floor_log2 (nz0 & -nz0);
4247 int low1 = floor_log2 (nz1 & -nz1);
4248 unsigned HOST_WIDE_INT op0_maybe_minusp
4249 = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4250 unsigned HOST_WIDE_INT op1_maybe_minusp
4251 = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4252 unsigned int result_width = mode_width;
4253 int result_low = 0;
4255 switch (code)
4257 case PLUS:
4258 result_width = MAX (width0, width1) + 1;
4259 result_low = MIN (low0, low1);
4260 break;
4261 case MINUS:
4262 result_low = MIN (low0, low1);
4263 break;
4264 case MULT:
4265 result_width = width0 + width1;
4266 result_low = low0 + low1;
4267 break;
4268 case DIV:
4269 if (width1 == 0)
4270 break;
4271 if (!op0_maybe_minusp && !op1_maybe_minusp)
4272 result_width = width0;
4273 break;
4274 case UDIV:
4275 if (width1 == 0)
4276 break;
4277 result_width = width0;
4278 break;
4279 case MOD:
4280 if (width1 == 0)
4281 break;
4282 if (!op0_maybe_minusp && !op1_maybe_minusp)
4283 result_width = MIN (width0, width1);
4284 result_low = MIN (low0, low1);
4285 break;
4286 case UMOD:
4287 if (width1 == 0)
4288 break;
4289 result_width = MIN (width0, width1);
4290 result_low = MIN (low0, low1);
4291 break;
4292 default:
4293 gcc_unreachable ();
4296 if (result_width < mode_width)
4297 nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;
4299 if (result_low > 0)
4300 nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
4302 break;
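/* Worked example: if nz0 == 0xff (width 8, no trailing zeros) and
   nz1 == 0x0f (width 4), a PLUS can need at most MAX (8, 4) + 1 == 9
   bits, so NONZERO is masked down to at most 0x1ff.  */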
4304 case ZERO_EXTRACT:
4305 if (CONST_INT_P (XEXP (x, 1))
4306 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4307 nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
4308 break;
4310 case SUBREG:
4311 /* If this is a SUBREG formed for a promoted variable that has
4312 been zero-extended, we know that at least the high-order bits
4313 are zero, though others might be too. */
4315 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
4316 nonzero = GET_MODE_MASK (GET_MODE (x))
4317 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
4318 known_x, known_mode, known_ret);
4320 inner_mode = GET_MODE (SUBREG_REG (x));
4321 /* If the inner mode is a single word for both the host and target
4322 machines, we can compute this from which bits of the inner
4323 object might be nonzero. */
4324 if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
4325 && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
4327 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4328 known_x, known_mode, known_ret);
4330 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
4331 /* If this is a typical RISC machine, we only have to worry
4332 about the way loads are extended. */
4333 if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
4334 ? val_signbit_known_set_p (inner_mode, nonzero)
4335 : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
4336 || !MEM_P (SUBREG_REG (x)))
4337 #endif
4339 /* On many CISC machines, accessing an object in a wider mode
4340 causes the high-order bits to become undefined. So they are
4341 not known to be zero. */
4342 if (GET_MODE_PRECISION (GET_MODE (x))
4343 > GET_MODE_PRECISION (inner_mode))
4344 nonzero |= (GET_MODE_MASK (GET_MODE (x))
4345 & ~GET_MODE_MASK (inner_mode));
4348 break;
4350 case ASHIFTRT:
4351 case LSHIFTRT:
4352 case ASHIFT:
4353 case ROTATE:
4354 /* The nonzero bits are in two classes: any bits within MODE
4355 that aren't in GET_MODE (x) are always significant. The rest of the
4356 nonzero bits are those that are significant in the operand of
4357 the shift when shifted the appropriate number of bits. This
4358 shows that high-order bits are cleared by the right shift and
4359 low-order bits by left shifts. */
4360 if (CONST_INT_P (XEXP (x, 1))
4361 && INTVAL (XEXP (x, 1)) >= 0
4362 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4363 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4365 machine_mode inner_mode = GET_MODE (x);
4366 unsigned int width = GET_MODE_PRECISION (inner_mode);
4367 int count = INTVAL (XEXP (x, 1));
4368 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
4369 unsigned HOST_WIDE_INT op_nonzero
4370 = cached_nonzero_bits (XEXP (x, 0), mode,
4371 known_x, known_mode, known_ret);
4372 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4373 unsigned HOST_WIDE_INT outer = 0;
4375 if (mode_width > width)
4376 outer = (op_nonzero & nonzero & ~mode_mask);
4378 if (code == LSHIFTRT)
4379 inner >>= count;
4380 else if (code == ASHIFTRT)
4382 inner >>= count;
4384 /* If the sign bit may have been nonzero before the shift, we
4385 need to mark all the places it could have been copied to
4386 by the shift as possibly nonzero. */
4387 if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
4388 inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
4389 << (width - count);
4391 else if (code == ASHIFT)
4392 inner <<= count;
4393 else
4394 inner = ((inner << (count % width)
4395 | (inner >> (width - (count % width)))) & mode_mask);
4397 nonzero &= (outer | inner);
4399 break;
4401 case FFS:
4402 case POPCOUNT:
4403 /* This is at most the number of bits in the mode. */
4404 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4405 break;
4407 case CLZ:
4408 /* If CLZ has a known value at zero, then the result is either that
4409 value or a count between 0 and the mode precision minus one. */
4410 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4411 nonzero
4412 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4413 else
4414 nonzero = -1;
4415 break;
4417 case CTZ:
4418 /* If CTZ has a known value at zero, then the result is either that
4419 value or a count between 0 and the mode precision minus one. */
4420 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4421 nonzero
4422 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4423 else
4424 nonzero = -1;
4425 break;
4427 case CLRSB:
4428 /* This is at most the number of bits in the mode minus 1. */
4429 nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4430 break;
4432 case PARITY:
4433 nonzero = 1;
4434 break;
4436 case IF_THEN_ELSE:
4438 unsigned HOST_WIDE_INT nonzero_true
4439 = cached_nonzero_bits (XEXP (x, 1), mode,
4440 known_x, known_mode, known_ret);
4442 /* Don't call nonzero_bits a second time if it cannot change
4443 anything. */
4444 if ((nonzero & nonzero_true) != nonzero)
4445 nonzero &= nonzero_true
4446 | cached_nonzero_bits (XEXP (x, 2), mode,
4447 known_x, known_mode, known_ret);
4449 break;
4451 default:
4452 break;
4455 return nonzero;
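/* Illustrative sketch (hypothetical helper, not part of the upstream
   file): a typical use of the public nonzero_bits wrapper.  A value is
   known to fit in an unsigned byte when no bit above bit 7 can be set.  */

static bool
example_fits_in_uchar_p (const_rtx x, machine_mode mode)
{
  return (nonzero_bits (x, mode) & ~(unsigned HOST_WIDE_INT) 0xff) == 0;
}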
4458 /* See the macro definition above. */
4459 #undef cached_num_sign_bit_copies
4462 /* The function cached_num_sign_bit_copies is a wrapper around
4463 num_sign_bit_copies1. It avoids exponential behavior in
4464 num_sign_bit_copies1 when X has identical subexpressions on the
4465 first or the second level. */
4467 static unsigned int
4468 cached_num_sign_bit_copies (const_rtx x, machine_mode mode, const_rtx known_x,
4469 machine_mode known_mode,
4470 unsigned int known_ret)
4472 if (x == known_x && mode == known_mode)
4473 return known_ret;
4475 /* Try to find identical subexpressions. If found call
4476 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4477 the precomputed value for the subexpression as KNOWN_RET. */
4479 if (ARITHMETIC_P (x))
4481 rtx x0 = XEXP (x, 0);
4482 rtx x1 = XEXP (x, 1);
4484 /* Check the first level. */
4485 if (x0 == x1)
4486 return
4487 num_sign_bit_copies1 (x, mode, x0, mode,
4488 cached_num_sign_bit_copies (x0, mode, known_x,
4489 known_mode,
4490 known_ret));
4492 /* Check the second level. */
4493 if (ARITHMETIC_P (x0)
4494 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4495 return
4496 num_sign_bit_copies1 (x, mode, x1, mode,
4497 cached_num_sign_bit_copies (x1, mode, known_x,
4498 known_mode,
4499 known_ret));
4501 if (ARITHMETIC_P (x1)
4502 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4503 return
4504 num_sign_bit_copies1 (x, mode, x0, mode,
4505 cached_num_sign_bit_copies (x0, mode, known_x,
4506 known_mode,
4507 known_ret));
4510 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4513 /* Return the number of bits at the high-order end of X that are known to
4514 be equal to the sign bit. X will be used in mode MODE; if MODE is
4515 VOIDmode, X will be used in its own mode. The returned value will always
4516 be between 1 and the number of bits in MODE. */
4518 static unsigned int
4519 num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x,
4520 machine_mode known_mode,
4521 unsigned int known_ret)
4523 enum rtx_code code = GET_CODE (x);
4524 unsigned int bitwidth = GET_MODE_PRECISION (mode);
4525 int num0, num1, result;
4526 unsigned HOST_WIDE_INT nonzero;
4528 /* If we weren't given a mode, use the mode of X. If the mode is still
4529 VOIDmode, we don't know anything. Likewise if one of the modes is
4530 floating-point. */
4532 if (mode == VOIDmode)
4533 mode = GET_MODE (x);
4535 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
4536 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4537 return 1;
4539 /* For a smaller object, just ignore the high bits. */
4540 if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
4542 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4543 known_x, known_mode, known_ret);
4544 return MAX (1,
4545 num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
4548 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
4550 #ifndef WORD_REGISTER_OPERATIONS
4551 /* If this machine does not do all register operations on the entire
4552 register and MODE is wider than the mode of X, we can say nothing
4553 at all about the high-order bits. */
4554 return 1;
4555 #else
4556 /* Likewise on machines that do, if the mode of the object is smaller
4557 than a word and loads of that size don't sign extend, we can say
4558 nothing about the high order bits. */
4559 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
4560 #ifdef LOAD_EXTEND_OP
4561 && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
4562 #endif
4564 return 1;
4565 #endif
4568 switch (code)
4570 case REG:
4572 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4573 /* If pointers extend signed and this is a pointer in Pmode, say that
4574 all the bits above ptr_mode are known to be sign bit copies. */
4575 /* As we do not know which address space the pointer is referring to,
4576 we can do this only if the target does not support different pointer
4577 or address modes depending on the address space. */
4578 if (target_default_pointer_address_modes_p ()
4579 && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4580 && mode == Pmode && REG_POINTER (x))
4581 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
4582 #endif
4585 unsigned int copies_for_hook = 1, copies = 1;
4586 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4587 known_mode, known_ret,
4588 &copies_for_hook);
4590 if (new_rtx)
4591 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
4592 known_mode, known_ret);
4594 if (copies > 1 || copies_for_hook > 1)
4595 return MAX (copies, copies_for_hook);
4597 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4599 break;
4601 case MEM:
4602 #ifdef LOAD_EXTEND_OP
4603 /* Some RISC machines sign-extend all loads of smaller than a word. */
4604 if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
4605 return MAX (1, ((int) bitwidth
4606 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
4607 #endif
4608 break;
4610 case CONST_INT:
4611 /* If the constant is negative, take its 1's complement and remask.
4612 Then see how many zero bits we have. */
4613 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
4614 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4615 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4616 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4618 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
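/* Worked example: in SImode, x == (const_int 3) gives nonzero == 3 and
   floor_log2 (3) == 1, hence 32 - 1 - 1 == 30 sign bit copies; for
   x == (const_int -1) the complement remasks to 0, yielding all 32 bits.  */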
4620 case SUBREG:
4621 /* If this is a SUBREG for a promoted object that is sign-extended
4622 and we are looking at it in a wider mode, at least the high-order
4623 bits are known to be sign bit copies. */
4625 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
4627 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4628 known_x, known_mode, known_ret);
4629 return MAX ((int) bitwidth
4630 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
4631 num0);
4634 /* For a smaller object, just ignore the high bits. */
4635 if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
4637 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4638 known_x, known_mode, known_ret);
4639 return MAX (1, (num0
4640 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
4641 - bitwidth)));
4644 #ifdef WORD_REGISTER_OPERATIONS
4645 #ifdef LOAD_EXTEND_OP
4646 /* For paradoxical SUBREGs on machines where all register operations
4647 affect the entire register, just look inside. Note that we are
4648 passing MODE to the recursive call, so the number of sign bit copies
4649 will remain relative to that mode, not the inner mode. */
4651 /* This works only if loads sign extend. Otherwise, if we get a
4652 reload for the inner part, it may be loaded from the stack, and
4653 then we lose all sign bit copies that existed before the store
4654 to the stack. */
4656 if (paradoxical_subreg_p (x)
4657 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
4658 && MEM_P (SUBREG_REG (x)))
4659 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4660 known_x, known_mode, known_ret);
4661 #endif
4662 #endif
4663 break;
4665 case SIGN_EXTRACT:
4666 if (CONST_INT_P (XEXP (x, 1)))
4667 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4668 break;
4670 case SIGN_EXTEND:
4671 return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4672 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4673 known_x, known_mode, known_ret));
4675 case TRUNCATE:
4676 /* For a smaller object, just ignore the high bits. */
4677 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4678 known_x, known_mode, known_ret);
4679 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4680 - bitwidth)));
4682 case NOT:
4683 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4684 known_x, known_mode, known_ret);
4686 case ROTATE: case ROTATERT:
4687 /* If we are rotating left by a number of bits less than the number
4688 of sign bit copies, we can just subtract that amount from the
4689 number. */
4690 if (CONST_INT_P (XEXP (x, 1))
4691 && INTVAL (XEXP (x, 1)) >= 0
4692 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4694 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4695 known_x, known_mode, known_ret);
4696 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
4697 : (int) bitwidth - INTVAL (XEXP (x, 1))));
4699 break;
4701 case NEG:
4702 /* In general, this subtracts one sign bit copy. But if the value
4703 is known to be positive, the number of sign bit copies is the
4704 same as that of the input. Finally, if the input has just one bit
4705 that might be nonzero, all the bits are copies of the sign bit. */
4706 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4707 known_x, known_mode, known_ret);
4708 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4709 return num0 > 1 ? num0 - 1 : 1;
4711 nonzero = nonzero_bits (XEXP (x, 0), mode);
4712 if (nonzero == 1)
4713 return bitwidth;
4715 if (num0 > 1
4716 && (((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
4717 num0--;
4719 return num0;
4721 case IOR: case AND: case XOR:
4722 case SMIN: case SMAX: case UMIN: case UMAX:
4723 /* Logical operations will preserve the number of sign-bit copies.
4724 MIN and MAX operations always return one of the operands. */
4725 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4726 known_x, known_mode, known_ret);
4727 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4728 known_x, known_mode, known_ret);
4730 /* If num1 is clearing some of the top bits then regardless of
4731 the other term, we are guaranteed to have at least that many
4732 high-order zero bits. */
4733 if (code == AND
4734 && num1 > 1
4735 && bitwidth <= HOST_BITS_PER_WIDE_INT
4736 && CONST_INT_P (XEXP (x, 1))
4737 && (UINTVAL (XEXP (x, 1))
4738 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0)
4739 return num1;
4741 /* Similarly for IOR when setting high-order bits. */
4742 if (code == IOR
4743 && num1 > 1
4744 && bitwidth <= HOST_BITS_PER_WIDE_INT
4745 && CONST_INT_P (XEXP (x, 1))
4746 && (UINTVAL (XEXP (x, 1))
4747 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4748 return num1;
4750 return MIN (num0, num1);
4752 case PLUS: case MINUS:
4753 /* For addition and subtraction, we can have a 1-bit carry. However,
4754 if we are subtracting 1 from a positive number, there will not
4755 be such a carry. Furthermore, if the positive number is known to
4756 be 0 or 1, we know the result is either -1 or 0. */
4758 if (code == PLUS && XEXP (x, 1) == constm1_rtx
4759 && bitwidth <= HOST_BITS_PER_WIDE_INT)
4761 nonzero = nonzero_bits (XEXP (x, 0), mode);
4762 if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
4763 return (nonzero == 1 || nonzero == 0 ? bitwidth
4764 : bitwidth - floor_log2 (nonzero) - 1);
4767 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4768 known_x, known_mode, known_ret);
4769 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4770 known_x, known_mode, known_ret);
4771 result = MAX (1, MIN (num0, num1) - 1);
4773 return result;
4775 case MULT:
4776 /* The number of bits of the product is the sum of the number of
4777 bits of both terms. However, unless one of the terms is known
4778 to be positive, we must allow for an additional bit since negating
4779 a negative number can remove one sign bit copy. */
4781 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4782 known_x, known_mode, known_ret);
4783 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4784 known_x, known_mode, known_ret);
4786 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
4787 if (result > 0
4788 && (bitwidth > HOST_BITS_PER_WIDE_INT
4789 || (((nonzero_bits (XEXP (x, 0), mode)
4790 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4791 && ((nonzero_bits (XEXP (x, 1), mode)
4792 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)))
4793 != 0))))
4794 result--;
4796 return MAX (1, result);
4798 case UDIV:
4799 /* The result must be <= the first operand. If the first operand
4800 has the high bit set, we know nothing about the number of sign
4801 bit copies. */
4802 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4803 return 1;
4804 else if ((nonzero_bits (XEXP (x, 0), mode)
4805 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4806 return 1;
4807 else
4808 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4809 known_x, known_mode, known_ret);
4811 case UMOD:
4812 /* The result must be <= the second operand. If the second operand
4813 has (or just might have) the high bit set, we know nothing about
4814 the number of sign bit copies. */
4815 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4816 return 1;
4817 else if ((nonzero_bits (XEXP (x, 1), mode)
4818 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4819 return 1;
4820 else
4821 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
4822 known_x, known_mode, known_ret);
4824 case DIV:
4825 /* Similar to unsigned division, except that we have to worry about
4826 the case where the divisor is negative, in which case we have
4827 to add 1. */
4828 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4829 known_x, known_mode, known_ret);
4830 if (result > 1
4831 && (bitwidth > HOST_BITS_PER_WIDE_INT
4832 || (nonzero_bits (XEXP (x, 1), mode)
4833 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4834 result--;
4836 return result;
4838 case MOD:
4839 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4840 known_x, known_mode, known_ret);
4841 if (result > 1
4842 && (bitwidth > HOST_BITS_PER_WIDE_INT
4843 || (nonzero_bits (XEXP (x, 1), mode)
4844 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4845 result--;
4847 return result;
4849 case ASHIFTRT:
4850 /* Shifts by a constant add to the number of bits equal to the
4851 sign bit. */
4852 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4853 known_x, known_mode, known_ret);
4854 if (CONST_INT_P (XEXP (x, 1))
4855 && INTVAL (XEXP (x, 1)) > 0
4856 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4857 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
4859 return num0;
4861 case ASHIFT:
4862 /* Left shifts destroy copies. */
4863 if (!CONST_INT_P (XEXP (x, 1))
4864 || INTVAL (XEXP (x, 1)) < 0
4865 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
4866 || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
4867 return 1;
4869 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4870 known_x, known_mode, known_ret);
4871 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
4873 case IF_THEN_ELSE:
4874 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4875 known_x, known_mode, known_ret);
4876 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
4877 known_x, known_mode, known_ret);
4878 return MIN (num0, num1);
4880 case EQ: case NE: case GE: case GT: case LE: case LT:
4881 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
4882 case GEU: case GTU: case LEU: case LTU:
4883 case UNORDERED: case ORDERED:
4884 /* The result is STORE_FLAG_VALUE; if that constant is negative, take
4885 its 1's complement and remask. Then see how many zero bits we have. */
4886 nonzero = STORE_FLAG_VALUE;
4887 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4888 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4889 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4891 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4893 default:
4894 break;
4897 /* If we haven't been able to figure it out by one of the above rules,
4898 see if some of the high-order bits are known to be zero. If so,
4899 count those bits and return one less than that amount. If we can't
4900 safely compute the mask for this mode, give up and return 1. */
4902 bitwidth = GET_MODE_PRECISION (mode);
4903 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4904 return 1;
4906 nonzero = nonzero_bits (x, mode);
4907 return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
4908 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
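/* Illustrative sketch (hypothetical helper, not part of the upstream
   file): the public num_sign_bit_copies wrapper can prove that a DImode
   value is really a sign-extended SImode quantity, since more than 32
   sign bit copies mean the upper 32 bits all duplicate bit 31.  */

static bool
example_sign_extended_si_p (const_rtx x)
{
  return (GET_MODE (x) == DImode
	  && num_sign_bit_copies (x, DImode) > 32);
}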
4911 /* Calculate the rtx_cost of a single instruction. A return value of
4912 zero indicates an instruction pattern without a known cost. */
4914 int
4915 insn_rtx_cost (rtx pat, bool speed)
4917 int i, cost;
4918 rtx set;
4920 /* Extract the single set rtx from the instruction pattern.
4921 We can't use single_set since we only have the pattern. */
4922 if (GET_CODE (pat) == SET)
4923 set = pat;
4924 else if (GET_CODE (pat) == PARALLEL)
4926 set = NULL_RTX;
4927 for (i = 0; i < XVECLEN (pat, 0); i++)
4929 rtx x = XVECEXP (pat, 0, i);
4930 if (GET_CODE (x) == SET)
4932 if (set)
4933 return 0;
4934 set = x;
4937 if (!set)
4938 return 0;
4940 else
4941 return 0;
4943 cost = set_src_cost (SET_SRC (set), speed);
4944 return cost > 0 ? cost : COSTS_N_INSNS (1);
4947 /* Returns estimate on cost of computing SEQ. */
4949 unsigned
4950 seq_cost (const rtx_insn *seq, bool speed)
4952 unsigned cost = 0;
4953 rtx set;
4955 for (; seq; seq = NEXT_INSN (seq))
4957 set = single_set (seq);
4958 if (set)
4959 cost += set_rtx_cost (set, speed);
4960 else
4961 cost++;
4964 return cost;
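/* Illustrative sketch (hypothetical helper, not part of the upstream
   file): costing a candidate expansion by emitting it into a detached
   sequence and summing the per-insn costs with seq_cost.  */

static unsigned
example_move_cost (rtx target, rtx src, bool speed)
{
  start_sequence ();
  emit_move_insn (target, src);
  rtx_insn *seq = get_insns ();
  end_sequence ();
  return seq_cost (seq, speed);
}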
4967 /* Given an insn INSN and condition COND, return the condition in a
4968 canonical form to simplify testing by callers. Specifically:
4970 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
4971 (2) Both operands will be machine operands; (cc0) will have been replaced.
4972 (3) If an operand is a constant, it will be the second operand.
4973 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
4974 for GE, GEU, and LEU.
4976 If the condition cannot be understood, or is an inequality floating-point
4977 comparison which needs to be reversed, 0 will be returned.
4979 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
4981 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4982 insn used in locating the condition was found. If a replacement test
4983 of the condition is desired, it should be placed in front of that
4984 insn and we will be sure that the inputs are still valid.
4986 If WANT_REG is nonzero, we wish the condition to be relative to that
4987 register, if possible. Therefore, do not canonicalize the condition
4988 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
4989 to be a compare to a CC mode register.
4991 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
4992 and at INSN. */
4994 rtx
4995 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
4996 rtx_insn **earliest,
4997 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
4999 enum rtx_code code;
5000 rtx_insn *prev = insn;
5001 const_rtx set;
5002 rtx tem;
5003 rtx op0, op1;
5004 int reverse_code = 0;
5005 machine_mode mode;
5006 basic_block bb = BLOCK_FOR_INSN (insn);
5008 code = GET_CODE (cond);
5009 mode = GET_MODE (cond);
5010 op0 = XEXP (cond, 0);
5011 op1 = XEXP (cond, 1);
5013 if (reverse)
5014 code = reversed_comparison_code (cond, insn);
5015 if (code == UNKNOWN)
5016 return 0;
5018 if (earliest)
5019 *earliest = insn;
5021 /* If we are comparing a register with zero, see if the register is set
5022 in the previous insn to a COMPARE or a comparison operation. Perform
5023 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
5024 in cse.c. */
5026 while ((GET_RTX_CLASS (code) == RTX_COMPARE
5027 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5028 && op1 == CONST0_RTX (GET_MODE (op0))
5029 && op0 != want_reg)
5031 /* Set nonzero when we find something of interest. */
5032 rtx x = 0;
5034 /* If comparison with cc0, import actual comparison from compare
5035 insn. */
5036 if (op0 == cc0_rtx)
5038 if ((prev = prev_nonnote_insn (prev)) == 0
5039 || !NONJUMP_INSN_P (prev)
5040 || (set = single_set (prev)) == 0
5041 || SET_DEST (set) != cc0_rtx)
5042 return 0;
5044 op0 = SET_SRC (set);
5045 op1 = CONST0_RTX (GET_MODE (op0));
5046 if (earliest)
5047 *earliest = prev;
5050 /* If this is a COMPARE, pick up the two things being compared. */
5051 if (GET_CODE (op0) == COMPARE)
5053 op1 = XEXP (op0, 1);
5054 op0 = XEXP (op0, 0);
5055 continue;
5057 else if (!REG_P (op0))
5058 break;
5060 /* Go back to the previous insn. Stop if it is not an INSN. We also
5061 stop if it isn't a single set or if it has a REG_INC note because
5062 we don't want to bother dealing with it. */
5064 prev = prev_nonnote_nondebug_insn (prev);
5066 if (prev == 0
5067 || !NONJUMP_INSN_P (prev)
5068 || FIND_REG_INC_NOTE (prev, NULL_RTX)
5069 /* In cfglayout mode, there do not have to be labels at the
5070 beginning of a block, or jumps at the end, so the previous
5071 conditions would not stop us when we reach bb boundary. */
5072 || BLOCK_FOR_INSN (prev) != bb)
5073 break;
5075 set = set_of (op0, prev);
5077 if (set
5078 && (GET_CODE (set) != SET
5079 || !rtx_equal_p (SET_DEST (set), op0)))
5080 break;
5082 /* If this is setting OP0, get what it sets it to if it looks
5083 relevant. */
5084 if (set)
5086 machine_mode inner_mode = GET_MODE (SET_DEST (set));
5087 #ifdef FLOAT_STORE_FLAG_VALUE
5088 REAL_VALUE_TYPE fsfv;
5089 #endif
5091 /* ??? We may not combine comparisons done in a CCmode with
5092 comparisons not done in a CCmode. This is to aid targets
5093 like Alpha that have an IEEE compliant EQ instruction, and
5094 a non-IEEE compliant BEQ instruction. The use of CCmode is
5095 actually artificial, simply to prevent the combination, but
5096 should not affect other platforms.
5098 However, we must allow VOIDmode comparisons to match either
5099 CCmode or non-CCmode comparison, because some ports have
5100 modeless comparisons inside branch patterns.
5102 ??? This mode check should perhaps look more like the mode check
5103 in simplify_comparison in combine. */
5104 if (((GET_MODE_CLASS (mode) == MODE_CC)
5105 != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5106 && mode != VOIDmode
5107 && inner_mode != VOIDmode)
5108 break;
5109 if (GET_CODE (SET_SRC (set)) == COMPARE
5110 || (((code == NE
5111 || (code == LT
5112 && val_signbit_known_set_p (inner_mode,
5113 STORE_FLAG_VALUE))
5114 #ifdef FLOAT_STORE_FLAG_VALUE
5115 || (code == LT
5116 && SCALAR_FLOAT_MODE_P (inner_mode)
5117 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5118 REAL_VALUE_NEGATIVE (fsfv)))
5119 #endif
5121 && COMPARISON_P (SET_SRC (set))))
5122 x = SET_SRC (set);
5123 else if (((code == EQ
5124 || (code == GE
5125 && val_signbit_known_set_p (inner_mode,
5126 STORE_FLAG_VALUE))
5127 #ifdef FLOAT_STORE_FLAG_VALUE
5128 || (code == GE
5129 && SCALAR_FLOAT_MODE_P (inner_mode)
5130 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5131 REAL_VALUE_NEGATIVE (fsfv)))
5132 #endif
5134 && COMPARISON_P (SET_SRC (set)))
5136 reverse_code = 1;
5137 x = SET_SRC (set);
5139 else if ((code == EQ || code == NE)
5140 && GET_CODE (SET_SRC (set)) == XOR)
5141 /* Handle sequences like:
5143 (set op0 (xor X Y))
5144 ...(eq|ne op0 (const_int 0))...
5146 in which case:
5148 (eq op0 (const_int 0)) reduces to (eq X Y)
5149 (ne op0 (const_int 0)) reduces to (ne X Y)
5151 This is the form used by MIPS16, for example. */
5152 x = SET_SRC (set);
5153 else
5154 break;
5157 else if (reg_set_p (op0, prev))
5158 /* If this sets OP0, but not directly, we have to give up. */
5159 break;
5161 if (x)
5163 /* If the caller is expecting the condition to be valid at INSN,
5164 make sure X doesn't change before INSN. */
5165 if (valid_at_insn_p)
5166 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5167 break;
5168 if (COMPARISON_P (x))
5169 code = GET_CODE (x);
5170 if (reverse_code)
5172 code = reversed_comparison_code (x, prev);
5173 if (code == UNKNOWN)
5174 return 0;
5175 reverse_code = 0;
5178 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5179 if (earliest)
5180 *earliest = prev;
5184 /* If constant is first, put it last. */
5185 if (CONSTANT_P (op0))
5186 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5188 /* If OP0 is the result of a comparison, we weren't able to find what
5189 was really being compared, so fail. */
5190 if (!allow_cc_mode
5191 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5192 return 0;
5194 /* Canonicalize any ordered comparison with integers involving equality
5195 if we can do computations in the relevant mode and we do not
5196 overflow. */
5198 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
5199 && CONST_INT_P (op1)
5200 && GET_MODE (op0) != VOIDmode
5201 && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
5203 HOST_WIDE_INT const_val = INTVAL (op1);
5204 unsigned HOST_WIDE_INT uconst_val = const_val;
5205 unsigned HOST_WIDE_INT max_val
5206 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
5208 switch (code)
5210 case LE:
5211 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5212 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
5213 break;
5215 /* When cross-compiling, const_val might be sign-extended from
5216 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
5217 case GE:
5218 if ((const_val & max_val)
5219 != ((unsigned HOST_WIDE_INT) 1
5220 << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
5221 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
5222 break;
5224 case LEU:
5225 if (uconst_val < max_val)
5226 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
5227 break;
5229 case GEU:
5230 if (uconst_val != 0)
5231 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
5232 break;
5234 default:
5235 break;
5239 /* Never return CC0; return zero instead. */
5240 if (CC0_P (op0))
5241 return 0;
5243 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
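/* Worked example of rule (4) above: (le x (const_int 4)) is returned
   as (lt x (const_int 5)), and (geu x (const_int 1)) as
   (gtu x (const_int 0)).  */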
5246 /* Given a jump insn JUMP, return the condition that will cause it to branch
5247 to its JUMP_LABEL. If the condition cannot be understood, or is an
5248 inequality floating-point comparison which needs to be reversed, 0 will
5249 be returned.
5251 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5252 insn used in locating the condition was found. If a replacement test
5253 of the condition is desired, it should be placed in front of that
5254 insn and we will be sure that the inputs are still valid. If EARLIEST
5255 is null, the returned condition will be valid at INSN.
5257 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5258 compare CC mode register.
5260 VALID_AT_INSN_P is the same as for canonicalize_condition. */
5262 rtx
5263 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
5264 int valid_at_insn_p)
5266 rtx cond;
5267 int reverse;
5268 rtx set;
5270 /* If this is not a standard conditional jump, we can't parse it. */
5271 if (!JUMP_P (jump)
5272 || ! any_condjump_p (jump))
5273 return 0;
5274 set = pc_set (jump);
5276 cond = XEXP (SET_SRC (set), 0);
5278 /* If this branches to JUMP_LABEL when the condition is false, reverse
5279 the condition. */
5280 reverse
5281 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5282 && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
5284 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5285 allow_cc_mode, valid_at_insn_p);
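/* Illustrative sketch (hypothetical helper, not part of the upstream
   file): a typical caller, testing whether JUMP branches on a
   comparison of something against zero.  */

static bool
example_branch_on_zero_p (rtx_insn *jump)
{
  rtx cond = get_condition (jump, NULL, 0, 1);
  return (cond
	  && (GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
	  && XEXP (cond, 1) == const0_rtx);
}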
5288 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5289 TARGET_MODE_REP_EXTENDED.
5291 Note that we assume that the property of
5292 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5293 narrower than mode B. I.e., if A is a mode narrower than B then in
5294 order to be able to operate on it in mode B, mode A needs to
5295 satisfy the requirements set by the representation of mode B. */
5297 static void
5298 init_num_sign_bit_copies_in_rep (void)
5300 machine_mode mode, in_mode;
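/* Note that the outer loop below deliberately steps IN_MODE via
   GET_MODE_WIDER_MODE (mode): the inner loop always exits with
   mode == in_mode, so this advances IN_MODE to the next wider mode.  */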
5302 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
5303 in_mode = GET_MODE_WIDER_MODE (mode))
5304 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
5305 mode = GET_MODE_WIDER_MODE (mode))
5307 machine_mode i;
5309 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5310 extends to the next widest mode. */
5311 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5312 || GET_MODE_WIDER_MODE (mode) == in_mode);
5314 /* We are in in_mode. Count how many bits outside of mode
5315 have to be copies of the sign-bit. */
5316 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
5318 machine_mode wider = GET_MODE_WIDER_MODE (i);
5320 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5321 /* We can only check sign-bit copies starting from the
5322 top-bit. In order to be able to check the bits we
5323 have already seen we pretend that subsequent bits
5324 have to be sign-bit copies too. */
5325 || num_sign_bit_copies_in_rep [in_mode][mode])
5326 num_sign_bit_copies_in_rep [in_mode][mode]
5327 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5332 /* Suppose that truncation from the machine mode of X to MODE is not a
5333 no-op. See if there is anything special about X so that we can
5334 assume it already contains a truncated value of MODE. */
5336 bool
5337 truncated_to_mode (machine_mode mode, const_rtx x)
5339 /* This register has already been used in MODE without explicit
5340 truncation. */
5341 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5342 return true;
5344 /* See if we already satisfy the requirements of MODE. If yes we
5345 can just switch to MODE. */
5346 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5347 && (num_sign_bit_copies (x, GET_MODE (x))
5348 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5349 return true;
5351 return false;
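/* Worked example (target-dependent): on a target whose
   TARGET_MODE_REP_EXTENDED hook sign-extends QImode values held in
   SImode registers, num_sign_bit_copies_in_rep[SImode][QImode] is 24,
   so the test above demands at least 25 sign bit copies before
   treating an SImode value as already truncated to QImode.  */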
5354 /* Return true if RTX code CODE has a single sequence of zero or more
5355 "e" operands and no rtvec operands. Initialize its rtx_all_subrtx_bounds
5356 entry in that case. */
5358 static bool
5359 setup_reg_subrtx_bounds (unsigned int code)
5361 const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
5362 unsigned int i = 0;
5363 for (; format[i] != 'e'; ++i)
5365 if (!format[i])
5366 /* No subrtxes. Leave start and count as 0. */
5367 return true;
5368 if (format[i] == 'E' || format[i] == 'V')
5369 return false;
5372 /* Record the sequence of 'e's. */
5373 rtx_all_subrtx_bounds[code].start = i;
5374 do
5375 ++i;
5376 while (format[i] == 'e');
5377 rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
5378 /* rtl-iter.h relies on this. */
5379 gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
5381 for (; format[i]; ++i)
5382 if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
5383 return false;
5385 return true;
5388 /* Initialize rtx_all_subrtx_bounds. */
5389 void
5390 init_rtlanal (void)
5392 int i;
5393 for (i = 0; i < NUM_RTX_CODE; i++)
5395 if (!setup_reg_subrtx_bounds (i))
5396 rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
5397 if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
5398 rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
5401 init_num_sign_bit_copies_in_rep ();
5404 /* Check whether this is a constant pool constant. */
5405 bool
5406 constant_pool_constant_p (rtx x)
5408 x = avoid_constant_pool_reference (x);
5409 return CONST_DOUBLE_P (x);
5412 /* If M is a bitmask that selects a field of low-order bits within an item but
5413 not the entire word, return the length of the field. Return -1 otherwise.
5414 M is used in machine mode MODE. */
5416 int
5417 low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
5419 if (mode != VOIDmode)
5421 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
5422 return -1;
5423 m &= GET_MODE_MASK (mode);
5426 return exact_log2 (m + 1);
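/* Worked examples: low_bitmask_len (SImode, 0xff) == 8 because
   0xff + 1 is an exact power of two; 0xf0 does not select low-order
   bits, so low_bitmask_len (SImode, 0xf0) == -1.  */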
5429 /* Return the mode of MEM's address. */
5431 machine_mode
5432 get_address_mode (rtx mem)
5434 machine_mode mode;
5436 gcc_assert (MEM_P (mem));
5437 mode = GET_MODE (XEXP (mem, 0));
5438 if (mode != VOIDmode)
5439 return mode;
5440 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5443 /* Split up a CONST_DOUBLE or integer constant rtx
5444 into two rtx's for single words,
5445 storing in *FIRST the word that comes first in memory in the target
5446 and in *SECOND the other.
5448 TODO: This function needs to be rewritten to work on any size
5449 integer. */
5451 void
5452 split_double (rtx value, rtx *first, rtx *second)
5454 if (CONST_INT_P (value))
5456 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5458 /* In this case the CONST_INT holds both target words.
5459 Extract the bits from it into two word-sized pieces.
5460 Sign extend each half to HOST_WIDE_INT. */
5461 unsigned HOST_WIDE_INT low, high;
5462 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5463 unsigned bits_per_word = BITS_PER_WORD;
5465 /* Set sign_bit to the most significant bit of a word. */
5466 sign_bit = 1;
5467 sign_bit <<= bits_per_word - 1;
5469 /* Set mask so that all bits of the word are set. We could
5470 have used 1 << BITS_PER_WORD instead of basing the
5471 calculation on sign_bit. However, on machines where
5472 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5473 compiler warning, even though the code would never be
5474 executed. */
5475 mask = sign_bit << 1;
5476 mask--;
5478 /* Set sign_extend as any remaining bits. */
5479 sign_extend = ~mask;
5481 /* Pick the lower word and sign-extend it. */
5482 low = INTVAL (value);
5483 low &= mask;
5484 if (low & sign_bit)
5485 low |= sign_extend;
5487 /* Pick the higher word, shifted to the least significant
5488 bits, and sign-extend it. */
5489 high = INTVAL (value);
5490 high >>= bits_per_word - 1;
5491 high >>= 1;
5492 high &= mask;
5493 if (high & sign_bit)
5494 high |= sign_extend;
5496 /* Store the words in the target machine order. */
5497 if (WORDS_BIG_ENDIAN)
5499 *first = GEN_INT (high);
5500 *second = GEN_INT (low);
5502 else
5504 *first = GEN_INT (low);
5505 *second = GEN_INT (high);
5508 else
5510 /* The rule for using CONST_INT for a wider mode
5511 is that we regard the value as signed.
5512 So sign-extend it. */
5513 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
5514 if (WORDS_BIG_ENDIAN)
5516 *first = high;
5517 *second = value;
5519 else
5521 *first = value;
5522 *second = high;
5526 else if (GET_CODE (value) == CONST_WIDE_INT)
5528 /* All of this is scary code and needs to be converted to
5529 properly work with any size integer. */
5530 gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
5531 if (WORDS_BIG_ENDIAN)
5533 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5534 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5536 else
5538 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5539 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5542 else if (!CONST_DOUBLE_P (value))
5544 if (WORDS_BIG_ENDIAN)
5546 *first = const0_rtx;
5547 *second = value;
5549 else
5551 *first = value;
5552 *second = const0_rtx;
5555 else if (GET_MODE (value) == VOIDmode
5556 /* This is the old way we did CONST_DOUBLE integers. */
5557 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
5559 /* In an integer, the words are defined as most and least significant.
5560 So order them by the target's convention. */
5561 if (WORDS_BIG_ENDIAN)
5563 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
5564 *second = GEN_INT (CONST_DOUBLE_LOW (value));
5566 else
5568 *first = GEN_INT (CONST_DOUBLE_LOW (value));
5569 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
5572 else
5574 REAL_VALUE_TYPE r;
5575 long l[2];
5576 REAL_VALUE_FROM_CONST_DOUBLE (r, value);
5578 /* Note, this converts the REAL_VALUE_TYPE to the target's
5579 format, splits up the floating point double and outputs
5580 exactly 32 bits of it into each of l[0] and l[1] --
5581 not necessarily BITS_PER_WORD bits. */
5582 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
5584 /* If 32 bits is an entire word for the target, but not for the host,
5585 then sign-extend on the host so that the number will look the same
5586 way on the host that it would on the target. See for instance
5587 simplify_unary_operation. The #if is needed to avoid compiler
5588 warnings. */
5590 #if HOST_BITS_PER_LONG > 32
5591 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
5593 if (l[0] & ((long) 1 << 31))
5594 l[0] |= ((long) (-1) << 32);
5595 if (l[1] & ((long) 1 << 31))
5596 l[1] |= ((long) (-1) << 32);
5598 #endif
5600 *first = GEN_INT (l[0]);
5601 *second = GEN_INT (l[1]);
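/* Illustrative sketch (hypothetical helper, not part of the upstream
   file): recover the least significant word of a double-word constant,
   undoing the target memory ordering applied above.  */

static rtx
example_low_word (rtx value)
{
  rtx first, second;
  split_double (value, &first, &second);
  return WORDS_BIG_ENDIAN ? second : first;
}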
5605 /* Return true if X is a sign_extract or zero_extract from the least
5606 significant bit. */
5608 static bool
5609 lsb_bitfield_op_p (rtx x)
5611 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
5613 machine_mode mode = GET_MODE (XEXP (x, 0));
5614 HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
5615 HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
5617 return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
5619 return false;
5622 /* Strip outer address "mutations" from LOC and return a pointer to the
5623 inner value. If OUTER_CODE is nonnull, store the code of the innermost
5624 stripped expression there.
5626 "Mutations" either convert between modes or apply some kind of
5627 extension, truncation or alignment. */
5629 rtx *
5630 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
5632 for (;;)
5634 enum rtx_code code = GET_CODE (*loc);
5635 if (GET_RTX_CLASS (code) == RTX_UNARY)
5636 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
5637 used to convert between pointer sizes. */
5638 loc = &XEXP (*loc, 0);
5639 else if (lsb_bitfield_op_p (*loc))
5640 /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
5641 acts as a combined truncation and extension. */
5642 loc = &XEXP (*loc, 0);
5643 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
5644 /* (and ... (const_int -X)) is used to align to X bytes. */
5645 loc = &XEXP (*loc, 0);
5646 else if (code == SUBREG
5647 && !OBJECT_P (SUBREG_REG (*loc))
5648 && subreg_lowpart_p (*loc))
5649 /* A lowpart (subreg (operator ...) ...) is used for mode
5650 conversion too. */
5651 loc = &SUBREG_REG (*loc);
5652 else
5653 return loc;
5654 if (outer_code)
5655 *outer_code = code;
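/* Worked example: for
   (and:DI (zero_extend:DI (reg:SI r)) (const_int -4))
   the loop strips the alignment AND, then the ZERO_EXTEND, and returns
   a pointer to (reg:SI r) with *OUTER_CODE set to ZERO_EXTEND.  */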
5659 /* Return true if CODE applies some kind of scale. The scaled value
5660 is the first operand and the scale is the second. */
5662 static bool
5663 binary_scale_code_p (enum rtx_code code)
5665 return (code == MULT
5666 || code == ASHIFT
5667 /* Needed by ARM targets. */
5668 || code == ASHIFTRT
5669 || code == LSHIFTRT
5670 || code == ROTATE
5671 || code == ROTATERT);
5674 /* If *INNER can be interpreted as a base, return a pointer to the inner term
5675 (see address_info). Return null otherwise. */
5677 static rtx *
5678 get_base_term (rtx *inner)
5680 if (GET_CODE (*inner) == LO_SUM)
5681 inner = strip_address_mutations (&XEXP (*inner, 0));
5682 if (REG_P (*inner)
5683 || MEM_P (*inner)
5684 || GET_CODE (*inner) == SUBREG
5685 || GET_CODE (*inner) == SCRATCH)
5686 return inner;
5687 return 0;
5690 /* If *INNER can be interpreted as an index, return a pointer to the inner term
5691 (see address_info). Return null otherwise. */
5693 static rtx *
5694 get_index_term (rtx *inner)
5696 /* At present, only constant scales are allowed. */
5697 if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
5698 inner = strip_address_mutations (&XEXP (*inner, 0));
5699 if (REG_P (*inner)
5700 || MEM_P (*inner)
5701 || GET_CODE (*inner) == SUBREG
5702 || GET_CODE (*inner) == SCRATCH)
5703 return inner;
5704 return 0;
5707 /* Set the segment part of address INFO to LOC, given that INNER is the
5708 unmutated value. */
5710 static void
5711 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
5713 gcc_assert (!info->segment);
5714 info->segment = loc;
5715 info->segment_term = inner;
5718 /* Set the base part of address INFO to LOC, given that INNER is the
5719 unmutated value. */
5721 static void
5722 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
5724 gcc_assert (!info->base);
5725 info->base = loc;
5726 info->base_term = inner;
5729 /* Set the index part of address INFO to LOC, given that INNER is the
5730 unmutated value. */
5732 static void
5733 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
5735 gcc_assert (!info->index);
5736 info->index = loc;
5737 info->index_term = inner;
5740 /* Set the displacement part of address INFO to LOC, given that INNER
5741 is the constant term. */
5743 static void
5744 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
5746 gcc_assert (!info->disp);
5747 info->disp = loc;
5748 info->disp_term = inner;
5751 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
5752 rest of INFO accordingly. */
5754 static void
5755 decompose_incdec_address (struct address_info *info)
5757 info->autoinc_p = true;
5759 rtx *base = &XEXP (*info->inner, 0);
5760 set_address_base (info, base, base);
5761 gcc_checking_assert (info->base == info->base_term);
5763 /* These addresses are only valid when the size of the addressed
5764 value is known. */
5765 gcc_checking_assert (info->mode != VOIDmode);
5768 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
5769 of INFO accordingly. */
5771 static void
5772 decompose_automod_address (struct address_info *info)
5774 info->autoinc_p = true;
5776 rtx *base = &XEXP (*info->inner, 0);
5777 set_address_base (info, base, base);
5778 gcc_checking_assert (info->base == info->base_term);
5780 rtx plus = XEXP (*info->inner, 1);
5781 gcc_assert (GET_CODE (plus) == PLUS);
5783 info->base_term2 = &XEXP (plus, 0);
5784 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
5786 rtx *step = &XEXP (plus, 1);
5787 rtx *inner_step = strip_address_mutations (step);
5788 if (CONSTANT_P (*inner_step))
5789 set_address_disp (info, step, inner_step);
5790 else
5791 set_address_index (info, step, inner_step);
5794 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
5795 values in [PTR, END). Return a pointer to the end of the used array. */
5797 static rtx **
5798 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
5800 rtx x = *loc;
5801 if (GET_CODE (x) == PLUS)
5803 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
5804 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
5806 else
5808 gcc_assert (ptr != end);
5809 *ptr++ = loc;
5811 return ptr;
5814 /* Evaluate the likelihood of X being a base or index value, returning
5815 positive if it is likely to be a base, negative if it is likely to be
5816 an index, and 0 if we can't tell. Make the magnitude of the return
5817 value reflect the amount of confidence we have in the answer.
5819 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
5821 static int
5822 baseness (rtx x, machine_mode mode, addr_space_t as,
5823 enum rtx_code outer_code, enum rtx_code index_code)
5825 /* Believe *_POINTER unless the address shape requires otherwise. */
5826 if (REG_P (x) && REG_POINTER (x))
5827 return 2;
5828 if (MEM_P (x) && MEM_POINTER (x))
5829 return 2;
5831 if (REG_P (x) && HARD_REGISTER_P (x))
5833 /* X is a hard register. If it only fits one of the base
5834 or index classes, choose that interpretation. */
5835 int regno = REGNO (x);
5836 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
5837 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
5838 if (base_p != index_p)
5839 return base_p ? 1 : -1;
5841 return 0;
5844 /* INFO->INNER describes a normal, non-automodified address.
5845 Fill in the rest of INFO accordingly. */
5847 static void
5848 decompose_normal_address (struct address_info *info)
5850 /* Treat the address as the sum of up to four values. */
5851 rtx *ops[4];
5852 size_t n_ops = extract_plus_operands (info->inner, ops,
5853 ops + ARRAY_SIZE (ops)) - ops;
5855 /* If there is more than one component, any base component is in a PLUS. */
5856 if (n_ops > 1)
5857 info->base_outer_code = PLUS;
5859 /* Try to classify each sum operand now. Leave those that could be
5860 either a base or an index in OPS. */
5861 rtx *inner_ops[4];
5862 size_t out = 0;
5863 for (size_t in = 0; in < n_ops; ++in)
5865 rtx *loc = ops[in];
5866 rtx *inner = strip_address_mutations (loc);
5867 if (CONSTANT_P (*inner))
5868 set_address_disp (info, loc, inner);
5869 else if (GET_CODE (*inner) == UNSPEC)
5870 set_address_segment (info, loc, inner);
5871 else
5873 /* The only other possibilities are a base or an index. */
5874 rtx *base_term = get_base_term (inner);
5875 rtx *index_term = get_index_term (inner);
5876 gcc_assert (base_term || index_term);
5877 if (!base_term)
5878 set_address_index (info, loc, index_term);
5879 else if (!index_term)
5880 set_address_base (info, loc, base_term);
5881 else
5883 gcc_assert (base_term == index_term);
5884 ops[out] = loc;
5885 inner_ops[out] = base_term;
5886 ++out;
5891 /* Classify the remaining OPS members as bases and indexes. */
5892 if (out == 1)
5894 /* If we haven't seen a base or an index yet, assume that this is
5895 the base. If we were confident that another term was the base
5896 or index, treat the remaining operand as the other kind. */
5897 if (!info->base)
5898 set_address_base (info, ops[0], inner_ops[0]);
5899 else
5900 set_address_index (info, ops[0], inner_ops[0]);
5902 else if (out == 2)
5904 /* In the event of a tie, assume the base comes first. */
5905 if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
5906 GET_CODE (*ops[1]))
5907 >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
5908 GET_CODE (*ops[0])))
5910 set_address_base (info, ops[0], inner_ops[0]);
5911 set_address_index (info, ops[1], inner_ops[1]);
5913 else
5915 set_address_base (info, ops[1], inner_ops[1]);
5916 set_address_index (info, ops[0], inner_ops[0]);
5919 else
5920 gcc_assert (out == 0);
5923 /* Describe address *LOC in *INFO. MODE is the mode of the addressed value,
5924 or VOIDmode if not known. AS is the address space associated with LOC.
5925 OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise. */
5927 void
5928 decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
5929 addr_space_t as, enum rtx_code outer_code)
5931 memset (info, 0, sizeof (*info));
5932 info->mode = mode;
5933 info->as = as;
5934 info->addr_outer_code = outer_code;
5935 info->outer = loc;
5936 info->inner = strip_address_mutations (loc, &outer_code);
5937 info->base_outer_code = outer_code;
5938 switch (GET_CODE (*info->inner))
5940 case PRE_DEC:
5941 case PRE_INC:
5942 case POST_DEC:
5943 case POST_INC:
5944 decompose_incdec_address (info);
5945 break;
5947 case PRE_MODIFY:
5948 case POST_MODIFY:
5949 decompose_automod_address (info);
5950 break;
5952 default:
5953 decompose_normal_address (info);
5954 break;
5958 /* Describe address operand LOC in INFO. */
5960 void
5961 decompose_lea_address (struct address_info *info, rtx *loc)
5963 decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
5966 /* Describe the address of MEM X in INFO. */
5968 void
5969 decompose_mem_address (struct address_info *info, rtx x)
5971 gcc_assert (MEM_P (x));
5972 decompose_address (info, &XEXP (x, 0), GET_MODE (x),
5973 MEM_ADDR_SPACE (x), MEM);
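/* Illustrative sketch (hypothetical helper, not part of the upstream
   file): recognize a plain base-register-plus-constant memory
   reference via decompose_mem_address.  */

static bool
example_base_disp_mem_p (rtx x)
{
  struct address_info info;
  if (!MEM_P (x))
    return false;
  decompose_mem_address (&info, x);
  return (info.base
	  && REG_P (*info.base_term)
	  && info.disp
	  && CONST_INT_P (*info.disp_term)
	  && !info.index
	  && !info.segment
	  && !info.autoinc_p);
}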
5976 /* Update INFO after a change to the address it describes. */
5978 void
5979 update_address (struct address_info *info)
5981 decompose_address (info, info->outer, info->mode, info->as,
5982 info->addr_outer_code);
5985 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
5986 more complicated than that. */
5988 HOST_WIDE_INT
5989 get_index_scale (const struct address_info *info)
5991 rtx index = *info->index;
5992 if (GET_CODE (index) == MULT
5993 && CONST_INT_P (XEXP (index, 1))
5994 && info->index_term == &XEXP (index, 0))
5995 return INTVAL (XEXP (index, 1));
5997 if (GET_CODE (index) == ASHIFT
5998 && CONST_INT_P (XEXP (index, 1))
5999 && info->index_term == &XEXP (index, 0))
6000 return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));
6002 if (info->index == info->index_term)
6003 return 1;
6005 return 0;
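/* Worked examples: an index of (mult (reg r) (const_int 4)) has
   scale 4; (ashift (reg r) (const_int 2)) has scale 1 << 2 == 4;
   a bare (reg r) index has scale 1.  */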
6008 /* Return the "index code" of INFO, in the form required by
6009 ok_for_base_p_1. */
6011 enum rtx_code
6012 get_index_code (const struct address_info *info)
6014 if (info->index)
6015 return GET_CODE (*info->index);
6017 if (info->disp)
6018 return GET_CODE (*info->disp);
6020 return SCRATCH;
6023 /* Return true if X contains a thread-local symbol. */
6025 bool
6026 tls_referenced_p (const_rtx x)
6028 if (!targetm.have_tls)
6029 return false;
6031 subrtx_iterator::array_type array;
6032 FOR_EACH_SUBRTX (iter, array, x, ALL)
6033 if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
6034 return true;
6035 return false;
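/* Illustrative sketch (hypothetical helper, not part of the upstream
   file): the same FOR_EACH_SUBRTX pattern as tls_referenced_p above,
   here testing whether X contains any memory reference.  */

static bool
example_contains_mem_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (MEM_P (*iter))
      return true;
  return false;
}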