1 /* Analyze RTL for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "diagnostic-core.h"
26 #include "hard-reg-set.h"
27 #include "rtl.h"
28 #include "insn-config.h"
29 #include "recog.h"
30 #include "target.h"
31 #include "output.h"
32 #include "tm_p.h"
33 #include "flags.h"
34 #include "regs.h"
35 #include "input.h"
36 #include "function.h"
37 #include "predict.h"
38 #include "basic-block.h"
39 #include "df.h"
40 #include "symtab.h"
41 #include "tree.h"
42 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
43 #include "addresses.h"
44 #include "rtl-iter.h"
46 /* Forward declarations */
47 static void set_of_1 (rtx, const_rtx, void *);
48 static bool covers_regno_p (const_rtx, unsigned int);
49 static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
50 static int computed_jump_p_1 (const_rtx);
51 static void parms_set (rtx, const_rtx, void *);
53 static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, machine_mode,
54 const_rtx, machine_mode,
55 unsigned HOST_WIDE_INT);
56 static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, machine_mode,
57 const_rtx, machine_mode,
58 unsigned HOST_WIDE_INT);
59 static unsigned int cached_num_sign_bit_copies (const_rtx, machine_mode, const_rtx,
60 machine_mode,
61 unsigned int);
62 static unsigned int num_sign_bit_copies1 (const_rtx, machine_mode, const_rtx,
63 machine_mode, unsigned int);
65 rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
66 rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
68 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
69 If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
70 SIGN_EXTEND then while narrowing we also have to enforce the
71 representation and sign-extend the value to mode DESTINATION_REP.
73 If the value is already sign-extended to DESTINATION_REP mode we
74 can just switch to DESTINATION mode on it. For each pair of
75 integral modes SOURCE and DESTINATION, when truncating from SOURCE
76 to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
77 contains the number of high-order bits in SOURCE that have to be
78 copies of the sign-bit so that we can do this mode-switch to
79 DESTINATION. */
81 static unsigned int
82 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
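/* For illustration (hypothetical target, not asserted of any real port): if
   TARGET_MODE_REP_EXTENDED (SImode, DImode) were SIGN_EXTEND, then
   num_sign_bit_copies_in_rep[DImode][SImode] would record how many
   high-order bits of a DImode value must already be copies of the sign
   bit before truncating it to SImode can be done as a simple mode switch.  */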
84 /* Store X into index I of ARRAY. ARRAY is known to have at least I
85 elements. Return the new base of ARRAY. */
87 template <typename T>
88 typename T::value_type *
89 generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
90 value_type *base,
91 size_t i, value_type x)
93 if (base == array.stack)
95 if (i < LOCAL_ELEMS)
97 base[i] = x;
98 return base;
100 gcc_checking_assert (i == LOCAL_ELEMS);
101 /* A previous iteration might also have moved from the stack to the
102 heap, in which case the heap array will already be big enough. */
103 if (vec_safe_length (array.heap) <= i)
104 vec_safe_grow (array.heap, i + 1);
105 base = array.heap->address ();
106 memcpy (base, array.stack, sizeof (array.stack));
107 base[LOCAL_ELEMS] = x;
108 return base;
110 unsigned int length = array.heap->length ();
111 if (length > i)
113 gcc_checking_assert (base == array.heap->address ());
114 base[i] = x;
115 return base;
117 else
119 gcc_checking_assert (i == length);
120 vec_safe_push (array.heap, x);
121 return array.heap->address ();
125 /* Add the subrtxes of X to worklist ARRAY, starting at END. Return the
126 number of elements added to the worklist. */
128 template <typename T>
129 size_t
130 generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
131 value_type *base,
132 size_t end, rtx_type x)
134 enum rtx_code code = GET_CODE (x);
135 const char *format = GET_RTX_FORMAT (code);
136 size_t orig_end = end;
137 if (__builtin_expect (INSN_P (x), false))
139 /* Put the pattern at the top of the queue, since that's what
140 we're likely to want most. It also allows for the SEQUENCE
141 code below. */
142 for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
143 if (format[i] == 'e')
145 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
146 if (__builtin_expect (end < LOCAL_ELEMS, true))
147 base[end++] = subx;
148 else
149 base = add_single_to_queue (array, base, end++, subx);
152 else
153 for (int i = 0; format[i]; ++i)
154 if (format[i] == 'e')
156 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
157 if (__builtin_expect (end < LOCAL_ELEMS, true))
158 base[end++] = subx;
159 else
160 base = add_single_to_queue (array, base, end++, subx);
162 else if (format[i] == 'E')
164 unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
165 rtx *vec = x->u.fld[i].rt_rtvec->elem;
166 if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
167 for (unsigned int j = 0; j < length; j++)
168 base[end++] = T::get_value (vec[j]);
169 else
170 for (unsigned int j = 0; j < length; j++)
171 base = add_single_to_queue (array, base, end++,
172 T::get_value (vec[j]));
173 if (code == SEQUENCE && end == length)
174 /* If the subrtxes of the sequence fill the entire array then
175 we know that no other parts of a containing insn are queued.
176 The caller is therefore iterating over the sequence as a
177 PATTERN (...), so we also want the patterns of the
178 subinstructions. */
179 for (unsigned int j = 0; j < length; j++)
181 typename T::rtx_type x = T::get_rtx (base[j]);
182 if (INSN_P (x))
183 base[j] = T::get_value (PATTERN (x));
186 return end - orig_end;
189 template <typename T>
190 void
191 generic_subrtx_iterator <T>::free_array (array_type &array)
193 vec_free (array.heap);
196 template <typename T>
197 const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;
199 template class generic_subrtx_iterator <const_rtx_accessor>;
200 template class generic_subrtx_iterator <rtx_var_accessor>;
201 template class generic_subrtx_iterator <rtx_ptr_accessor>;
203 /* Return 1 if the value of X is unstable
204 (would be different at a different point in the program).
205 The frame pointer, arg pointer, etc. are considered stable
206 (within one function) and so is anything marked `unchanging'. */
209 rtx_unstable_p (const_rtx x)
211 const RTX_CODE code = GET_CODE (x);
212 int i;
213 const char *fmt;
215 switch (code)
217 case MEM:
218 return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
220 case CONST:
221 CASE_CONST_ANY:
222 case SYMBOL_REF:
223 case LABEL_REF:
224 return 0;
226 case REG:
227 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
228 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
229 /* The arg pointer varies if it is not a fixed register. */
230 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
231 return 0;
232 /* ??? When call-clobbered, the value is stable modulo the restore
233 that must happen after a call. This currently screws up local-alloc
234 into believing that the restore is not needed. */
235 if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
236 return 0;
237 return 1;
239 case ASM_OPERANDS:
240 if (MEM_VOLATILE_P (x))
241 return 1;
243 /* Fall through. */
245 default:
246 break;
249 fmt = GET_RTX_FORMAT (code);
250 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
251 if (fmt[i] == 'e')
253 if (rtx_unstable_p (XEXP (x, i)))
254 return 1;
256 else if (fmt[i] == 'E')
258 int j;
259 for (j = 0; j < XVECLEN (x, i); j++)
260 if (rtx_unstable_p (XVECEXP (x, i, j)))
261 return 1;
264 return 0;
267 /* Return 1 if X has a value that can vary even between two
268 executions of the program. 0 means X can be compared reliably
269 against certain constants or near-constants.
270 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
271 zero, we are slightly more conservative.
272 The frame pointer and the arg pointer are considered constant. */
274 bool
275 rtx_varies_p (const_rtx x, bool for_alias)
277 RTX_CODE code;
278 int i;
279 const char *fmt;
281 if (!x)
282 return 0;
284 code = GET_CODE (x);
285 switch (code)
287 case MEM:
288 return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
290 case CONST:
291 CASE_CONST_ANY:
292 case SYMBOL_REF:
293 case LABEL_REF:
294 return 0;
296 case REG:
297 /* Note that we have to test for the actual rtx used for the frame
298 and arg pointers and not just the register number in case we have
299 eliminated the frame and/or arg pointer and are using it
300 for pseudos. */
301 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
302 /* The arg pointer varies if it is not a fixed register. */
303 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
304 return 0;
305 if (x == pic_offset_table_rtx
306 /* ??? When call-clobbered, the value is stable modulo the restore
307 that must happen after a call. This currently screws up
308 local-alloc into believing that the restore is not needed, so we
309 must return 0 only if we are called from alias analysis. */
310 && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
311 return 0;
312 return 1;
314 case LO_SUM:
315 /* The operand 0 of a LO_SUM is considered constant
316 (in fact it is related specifically to operand 1)
317 during alias analysis. */
318 return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
319 || rtx_varies_p (XEXP (x, 1), for_alias);
321 case ASM_OPERANDS:
322 if (MEM_VOLATILE_P (x))
323 return 1;
325 /* Fall through. */
327 default:
328 break;
331 fmt = GET_RTX_FORMAT (code);
332 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
333 if (fmt[i] == 'e')
335 if (rtx_varies_p (XEXP (x, i), for_alias))
336 return 1;
338 else if (fmt[i] == 'E')
340 int j;
341 for (j = 0; j < XVECLEN (x, i); j++)
342 if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
343 return 1;
346 return 0;
349 /* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
350 bytes can cause a trap. MODE is the mode of the MEM (not that of X) and
351 UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
352 references on strict alignment machines. */
354 static int
355 rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
356 machine_mode mode, bool unaligned_mems)
358 enum rtx_code code = GET_CODE (x);
360 /* The offset must be a multiple of the mode size if we are considering
361 unaligned memory references on strict alignment machines. */
362 if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
364 HOST_WIDE_INT actual_offset = offset;
366 #ifdef SPARC_STACK_BOUNDARY_HACK
367 /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
368 the real alignment of %sp. However, when it does this, the
369 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
370 if (SPARC_STACK_BOUNDARY_HACK
371 && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
372 actual_offset -= STACK_POINTER_OFFSET;
373 #endif
375 if (actual_offset % GET_MODE_SIZE (mode) != 0)
376 return 1;
379 switch (code)
381 case SYMBOL_REF:
382 if (SYMBOL_REF_WEAK (x))
383 return 1;
384 if (!CONSTANT_POOL_ADDRESS_P (x))
386 tree decl;
387 HOST_WIDE_INT decl_size;
389 if (offset < 0)
390 return 1;
391 if (size == 0)
392 size = GET_MODE_SIZE (mode);
393 if (size == 0)
394 return offset != 0;
396 /* If the size of the access or of the symbol is unknown,
397 assume the worst. */
398 decl = SYMBOL_REF_DECL (x);
400 /* Else check that the access is in bounds. TODO: restructure
401 expr_size/tree_expr_size/int_expr_size and just use the latter. */
402 if (!decl)
403 decl_size = -1;
404 else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
405 decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
406 ? tree_to_shwi (DECL_SIZE_UNIT (decl))
407 : -1);
408 else if (TREE_CODE (decl) == STRING_CST)
409 decl_size = TREE_STRING_LENGTH (decl);
410 else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
411 decl_size = int_size_in_bytes (TREE_TYPE (decl));
412 else
413 decl_size = -1;
415 return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
418 return 0;
420 case LABEL_REF:
421 return 0;
423 case REG:
424 /* Stack references are assumed not to trap, but we need to deal with
425 nonsensical offsets. */
426 if (x == frame_pointer_rtx)
428 HOST_WIDE_INT adj_offset = offset - STARTING_FRAME_OFFSET;
429 if (size == 0)
430 size = GET_MODE_SIZE (mode);
431 if (FRAME_GROWS_DOWNWARD)
433 if (adj_offset < frame_offset || adj_offset + size - 1 >= 0)
434 return 1;
436 else
438 if (adj_offset < 0 || adj_offset + size - 1 >= frame_offset)
439 return 1;
441 return 0;
443 /* ??? Need to add a similar guard for nonsensical offsets. */
444 if (x == hard_frame_pointer_rtx
445 || x == stack_pointer_rtx
446 /* The arg pointer varies if it is not a fixed register. */
447 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
448 return 0;
449 /* All of the virtual frame registers are stack references. */
450 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
451 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
452 return 0;
453 return 1;
455 case CONST:
456 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
457 mode, unaligned_mems);
459 case PLUS:
460 /* An address is assumed not to trap if:
461 - it is the pic register plus a constant. */
462 if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
463 return 0;
465 /* - or it is an address that can't trap plus a constant integer. */
466 if (CONST_INT_P (XEXP (x, 1))
467 && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
468 size, mode, unaligned_mems))
469 return 0;
471 return 1;
473 case LO_SUM:
474 case PRE_MODIFY:
475 return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
476 mode, unaligned_mems);
478 case PRE_DEC:
479 case PRE_INC:
480 case POST_DEC:
481 case POST_INC:
482 case POST_MODIFY:
483 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
484 mode, unaligned_mems);
486 default:
487 break;
490 /* If it isn't one of the cases above, it can cause a trap. */
491 return 1;
494 /* Return nonzero if the use of X as an address in a MEM can cause a trap. */
497 rtx_addr_can_trap_p (const_rtx x)
499 return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
502 /* Return true if X is an address that is known to not be zero. */
504 bool
505 nonzero_address_p (const_rtx x)
507 const enum rtx_code code = GET_CODE (x);
509 switch (code)
511 case SYMBOL_REF:
512 return !SYMBOL_REF_WEAK (x);
514 case LABEL_REF:
515 return true;
517 case REG:
518 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
519 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
520 || x == stack_pointer_rtx
521 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
522 return true;
523 /* All of the virtual frame registers are stack references. */
524 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
525 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
526 return true;
527 return false;
529 case CONST:
530 return nonzero_address_p (XEXP (x, 0));
532 case PLUS:
533 /* Handle PIC references. */
534 if (XEXP (x, 0) == pic_offset_table_rtx
535 && CONSTANT_P (XEXP (x, 1)))
536 return true;
537 return false;
539 case PRE_MODIFY:
540 /* Similar to the above; allow positive offsets. Further, since
541 auto-inc is only allowed in memories, the register must be a
542 pointer. */
543 if (CONST_INT_P (XEXP (x, 1))
544 && INTVAL (XEXP (x, 1)) > 0)
545 return true;
546 return nonzero_address_p (XEXP (x, 0));
548 case PRE_INC:
549 /* Similarly. Further, the offset is always positive. */
550 return true;
552 case PRE_DEC:
553 case POST_DEC:
554 case POST_INC:
555 case POST_MODIFY:
556 return nonzero_address_p (XEXP (x, 0));
558 case LO_SUM:
559 return nonzero_address_p (XEXP (x, 1));
561 default:
562 break;
565 /* If it isn't one of the cases above, it might be zero. */
566 return false;
569 /* Return 1 if X refers to a memory location whose address
570 cannot be compared reliably with constant addresses,
571 or if X refers to a BLKmode memory object.
572 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
573 zero, we are slightly more conservative. */
575 bool
576 rtx_addr_varies_p (const_rtx x, bool for_alias)
578 enum rtx_code code;
579 int i;
580 const char *fmt;
582 if (x == 0)
583 return 0;
585 code = GET_CODE (x);
586 if (code == MEM)
587 return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
589 fmt = GET_RTX_FORMAT (code);
590 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
591 if (fmt[i] == 'e')
593 if (rtx_addr_varies_p (XEXP (x, i), for_alias))
594 return 1;
596 else if (fmt[i] == 'E')
598 int j;
599 for (j = 0; j < XVECLEN (x, i); j++)
600 if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
601 return 1;
603 return 0;
606 /* Return the CALL in X if there is one. */
609 get_call_rtx_from (rtx x)
611 if (INSN_P (x))
612 x = PATTERN (x);
613 if (GET_CODE (x) == PARALLEL)
614 x = XVECEXP (x, 0, 0);
615 if (GET_CODE (x) == SET)
616 x = SET_SRC (x);
617 if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
618 return x;
619 return NULL_RTX;
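/* For example (illustrative only), a call insn whose pattern is

     (set (reg:SI 0) (call (mem:QI (symbol_ref ("f"))) (const_int 0)))

   or a bare (call ...) pattern, possibly wrapped as the first element of
   a PARALLEL, yields the inner (call ...) rtx; other shapes yield
   NULL_RTX.  */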
622 /* Return the value of the integer term in X, if one is apparent;
623 otherwise return 0.
624 Only obvious integer terms are detected.
625 This is used in cse.c with the `related_value' field. */
627 HOST_WIDE_INT
628 get_integer_term (const_rtx x)
630 if (GET_CODE (x) == CONST)
631 x = XEXP (x, 0);
633 if (GET_CODE (x) == MINUS
634 && CONST_INT_P (XEXP (x, 1)))
635 return - INTVAL (XEXP (x, 1));
636 if (GET_CODE (x) == PLUS
637 && CONST_INT_P (XEXP (x, 1)))
638 return INTVAL (XEXP (x, 1));
639 return 0;
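/* For example (illustrative only):

     (const (plus (symbol_ref ("x")) (const_int 12)))   =>  12
     (const (minus (symbol_ref ("x")) (const_int 4)))   =>  -4
     (symbol_ref ("x"))                                  =>   0  */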
642 /* If X is a constant, return the value sans apparent integer term;
643 otherwise return 0.
644 Only obvious integer terms are detected. */
647 get_related_value (const_rtx x)
649 if (GET_CODE (x) != CONST)
650 return 0;
651 x = XEXP (x, 0);
652 if (GET_CODE (x) == PLUS
653 && CONST_INT_P (XEXP (x, 1)))
654 return XEXP (x, 0);
655 else if (GET_CODE (x) == MINUS
656 && CONST_INT_P (XEXP (x, 1)))
657 return XEXP (x, 0);
658 return 0;
661 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
662 to somewhere in the same object or object_block as SYMBOL. */
664 bool
665 offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
667 tree decl;
669 if (GET_CODE (symbol) != SYMBOL_REF)
670 return false;
672 if (offset == 0)
673 return true;
675 if (offset > 0)
677 if (CONSTANT_POOL_ADDRESS_P (symbol)
678 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
679 return true;
681 decl = SYMBOL_REF_DECL (symbol);
682 if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
683 return true;
686 if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
687 && SYMBOL_REF_BLOCK (symbol)
688 && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
689 && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
690 < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
691 return true;
693 return false;
696 /* Split X into a base and a constant offset, storing them in *BASE_OUT
697 and *OFFSET_OUT respectively. */
699 void
700 split_const (rtx x, rtx *base_out, rtx *offset_out)
702 if (GET_CODE (x) == CONST)
704 x = XEXP (x, 0);
705 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
707 *base_out = XEXP (x, 0);
708 *offset_out = XEXP (x, 1);
709 return;
712 *base_out = x;
713 *offset_out = const0_rtx;
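/* A minimal usage sketch (illustrative only; SYM stands for some
   SYMBOL_REF rtx the caller already has):

     rtx base, offset;
     split_const (gen_rtx_CONST (Pmode,
                                 gen_rtx_PLUS (Pmode, sym, GEN_INT (8))),
                  &base, &offset);
     gcc_assert (base == sym && INTVAL (offset) == 8);

   For any X not of the form (const (plus ... (const_int ...))), *BASE_OUT
   is X itself and *OFFSET_OUT is const0_rtx.  */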
716 /* Return the number of places FIND appears within X. If COUNT_DEST is
717 zero, we do not count occurrences inside the destination of a SET. */
720 count_occurrences (const_rtx x, const_rtx find, int count_dest)
722 int i, j;
723 enum rtx_code code;
724 const char *format_ptr;
725 int count;
727 if (x == find)
728 return 1;
730 code = GET_CODE (x);
732 switch (code)
734 case REG:
735 CASE_CONST_ANY:
736 case SYMBOL_REF:
737 case CODE_LABEL:
738 case PC:
739 case CC0:
740 return 0;
742 case EXPR_LIST:
743 count = count_occurrences (XEXP (x, 0), find, count_dest);
744 if (XEXP (x, 1))
745 count += count_occurrences (XEXP (x, 1), find, count_dest);
746 return count;
748 case MEM:
749 if (MEM_P (find) && rtx_equal_p (x, find))
750 return 1;
751 break;
753 case SET:
754 if (SET_DEST (x) == find && ! count_dest)
755 return count_occurrences (SET_SRC (x), find, count_dest);
756 break;
758 default:
759 break;
762 format_ptr = GET_RTX_FORMAT (code);
763 count = 0;
765 for (i = 0; i < GET_RTX_LENGTH (code); i++)
767 switch (*format_ptr++)
769 case 'e':
770 count += count_occurrences (XEXP (x, i), find, count_dest);
771 break;
773 case 'E':
774 for (j = 0; j < XVECLEN (x, i); j++)
775 count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
776 break;
779 return count;
783 /* Return TRUE if OP is a register or subreg of a register that
784 holds an unsigned quantity. Otherwise, return FALSE. */
786 bool
787 unsigned_reg_p (rtx op)
789 if (REG_P (op)
790 && REG_EXPR (op)
791 && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
792 return true;
794 if (GET_CODE (op) == SUBREG
795 && SUBREG_PROMOTED_SIGN (op))
796 return true;
798 return false;
802 /* Nonzero if register REG appears somewhere within IN.
803 Also works if REG is not a register; in this case it checks
804 for a subexpression of IN that is Lisp "equal" to REG. */
807 reg_mentioned_p (const_rtx reg, const_rtx in)
809 const char *fmt;
810 int i;
811 enum rtx_code code;
813 if (in == 0)
814 return 0;
816 if (reg == in)
817 return 1;
819 if (GET_CODE (in) == LABEL_REF)
820 return reg == LABEL_REF_LABEL (in);
822 code = GET_CODE (in);
824 switch (code)
826 /* Compare registers by number. */
827 case REG:
828 return REG_P (reg) && REGNO (in) == REGNO (reg);
830 /* These codes have no constituent expressions
831 and are unique. */
832 case SCRATCH:
833 case CC0:
834 case PC:
835 return 0;
837 CASE_CONST_ANY:
838 /* These are kept unique for a given value. */
839 return 0;
841 default:
842 break;
845 if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
846 return 1;
848 fmt = GET_RTX_FORMAT (code);
850 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
852 if (fmt[i] == 'E')
854 int j;
855 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
856 if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
857 return 1;
859 else if (fmt[i] == 'e'
860 && reg_mentioned_p (reg, XEXP (in, i)))
861 return 1;
863 return 0;
866 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
867 no CODE_LABEL insn. */
870 no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
872 rtx_insn *p;
873 if (beg == end)
874 return 0;
875 for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
876 if (LABEL_P (p))
877 return 0;
878 return 1;
881 /* Nonzero if register REG is used in an insn between
882 FROM_INSN and TO_INSN (exclusive of those two). */
885 reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
886 const rtx_insn *to_insn)
888 rtx_insn *insn;
890 if (from_insn == to_insn)
891 return 0;
893 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
894 if (NONDEBUG_INSN_P (insn)
895 && (reg_overlap_mentioned_p (reg, PATTERN (insn))
896 || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
897 return 1;
898 return 0;
901 /* Nonzero if the old value of X, a register, is referenced in BODY. If X
902 is entirely replaced by a new value and the only use is as a SET_DEST,
903 we do not consider it a reference. */
906 reg_referenced_p (const_rtx x, const_rtx body)
908 int i;
910 switch (GET_CODE (body))
912 case SET:
913 if (reg_overlap_mentioned_p (x, SET_SRC (body)))
914 return 1;
916 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
917 of a REG that occupies all of the REG, the insn references X if
918 it is mentioned in the destination. */
919 if (GET_CODE (SET_DEST (body)) != CC0
920 && GET_CODE (SET_DEST (body)) != PC
921 && !REG_P (SET_DEST (body))
922 && ! (GET_CODE (SET_DEST (body)) == SUBREG
923 && REG_P (SUBREG_REG (SET_DEST (body)))
924 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
925 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
926 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
927 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
928 && reg_overlap_mentioned_p (x, SET_DEST (body)))
929 return 1;
930 return 0;
932 case ASM_OPERANDS:
933 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
934 if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
935 return 1;
936 return 0;
938 case CALL:
939 case USE:
940 case IF_THEN_ELSE:
941 return reg_overlap_mentioned_p (x, body);
943 case TRAP_IF:
944 return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
946 case PREFETCH:
947 return reg_overlap_mentioned_p (x, XEXP (body, 0));
949 case UNSPEC:
950 case UNSPEC_VOLATILE:
951 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
952 if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
953 return 1;
954 return 0;
956 case PARALLEL:
957 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
958 if (reg_referenced_p (x, XVECEXP (body, 0, i)))
959 return 1;
960 return 0;
962 case CLOBBER:
963 if (MEM_P (XEXP (body, 0)))
964 if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
965 return 1;
966 return 0;
968 case COND_EXEC:
969 if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
970 return 1;
971 return reg_referenced_p (x, COND_EXEC_CODE (body));
973 default:
974 return 0;
978 /* Nonzero if register REG is set or clobbered in an insn between
979 FROM_INSN and TO_INSN (exclusive of those two). */
982 reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
983 const rtx_insn *to_insn)
985 const rtx_insn *insn;
987 if (from_insn == to_insn)
988 return 0;
990 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
991 if (INSN_P (insn) && reg_set_p (reg, insn))
992 return 1;
993 return 0;
996 /* Internals of reg_set_between_p. */
998 reg_set_p (const_rtx reg, const_rtx insn)
1000 /* After delay slot handling, call and branch insns might be in a
1001 sequence. Check all the elements there. */
1002 if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
1004 for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
1005 if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
1006 return true;
1008 return false;
1011 /* We can be passed an insn or part of one. If we are passed an insn,
1012 check if a side-effect of the insn clobbers REG. */
1013 if (INSN_P (insn)
1014 && (FIND_REG_INC_NOTE (insn, reg)
1015 || (CALL_P (insn)
1016 && ((REG_P (reg)
1017 && REGNO (reg) < FIRST_PSEUDO_REGISTER
1018 && overlaps_hard_reg_set_p (regs_invalidated_by_call,
1019 GET_MODE (reg), REGNO (reg)))
1020 || MEM_P (reg)
1021 || find_reg_fusage (insn, CLOBBER, reg)))))
1022 return true;
1024 return set_of (reg, insn) != NULL_RTX;
1027 /* Similar to reg_set_between_p, but check all registers in X. Return 0
1028 only if none of them are modified between START and END. Return 1 if
1029 X contains a MEM; this routine does use memory aliasing. */
1032 modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
1034 const enum rtx_code code = GET_CODE (x);
1035 const char *fmt;
1036 int i, j;
1037 rtx_insn *insn;
1039 if (start == end)
1040 return 0;
1042 switch (code)
1044 CASE_CONST_ANY:
1045 case CONST:
1046 case SYMBOL_REF:
1047 case LABEL_REF:
1048 return 0;
1050 case PC:
1051 case CC0:
1052 return 1;
1054 case MEM:
1055 if (modified_between_p (XEXP (x, 0), start, end))
1056 return 1;
1057 if (MEM_READONLY_P (x))
1058 return 0;
1059 for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
1060 if (memory_modified_in_insn_p (x, insn))
1061 return 1;
1062 return 0;
1063 break;
1065 case REG:
1066 return reg_set_between_p (x, start, end);
1068 default:
1069 break;
1072 fmt = GET_RTX_FORMAT (code);
1073 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1075 if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
1076 return 1;
1078 else if (fmt[i] == 'E')
1079 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1080 if (modified_between_p (XVECEXP (x, i, j), start, end))
1081 return 1;
1084 return 0;
1087 /* Similar to reg_set_p, but check all registers in X. Return 0 only if none
1088 of them are modified in INSN. Return 1 if X contains a MEM; this routine
1089 does use memory aliasing. */
1092 modified_in_p (const_rtx x, const_rtx insn)
1094 const enum rtx_code code = GET_CODE (x);
1095 const char *fmt;
1096 int i, j;
1098 switch (code)
1100 CASE_CONST_ANY:
1101 case CONST:
1102 case SYMBOL_REF:
1103 case LABEL_REF:
1104 return 0;
1106 case PC:
1107 case CC0:
1108 return 1;
1110 case MEM:
1111 if (modified_in_p (XEXP (x, 0), insn))
1112 return 1;
1113 if (MEM_READONLY_P (x))
1114 return 0;
1115 if (memory_modified_in_insn_p (x, insn))
1116 return 1;
1117 return 0;
1118 break;
1120 case REG:
1121 return reg_set_p (x, insn);
1123 default:
1124 break;
1127 fmt = GET_RTX_FORMAT (code);
1128 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1130 if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
1131 return 1;
1133 else if (fmt[i] == 'E')
1134 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1135 if (modified_in_p (XVECEXP (x, i, j), insn))
1136 return 1;
1139 return 0;
1142 /* Helper function for set_of. */
1143 struct set_of_data
1145 const_rtx found;
1146 const_rtx pat;
1149 static void
1150 set_of_1 (rtx x, const_rtx pat, void *data1)
1152 struct set_of_data *const data = (struct set_of_data *) (data1);
1153 if (rtx_equal_p (x, data->pat)
1154 || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
1155 data->found = pat;
1158 /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
1159 (either directly or via STRICT_LOW_PART and similar modifiers). */
1160 const_rtx
1161 set_of (const_rtx pat, const_rtx insn)
1163 struct set_of_data data;
1164 data.found = NULL_RTX;
1165 data.pat = pat;
1166 note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1167 return data.found;
1170 /* Add all hard register in X to *PSET. */
1171 void
1172 find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
1174 subrtx_iterator::array_type array;
1175 FOR_EACH_SUBRTX (iter, array, x, NONCONST)
1177 const_rtx x = *iter;
1178 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
1179 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1183 /* This function, called through note_stores, collects sets and
1184 clobbers of hard registers in a HARD_REG_SET, which is pointed to
1185 by DATA. */
1186 void
1187 record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
1189 HARD_REG_SET *pset = (HARD_REG_SET *)data;
1190 if (REG_P (x) && HARD_REGISTER_P (x))
1191 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1194 /* Examine INSN, and compute the set of hard registers written by it.
1195 Store it in *PSET. Should only be called after reload. */
1196 void
1197 find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
1199 rtx link;
1201 CLEAR_HARD_REG_SET (*pset);
1202 note_stores (PATTERN (insn), record_hard_reg_sets, pset);
1203 if (CALL_P (insn))
1205 if (implicit)
1206 IOR_HARD_REG_SET (*pset, call_used_reg_set);
1208 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1209 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1211 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1212 if (REG_NOTE_KIND (link) == REG_INC)
1213 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1216 /* Like record_hard_reg_sets, but called through note_uses. */
1217 void
1218 record_hard_reg_uses (rtx *px, void *data)
1220 find_all_hard_regs (*px, (HARD_REG_SET *) data);
1223 /* Given an INSN, return a SET expression if this insn has only a single SET.
1224 It may also have CLOBBERs, USEs, or SETs whose output
1225 will not be used, which we ignore. */
1228 single_set_2 (const rtx_insn *insn, const_rtx pat)
1230 rtx set = NULL;
1231 int set_verified = 1;
1232 int i;
1234 if (GET_CODE (pat) == PARALLEL)
1236 for (i = 0; i < XVECLEN (pat, 0); i++)
1238 rtx sub = XVECEXP (pat, 0, i);
1239 switch (GET_CODE (sub))
1241 case USE:
1242 case CLOBBER:
1243 break;
1245 case SET:
1246 /* We can consider insns having multiple sets, where all
1247 but one are dead, as single set insns.  In the common case
1248 only a single set is present in the pattern, so we want
1249 to avoid checking for REG_UNUSED notes unless necessary.
1251 When we reach a set for the first time, we just expect it to be
1252 the single set we are looking for; only when more
1253 sets are found in the insn do we check them. */
1254 if (!set_verified)
1256 if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1257 && !side_effects_p (set))
1258 set = NULL;
1259 else
1260 set_verified = 1;
1262 if (!set)
1263 set = sub, set_verified = 0;
1264 else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1265 || side_effects_p (sub))
1266 return NULL_RTX;
1267 break;
1269 default:
1270 return NULL_RTX;
1274 return set;
1277 /* Given an INSN, return nonzero if it has more than one SET, else return
1278 zero. */
1281 multiple_sets (const_rtx insn)
1283 int found;
1284 int i;
1286 /* INSN must be an insn. */
1287 if (! INSN_P (insn))
1288 return 0;
1290 /* Only a PARALLEL can have multiple SETs. */
1291 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1293 for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1294 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1296 /* If we have already found a SET, then return now. */
1297 if (found)
1298 return 1;
1299 else
1300 found = 1;
1304 /* Either zero or one SET. */
1305 return 0;
1308 /* Return nonzero if the destination of SET equals the source
1309 and there are no side effects. */
1312 set_noop_p (const_rtx set)
1314 rtx src = SET_SRC (set);
1315 rtx dst = SET_DEST (set);
1317 if (dst == pc_rtx && src == pc_rtx)
1318 return 1;
1320 if (MEM_P (dst) && MEM_P (src))
1321 return rtx_equal_p (dst, src) && !side_effects_p (dst);
1323 if (GET_CODE (dst) == ZERO_EXTRACT)
1324 return rtx_equal_p (XEXP (dst, 0), src)
1325 && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1326 && !side_effects_p (src);
1328 if (GET_CODE (dst) == STRICT_LOW_PART)
1329 dst = XEXP (dst, 0);
1331 if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1333 if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
1334 return 0;
1335 src = SUBREG_REG (src);
1336 dst = SUBREG_REG (dst);
1339 /* It is a NOOP if destination overlaps with selected src vector
1340 elements. */
1341 if (GET_CODE (src) == VEC_SELECT
1342 && REG_P (XEXP (src, 0)) && REG_P (dst)
1343 && HARD_REGISTER_P (XEXP (src, 0))
1344 && HARD_REGISTER_P (dst))
1346 int i;
1347 rtx par = XEXP (src, 1);
1348 rtx src0 = XEXP (src, 0);
1349 int c0 = INTVAL (XVECEXP (par, 0, 0));
1350 HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
1352 for (i = 1; i < XVECLEN (par, 0); i++)
1353 if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
1354 return 0;
1355 return
1356 simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
1357 offset, GET_MODE (dst)) == (int) REGNO (dst);
1360 return (REG_P (src) && REG_P (dst)
1361 && REGNO (src) == REGNO (dst));
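/* Examples of no-op sets (illustrative only):

     (set (reg:SI 3) (reg:SI 3))
     (set (pc) (pc))
     (set (mem:SI (reg:SI 3)) (mem:SI (reg:SI 3)))
         -- provided the MEM has no side effects (e.g. is not volatile)

   whereas a pair of SUBREGs with differing SUBREG_BYTE values is not a
   no-op.  */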
1364 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1365 value to itself. */
1368 noop_move_p (const rtx_insn *insn)
1370 rtx pat = PATTERN (insn);
1372 if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1373 return 1;
1375 /* Insns carrying these notes are useful later on. */
1376 if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1377 return 0;
1379 /* Check the code to be executed for COND_EXEC. */
1380 if (GET_CODE (pat) == COND_EXEC)
1381 pat = COND_EXEC_CODE (pat);
1383 if (GET_CODE (pat) == SET && set_noop_p (pat))
1384 return 1;
1386 if (GET_CODE (pat) == PARALLEL)
1388 int i;
1389 /* If nothing but SETs of registers to themselves,
1390 this insn can also be deleted. */
1391 for (i = 0; i < XVECLEN (pat, 0); i++)
1393 rtx tem = XVECEXP (pat, 0, i);
1395 if (GET_CODE (tem) == USE
1396 || GET_CODE (tem) == CLOBBER)
1397 continue;
1399 if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1400 return 0;
1403 return 1;
1405 return 0;
1409 /* Return nonzero if register in range [REGNO, ENDREGNO)
1410 appears either explicitly or implicitly in X
1411 other than being stored into.
1413 References contained within the substructure at LOC do not count.
1414 LOC may be zero, meaning don't ignore anything. */
1416 bool
1417 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1418 rtx *loc)
1420 int i;
1421 unsigned int x_regno;
1422 RTX_CODE code;
1423 const char *fmt;
1425 repeat:
1426 /* The contents of a REG_NONNEG note are always zero, so we must come here
1427 upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
1428 if (x == 0)
1429 return false;
1431 code = GET_CODE (x);
1433 switch (code)
1435 case REG:
1436 x_regno = REGNO (x);
1438 /* If we are modifying the stack, frame, or argument pointer, it will
1439 clobber a virtual register. In fact, we could be more precise,
1440 but it isn't worth it. */
1441 if ((x_regno == STACK_POINTER_REGNUM
1442 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1443 && x_regno == ARG_POINTER_REGNUM)
1444 || x_regno == FRAME_POINTER_REGNUM)
1445 && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1446 return true;
1448 return endregno > x_regno && regno < END_REGNO (x);
1450 case SUBREG:
1451 /* If this is a SUBREG of a hard reg, we can see exactly which
1452 registers are being modified. Otherwise, handle normally. */
1453 if (REG_P (SUBREG_REG (x))
1454 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1456 unsigned int inner_regno = subreg_regno (x);
1457 unsigned int inner_endregno
1458 = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1459 ? subreg_nregs (x) : 1);
1461 return endregno > inner_regno && regno < inner_endregno;
1463 break;
1465 case CLOBBER:
1466 case SET:
1467 if (&SET_DEST (x) != loc
1468 /* Note setting a SUBREG counts as referring to the REG it is in for
1469 a pseudo but not for hard registers since we can
1470 treat each word individually. */
1471 && ((GET_CODE (SET_DEST (x)) == SUBREG
1472 && loc != &SUBREG_REG (SET_DEST (x))
1473 && REG_P (SUBREG_REG (SET_DEST (x)))
1474 && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1475 && refers_to_regno_p (regno, endregno,
1476 SUBREG_REG (SET_DEST (x)), loc))
1477 || (!REG_P (SET_DEST (x))
1478 && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1479 return true;
1481 if (code == CLOBBER || loc == &SET_SRC (x))
1482 return false;
1483 x = SET_SRC (x);
1484 goto repeat;
1486 default:
1487 break;
1490 /* X does not match, so try its subexpressions. */
1492 fmt = GET_RTX_FORMAT (code);
1493 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1495 if (fmt[i] == 'e' && loc != &XEXP (x, i))
1497 if (i == 0)
1499 x = XEXP (x, 0);
1500 goto repeat;
1502 else
1503 if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1504 return true;
1506 else if (fmt[i] == 'E')
1508 int j;
1509 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1510 if (loc != &XVECEXP (x, i, j)
1511 && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1512 return true;
1515 return false;
1518 /* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
1519 we check if any register number in X conflicts with the relevant register
1520 numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
1521 contains a MEM (we don't bother checking for memory addresses that can't
1522 conflict because we expect this to be a rare case). */
1525 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1527 unsigned int regno, endregno;
1529 /* If either argument is a constant, then modifying X can not
1530 affect IN. Here we look at IN; we can profitably combine
1531 CONSTANT_P (x) with the switch statement below. */
1532 if (CONSTANT_P (in))
1533 return 0;
1535 recurse:
1536 switch (GET_CODE (x))
1538 case STRICT_LOW_PART:
1539 case ZERO_EXTRACT:
1540 case SIGN_EXTRACT:
1541 /* Overly conservative. */
1542 x = XEXP (x, 0);
1543 goto recurse;
1545 case SUBREG:
1546 regno = REGNO (SUBREG_REG (x));
1547 if (regno < FIRST_PSEUDO_REGISTER)
1548 regno = subreg_regno (x);
1549 endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1550 ? subreg_nregs (x) : 1);
1551 goto do_reg;
1553 case REG:
1554 regno = REGNO (x);
1555 endregno = END_REGNO (x);
1556 do_reg:
1557 return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1559 case MEM:
1561 const char *fmt;
1562 int i;
1564 if (MEM_P (in))
1565 return 1;
1567 fmt = GET_RTX_FORMAT (GET_CODE (in));
1568 for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1569 if (fmt[i] == 'e')
1571 if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1572 return 1;
1574 else if (fmt[i] == 'E')
1576 int j;
1577 for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1578 if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1579 return 1;
1582 return 0;
1585 case SCRATCH:
1586 case PC:
1587 case CC0:
1588 return reg_mentioned_p (x, in);
1590 case PARALLEL:
1592 int i;
1594 /* If any register in here refers to it we return true. */
1595 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1596 if (XEXP (XVECEXP (x, 0, i), 0) != 0
1597 && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1598 return 1;
1599 return 0;
1602 default:
1603 gcc_assert (CONSTANT_P (x));
1604 return 0;
1608 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1609 (X would be the pattern of an insn). DATA is an arbitrary pointer,
1610 ignored by note_stores, but passed to FUN.
1612 FUN receives three arguments:
1613 1. the REG, MEM, CC0 or PC being stored in or clobbered,
1614 2. the SET or CLOBBER rtx that does the store,
1615 3. the pointer DATA provided to note_stores.
1617 If the item being stored in or clobbered is a SUBREG of a hard register,
1618 the SUBREG will be passed. */
1620 void
1621 note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
1623 int i;
1625 if (GET_CODE (x) == COND_EXEC)
1626 x = COND_EXEC_CODE (x);
1628 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1630 rtx dest = SET_DEST (x);
1632 while ((GET_CODE (dest) == SUBREG
1633 && (!REG_P (SUBREG_REG (dest))
1634 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1635 || GET_CODE (dest) == ZERO_EXTRACT
1636 || GET_CODE (dest) == STRICT_LOW_PART)
1637 dest = XEXP (dest, 0);
1639 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1640 each of whose first operand is a register. */
1641 if (GET_CODE (dest) == PARALLEL)
1643 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1644 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1645 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1647 else
1648 (*fun) (dest, x, data);
1651 else if (GET_CODE (x) == PARALLEL)
1652 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1653 note_stores (XVECEXP (x, 0, i), fun, data);
1656 /* Like note_stores, but call FUN for each expression that is being
1657 referenced in PBODY, a pointer to the PATTERN of an insn. We only call
1658 FUN for each expression, not any interior subexpressions. FUN receives a
1659 pointer to the expression and the DATA passed to this function.
1661 Note that this is not quite the same test as that done in reg_referenced_p
1662 since that considers something as being referenced if it is being
1663 partially set, while we do not. */
1665 void
1666 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1668 rtx body = *pbody;
1669 int i;
1671 switch (GET_CODE (body))
1673 case COND_EXEC:
1674 (*fun) (&COND_EXEC_TEST (body), data);
1675 note_uses (&COND_EXEC_CODE (body), fun, data);
1676 return;
1678 case PARALLEL:
1679 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1680 note_uses (&XVECEXP (body, 0, i), fun, data);
1681 return;
1683 case SEQUENCE:
1684 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1685 note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1686 return;
1688 case USE:
1689 (*fun) (&XEXP (body, 0), data);
1690 return;
1692 case ASM_OPERANDS:
1693 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1694 (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
1695 return;
1697 case TRAP_IF:
1698 (*fun) (&TRAP_CONDITION (body), data);
1699 return;
1701 case PREFETCH:
1702 (*fun) (&XEXP (body, 0), data);
1703 return;
1705 case UNSPEC:
1706 case UNSPEC_VOLATILE:
1707 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1708 (*fun) (&XVECEXP (body, 0, i), data);
1709 return;
1711 case CLOBBER:
1712 if (MEM_P (XEXP (body, 0)))
1713 (*fun) (&XEXP (XEXP (body, 0), 0), data);
1714 return;
1716 case SET:
1718 rtx dest = SET_DEST (body);
1720 /* For sets we call FUN on the source, on the address of a MEM
1721 destination, and on the operands of a ZERO_EXTRACT destination. */
1722 (*fun) (&SET_SRC (body), data);
1724 if (GET_CODE (dest) == ZERO_EXTRACT)
1726 (*fun) (&XEXP (dest, 1), data);
1727 (*fun) (&XEXP (dest, 2), data);
1730 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
1731 dest = XEXP (dest, 0);
1733 if (MEM_P (dest))
1734 (*fun) (&XEXP (dest, 0), data);
1736 return;
1738 default:
1739 /* All the other possibilities never store. */
1740 (*fun) (pbody, data);
1741 return;
1745 /* Return nonzero if X's old contents don't survive after INSN.
1746 This will be true if X is (cc0) or if X is a register and
1747 X dies in INSN or because INSN entirely sets X.
1749 "Entirely set" means set directly and not through a SUBREG, or
1750 ZERO_EXTRACT, so no trace of the old contents remains.
1751 Likewise, REG_INC does not count.
1753 REG may be a hard or pseudo reg. Renumbering is not taken into account,
1754 but for this use that makes no difference, since regs don't overlap
1755 during their lifetimes. Therefore, this function may be used
1756 at any time after deaths have been computed.
1758 If REG is a hard reg that occupies multiple machine registers, this
1759 function will only return 1 if each of those registers will be replaced
1760 by INSN. */
1763 dead_or_set_p (const_rtx insn, const_rtx x)
1765 unsigned int regno, end_regno;
1766 unsigned int i;
1768 /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
1769 if (GET_CODE (x) == CC0)
1770 return 1;
1772 gcc_assert (REG_P (x));
1774 regno = REGNO (x);
1775 end_regno = END_REGNO (x);
1776 for (i = regno; i < end_regno; i++)
1777 if (! dead_or_set_regno_p (insn, i))
1778 return 0;
1780 return 1;
1783 /* Return TRUE iff DEST is a register, or a subreg of a register that
1784 does not change the number of words of the inner register, and some
1785 part of that register is TEST_REGNO. */
1787 static bool
1788 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
1790 unsigned int regno, endregno;
1792 if (GET_CODE (dest) == SUBREG
1793 && (((GET_MODE_SIZE (GET_MODE (dest))
1794 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
1795 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
1796 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
1797 dest = SUBREG_REG (dest);
1799 if (!REG_P (dest))
1800 return false;
1802 regno = REGNO (dest);
1803 endregno = END_REGNO (dest);
1804 return (test_regno >= regno && test_regno < endregno);
1807 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
1808 any member matches the covers_regno_no_parallel_p criteria. */
1810 static bool
1811 covers_regno_p (const_rtx dest, unsigned int test_regno)
1813 if (GET_CODE (dest) == PARALLEL)
1815 /* Some targets place small structures in registers for return
1816 values of functions, and those registers are wrapped in
1817 PARALLELs that we may see as the destination of a SET. */
1818 int i;
1820 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1822 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
1823 if (inner != NULL_RTX
1824 && covers_regno_no_parallel_p (inner, test_regno))
1825 return true;
1828 return false;
1830 else
1831 return covers_regno_no_parallel_p (dest, test_regno);
1834 /* Utility function for dead_or_set_p to check an individual register. */
1837 dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
1839 const_rtx pattern;
1841 /* See if there is a death note for something that includes TEST_REGNO. */
1842 if (find_regno_note (insn, REG_DEAD, test_regno))
1843 return 1;
1845 if (CALL_P (insn)
1846 && find_regno_fusage (insn, CLOBBER, test_regno))
1847 return 1;
1849 pattern = PATTERN (insn);
1851 /* If a COND_EXEC is not executed, the value survives. */
1852 if (GET_CODE (pattern) == COND_EXEC)
1853 return 0;
1855 if (GET_CODE (pattern) == SET)
1856 return covers_regno_p (SET_DEST (pattern), test_regno);
1857 else if (GET_CODE (pattern) == PARALLEL)
1859 int i;
1861 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
1863 rtx body = XVECEXP (pattern, 0, i);
1865 if (GET_CODE (body) == COND_EXEC)
1866 body = COND_EXEC_CODE (body);
1868 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
1869 && covers_regno_p (SET_DEST (body), test_regno))
1870 return 1;
1874 return 0;
1877 /* Return the reg-note of kind KIND in insn INSN, if there is one.
1878 If DATUM is nonzero, look for one whose datum is DATUM. */
1881 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
1883 rtx link;
1885 gcc_checking_assert (insn);
1887 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1888 if (! INSN_P (insn))
1889 return 0;
1890 if (datum == 0)
1892 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1893 if (REG_NOTE_KIND (link) == kind)
1894 return link;
1895 return 0;
1898 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1899 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
1900 return link;
1901 return 0;
1904 /* Return the reg-note of kind KIND in insn INSN which applies to register
1905 number REGNO, if any. Return 0 if there is no such reg-note. Note that
1906 the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
1907 it might be the case that the note overlaps REGNO. */
1910 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
1912 rtx link;
1914 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1915 if (! INSN_P (insn))
1916 return 0;
1918 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1919 if (REG_NOTE_KIND (link) == kind
1920 /* Verify that it is a register, so that scratch and MEM won't cause a
1921 problem here. */
1922 && REG_P (XEXP (link, 0))
1923 && REGNO (XEXP (link, 0)) <= regno
1924 && END_REGNO (XEXP (link, 0)) > regno)
1925 return link;
1926 return 0;
1929 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
1930 has such a note. */
1933 find_reg_equal_equiv_note (const_rtx insn)
1935 rtx link;
1937 if (!INSN_P (insn))
1938 return 0;
1940 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1941 if (REG_NOTE_KIND (link) == REG_EQUAL
1942 || REG_NOTE_KIND (link) == REG_EQUIV)
1944 /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
1945 insns that have multiple sets. Checking single_set to
1946 make sure of this is not the proper check, as explained
1947 in the comment in set_unique_reg_note.
1949 This should be changed into an assert. */
1950 if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
1951 return 0;
1952 return link;
1954 return NULL;
1957 /* Check whether INSN is a single_set whose source is known to be
1958 equivalent to a constant. Return that constant if so, otherwise
1959 return null. */
1962 find_constant_src (const rtx_insn *insn)
1964 rtx note, set, x;
1966 set = single_set (insn);
1967 if (set)
1969 x = avoid_constant_pool_reference (SET_SRC (set));
1970 if (CONSTANT_P (x))
1971 return x;
1974 note = find_reg_equal_equiv_note (insn);
1975 if (note && CONSTANT_P (XEXP (note, 0)))
1976 return XEXP (note, 0);
1978 return NULL_RTX;
1981 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
1982 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1985 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
1987 /* If it's not a CALL_INSN, it can't possibly have a
1988 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
1989 if (!CALL_P (insn))
1990 return 0;
1992 gcc_assert (datum);
1994 if (!REG_P (datum))
1996 rtx link;
1998 for (link = CALL_INSN_FUNCTION_USAGE (insn);
1999 link;
2000 link = XEXP (link, 1))
2001 if (GET_CODE (XEXP (link, 0)) == code
2002 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2003 return 1;
2005 else
2007 unsigned int regno = REGNO (datum);
2009 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2010 to pseudo registers, so don't bother checking. */
2012 if (regno < FIRST_PSEUDO_REGISTER)
2014 unsigned int end_regno = END_REGNO (datum);
2015 unsigned int i;
2017 for (i = regno; i < end_regno; i++)
2018 if (find_regno_fusage (insn, code, i))
2019 return 1;
2023 return 0;
2026 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2027 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2030 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2032 rtx link;
2034 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2035 to pseudo registers, so don't bother checking. */
2037 if (regno >= FIRST_PSEUDO_REGISTER
2038 || !CALL_P (insn) )
2039 return 0;
2041 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2043 rtx op, reg;
2045 if (GET_CODE (op = XEXP (link, 0)) == code
2046 && REG_P (reg = XEXP (op, 0))
2047 && REGNO (reg) <= regno
2048 && END_REGNO (reg) > regno)
2049 return 1;
2052 return 0;
2056 /* Return true if KIND is an integer REG_NOTE. */
2058 static bool
2059 int_reg_note_p (enum reg_note kind)
2061 return kind == REG_BR_PROB;
2064 /* Allocate a register note with kind KIND and datum DATUM. LIST is
2065 stored as the pointer to the next register note. */
2068 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2070 rtx note;
2072 gcc_checking_assert (!int_reg_note_p (kind));
2073 switch (kind)
2075 case REG_CC_SETTER:
2076 case REG_CC_USER:
2077 case REG_LABEL_TARGET:
2078 case REG_LABEL_OPERAND:
2079 case REG_TM:
2080 /* These types of register notes use an INSN_LIST rather than an
2081 EXPR_LIST, so that copying is done right and dumps look
2082 better. */
2083 note = alloc_INSN_LIST (datum, list);
2084 PUT_REG_NOTE_KIND (note, kind);
2085 break;
2087 default:
2088 note = alloc_EXPR_LIST (kind, datum, list);
2089 break;
2092 return note;
2095 /* Add register note with kind KIND and datum DATUM to INSN. */
2097 void
2098 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2100 REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
2103 /* Add an integer register note with kind KIND and datum DATUM to INSN. */
2105 void
2106 add_int_reg_note (rtx insn, enum reg_note kind, int datum)
2108 gcc_checking_assert (int_reg_note_p (kind));
2109 REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2110 datum, REG_NOTES (insn));
2113 /* Add a register note like NOTE to INSN. */
2115 void
2116 add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2118 if (GET_CODE (note) == INT_LIST)
2119 add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2120 else
2121 add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2124 /* Remove register note NOTE from the REG_NOTES of INSN. */
2126 void
2127 remove_note (rtx insn, const_rtx note)
2129 rtx link;
2131 if (note == NULL_RTX)
2132 return;
2134 if (REG_NOTES (insn) == note)
2135 REG_NOTES (insn) = XEXP (note, 1);
2136 else
2137 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2138 if (XEXP (link, 1) == note)
2140 XEXP (link, 1) = XEXP (note, 1);
2141 break;
2144 switch (REG_NOTE_KIND (note))
2146 case REG_EQUAL:
2147 case REG_EQUIV:
2148 df_notes_rescan (as_a <rtx_insn *> (insn));
2149 break;
2150 default:
2151 break;
2155 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes. */
2157 void
2158 remove_reg_equal_equiv_notes (rtx_insn *insn)
2160 rtx *loc;
2162 loc = &REG_NOTES (insn);
2163 while (*loc)
2165 enum reg_note kind = REG_NOTE_KIND (*loc);
2166 if (kind == REG_EQUAL || kind == REG_EQUIV)
2167 *loc = XEXP (*loc, 1);
2168 else
2169 loc = &XEXP (*loc, 1);
2173 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO. */
2175 void
2176 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2178 df_ref eq_use;
2180 if (!df)
2181 return;
2183 /* This loop is a little tricky. We cannot just go down the chain because
2184 it is being modified by some actions in the loop. So we just iterate
2185 over the head. We plan to drain the list anyway. */
2186 while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2188 rtx_insn *insn = DF_REF_INSN (eq_use);
2189 rtx note = find_reg_equal_equiv_note (insn);
2191 /* This assert is generally triggered when someone deletes a REG_EQUAL
2192 or REG_EQUIV note by hacking the list manually rather than calling
2193 remove_note. */
2194 gcc_assert (note);
2196 remove_note (insn, note);
2200 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2201 return true if it is found. A simple equality test is used to determine if
2202 NODE matches. */
2204 bool
2205 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2207 const_rtx x;
2209 for (x = listp; x; x = XEXP (x, 1))
2210 if (node == XEXP (x, 0))
2211 return true;
2213 return false;
2216 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2217 remove that entry from the list if it is found.
2219 A simple equality test is used to determine if NODE matches. */
2221 void
2222 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2224 rtx_expr_list *temp = *listp;
2225 rtx_expr_list *prev = NULL;
2227 while (temp)
2229 if (node == temp->element ())
2231 /* Splice the node out of the list. */
2232 if (prev)
2233 XEXP (prev, 1) = temp->next ();
2234 else
2235 *listp = temp->next ();
2237 return;
2240 prev = temp;
2241 temp = temp->next ();
2245 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2246 remove that entry from the list if it is found.
2248 A simple equality test is used to determine if NODE matches. */
2250 void
2251 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2253 rtx_insn_list *temp = *listp;
2254 rtx_insn_list *prev = NULL;
2256 while (temp)
2258 if (node == temp->insn ())
2260 /* Splice the node out of the list. */
2261 if (prev)
2262 XEXP (prev, 1) = temp->next ();
2263 else
2264 *listp = temp->next ();
2266 return;
2269 prev = temp;
2270 temp = temp->next ();
2274 /* Nonzero if X contains any volatile instructions. These are instructions
2275 which may cause unpredictable machine state, and thus no
2276 instructions or register uses should be moved or combined across them.
2277 This includes only volatile asms and UNSPEC_VOLATILE instructions. */
2280 volatile_insn_p (const_rtx x)
2282 const RTX_CODE code = GET_CODE (x);
2283 switch (code)
2285 case LABEL_REF:
2286 case SYMBOL_REF:
2287 case CONST:
2288 CASE_CONST_ANY:
2289 case CC0:
2290 case PC:
2291 case REG:
2292 case SCRATCH:
2293 case CLOBBER:
2294 case ADDR_VEC:
2295 case ADDR_DIFF_VEC:
2296 case CALL:
2297 case MEM:
2298 return 0;
2300 case UNSPEC_VOLATILE:
2301 return 1;
2303 case ASM_INPUT:
2304 case ASM_OPERANDS:
2305 if (MEM_VOLATILE_P (x))
2306 return 1;
2308 default:
2309 break;
2312 /* Recursively scan the operands of this expression. */
2315 const char *const fmt = GET_RTX_FORMAT (code);
2316 int i;
2318 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2320 if (fmt[i] == 'e')
2322 if (volatile_insn_p (XEXP (x, i)))
2323 return 1;
2325 else if (fmt[i] == 'E')
2327 int j;
2328 for (j = 0; j < XVECLEN (x, i); j++)
2329 if (volatile_insn_p (XVECEXP (x, i, j)))
2330 return 1;
2334 return 0;
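/* Illustrative note (not part of the original source): volatile_insn_p
   returns 1 only for UNSPEC_VOLATILE and for volatile ASM_INPUT or
   ASM_OPERANDS; a volatile MEM by itself returns 0 here, since MEM is in
   the "return 0" group of the switch above.  Volatile MEMs are instead
   caught by volatile_refs_p below.  */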
2337 /* Nonzero if X contains any volatile memory references,
2338 UNSPEC_VOLATILE operations, or volatile ASM_OPERANDS expressions. */
2341 volatile_refs_p (const_rtx x)
2343 const RTX_CODE code = GET_CODE (x);
2344 switch (code)
2346 case LABEL_REF:
2347 case SYMBOL_REF:
2348 case CONST:
2349 CASE_CONST_ANY:
2350 case CC0:
2351 case PC:
2352 case REG:
2353 case SCRATCH:
2354 case CLOBBER:
2355 case ADDR_VEC:
2356 case ADDR_DIFF_VEC:
2357 return 0;
2359 case UNSPEC_VOLATILE:
2360 return 1;
2362 case MEM:
2363 case ASM_INPUT:
2364 case ASM_OPERANDS:
2365 if (MEM_VOLATILE_P (x))
2366 return 1;
2368 default:
2369 break;
2372 /* Recursively scan the operands of this expression. */
2375 const char *const fmt = GET_RTX_FORMAT (code);
2376 int i;
2378 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2380 if (fmt[i] == 'e')
2382 if (volatile_refs_p (XEXP (x, i)))
2383 return 1;
2385 else if (fmt[i] == 'E')
2387 int j;
2388 for (j = 0; j < XVECLEN (x, i); j++)
2389 if (volatile_refs_p (XVECEXP (x, i, j)))
2390 return 1;
2394 return 0;
2397 /* Similar to above, except that it also rejects register pre- and post-
2398 incrementing. */
2401 side_effects_p (const_rtx x)
2403 const RTX_CODE code = GET_CODE (x);
2404 switch (code)
2406 case LABEL_REF:
2407 case SYMBOL_REF:
2408 case CONST:
2409 CASE_CONST_ANY:
2410 case CC0:
2411 case PC:
2412 case REG:
2413 case SCRATCH:
2414 case ADDR_VEC:
2415 case ADDR_DIFF_VEC:
2416 case VAR_LOCATION:
2417 return 0;
2419 case CLOBBER:
2420 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2421 when some combination can't be done. If we see one, don't think
2422 that we can simplify the expression. */
2423 return (GET_MODE (x) != VOIDmode);
2425 case PRE_INC:
2426 case PRE_DEC:
2427 case POST_INC:
2428 case POST_DEC:
2429 case PRE_MODIFY:
2430 case POST_MODIFY:
2431 case CALL:
2432 case UNSPEC_VOLATILE:
2433 return 1;
2435 case MEM:
2436 case ASM_INPUT:
2437 case ASM_OPERANDS:
2438 if (MEM_VOLATILE_P (x))
2439 return 1;
2441 default:
2442 break;
2445 /* Recursively scan the operands of this expression. */
2448 const char *fmt = GET_RTX_FORMAT (code);
2449 int i;
2451 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2453 if (fmt[i] == 'e')
2455 if (side_effects_p (XEXP (x, i)))
2456 return 1;
2458 else if (fmt[i] == 'E')
2460 int j;
2461 for (j = 0; j < XVECLEN (x, i); j++)
2462 if (side_effects_p (XVECEXP (x, i, j)))
2463 return 1;
2467 return 0;
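/* Illustrative examples (not part of the original source), matching the
   switch above: autoincrement addresses such as (post_inc:SI (reg:SI 1)),
   CALL and UNSPEC_VOLATILE always count as side effects; a CLOBBER counts
   only when the CLOBBER itself carries a non-VOID mode; a MEM, ASM_INPUT
   or ASM_OPERANDS counts only when MEM_VOLATILE_P is set on it.  */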
2470 /* Return nonzero if evaluating rtx X might cause a trap.
2471 FLAGS controls how to consider MEMs. A nonzero value means the context
2472 of the access may have changed from the original, such that the
2473 address may have become invalid. */
2476 may_trap_p_1 (const_rtx x, unsigned flags)
2478 int i;
2479 enum rtx_code code;
2480 const char *fmt;
2482 /* We make no distinction currently, but this function is part of
2483 the internal target-hooks ABI so we keep the parameter as
2484 "unsigned flags". */
2485 bool code_changed = flags != 0;
2487 if (x == 0)
2488 return 0;
2489 code = GET_CODE (x);
2490 switch (code)
2492 /* Handle these cases quickly. */
2493 CASE_CONST_ANY:
2494 case SYMBOL_REF:
2495 case LABEL_REF:
2496 case CONST:
2497 case PC:
2498 case CC0:
2499 case REG:
2500 case SCRATCH:
2501 return 0;
2503 case UNSPEC:
2504 return targetm.unspec_may_trap_p (x, flags);
2506 case UNSPEC_VOLATILE:
2507 case ASM_INPUT:
2508 case TRAP_IF:
2509 return 1;
2511 case ASM_OPERANDS:
2512 return MEM_VOLATILE_P (x);
2514 /* Memory ref can trap unless it's a static var or a stack slot. */
2515 case MEM:
2516 /* Recognize specific pattern of stack checking probes. */
2517 if (flag_stack_check
2518 && MEM_VOLATILE_P (x)
2519 && XEXP (x, 0) == stack_pointer_rtx)
2520 return 1;
2521 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2522 reference; moving it out of context such as when moving code
2523 when optimizing, might cause its address to become invalid. */
2524 code_changed
2525 || !MEM_NOTRAP_P (x))
2527 HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
2528 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2529 GET_MODE (x), code_changed);
2532 return 0;
2534 /* Division by a non-constant might trap. */
2535 case DIV:
2536 case MOD:
2537 case UDIV:
2538 case UMOD:
2539 if (HONOR_SNANS (x))
2540 return 1;
2541 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2542 return flag_trapping_math;
2543 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2544 return 1;
2545 break;
2547 case EXPR_LIST:
2548 /* An EXPR_LIST is used to represent a function call. This
2549 certainly may trap. */
2550 return 1;
2552 case GE:
2553 case GT:
2554 case LE:
2555 case LT:
2556 case LTGT:
2557 case COMPARE:
2558 /* Some floating point comparisons may trap. */
2559 if (!flag_trapping_math)
2560 break;
2561 /* ??? There is no machine independent way to check for tests that trap
2562 when COMPARE is used, though many targets do make this distinction.
2563 For instance, sparc uses CCFPE for compares which generate exceptions
2564 and CCFP for compares which do not generate exceptions. */
2565 if (HONOR_NANS (x))
2566 return 1;
2567 /* But often the compare has some CC mode, so check operand
2568 modes as well. */
2569 if (HONOR_NANS (XEXP (x, 0))
2570 || HONOR_NANS (XEXP (x, 1)))
2571 return 1;
2572 break;
2574 case EQ:
2575 case NE:
2576 if (HONOR_SNANS (x))
2577 return 1;
2578 /* Often comparison is CC mode, so check operand modes. */
2579 if (HONOR_SNANS (XEXP (x, 0))
2580 || HONOR_SNANS (XEXP (x, 1)))
2581 return 1;
2582 break;
2584 case FIX:
2585 /* Conversion of floating point might trap. */
2586 if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
2587 return 1;
2588 break;
2590 case NEG:
2591 case ABS:
2592 case SUBREG:
2593 /* These operations don't trap even with floating point. */
2594 break;
2596 default:
2597 /* Any floating arithmetic may trap. */
2598 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2599 return 1;
2602 fmt = GET_RTX_FORMAT (code);
2603 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2605 if (fmt[i] == 'e')
2607 if (may_trap_p_1 (XEXP (x, i), flags))
2608 return 1;
2610 else if (fmt[i] == 'E')
2612 int j;
2613 for (j = 0; j < XVECLEN (x, i); j++)
2614 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2615 return 1;
2618 return 0;
2621 /* Return nonzero if evaluating rtx X might cause a trap. */
2624 may_trap_p (const_rtx x)
2626 return may_trap_p_1 (x, 0);
2629 /* Same as above, but additionally return nonzero if evaluating rtx X might
2630 cause a fault. We define a fault for the purpose of this function as an
2631 erroneous execution condition that cannot be encountered during the normal
2632 execution of a valid program; the typical example is an unaligned memory
2633 access on a strict alignment machine. The compiler guarantees that it
2634 doesn't generate code that will fault from a valid program, but this
2635 guarantee doesn't mean anything for individual instructions. Consider
2636 the following example:
2638 struct S { int d; union { char *cp; int *ip; }; };
2640 int foo(struct S *s)
2642 if (s->d == 1)
2643 return *s->ip;
2644 else
2645 return *s->cp;
2648 on a strict alignment machine. In a valid program, foo will never be
2649 invoked on a structure for which d is equal to 1 and the underlying
2650 unique field of the union not aligned on a 4-byte boundary, but the
2651 expression *s->ip might cause a fault if considered individually.
2653 At the RTL level, potentially problematic expressions will almost always
2654 satisfy may_trap_p; for example, the above dereference can be emitted as
2655 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2656 However, suppose that foo is inlined in a caller that causes s->cp to
2657 point to a local character variable and guarantees that s->d is not set
2658 to 1; foo may have been effectively translated into pseudo-RTL as:
2660 if ((reg:SI) == 1)
2661 (set (reg:SI) (mem:SI (%fp - 7)))
2662 else
2663 (set (reg:QI) (mem:QI (%fp - 7)))
2665 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2666 memory reference to a stack slot, but it will certainly cause a fault
2667 on a strict alignment machine. */
2670 may_trap_or_fault_p (const_rtx x)
2672 return may_trap_p_1 (x, 1);
2675 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2676 i.e., an inequality. */
2679 inequality_comparisons_p (const_rtx x)
2681 const char *fmt;
2682 int len, i;
2683 const enum rtx_code code = GET_CODE (x);
2685 switch (code)
2687 case REG:
2688 case SCRATCH:
2689 case PC:
2690 case CC0:
2691 CASE_CONST_ANY:
2692 case CONST:
2693 case LABEL_REF:
2694 case SYMBOL_REF:
2695 return 0;
2697 case LT:
2698 case LTU:
2699 case GT:
2700 case GTU:
2701 case LE:
2702 case LEU:
2703 case GE:
2704 case GEU:
2705 return 1;
2707 default:
2708 break;
2711 len = GET_RTX_LENGTH (code);
2712 fmt = GET_RTX_FORMAT (code);
2714 for (i = 0; i < len; i++)
2716 if (fmt[i] == 'e')
2718 if (inequality_comparisons_p (XEXP (x, i)))
2719 return 1;
2721 else if (fmt[i] == 'E')
2723 int j;
2724 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2725 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2726 return 1;
2730 return 0;
2733 /* Replace any occurrence of FROM in X with TO. The function does
2734 not descend into CONST_DOUBLE when doing the replacement.
2736 Note that copying is not done, so X must not be shared unless all copies
2737 are to be modified. */
2740 replace_rtx (rtx x, rtx from, rtx to)
2742 int i, j;
2743 const char *fmt;
2745 if (x == from)
2746 return to;
2748 /* Allow this function to make replacements in EXPR_LISTs. */
2749 if (x == 0)
2750 return 0;
2752 if (GET_CODE (x) == SUBREG)
2754 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);
2756 if (CONST_INT_P (new_rtx))
2758 x = simplify_subreg (GET_MODE (x), new_rtx,
2759 GET_MODE (SUBREG_REG (x)),
2760 SUBREG_BYTE (x));
2761 gcc_assert (x);
2763 else
2764 SUBREG_REG (x) = new_rtx;
2766 return x;
2768 else if (GET_CODE (x) == ZERO_EXTEND)
2770 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);
2772 if (CONST_INT_P (new_rtx))
2774 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2775 new_rtx, GET_MODE (XEXP (x, 0)));
2776 gcc_assert (x);
2778 else
2779 XEXP (x, 0) = new_rtx;
2781 return x;
2784 fmt = GET_RTX_FORMAT (GET_CODE (x));
2785 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2787 if (fmt[i] == 'e')
2788 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
2789 else if (fmt[i] == 'E')
2790 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2791 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
2794 return x;
2797 /* Replace occurrences of OLD_LABEL in *LOC with NEW_LABEL. Also track
2798 the change in LABEL_NUSES if UPDATE_LABEL_NUSES. */
2800 void
2801 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
2803 /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long. */
2804 rtx x = *loc;
2805 if (JUMP_TABLE_DATA_P (x))
2807 x = PATTERN (x);
2808 rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
2809 int len = GET_NUM_ELEM (vec);
2810 for (int i = 0; i < len; ++i)
2812 rtx ref = RTVEC_ELT (vec, i);
2813 if (XEXP (ref, 0) == old_label)
2815 XEXP (ref, 0) = new_label;
2816 if (update_label_nuses)
2818 ++LABEL_NUSES (new_label);
2819 --LABEL_NUSES (old_label);
2823 return;
2826 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
2827 field. This is not handled by the iterator because it doesn't
2828 handle unprinted ('0') fields. */
2829 if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
2830 JUMP_LABEL (x) = new_label;
2832 subrtx_ptr_iterator::array_type array;
2833 FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
2835 rtx *loc = *iter;
2836 if (rtx x = *loc)
2838 if (GET_CODE (x) == SYMBOL_REF
2839 && CONSTANT_POOL_ADDRESS_P (x))
2841 rtx c = get_pool_constant (x);
2842 if (rtx_referenced_p (old_label, c))
2844 /* Create a copy of constant C; replace the label inside
2845 but do not update LABEL_NUSES because uses in constant pool
2846 are not counted. */
2847 rtx new_c = copy_rtx (c);
2848 replace_label (&new_c, old_label, new_label, false);
2850 /* Add the new constant NEW_C to constant pool and replace
2851 the old reference to constant by new reference. */
2852 rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
2853 *loc = replace_rtx (x, x, XEXP (new_mem, 0));
2857 if ((GET_CODE (x) == LABEL_REF
2858 || GET_CODE (x) == INSN_LIST)
2859 && XEXP (x, 0) == old_label)
2861 XEXP (x, 0) = new_label;
2862 if (update_label_nuses)
2864 ++LABEL_NUSES (new_label);
2865 --LABEL_NUSES (old_label);
2872 void
2873 replace_label_in_insn (rtx_insn *insn, rtx old_label, rtx new_label,
2874 bool update_label_nuses)
2876 rtx insn_as_rtx = insn;
2877 replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
2878 gcc_checking_assert (insn_as_rtx == insn);
2881 /* Return true if X is referenced in BODY. */
2883 bool
2884 rtx_referenced_p (const_rtx x, const_rtx body)
2886 subrtx_iterator::array_type array;
2887 FOR_EACH_SUBRTX (iter, array, body, ALL)
2888 if (const_rtx y = *iter)
2890 /* Check if a label_ref Y refers to label X. */
2891 if (GET_CODE (y) == LABEL_REF
2892 && LABEL_P (x)
2893 && LABEL_REF_LABEL (y) == x)
2894 return true;
2896 if (rtx_equal_p (x, y))
2897 return true;
2899 /* If Y is a reference to a pool constant, traverse the constant. */
2900 if (GET_CODE (y) == SYMBOL_REF
2901 && CONSTANT_POOL_ADDRESS_P (y))
2902 iter.substitute (get_pool_constant (y));
2904 return false;
2907 /* If INSN is a tablejump, return true and store the label (before the jump table) to
2908 *LABELP and the jump table to *TABLEP. LABELP and TABLEP may be NULL. */
2910 bool
2911 tablejump_p (const rtx_insn *insn, rtx *labelp, rtx_jump_table_data **tablep)
2913 rtx label;
2914 rtx_insn *table;
2916 if (!JUMP_P (insn))
2917 return false;
2919 label = JUMP_LABEL (insn);
2920 if (label != NULL_RTX && !ANY_RETURN_P (label)
2921 && (table = NEXT_INSN (as_a <rtx_insn *> (label))) != NULL_RTX
2922 && JUMP_TABLE_DATA_P (table))
2924 if (labelp)
2925 *labelp = label;
2926 if (tablep)
2927 *tablep = as_a <rtx_jump_table_data *> (table);
2928 return true;
2930 return false;
2933 /* A subroutine of computed_jump_p. Return 1 if X contains a REG or MEM or
2934 constant that is not in the constant pool and is not in the condition
2935 of an IF_THEN_ELSE. */
2937 static int
2938 computed_jump_p_1 (const_rtx x)
2940 const enum rtx_code code = GET_CODE (x);
2941 int i, j;
2942 const char *fmt;
2944 switch (code)
2946 case LABEL_REF:
2947 case PC:
2948 return 0;
2950 case CONST:
2951 CASE_CONST_ANY:
2952 case SYMBOL_REF:
2953 case REG:
2954 return 1;
2956 case MEM:
2957 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
2958 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
2960 case IF_THEN_ELSE:
2961 return (computed_jump_p_1 (XEXP (x, 1))
2962 || computed_jump_p_1 (XEXP (x, 2)));
2964 default:
2965 break;
2968 fmt = GET_RTX_FORMAT (code);
2969 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2971 if (fmt[i] == 'e'
2972 && computed_jump_p_1 (XEXP (x, i)))
2973 return 1;
2975 else if (fmt[i] == 'E')
2976 for (j = 0; j < XVECLEN (x, i); j++)
2977 if (computed_jump_p_1 (XVECEXP (x, i, j)))
2978 return 1;
2981 return 0;
2984 /* Return nonzero if INSN is an indirect jump (aka computed jump).
2986 Tablejumps and casesi insns are not considered indirect jumps;
2987 we can recognize them by a (use (label_ref)). */
2990 computed_jump_p (const rtx_insn *insn)
2992 int i;
2993 if (JUMP_P (insn))
2995 rtx pat = PATTERN (insn);
2997 /* If we have a JUMP_LABEL set, we're not a computed jump. */
2998 if (JUMP_LABEL (insn) != NULL)
2999 return 0;
3001 if (GET_CODE (pat) == PARALLEL)
3003 int len = XVECLEN (pat, 0);
3004 int has_use_labelref = 0;
3006 for (i = len - 1; i >= 0; i--)
3007 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3008 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3009 == LABEL_REF))
3011 has_use_labelref = 1;
3012 break;
3015 if (! has_use_labelref)
3016 for (i = len - 1; i >= 0; i--)
3017 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3018 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3019 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3020 return 1;
3022 else if (GET_CODE (pat) == SET
3023 && SET_DEST (pat) == pc_rtx
3024 && computed_jump_p_1 (SET_SRC (pat)))
3025 return 1;
3027 return 0;
3032 /* MEM has a PRE/POST-INC/DEC/MODIFY address X. Extract the operands of
3033 the equivalent add insn and pass the result to FN, using DATA as the
3034 final argument. */
3036 static int
3037 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3039 rtx x = XEXP (mem, 0);
3040 switch (GET_CODE (x))
3042 case PRE_INC:
3043 case POST_INC:
3045 int size = GET_MODE_SIZE (GET_MODE (mem));
3046 rtx r1 = XEXP (x, 0);
3047 rtx c = gen_int_mode (size, GET_MODE (r1));
3048 return fn (mem, x, r1, r1, c, data);
3051 case PRE_DEC:
3052 case POST_DEC:
3054 int size = GET_MODE_SIZE (GET_MODE (mem));
3055 rtx r1 = XEXP (x, 0);
3056 rtx c = gen_int_mode (-size, GET_MODE (r1));
3057 return fn (mem, x, r1, r1, c, data);
3060 case PRE_MODIFY:
3061 case POST_MODIFY:
3063 rtx r1 = XEXP (x, 0);
3064 rtx add = XEXP (x, 1);
3065 return fn (mem, x, r1, add, NULL, data);
3068 default:
3069 gcc_unreachable ();
3073 /* Traverse *LOC looking for MEMs that have autoinc addresses.
3074 For each such autoinc operation found, call FN, passing it
3075 the innermost enclosing MEM, the operation itself, the RTX modified
3076 by the operation, two RTXs (the second may be NULL) that, once
3077 added, represent the value to be held by the modified RTX
3078 afterwards, and DATA. FN is to return 0 to continue the
3079 traversal or any other value to have it returned to the caller of
3080 for_each_inc_dec. */
3083 for_each_inc_dec (rtx x,
3084 for_each_inc_dec_fn fn,
3085 void *data)
3087 subrtx_var_iterator::array_type array;
3088 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3090 rtx mem = *iter;
3091 if (mem
3092 && MEM_P (mem)
3093 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3095 int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3096 if (res != 0)
3097 return res;
3098 iter.skip_subrtxes ();
3101 return 0;
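/* Illustrative sketch (not part of the original source): a minimal
   for_each_inc_dec_fn callback that merely counts the autoinc operations
   found, with DATA pointing to an int counter.  The parameter order
   follows the calls made in for_each_inc_dec_find_inc_dec above;
   returning zero continues the traversal.

     static int
     count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                    rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
                    rtx srcoff ATTRIBUTE_UNUSED, void *data)
     {
       ++*(int *) data;
       return 0;
     }

     int n = 0;
     for_each_inc_dec (PATTERN (insn), count_autoinc, &n);  */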
3105 /* Searches X for any reference to REGNO, returning the rtx of the
3106 reference found if any. Otherwise, returns NULL_RTX. */
3109 regno_use_in (unsigned int regno, rtx x)
3111 const char *fmt;
3112 int i, j;
3113 rtx tem;
3115 if (REG_P (x) && REGNO (x) == regno)
3116 return x;
3118 fmt = GET_RTX_FORMAT (GET_CODE (x));
3119 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3121 if (fmt[i] == 'e')
3123 if ((tem = regno_use_in (regno, XEXP (x, i))))
3124 return tem;
3126 else if (fmt[i] == 'E')
3127 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3128 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3129 return tem;
3132 return NULL_RTX;
3135 /* Return a value indicating whether OP, an operand of a commutative
3136 operation, is preferred as the first or second operand. The higher
3137 the value, the stronger the preference for being the first operand.
3138 We use negative values to indicate a preference for the second operand
3139 and positive values for the first operand. */
3142 commutative_operand_precedence (rtx op)
3144 enum rtx_code code = GET_CODE (op);
3146 /* Constants always become the second operand. Prefer "nice" constants. */
3147 if (code == CONST_INT)
3148 return -8;
3149 if (code == CONST_WIDE_INT)
3150 return -8;
3151 if (code == CONST_DOUBLE)
3152 return -7;
3153 if (code == CONST_FIXED)
3154 return -7;
3155 op = avoid_constant_pool_reference (op);
3156 code = GET_CODE (op);
3158 switch (GET_RTX_CLASS (code))
3160 case RTX_CONST_OBJ:
3161 if (code == CONST_INT)
3162 return -6;
3163 if (code == CONST_WIDE_INT)
3164 return -6;
3165 if (code == CONST_DOUBLE)
3166 return -5;
3167 if (code == CONST_FIXED)
3168 return -5;
3169 return -4;
3171 case RTX_EXTRA:
3172 /* SUBREGs of objects should come second. */
3173 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3174 return -3;
3175 return 0;
3177 case RTX_OBJ:
3178 /* Complex expressions should come first, so decrease the priority
3179 of objects. Prefer pointer objects over non-pointer objects. */
3180 if ((REG_P (op) && REG_POINTER (op))
3181 || (MEM_P (op) && MEM_POINTER (op)))
3182 return -1;
3183 return -2;
3185 case RTX_COMM_ARITH:
3186 /* Prefer operands that are themselves commutative to be first.
3187 This helps to make things linear. In particular,
3188 (and (and (reg) (reg)) (not (reg))) is canonical. */
3189 return 4;
3191 case RTX_BIN_ARITH:
3192 /* If only one operand is a binary expression, it will be the first
3193 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3194 is canonical, although it will usually be further simplified. */
3195 return 2;
3197 case RTX_UNARY:
3198 /* Then prefer NEG and NOT. */
3199 if (code == NEG || code == NOT)
3200 return 1;
3202 default:
3203 return 0;
3207 /* Return 1 iff it is necessary to swap the operands of a commutative operation
3208 in order to canonicalize the expression. */
3210 bool
3211 swap_commutative_operands_p (rtx x, rtx y)
3213 return (commutative_operand_precedence (x)
3214 < commutative_operand_precedence (y));
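/* Illustrative note (not part of the original source): with the
   precedences above, swap_commutative_operands_p returns true for a
   CONST_INT first operand and a REG second operand, so canonicalizing
   e.g. (plus (const_int 4) (reg)) puts the register first and the
   constant second, matching "Constants always become the second
   operand" in commutative_operand_precedence.  */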
3217 /* Return 1 if X is an autoincrement side effect and the register is
3218 not the stack pointer. */
3220 auto_inc_p (const_rtx x)
3222 switch (GET_CODE (x))
3224 case PRE_INC:
3225 case POST_INC:
3226 case PRE_DEC:
3227 case POST_DEC:
3228 case PRE_MODIFY:
3229 case POST_MODIFY:
3230 /* There are no REG_INC notes for SP. */
3231 if (XEXP (x, 0) != stack_pointer_rtx)
3232 return 1;
3233 default:
3234 break;
3236 return 0;
3239 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3241 loc_mentioned_in_p (rtx *loc, const_rtx in)
3243 enum rtx_code code;
3244 const char *fmt;
3245 int i, j;
3247 if (!in)
3248 return 0;
3250 code = GET_CODE (in);
3251 fmt = GET_RTX_FORMAT (code);
3252 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3254 if (fmt[i] == 'e')
3256 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3257 return 1;
3259 else if (fmt[i] == 'E')
3260 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3261 if (loc == &XVECEXP (in, i, j)
3262 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3263 return 1;
3265 return 0;
3268 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3269 and SUBREG_BYTE, return the bit offset where the subreg begins
3270 (counting from the least significant bit of the operand). */
3272 unsigned int
3273 subreg_lsb_1 (machine_mode outer_mode,
3274 machine_mode inner_mode,
3275 unsigned int subreg_byte)
3277 unsigned int bitpos;
3278 unsigned int byte;
3279 unsigned int word;
3281 /* A paradoxical subreg begins at bit position 0. */
3282 if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
3283 return 0;
3285 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3286 /* If the subreg crosses a word boundary, ensure that
3287 it also begins and ends on a word boundary. */
3288 gcc_assert (!((subreg_byte % UNITS_PER_WORD
3289 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3290 && (subreg_byte % UNITS_PER_WORD
3291 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3293 if (WORDS_BIG_ENDIAN)
3294 word = (GET_MODE_SIZE (inner_mode)
3295 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3296 else
3297 word = subreg_byte / UNITS_PER_WORD;
3298 bitpos = word * BITS_PER_WORD;
3300 if (BYTES_BIG_ENDIAN)
3301 byte = (GET_MODE_SIZE (inner_mode)
3302 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3303 else
3304 byte = subreg_byte % UNITS_PER_WORD;
3305 bitpos += byte * BITS_PER_UNIT;
3307 return bitpos;
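/* Worked example (not part of the original source), assuming a
   little-endian target with 4-byte words: for (subreg:SI (reg:DI R) 4),
   subreg_lsb_1 computes word = 4 / UNITS_PER_WORD = 1 and
   byte = 4 % UNITS_PER_WORD = 0, so the subreg starts at bit 32,
   i.e. the high half of the DImode value.  */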
3310 /* Given a subreg X, return the bit offset where the subreg begins
3311 (counting from the least significant bit of the reg). */
3313 unsigned int
3314 subreg_lsb (const_rtx x)
3316 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3317 SUBREG_BYTE (x));
3320 /* Fill in information about a subreg of a hard register.
3321 xregno - A regno of an inner hard subreg_reg (or what will become one).
3322 xmode - The mode of xregno.
3323 offset - The byte offset.
3324 ymode - The mode of a top level SUBREG (or what may become one).
3325 info - Pointer to structure to fill in.
3327 Rather than considering one particular inner register (and thus one
3328 particular "outer" register) in isolation, this function really uses
3329 XREGNO as a model for a sequence of isomorphic hard registers. Thus the
3330 function does not check whether adding INFO->offset to XREGNO gives
3331 a valid hard register; even if INFO->offset + XREGNO is out of range,
3332 there might be another register of the same type that is in range.
3333 Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
3334 register, since that can depend on things like whether the final
3335 register number is even or odd. Callers that want to check whether
3336 this particular subreg can be replaced by a simple (reg ...) should
3337 use simplify_subreg_regno. */
3339 void
3340 subreg_get_info (unsigned int xregno, machine_mode xmode,
3341 unsigned int offset, machine_mode ymode,
3342 struct subreg_info *info)
3344 int nregs_xmode, nregs_ymode;
3345 int mode_multiple, nregs_multiple;
3346 int offset_adj, y_offset, y_offset_adj;
3347 int regsize_xmode, regsize_ymode;
3348 bool rknown;
3350 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3352 rknown = false;
3354 /* If there are holes in a non-scalar mode in registers, we expect
3355 that it is made up of its units concatenated together. */
3356 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3358 machine_mode xmode_unit;
3360 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3361 if (GET_MODE_INNER (xmode) == VOIDmode)
3362 xmode_unit = xmode;
3363 else
3364 xmode_unit = GET_MODE_INNER (xmode);
3365 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3366 gcc_assert (nregs_xmode
3367 == (GET_MODE_NUNITS (xmode)
3368 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3369 gcc_assert (hard_regno_nregs[xregno][xmode]
3370 == (hard_regno_nregs[xregno][xmode_unit]
3371 * GET_MODE_NUNITS (xmode)));
3373 /* You can only ask for a SUBREG of a value with holes in the middle
3374 if you don't cross the holes. (Such a SUBREG should be done by
3375 picking a different register class, or doing it in memory if
3376 necessary.) An example of a value with holes is XCmode on 32-bit
3377 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3378 3 for each part, but in memory it's two 128-bit parts.
3379 Padding is assumed to be at the end (not necessarily the 'high part')
3380 of each unit. */
3381 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3382 < GET_MODE_NUNITS (xmode))
3383 && (offset / GET_MODE_SIZE (xmode_unit)
3384 != ((offset + GET_MODE_SIZE (ymode) - 1)
3385 / GET_MODE_SIZE (xmode_unit))))
3387 info->representable_p = false;
3388 rknown = true;
3391 else
3392 nregs_xmode = hard_regno_nregs[xregno][xmode];
3394 nregs_ymode = hard_regno_nregs[xregno][ymode];
3396 /* Paradoxical subregs are otherwise valid. */
3397 if (!rknown
3398 && offset == 0
3399 && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
3401 info->representable_p = true;
3402 /* If this is a big endian paradoxical subreg, which uses more
3403 actual hard registers than the original register, we must
3404 return a negative offset so that we find the proper highpart
3405 of the register. */
3406 if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3407 ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
3408 info->offset = nregs_xmode - nregs_ymode;
3409 else
3410 info->offset = 0;
3411 info->nregs = nregs_ymode;
3412 return;
3415 /* If registers store different numbers of bits in the different
3416 modes, we cannot generally form this subreg. */
3417 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3418 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3419 && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
3420 && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
3422 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3423 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3424 if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
3426 info->representable_p = false;
3427 info->nregs
3428 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3429 info->offset = offset / regsize_xmode;
3430 return;
3432 if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
3434 info->representable_p = false;
3435 info->nregs
3436 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3437 info->offset = offset / regsize_xmode;
3438 return;
3440 /* Quick exit for the simple and common case of extracting whole
3441 subregisters from a multiregister value. */
3442 /* ??? It would be better to integrate this into the code below,
3443 if we can generalize the concept enough and figure out how
3444 odd-sized modes can coexist with the other weird cases we support. */
3445 if (!rknown
3446 && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
3447 && regsize_xmode == regsize_ymode
3448 && (offset % regsize_ymode) == 0)
3450 info->representable_p = true;
3451 info->nregs = nregs_ymode;
3452 info->offset = offset / regsize_ymode;
3453 gcc_assert (info->offset + info->nregs <= nregs_xmode);
3454 return;
3458 /* Lowpart subregs are otherwise valid. */
3459 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3461 info->representable_p = true;
3462 rknown = true;
3464 if (offset == 0 || nregs_xmode == nregs_ymode)
3466 info->offset = 0;
3467 info->nregs = nregs_ymode;
3468 return;
3472 /* This should always pass, otherwise we don't know how to verify
3473 the constraint. These conditions may be relaxed but
3474 subreg_regno_offset would need to be redesigned. */
3475 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3476 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3478 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
3479 && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
3481 HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
3482 HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
3483 HOST_WIDE_INT off_low = offset & (ysize - 1);
3484 HOST_WIDE_INT off_high = offset & ~(ysize - 1);
3485 offset = (xsize - ysize - off_high) | off_low;
3487 /* The XMODE value can be seen as a vector of NREGS_XMODE
3488 values. The subreg must represent a lowpart of given field.
3489 Compute what field it is. */
3490 offset_adj = offset;
3491 offset_adj -= subreg_lowpart_offset (ymode,
3492 mode_for_size (GET_MODE_BITSIZE (xmode)
3493 / nregs_xmode,
3494 MODE_INT, 0));
3496 /* Size of ymode must not be greater than the size of xmode. */
3497 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3498 gcc_assert (mode_multiple != 0);
3500 y_offset = offset / GET_MODE_SIZE (ymode);
3501 y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
3502 nregs_multiple = nregs_xmode / nregs_ymode;
3504 gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
3505 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3507 if (!rknown)
3509 info->representable_p = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
3510 rknown = true;
3512 info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
3513 info->nregs = nregs_ymode;
3516 /* This function returns the regno offset of a subreg expression.
3517 xregno - A regno of an inner hard subreg_reg (or what will become one).
3518 xmode - The mode of xregno.
3519 offset - The byte offset.
3520 ymode - The mode of a top level SUBREG (or what may become one).
3521 RETURN - The regno offset which would be used. */
3522 unsigned int
3523 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
3524 unsigned int offset, machine_mode ymode)
3526 struct subreg_info info;
3527 subreg_get_info (xregno, xmode, offset, ymode, &info);
3528 return info.offset;
3531 /* This function returns true when the offset is representable via
3532 subreg_offset in the given regno.
3533 xregno - A regno of an inner hard subreg_reg (or what will become one).
3534 xmode - The mode of xregno.
3535 offset - The byte offset.
3536 ymode - The mode of a top level SUBREG (or what may become one).
3537 RETURN - Whether the offset is representable. */
3538 bool
3539 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
3540 unsigned int offset, machine_mode ymode)
3542 struct subreg_info info;
3543 subreg_get_info (xregno, xmode, offset, ymode, &info);
3544 return info.representable_p;
3547 /* Return the number of a YMODE register to which
3549 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3551 can be simplified. Return -1 if the subreg can't be simplified.
3553 XREGNO is a hard register number. */
3556 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
3557 unsigned int offset, machine_mode ymode)
3559 struct subreg_info info;
3560 unsigned int yregno;
3562 #ifdef CANNOT_CHANGE_MODE_CLASS
3563 /* Give the backend a chance to disallow the mode change. */
3564 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3565 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3566 && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
3567 /* We can use mode change in LRA for some transformations. */
3568 && ! lra_in_progress)
3569 return -1;
3570 #endif
3572 /* We shouldn't simplify stack-related registers. */
3573 if ((!reload_completed || frame_pointer_needed)
3574 && xregno == FRAME_POINTER_REGNUM)
3575 return -1;
3577 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3578 && xregno == ARG_POINTER_REGNUM)
3579 return -1;
3581 if (xregno == STACK_POINTER_REGNUM
3582 /* We should convert the hard stack register in LRA if
3583 possible. */
3584 && ! lra_in_progress)
3585 return -1;
3587 /* Try to get the register offset. */
3588 subreg_get_info (xregno, xmode, offset, ymode, &info);
3589 if (!info.representable_p)
3590 return -1;
3592 /* Make sure that the offsetted register value is in range. */
3593 yregno = xregno + info.offset;
3594 if (!HARD_REGISTER_NUM_P (yregno))
3595 return -1;
3597 /* See whether (reg:YMODE YREGNO) is valid.
3599 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3600 This is a kludge to work around how complex FP arguments are passed
3601 on IA-64 and should be fixed. See PR target/49226. */
3602 if (!HARD_REGNO_MODE_OK (yregno, ymode)
3603 && HARD_REGNO_MODE_OK (xregno, xmode))
3604 return -1;
3606 return (int) yregno;
3609 /* Return the final regno that a subreg expression refers to. */
3610 unsigned int
3611 subreg_regno (const_rtx x)
3613 unsigned int ret;
3614 rtx subreg = SUBREG_REG (x);
3615 int regno = REGNO (subreg);
3617 ret = regno + subreg_regno_offset (regno,
3618 GET_MODE (subreg),
3619 SUBREG_BYTE (x),
3620 GET_MODE (x));
3621 return ret;
3625 /* Return the number of registers that a subreg expression refers
3626 to. */
3627 unsigned int
3628 subreg_nregs (const_rtx x)
3630 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3633 /* Return the number of registers that subreg expression X, whose inner
3634 register has number REGNO, refers to. This is a copy of rtlanal.c:subreg_nregs
3635 changed so that the regno can be passed in. */
3637 unsigned int
3638 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3640 struct subreg_info info;
3641 rtx subreg = SUBREG_REG (x);
3643 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3644 &info);
3645 return info.nregs;
3649 struct parms_set_data
3651 int nregs;
3652 HARD_REG_SET regs;
3655 /* Helper function for noticing stores to parameter registers. */
3656 static void
3657 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3659 struct parms_set_data *const d = (struct parms_set_data *) data;
3660 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3661 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3663 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3664 d->nregs--;
3668 /* Look backward for the first parameter to be loaded.
3669 Note that loads of all parameters will not necessarily be
3670 found if CSE has eliminated some of them (e.g., an argument
3671 to the outer function is passed down as a parameter).
3672 Do not skip BOUNDARY. */
3673 rtx_insn *
3674 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
3676 struct parms_set_data parm;
3677 rtx p;
3678 rtx_insn *before, *first_set;
3680 /* Since different machines initialize their parameter registers
3681 in different orders, assume nothing. Collect the set of all
3682 parameter registers. */
3683 CLEAR_HARD_REG_SET (parm.regs);
3684 parm.nregs = 0;
3685 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3686 if (GET_CODE (XEXP (p, 0)) == USE
3687 && REG_P (XEXP (XEXP (p, 0), 0)))
3689 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3691 /* We only care about registers which can hold function
3692 arguments. */
3693 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3694 continue;
3696 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3697 parm.nregs++;
3699 before = call_insn;
3700 first_set = call_insn;
3702 /* Search backward for the first set of a register in this set. */
3703 while (parm.nregs && before != boundary)
3705 before = PREV_INSN (before);
3707 /* It is possible that some loads got CSEed from one call to
3708 another. Stop in that case. */
3709 if (CALL_P (before))
3710 break;
3712 /* Our caller must either ensure that we will find all sets
3713 (in case the code has not been optimized yet), or take care
3714 of possible labels by setting BOUNDARY to a preceding
3715 CODE_LABEL. */
3716 if (LABEL_P (before))
3718 gcc_assert (before == boundary);
3719 break;
3722 if (INSN_P (before))
3724 int nregs_old = parm.nregs;
3725 note_stores (PATTERN (before), parms_set, &parm);
3726 /* If we found something that did not set a parameter reg,
3727 we're done. Do not keep going, as that might result
3728 in hoisting an insn before the setting of a pseudo
3729 that is used by the hoisted insn. */
3730 if (nregs_old != parm.nregs)
3731 first_set = before;
3732 else
3733 break;
3736 return first_set;
3739 /* Return true if we should avoid inserting code between INSN and the preceding
3740 call instruction. */
3742 bool
3743 keep_with_call_p (const rtx_insn *insn)
3745 rtx set;
3747 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3749 if (REG_P (SET_DEST (set))
3750 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3751 && fixed_regs[REGNO (SET_DEST (set))]
3752 && general_operand (SET_SRC (set), VOIDmode))
3753 return true;
3754 if (REG_P (SET_SRC (set))
3755 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
3756 && REG_P (SET_DEST (set))
3757 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3758 return true;
3759 /* There may be a stack pop just after the call and before the store
3760 of the return register. Search for the actual store when deciding
3761 if we can break or not. */
3762 if (SET_DEST (set) == stack_pointer_rtx)
3764 /* This CONST_CAST is okay because next_nonnote_insn just
3765 returns its argument and we assign it to a const_rtx
3766 variable. */
3767 const rtx_insn *i2
3768 = next_nonnote_insn (const_cast<rtx_insn *> (insn));
3769 if (i2 && keep_with_call_p (i2))
3770 return true;
3773 return false;
3776 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3777 to non-complex jumps. That is, direct unconditional, conditional,
3778 and tablejumps, but not computed jumps or returns. It also does
3779 not apply to the fallthru case of a conditional jump. */
3781 bool
3782 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
3784 rtx tmp = JUMP_LABEL (jump_insn);
3785 rtx_jump_table_data *table;
3787 if (label == tmp)
3788 return true;
3790 if (tablejump_p (jump_insn, NULL, &table))
3792 rtvec vec = table->get_labels ();
3793 int i, veclen = GET_NUM_ELEM (vec);
3795 for (i = 0; i < veclen; ++i)
3796 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
3797 return true;
3800 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
3801 return true;
3803 return false;
3807 /* Return an estimate of the cost of computing rtx X.
3808 One use is in cse, to decide which expression to keep in the hash table.
3809 Another is in rtl generation, to pick the cheapest way to multiply.
3810 Other uses like the latter are expected in the future.
3812 X appears as operand OPNO in an expression with code OUTER_CODE.
3813 SPEED specifies whether costs optimized for speed or size should
3814 be returned. */
3817 rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
3819 int i, j;
3820 enum rtx_code code;
3821 const char *fmt;
3822 int total;
3823 int factor;
3825 if (x == 0)
3826 return 0;
3828 /* A size N times larger than UNITS_PER_WORD likely needs N times as
3829 many insns, taking N times as long. */
3830 factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
3831 if (factor == 0)
3832 factor = 1;
3834 /* Compute the default costs of certain things.
3835 Note that targetm.rtx_costs can override the defaults. */
3837 code = GET_CODE (x);
3838 switch (code)
3840 case MULT:
3841 /* Multiplication has time-complexity O(N*N), where N is the
3842 number of units (translated from digits) when using
3843 schoolbook long multiplication. */
3844 total = factor * factor * COSTS_N_INSNS (5);
3845 break;
3846 case DIV:
3847 case UDIV:
3848 case MOD:
3849 case UMOD:
3850 /* Similarly, complexity for schoolbook long division. */
3851 total = factor * factor * COSTS_N_INSNS (7);
3852 break;
3853 case USE:
3854 /* Used in combine.c as a marker. */
3855 total = 0;
3856 break;
3857 case SET:
3858 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
3859 the mode for the factor. */
3860 factor = GET_MODE_SIZE (GET_MODE (SET_DEST (x))) / UNITS_PER_WORD;
3861 if (factor == 0)
3862 factor = 1;
3863 /* Pass through. */
3864 default:
3865 total = factor * COSTS_N_INSNS (1);
3868 switch (code)
3870 case REG:
3871 return 0;
3873 case SUBREG:
3874 total = 0;
3875 /* If we can't tie these modes, make this expensive. The larger
3876 the mode, the more expensive it is. */
3877 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
3878 return COSTS_N_INSNS (2 + factor);
3879 break;
3881 default:
3882 if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
3883 return total;
3884 break;
3887 /* Sum the costs of the sub-rtx's, plus cost of this operation,
3888 which is already in total. */
3890 fmt = GET_RTX_FORMAT (code);
3891 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3892 if (fmt[i] == 'e')
3893 total += rtx_cost (XEXP (x, i), code, i, speed);
3894 else if (fmt[i] == 'E')
3895 for (j = 0; j < XVECLEN (x, i); j++)
3896 total += rtx_cost (XVECEXP (x, i, j), code, i, speed);
3898 return total;
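/* Illustrative note (not part of the original source): with the defaults
   above and no target override, a word-sized MULT costs
   COSTS_N_INSNS (5) (factor 1), while a MULT whose mode is twice
   UNITS_PER_WORD gets factor 2 and hence 2 * 2 * COSTS_N_INSNS (5),
   reflecting the schoolbook O(N*N) estimate; the costs of the operands
   are then added by the recursion at the end of rtx_cost.  */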
3901 /* Fill in the structure C with information about both speed and size rtx
3902 costs for X, which is operand OPNO in an expression with code OUTER. */
3904 void
3905 get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
3906 struct full_rtx_costs *c)
3908 c->speed = rtx_cost (x, outer, opno, true);
3909 c->size = rtx_cost (x, outer, opno, false);
3913 /* Return the cost of address expression X.
3914 Expect that X is a properly formed address reference.
3916 The SPEED parameter specifies whether costs optimized for speed or size should
3917 be returned. */
3920 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
3922 /* We may be asked for the cost of various unusual addresses, such as operands
3923 of a push instruction. It is not worthwhile to complicate the
3924 target hook with such cases. */
3926 if (!memory_address_addr_space_p (mode, x, as))
3927 return 1000;
3929 return targetm.address_cost (x, mode, as, speed);
3932 /* If the target doesn't override, compute the cost as with arithmetic. */
3935 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
3937 return rtx_cost (x, MEM, 0, speed);
3941 unsigned HOST_WIDE_INT
3942 nonzero_bits (const_rtx x, machine_mode mode)
3944 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
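/* Illustrative note (not part of the original source): for
   x = (and:SI (reg:SI R) (const_int 255)), nonzero_bits (x, SImode) is
   at most 0xff, because the AND case of nonzero_bits1 below intersects
   the nonzero masks of both operands and the CONST_INT case contributes
   exactly UINTVAL (x).  */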
3947 unsigned int
3948 num_sign_bit_copies (const_rtx x, machine_mode mode)
3950 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
3953 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
3954 It avoids exponential behavior in nonzero_bits1 when X has
3955 identical subexpressions on the first or the second level. */
3957 static unsigned HOST_WIDE_INT
3958 cached_nonzero_bits (const_rtx x, machine_mode mode, const_rtx known_x,
3959 machine_mode known_mode,
3960 unsigned HOST_WIDE_INT known_ret)
3962 if (x == known_x && mode == known_mode)
3963 return known_ret;
3965 /* Try to find identical subexpressions. If found call
3966 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
3967 precomputed value for the subexpression as KNOWN_RET. */
3969 if (ARITHMETIC_P (x))
3971 rtx x0 = XEXP (x, 0);
3972 rtx x1 = XEXP (x, 1);
3974 /* Check the first level. */
3975 if (x0 == x1)
3976 return nonzero_bits1 (x, mode, x0, mode,
3977 cached_nonzero_bits (x0, mode, known_x,
3978 known_mode, known_ret));
3980 /* Check the second level. */
3981 if (ARITHMETIC_P (x0)
3982 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
3983 return nonzero_bits1 (x, mode, x1, mode,
3984 cached_nonzero_bits (x1, mode, known_x,
3985 known_mode, known_ret));
3987 if (ARITHMETIC_P (x1)
3988 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
3989 return nonzero_bits1 (x, mode, x0, mode,
3990 cached_nonzero_bits (x0, mode, known_x,
3991 known_mode, known_ret));
3994 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
3997 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
3998 We don't let nonzero_bits recur into num_sign_bit_copies, because that
3999 is less useful. We can't allow both, because that results in exponential
4000 run time recursion. There is a nullstone testcase that triggered
4001 this. This macro avoids accidental uses of num_sign_bit_copies. */
4002 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4004 /* Given an expression, X, compute which bits in X can be nonzero.
4005 We don't care about bits outside of those defined in MODE.
4007 For most X this is simply GET_MODE_MASK (MODE), but if X is
4008 an arithmetic operation, we can do better. */
4010 static unsigned HOST_WIDE_INT
4011 nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
4012 machine_mode known_mode,
4013 unsigned HOST_WIDE_INT known_ret)
4015 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4016 unsigned HOST_WIDE_INT inner_nz;
4017 enum rtx_code code;
4018 machine_mode inner_mode;
4019 unsigned int mode_width = GET_MODE_PRECISION (mode);
4021 /* For floating-point and vector values, assume all bits are needed. */
4022 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
4023 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4024 return nonzero;
4026 /* If X is wider than MODE, use its mode instead. */
4027 if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
4029 mode = GET_MODE (x);
4030 nonzero = GET_MODE_MASK (mode);
4031 mode_width = GET_MODE_PRECISION (mode);
4034 if (mode_width > HOST_BITS_PER_WIDE_INT)
4035 /* Our only callers in this case look for single bit values. So
4036 just return the mode mask. Those tests will then be false. */
4037 return nonzero;
4039 #ifndef WORD_REGISTER_OPERATIONS
4040 /* If MODE is wider than X, but both are a single word for both the host
4041 and target machines, we can compute this from which bits of the
4042 object might be nonzero in its own mode, taking into account the fact
4043 that on many CISC machines, accessing an object in a wider mode
4044 causes the high-order bits to become undefined. So they are
4045 not known to be zero. */
4047 if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
4048 && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
4049 && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
4050 && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
4052 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
4053 known_x, known_mode, known_ret);
4054 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
4055 return nonzero;
4057 #endif
4059 code = GET_CODE (x);
4060 switch (code)
4062 case REG:
4063 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4064 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4065 all the bits above ptr_mode are known to be zero. */
4066 /* As we do not know which address space the pointer is referring to,
4067 we can do this only if the target does not support different pointer
4068 or address modes depending on the address space. */
4069 if (target_default_pointer_address_modes_p ()
4070 && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4071 && REG_POINTER (x))
4072 nonzero &= GET_MODE_MASK (ptr_mode);
4073 #endif
4075 /* Include declared information about alignment of pointers. */
4076 /* ??? We don't properly preserve REG_POINTER changes across
4077 pointer-to-integer casts, so we can't trust it except for
4078 things that we know must be pointers. See execute/960116-1.c. */
4079 if ((x == stack_pointer_rtx
4080 || x == frame_pointer_rtx
4081 || x == arg_pointer_rtx)
4082 && REGNO_POINTER_ALIGN (REGNO (x)))
4084 unsigned HOST_WIDE_INT alignment
4085 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4087 #ifdef PUSH_ROUNDING
4088 /* If PUSH_ROUNDING is defined, it is possible for the
4089 stack to be momentarily aligned only to that amount,
4090 so we pick the least alignment. */
4091 if (x == stack_pointer_rtx && PUSH_ARGS)
4092 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
4093 alignment);
4094 #endif
4096 nonzero &= ~(alignment - 1);
4100 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4101 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
4102 known_mode, known_ret,
4103 &nonzero_for_hook);
4105 if (new_rtx)
4106 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4107 known_mode, known_ret);
4109 return nonzero_for_hook;
4112 case CONST_INT:
4113 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
4114 /* If X is negative in MODE, sign-extend the value. */
4115 if (INTVAL (x) > 0
4116 && mode_width < BITS_PER_WORD
4117 && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
4118 != 0)
4119 return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4120 #endif
4122 return UINTVAL (x);
4124 case MEM:
4125 #ifdef LOAD_EXTEND_OP
4126 /* In many, if not most, RISC machines, reading a byte from memory
4127 zeros the rest of the register. Noticing that fact saves a lot
4128 of extra zero-extends. */
4129 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
4130 nonzero &= GET_MODE_MASK (GET_MODE (x));
4131 #endif
4132 break;
4134 case EQ: case NE:
4135 case UNEQ: case LTGT:
4136 case GT: case GTU: case UNGT:
4137 case LT: case LTU: case UNLT:
4138 case GE: case GEU: case UNGE:
4139 case LE: case LEU: case UNLE:
4140 case UNORDERED: case ORDERED:
4141 /* If this produces an integer result, we know which bits are set.
4142 Code here used to clear bits outside the mode of X, but that is
4143 now done above. */
4144 /* Mind that MODE is the mode the caller wants to look at this
4145 operation in, and not the actual operation mode. We can wind
4146 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4147 that describes the results of a vector compare. */
4148 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
4149 && mode_width <= HOST_BITS_PER_WIDE_INT)
4150 nonzero = STORE_FLAG_VALUE;
4151 break;
4153 case NEG:
4154 #if 0
4155 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4156 and num_sign_bit_copies. */
4157 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4158 == GET_MODE_PRECISION (GET_MODE (x)))
4159 nonzero = 1;
4160 #endif
4162 if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
4163 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
4164 break;
4166 case ABS:
4167 #if 0
4168 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4169 and num_sign_bit_copies. */
4170 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4171 == GET_MODE_PRECISION (GET_MODE (x)))
4172 nonzero = 1;
4173 #endif
4174 break;
4176 case TRUNCATE:
4177 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4178 known_x, known_mode, known_ret)
4179 & GET_MODE_MASK (mode));
4180 break;
4182 case ZERO_EXTEND:
4183 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4184 known_x, known_mode, known_ret);
4185 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4186 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4187 break;
4189 case SIGN_EXTEND:
4190 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4191 Otherwise, show that all the bits in the outer mode but not in the
4192 inner mode may be nonzero. */
4193 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4194 known_x, known_mode, known_ret);
4195 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4197 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4198 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4199 inner_nz |= (GET_MODE_MASK (mode)
4200 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4203 nonzero &= inner_nz;
4204 break;
4206 case AND:
4207 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4208 known_x, known_mode, known_ret)
4209 & cached_nonzero_bits (XEXP (x, 1), mode,
4210 known_x, known_mode, known_ret);
4211 break;
4213 case XOR: case IOR:
4214 case UMIN: case UMAX: case SMIN: case SMAX:
4216 unsigned HOST_WIDE_INT nonzero0
4217 = cached_nonzero_bits (XEXP (x, 0), mode,
4218 known_x, known_mode, known_ret);
4220 /* Don't call nonzero_bits for the second time if it cannot change
4221 anything. */
4222 if ((nonzero & nonzero0) != nonzero)
4223 nonzero &= nonzero0
4224 | cached_nonzero_bits (XEXP (x, 1), mode,
4225 known_x, known_mode, known_ret);
4227 break;
4229 case PLUS: case MINUS:
4230 case MULT:
4231 case DIV: case UDIV:
4232 case MOD: case UMOD:
4233 /* We can apply the rules of arithmetic to compute the number of
4234 high- and low-order zero bits of these operations. We start by
4235 computing the width (position of the highest-order nonzero bit)
4236 and the number of low-order zero bits for each value. */
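/* Worked example: if nz0 == 0x0f (width0 == 4, low0 == 0) and
   nz1 == 0x06 (width1 == 3, low1 == 1), then PLUS gives
   result_width == MAX (4, 3) + 1 == 5 and result_low == 0, masking
   NONZERO down to 0x1f, while MULT gives result_width == 7 and
   result_low == 1, for a mask of 0x7e.  */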
4238 unsigned HOST_WIDE_INT nz0
4239 = cached_nonzero_bits (XEXP (x, 0), mode,
4240 known_x, known_mode, known_ret);
4241 unsigned HOST_WIDE_INT nz1
4242 = cached_nonzero_bits (XEXP (x, 1), mode,
4243 known_x, known_mode, known_ret);
4244 int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
4245 int width0 = floor_log2 (nz0) + 1;
4246 int width1 = floor_log2 (nz1) + 1;
4247 int low0 = floor_log2 (nz0 & -nz0);
4248 int low1 = floor_log2 (nz1 & -nz1);
4249 unsigned HOST_WIDE_INT op0_maybe_minusp
4250 = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4251 unsigned HOST_WIDE_INT op1_maybe_minusp
4252 = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4253 unsigned int result_width = mode_width;
4254 int result_low = 0;
4256 switch (code)
4258 case PLUS:
4259 result_width = MAX (width0, width1) + 1;
4260 result_low = MIN (low0, low1);
4261 break;
4262 case MINUS:
4263 result_low = MIN (low0, low1);
4264 break;
4265 case MULT:
4266 result_width = width0 + width1;
4267 result_low = low0 + low1;
4268 break;
4269 case DIV:
4270 if (width1 == 0)
4271 break;
4272 if (!op0_maybe_minusp && !op1_maybe_minusp)
4273 result_width = width0;
4274 break;
4275 case UDIV:
4276 if (width1 == 0)
4277 break;
4278 result_width = width0;
4279 break;
4280 case MOD:
4281 if (width1 == 0)
4282 break;
4283 if (!op0_maybe_minusp && !op1_maybe_minusp)
4284 result_width = MIN (width0, width1);
4285 result_low = MIN (low0, low1);
4286 break;
4287 case UMOD:
4288 if (width1 == 0)
4289 break;
4290 result_width = MIN (width0, width1);
4291 result_low = MIN (low0, low1);
4292 break;
4293 default:
4294 gcc_unreachable ();
4297 if (result_width < mode_width)
4298 nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;
4300 if (result_low > 0)
4301 nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
4303 break;
4305 case ZERO_EXTRACT:
4306 if (CONST_INT_P (XEXP (x, 1))
4307 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4308 nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
4309 break;
4311 case SUBREG:
4312 /* If this is a SUBREG formed for a promoted variable that has
4313 been zero-extended, we know that at least the high-order bits
4314 are zero, though others might be too. */
4316 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
4317 nonzero = GET_MODE_MASK (GET_MODE (x))
4318 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
4319 known_x, known_mode, known_ret);
4321 inner_mode = GET_MODE (SUBREG_REG (x));
4322 /* If the inner mode is a single word for both the host and target
4323 machines, we can compute this from which bits of the inner
4324 object might be nonzero. */
4325 if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
4326 && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
4328 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4329 known_x, known_mode, known_ret);
4331 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
4332 /* If this is a typical RISC machine, we only have to worry
4333 about the way loads are extended. */
4334 if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
4335 ? val_signbit_known_set_p (inner_mode, nonzero)
4336 : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
4337 || !MEM_P (SUBREG_REG (x)))
4338 #endif
4340 /* On many CISC machines, accessing an object in a wider mode
4341 causes the high-order bits to become undefined. So they are
4342 not known to be zero. */
4343 if (GET_MODE_PRECISION (GET_MODE (x))
4344 > GET_MODE_PRECISION (inner_mode))
4345 nonzero |= (GET_MODE_MASK (GET_MODE (x))
4346 & ~GET_MODE_MASK (inner_mode));
4349 break;
4351 case ASHIFTRT:
4352 case LSHIFTRT:
4353 case ASHIFT:
4354 case ROTATE:
4355 /* The nonzero bits are in two classes: any bits within MODE
4356 that aren't in GET_MODE (x) are always significant. The rest of the
4357 nonzero bits are those that are significant in the operand of
4358 the shift when shifted the appropriate number of bits. This
4359 shows that high-order bits are cleared by the right shift and
4360 low-order bits by left shifts. */
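/* For instance, if the operand's nonzero bits are 0x00ff, an LSHIFTRT
   by 4 leaves 0x000f possibly nonzero, while an ASHIFT by 4 leaves
   0x0ff0.  */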
4361 if (CONST_INT_P (XEXP (x, 1))
4362 && INTVAL (XEXP (x, 1)) >= 0
4363 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4364 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4366 machine_mode inner_mode = GET_MODE (x);
4367 unsigned int width = GET_MODE_PRECISION (inner_mode);
4368 int count = INTVAL (XEXP (x, 1));
4369 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
4370 unsigned HOST_WIDE_INT op_nonzero
4371 = cached_nonzero_bits (XEXP (x, 0), mode,
4372 known_x, known_mode, known_ret);
4373 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4374 unsigned HOST_WIDE_INT outer = 0;
4376 if (mode_width > width)
4377 outer = (op_nonzero & nonzero & ~mode_mask);
4379 if (code == LSHIFTRT)
4380 inner >>= count;
4381 else if (code == ASHIFTRT)
4383 inner >>= count;
4385 /* If the sign bit may have been nonzero before the shift, we
4386 need to mark all the places it could have been copied to
4387 by the shift as possibly nonzero. */
4388 if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
4389 inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
4390 << (width - count);
4392 else if (code == ASHIFT)
4393 inner <<= count;
4394 else
4395 inner = ((inner << (count % width)
4396 | (inner >> (width - (count % width)))) & mode_mask);
4398 nonzero &= (outer | inner);
4400 break;
4402 case FFS:
4403 case POPCOUNT:
4404 /* This is at most the number of bits in the mode. */
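/* E.g. for a 32-bit mode, floor_log2 (32) == 5 and the mask below is
   (2 << 5) - 1 == 0x3f, which covers every possible result 0 .. 32.  */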
4405 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4406 break;
4408 case CLZ:
4409 /* If CLZ has a known value at zero, then the nonzero bits are
4410 that value, plus the number of bits in the mode minus one. */
4411 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4412 nonzero
4413 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4414 else
4415 nonzero = -1;
4416 break;
4418 case CTZ:
4419 /* If CTZ has a known value at zero, then the nonzero bits are
4420 that value, plus the number of bits in the mode minus one. */
4421 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4422 nonzero
4423 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4424 else
4425 nonzero = -1;
4426 break;
4428 case CLRSB:
4429 /* This is at most the number of bits in the mode minus 1. */
4430 nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4431 break;
4433 case PARITY:
4434 nonzero = 1;
4435 break;
4437 case IF_THEN_ELSE:
4439 unsigned HOST_WIDE_INT nonzero_true
4440 = cached_nonzero_bits (XEXP (x, 1), mode,
4441 known_x, known_mode, known_ret);
4443 /* Don't call nonzero_bits for the second time if it cannot change
4444 anything. */
4445 if ((nonzero & nonzero_true) != nonzero)
4446 nonzero &= nonzero_true
4447 | cached_nonzero_bits (XEXP (x, 2), mode,
4448 known_x, known_mode, known_ret);
4450 break;
4452 default:
4453 break;
4456 return nonzero;
4459 /* See the macro definition above. */
4460 #undef cached_num_sign_bit_copies
4463 /* The function cached_num_sign_bit_copies is a wrapper around
4464 num_sign_bit_copies1. It avoids exponential behavior in
4465 num_sign_bit_copies1 when X has identical subexpressions on the
4466 first or the second level. */
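/* For example, for (plus (reg A) (reg A)) the value for (reg A) is
   computed once up front and passed to num_sign_bit_copies1 as
   KNOWN_X/KNOWN_RET, so the recursive walk answers both occurrences
   from that value instead of descending into (reg A) twice.  */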
4468 static unsigned int
4469 cached_num_sign_bit_copies (const_rtx x, machine_mode mode, const_rtx known_x,
4470 machine_mode known_mode,
4471 unsigned int known_ret)
4473 if (x == known_x && mode == known_mode)
4474 return known_ret;
4476 /* Try to find identical subexpressions. If found, call
4477 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4478 the precomputed value for the subexpression as KNOWN_RET. */
4480 if (ARITHMETIC_P (x))
4482 rtx x0 = XEXP (x, 0);
4483 rtx x1 = XEXP (x, 1);
4485 /* Check the first level. */
4486 if (x0 == x1)
4487 return
4488 num_sign_bit_copies1 (x, mode, x0, mode,
4489 cached_num_sign_bit_copies (x0, mode, known_x,
4490 known_mode,
4491 known_ret));
4493 /* Check the second level. */
4494 if (ARITHMETIC_P (x0)
4495 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4496 return
4497 num_sign_bit_copies1 (x, mode, x1, mode,
4498 cached_num_sign_bit_copies (x1, mode, known_x,
4499 known_mode,
4500 known_ret));
4502 if (ARITHMETIC_P (x1)
4503 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4504 return
4505 num_sign_bit_copies1 (x, mode, x0, mode,
4506 cached_num_sign_bit_copies (x0, mode, known_x,
4507 known_mode,
4508 known_ret));
4511 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4514 /* Return the number of bits at the high-order end of X that are known to
4515 be equal to the sign bit. X will be used in mode MODE; if MODE is
4516 VOIDmode, X will be used in its own mode. The returned value will always
4517 be between 1 and the number of bits in MODE. */
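/* For instance, in a 32-bit mode the constant -8 (0xfffffff8) has 29
   sign bit copies (bits 31 .. 3 all equal the sign bit), while the
   constant 1 has 31.  */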
4519 static unsigned int
4520 num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x,
4521 machine_mode known_mode,
4522 unsigned int known_ret)
4524 enum rtx_code code = GET_CODE (x);
4525 unsigned int bitwidth = GET_MODE_PRECISION (mode);
4526 int num0, num1, result;
4527 unsigned HOST_WIDE_INT nonzero;
4529 /* If we weren't given a mode, use the mode of X. If the mode is still
4530 VOIDmode, we don't know anything. Likewise if one of the modes is
4531 floating-point. */
4533 if (mode == VOIDmode)
4534 mode = GET_MODE (x);
4536 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
4537 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4538 return 1;
4540 /* For a smaller object, just ignore the high bits. */
4541 if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
4543 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4544 known_x, known_mode, known_ret);
4545 return MAX (1,
4546 num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
4549 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
4551 #ifndef WORD_REGISTER_OPERATIONS
4552 /* If this machine does not do all register operations on the entire
4553 register and MODE is wider than the mode of X, we can say nothing
4554 at all about the high-order bits. */
4555 return 1;
4556 #else
4557 /* Likewise on machines that do, if the mode of the object is smaller
4558 than a word and loads of that size don't sign extend, we can say
4559 nothing about the high order bits. */
4560 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
4561 #ifdef LOAD_EXTEND_OP
4562 && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
4563 #endif
4565 return 1;
4566 #endif
4569 switch (code)
4571 case REG:
4573 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4574 /* If pointers extend signed and this is a pointer in Pmode, say that
4575 all the bits above ptr_mode are known to be sign bit copies. */
4576 /* As we do not know which address space the pointer is referring to,
4577 we can do this only if the target does not support different pointer
4578 or address modes depending on the address space. */
4579 if (target_default_pointer_address_modes_p ()
4580 && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4581 && mode == Pmode && REG_POINTER (x))
4582 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
4583 #endif
4586 unsigned int copies_for_hook = 1, copies = 1;
4587 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4588 known_mode, known_ret,
4589 &copies_for_hook);
4591 if (new_rtx)
4592 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
4593 known_mode, known_ret);
4595 if (copies > 1 || copies_for_hook > 1)
4596 return MAX (copies, copies_for_hook);
4598 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4600 break;
4602 case MEM:
4603 #ifdef LOAD_EXTEND_OP
4604 /* Some RISC machines sign-extend all loads of smaller than a word. */
4605 if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
4606 return MAX (1, ((int) bitwidth
4607 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
4608 #endif
4609 break;
4611 case CONST_INT:
4612 /* If the constant is negative, take its 1's complement and remask.
4613 Then see how many zero bits we have. */
4614 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
4615 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4616 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4617 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4619 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4621 case SUBREG:
4622 /* If this is a SUBREG for a promoted object that is sign-extended
4623 and we are looking at it in a wider mode, we know that at least the
4624 high-order bits are sign bit copies. */
4626 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
4628 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4629 known_x, known_mode, known_ret);
4630 return MAX ((int) bitwidth
4631 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
4632 num0);
4635 /* For a smaller object, just ignore the high bits. */
4636 if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
4638 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4639 known_x, known_mode, known_ret);
4640 return MAX (1, (num0
4641 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
4642 - bitwidth)));
4645 #ifdef WORD_REGISTER_OPERATIONS
4646 #ifdef LOAD_EXTEND_OP
4647 /* For paradoxical SUBREGs on machines where all register operations
4648 affect the entire register, just look inside. Note that we are
4649 passing MODE to the recursive call, so the number of sign bit copies
4650 will remain relative to that mode, not the inner mode. */
4652 /* This works only if loads sign extend. Otherwise, if we get a
4653 reload for the inner part, it may be loaded from the stack, and
4654 then we lose all sign bit copies that existed before the store
4655 to the stack. */
4657 if (paradoxical_subreg_p (x)
4658 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
4659 && MEM_P (SUBREG_REG (x)))
4660 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4661 known_x, known_mode, known_ret);
4662 #endif
4663 #endif
4664 break;
4666 case SIGN_EXTRACT:
4667 if (CONST_INT_P (XEXP (x, 1)))
4668 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4669 break;
4671 case SIGN_EXTEND:
4672 return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4673 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4674 known_x, known_mode, known_ret));
4676 case TRUNCATE:
4677 /* For a smaller object, just ignore the high bits. */
4678 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4679 known_x, known_mode, known_ret);
4680 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4681 - bitwidth)));
4683 case NOT:
4684 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4685 known_x, known_mode, known_ret);
4687 case ROTATE: case ROTATERT:
4688 /* If we are rotating left by a number of bits less than the number
4689 of sign bit copies, we can just subtract that amount from the
4690 number. */
4691 if (CONST_INT_P (XEXP (x, 1))
4692 && INTVAL (XEXP (x, 1)) >= 0
4693 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4695 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4696 known_x, known_mode, known_ret);
4697 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
4698 : (int) bitwidth - INTVAL (XEXP (x, 1))));
4700 break;
4702 case NEG:
4703 /* In general, this subtracts one sign bit copy. But if the value
4704 is known to be positive, the number of sign bit copies is the
4705 same as that of the input. Finally, if the input has just one bit
4706 that might be nonzero, all the bits are copies of the sign bit. */
4707 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4708 known_x, known_mode, known_ret);
4709 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4710 return num0 > 1 ? num0 - 1 : 1;
4712 nonzero = nonzero_bits (XEXP (x, 0), mode);
4713 if (nonzero == 1)
4714 return bitwidth;
4716 if (num0 > 1
4717 && (((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
4718 num0--;
4720 return num0;
4722 case IOR: case AND: case XOR:
4723 case SMIN: case SMAX: case UMIN: case UMAX:
4724 /* Logical operations will preserve the number of sign-bit copies.
4725 MIN and MAX operations always return one of the operands. */
4726 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4727 known_x, known_mode, known_ret);
4728 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4729 known_x, known_mode, known_ret);
4731 /* If num1 is clearing some of the top bits then regardless of
4732 the other term, we are guaranteed to have at least that many
4733 high-order zero bits. */
4734 if (code == AND
4735 && num1 > 1
4736 && bitwidth <= HOST_BITS_PER_WIDE_INT
4737 && CONST_INT_P (XEXP (x, 1))
4738 && (UINTVAL (XEXP (x, 1))
4739 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0)
4740 return num1;
4742 /* Similarly for IOR when setting high-order bits. */
4743 if (code == IOR
4744 && num1 > 1
4745 && bitwidth <= HOST_BITS_PER_WIDE_INT
4746 && CONST_INT_P (XEXP (x, 1))
4747 && (UINTVAL (XEXP (x, 1))
4748 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4749 return num1;
4751 return MIN (num0, num1);
4753 case PLUS: case MINUS:
4754 /* For addition and subtraction, we can have a 1-bit carry. However,
4755 if we are subtracting 1 from a positive number, there will not
4756 be such a carry. Furthermore, if the positive number is known to
4757 be 0 or 1, we know the result is either -1 or 0. */
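/* Example: if X in (plus X (const_int -1)) is known to be 0 or 1
   (nonzero_bits == 1), then X - 1 is either -1 or 0 and every bit is a
   copy of the sign bit, so BITWIDTH is returned below.  */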
4759 if (code == PLUS && XEXP (x, 1) == constm1_rtx
4760 && bitwidth <= HOST_BITS_PER_WIDE_INT)
4762 nonzero = nonzero_bits (XEXP (x, 0), mode);
4763 if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
4764 return (nonzero == 1 || nonzero == 0 ? bitwidth
4765 : bitwidth - floor_log2 (nonzero) - 1);
4768 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4769 known_x, known_mode, known_ret);
4770 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4771 known_x, known_mode, known_ret);
4772 result = MAX (1, MIN (num0, num1) - 1);
4774 return result;
4776 case MULT:
4777 /* The number of bits of the product is the sum of the number of
4778 bits of both terms. However, unless one of the terms is known
4779 to be positive, we must allow for an additional bit since negating
4780 a negative number can remove one sign bit copy. */
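/* Illustration: with BITWIDTH == 32 and 20 sign bit copies in each
   operand, each operand needs at most 32 - 20 == 12 value bits, so the
   product needs at most 24 and keeps 32 - 24 == 8 copies; one more is
   subtracted below when both operands may be negative.  */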
4782 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4783 known_x, known_mode, known_ret);
4784 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4785 known_x, known_mode, known_ret);
4787 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
4788 if (result > 0
4789 && (bitwidth > HOST_BITS_PER_WIDE_INT
4790 || (((nonzero_bits (XEXP (x, 0), mode)
4791 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4792 && ((nonzero_bits (XEXP (x, 1), mode)
4793 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)))
4794 != 0))))
4795 result--;
4797 return MAX (1, result);
4799 case UDIV:
4800 /* The result must be <= the first operand. If the first operand
4801 has the high bit set, we know nothing about the number of sign
4802 bit copies. */
4803 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4804 return 1;
4805 else if ((nonzero_bits (XEXP (x, 0), mode)
4806 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4807 return 1;
4808 else
4809 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4810 known_x, known_mode, known_ret);
4812 case UMOD:
4813 /* The result must be <= the second operand. If the second operand
4814 has (or just might have) the high bit set, we know nothing about
4815 the number of sign bit copies. */
4816 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4817 return 1;
4818 else if ((nonzero_bits (XEXP (x, 1), mode)
4819 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4820 return 1;
4821 else
4822 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
4823 known_x, known_mode, known_ret);
4825 case DIV:
4826 /* Similar to unsigned division, except that we have to worry about
4827 the case where the divisor is negative, in which case we have
4828 to add 1. */
4829 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4830 known_x, known_mode, known_ret);
4831 if (result > 1
4832 && (bitwidth > HOST_BITS_PER_WIDE_INT
4833 || (nonzero_bits (XEXP (x, 1), mode)
4834 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4835 result--;
4837 return result;
4839 case MOD:
4840 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4841 known_x, known_mode, known_ret);
4842 if (result > 1
4843 && (bitwidth > HOST_BITS_PER_WIDE_INT
4844 || (nonzero_bits (XEXP (x, 1), mode)
4845 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4846 result--;
4848 return result;
4850 case ASHIFTRT:
4851 /* Shifts by a constant add to the number of bits equal to the
4852 sign bit. */
4853 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4854 known_x, known_mode, known_ret);
4855 if (CONST_INT_P (XEXP (x, 1))
4856 && INTVAL (XEXP (x, 1)) > 0
4857 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4858 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
4860 return num0;
4862 case ASHIFT:
4863 /* Left shifts destroy copies. */
4864 if (!CONST_INT_P (XEXP (x, 1))
4865 || INTVAL (XEXP (x, 1)) < 0
4866 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
4867 || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
4868 return 1;
4870 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4871 known_x, known_mode, known_ret);
4872 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
4874 case IF_THEN_ELSE:
4875 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4876 known_x, known_mode, known_ret);
4877 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
4878 known_x, known_mode, known_ret);
4879 return MIN (num0, num1);
4881 case EQ: case NE: case GE: case GT: case LE: case LT:
4882 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
4883 case GEU: case GTU: case LEU: case LTU:
4884 case UNORDERED: case ORDERED:
4885 /* Treat STORE_FLAG_VALUE like a constant: if it is negative, take its
4886 1's complement and remask. Then see how many zero bits we have. */
4887 nonzero = STORE_FLAG_VALUE;
4888 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4889 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4890 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4892 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4894 default:
4895 break;
4898 /* If we haven't been able to figure it out by one of the above rules,
4899 see if some of the high-order bits are known to be zero. If so,
4900 count those bits and return one less than that amount. If we can't
4901 safely compute the mask for this mode, always return 1. */
4903 bitwidth = GET_MODE_PRECISION (mode);
4904 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4905 return 1;
4907 nonzero = nonzero_bits (x, mode);
4908 return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
4909 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
4912 /* Calculate the rtx_cost of a single instruction. A return value of
4913 zero indicates an instruction pattern without a known cost. */
4915 int
4916 insn_rtx_cost (rtx pat, bool speed)
4918 int i, cost;
4919 rtx set;
4921 /* Extract the single set rtx from the instruction pattern.
4922 We can't use single_set since we only have the pattern. */
4923 if (GET_CODE (pat) == SET)
4924 set = pat;
4925 else if (GET_CODE (pat) == PARALLEL)
4927 set = NULL_RTX;
4928 for (i = 0; i < XVECLEN (pat, 0); i++)
4930 rtx x = XVECEXP (pat, 0, i);
4931 if (GET_CODE (x) == SET)
4933 if (set)
4934 return 0;
4935 set = x;
4938 if (!set)
4939 return 0;
4941 else
4942 return 0;
4944 cost = set_src_cost (SET_SRC (set), speed);
4945 return cost > 0 ? cost : COSTS_N_INSNS (1);
4948 /* Returns estimate on cost of computing SEQ. */
4950 unsigned
4951 seq_cost (const rtx_insn *seq, bool speed)
4953 unsigned cost = 0;
4954 rtx set;
4956 for (; seq; seq = NEXT_INSN (seq))
4958 set = single_set (seq);
4959 if (set)
4960 cost += set_rtx_cost (set, speed);
4961 else
4962 cost++;
4965 return cost;
4968 /* Given an insn INSN and condition COND, return the condition in a
4969 canonical form to simplify testing by callers. Specifically:
4971 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
4972 (2) Both operands will be machine operands; (cc0) will have been replaced.
4973 (3) If an operand is a constant, it will be the second operand.
4974 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
4975 for GE, GEU, and LEU.
4977 If the condition cannot be understood, or is an inequality floating-point
4978 comparison which needs to be reversed, 0 will be returned.
4980 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
4982 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4983 insn used in locating the condition was found. If a replacement test
4984 of the condition is desired, it should be placed in front of that
4985 insn and we will be sure that the inputs are still valid.
4987 If WANT_REG is nonzero, we wish the condition to be relative to that
4988 register, if possible. Therefore, do not canonicalize the condition
4989 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
4990 to be a compare to a CC mode register.
4992 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
4993 and at INSN. */
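/* For example, under rule (4) above (le (reg R) (const_int 4)) is
   returned as (lt (reg R) (const_int 5)), and (geu (reg R) (const_int 7))
   as (gtu (reg R) (const_int 6)).  */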
4995 rtx
4996 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
4997 rtx_insn **earliest,
4998 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5000 enum rtx_code code;
5001 rtx_insn *prev = insn;
5002 const_rtx set;
5003 rtx tem;
5004 rtx op0, op1;
5005 int reverse_code = 0;
5006 machine_mode mode;
5007 basic_block bb = BLOCK_FOR_INSN (insn);
5009 code = GET_CODE (cond);
5010 mode = GET_MODE (cond);
5011 op0 = XEXP (cond, 0);
5012 op1 = XEXP (cond, 1);
5014 if (reverse)
5015 code = reversed_comparison_code (cond, insn);
5016 if (code == UNKNOWN)
5017 return 0;
5019 if (earliest)
5020 *earliest = insn;
5022 /* If we are comparing a register with zero, see if the register is set
5023 in the previous insn to a COMPARE or a comparison operation. Perform
5024 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
5025 in cse.c */
5027 while ((GET_RTX_CLASS (code) == RTX_COMPARE
5028 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5029 && op1 == CONST0_RTX (GET_MODE (op0))
5030 && op0 != want_reg)
5032 /* Set nonzero when we find something of interest. */
5033 rtx x = 0;
5035 /* If comparison with cc0, import actual comparison from compare
5036 insn. */
5037 if (op0 == cc0_rtx)
5039 if ((prev = prev_nonnote_insn (prev)) == 0
5040 || !NONJUMP_INSN_P (prev)
5041 || (set = single_set (prev)) == 0
5042 || SET_DEST (set) != cc0_rtx)
5043 return 0;
5045 op0 = SET_SRC (set);
5046 op1 = CONST0_RTX (GET_MODE (op0));
5047 if (earliest)
5048 *earliest = prev;
5051 /* If this is a COMPARE, pick up the two things being compared. */
5052 if (GET_CODE (op0) == COMPARE)
5054 op1 = XEXP (op0, 1);
5055 op0 = XEXP (op0, 0);
5056 continue;
5058 else if (!REG_P (op0))
5059 break;
5061 /* Go back to the previous insn. Stop if it is not an INSN. We also
5062 stop if it isn't a single set or if it has a REG_INC note because
5063 we don't want to bother dealing with it. */
5065 prev = prev_nonnote_nondebug_insn (prev);
5067 if (prev == 0
5068 || !NONJUMP_INSN_P (prev)
5069 || FIND_REG_INC_NOTE (prev, NULL_RTX)
5070 /* In cfglayout mode, there do not have to be labels at the
5071 beginning of a block, or jumps at the end, so the previous
5072 conditions would not stop us when we reach bb boundary. */
5073 || BLOCK_FOR_INSN (prev) != bb)
5074 break;
5076 set = set_of (op0, prev);
5078 if (set
5079 && (GET_CODE (set) != SET
5080 || !rtx_equal_p (SET_DEST (set), op0)))
5081 break;
5083 /* If this is setting OP0, get what it sets it to if it looks
5084 relevant. */
5085 if (set)
5087 machine_mode inner_mode = GET_MODE (SET_DEST (set));
5088 #ifdef FLOAT_STORE_FLAG_VALUE
5089 REAL_VALUE_TYPE fsfv;
5090 #endif
5092 /* ??? We may not combine comparisons done in a CCmode with
5093 comparisons not done in a CCmode. This is to aid targets
5094 like Alpha that have an IEEE compliant EQ instruction, and
5095 a non-IEEE compliant BEQ instruction. The use of CCmode is
5096 actually artificial, simply to prevent the combination, but
5097 should not affect other platforms.
5099 However, we must allow VOIDmode comparisons to match either
5100 CCmode or non-CCmode comparison, because some ports have
5101 modeless comparisons inside branch patterns.
5103 ??? This mode check should perhaps look more like the mode check
5104 in simplify_comparison in combine. */
5105 if (((GET_MODE_CLASS (mode) == MODE_CC)
5106 != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5107 && mode != VOIDmode
5108 && inner_mode != VOIDmode)
5109 break;
5110 if (GET_CODE (SET_SRC (set)) == COMPARE
5111 || (((code == NE
5112 || (code == LT
5113 && val_signbit_known_set_p (inner_mode,
5114 STORE_FLAG_VALUE))
5115 #ifdef FLOAT_STORE_FLAG_VALUE
5116 || (code == LT
5117 && SCALAR_FLOAT_MODE_P (inner_mode)
5118 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5119 REAL_VALUE_NEGATIVE (fsfv)))
5120 #endif
5122 && COMPARISON_P (SET_SRC (set))))
5123 x = SET_SRC (set);
5124 else if (((code == EQ
5125 || (code == GE
5126 && val_signbit_known_set_p (inner_mode,
5127 STORE_FLAG_VALUE))
5128 #ifdef FLOAT_STORE_FLAG_VALUE
5129 || (code == GE
5130 && SCALAR_FLOAT_MODE_P (inner_mode)
5131 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5132 REAL_VALUE_NEGATIVE (fsfv)))
5133 #endif
5135 && COMPARISON_P (SET_SRC (set)))
5137 reverse_code = 1;
5138 x = SET_SRC (set);
5140 else if ((code == EQ || code == NE)
5141 && GET_CODE (SET_SRC (set)) == XOR)
5142 /* Handle sequences like:
5144 (set op0 (xor X Y))
5145 ...(eq|ne op0 (const_int 0))...
5147 in which case:
5149 (eq op0 (const_int 0)) reduces to (eq X Y)
5150 (ne op0 (const_int 0)) reduces to (ne X Y)
5152 This is the form used by MIPS16, for example. */
5153 x = SET_SRC (set);
5154 else
5155 break;
5158 else if (reg_set_p (op0, prev))
5159 /* If this sets OP0, but not directly, we have to give up. */
5160 break;
5162 if (x)
5164 /* If the caller is expecting the condition to be valid at INSN,
5165 make sure X doesn't change before INSN. */
5166 if (valid_at_insn_p)
5167 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5168 break;
5169 if (COMPARISON_P (x))
5170 code = GET_CODE (x);
5171 if (reverse_code)
5173 code = reversed_comparison_code (x, prev);
5174 if (code == UNKNOWN)
5175 return 0;
5176 reverse_code = 0;
5179 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5180 if (earliest)
5181 *earliest = prev;
5185 /* If constant is first, put it last. */
5186 if (CONSTANT_P (op0))
5187 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5189 /* If OP0 is the result of a comparison, we weren't able to find what
5190 was really being compared, so fail. */
5191 if (!allow_cc_mode
5192 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5193 return 0;
5195 /* Canonicalize any ordered comparison with integers involving equality
5196 if we can do computations in the relevant mode and we do not
5197 overflow. */
5199 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
5200 && CONST_INT_P (op1)
5201 && GET_MODE (op0) != VOIDmode
5202 && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
5204 HOST_WIDE_INT const_val = INTVAL (op1);
5205 unsigned HOST_WIDE_INT uconst_val = const_val;
5206 unsigned HOST_WIDE_INT max_val
5207 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
5209 switch (code)
5211 case LE:
5212 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5213 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
5214 break;
5216 /* When cross-compiling, const_val might be sign-extended from
5217 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
5218 case GE:
5219 if ((const_val & max_val)
5220 != ((unsigned HOST_WIDE_INT) 1
5221 << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
5222 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
5223 break;
5225 case LEU:
5226 if (uconst_val < max_val)
5227 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
5228 break;
5230 case GEU:
5231 if (uconst_val != 0)
5232 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
5233 break;
5235 default:
5236 break;
5240 /* Never return CC0; return zero instead. */
5241 if (CC0_P (op0))
5242 return 0;
5244 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5247 /* Given a jump insn JUMP, return the condition that will cause it to branch
5248 to its JUMP_LABEL. If the condition cannot be understood, or is an
5249 inequality floating-point comparison which needs to be reversed, 0 will
5250 be returned.
5252 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5253 insn used in locating the condition was found. If a replacement test
5254 of the condition is desired, it should be placed in front of that
5255 insn and we will be sure that the inputs are still valid. If EARLIEST
5256 is null, the returned condition will be valid at INSN.
5258 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5259 compare CC mode register.
5261 VALID_AT_INSN_P is the same as for canonicalize_condition. */
5263 rtx
5264 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
5265 int valid_at_insn_p)
5267 rtx cond;
5268 int reverse;
5269 rtx set;
5271 /* If this is not a standard conditional jump, we can't parse it. */
5272 if (!JUMP_P (jump)
5273 || ! any_condjump_p (jump))
5274 return 0;
5275 set = pc_set (jump);
5277 cond = XEXP (SET_SRC (set), 0);
5279 /* If this branches to JUMP_LABEL when the condition is false, reverse
5280 the condition. */
5281 reverse
5282 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5283 && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
5285 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5286 allow_cc_mode, valid_at_insn_p);
5289 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5290 TARGET_MODE_REP_EXTENDED.
5292 Note that we assume that the property of
5293 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5294 narrower than mode B. I.e., if A is a mode narrower than B then in
5295 order to be able to operate on it in mode B, mode A needs to
5296 satisfy the requirements set by the representation of mode B. */
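/* For example, a 64-bit target whose TARGET_MODE_REP_EXTENDED (SImode,
   DImode) hook returns SIGN_EXTEND (MIPS64 is one such target) gets
   num_sign_bit_copies_in_rep[DImode][SImode] == 32, since the upper 32
   bits of a DImode register holding an SImode value must be copies of
   bit 31.  */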
5298 static void
5299 init_num_sign_bit_copies_in_rep (void)
5301 machine_mode mode, in_mode;
5303 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
5304 in_mode = GET_MODE_WIDER_MODE (mode))
5305 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
5306 mode = GET_MODE_WIDER_MODE (mode))
5308 machine_mode i;
5310 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5311 extends to the next widest mode. */
5312 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5313 || GET_MODE_WIDER_MODE (mode) == in_mode);
5315 /* We are in in_mode. Count how many bits outside of mode
5316 have to be copies of the sign-bit. */
5317 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
5319 machine_mode wider = GET_MODE_WIDER_MODE (i);
5321 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5322 /* We can only check sign-bit copies starting from the
5323 top-bit. In order to be able to check the bits we
5324 have already seen we pretend that subsequent bits
5325 have to be sign-bit copies too. */
5326 || num_sign_bit_copies_in_rep [in_mode][mode])
5327 num_sign_bit_copies_in_rep [in_mode][mode]
5328 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5333 /* Suppose that truncation from the machine mode of X to MODE is not a
5334 no-op. See if there is anything special about X so that we can
5335 assume it already contains a truncated value of MODE. */
5337 bool
5338 truncated_to_mode (machine_mode mode, const_rtx x)
5340 /* This register has already been used in MODE without explicit
5341 truncation. */
5342 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5343 return true;
5345 /* See if we already satisfy the requirements of MODE. If yes we
5346 can just switch to MODE. */
5347 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5348 && (num_sign_bit_copies (x, GET_MODE (x))
5349 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5350 return true;
5352 return false;
5355 /* Return true if RTX code CODE has a single sequence of zero or more
5356 "e" operands and no rtvec operands. Initialize its rtx_all_subrtx_bounds
5357 entry in that case. */
5359 static bool
5360 setup_reg_subrtx_bounds (unsigned int code)
5362 const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
5363 unsigned int i = 0;
5364 for (; format[i] != 'e'; ++i)
5366 if (!format[i])
5367 /* No subrtxes. Leave start and count as 0. */
5368 return true;
5369 if (format[i] == 'E' || format[i] == 'V')
5370 return false;
5373 /* Record the sequence of 'e's. */
5374 rtx_all_subrtx_bounds[code].start = i;
5376 ++i;
5377 while (format[i] == 'e');
5378 rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
5379 /* rtl-iter.h relies on this. */
5380 gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
5382 for (; format[i]; ++i)
5383 if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
5384 return false;
5386 return true;
5389 /* Initialize rtx_all_subrtx_bounds. */
5390 void
5391 init_rtlanal (void)
5393 int i;
5394 for (i = 0; i < NUM_RTX_CODE; i++)
5396 if (!setup_reg_subrtx_bounds (i))
5397 rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
5398 if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
5399 rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
5402 init_num_sign_bit_copies_in_rep ();
5405 /* Check whether this is a constant pool constant. */
5406 bool
5407 constant_pool_constant_p (rtx x)
5409 x = avoid_constant_pool_reference (x);
5410 return CONST_DOUBLE_P (x);
5413 /* If M is a bitmask that selects a field of low-order bits within an item but
5414 not the entire word, return the length of the field. Return -1 otherwise.
5415 M is used in machine mode MODE. */
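/* E.g. M == 0x3f yields 6, whereas M == 0x0c (a field that does not
   start at bit 0) yields -1.  */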
5417 int
5418 low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
5420 if (mode != VOIDmode)
5422 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
5423 return -1;
5424 m &= GET_MODE_MASK (mode);
5427 return exact_log2 (m + 1);
5430 /* Return the mode of MEM's address. */
5432 machine_mode
5433 get_address_mode (rtx mem)
5435 machine_mode mode;
5437 gcc_assert (MEM_P (mem));
5438 mode = GET_MODE (XEXP (mem, 0));
5439 if (mode != VOIDmode)
5440 return mode;
5441 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5444 /* Split up a CONST_DOUBLE or integer constant rtx
5445 into two rtx's for single words,
5446 storing in *FIRST the word that comes first in memory in the target
5447 and in *SECOND the other.
5449 TODO: This function needs to be rewritten to work on any size
5450 integer. */
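/* Illustration: on a target with 32-bit words, a 64-bit HOST_WIDE_INT
   and !WORDS_BIG_ENDIAN, splitting (const_int 0x100000002) gives
   *FIRST == (const_int 2) and *SECOND == (const_int 1).  */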
5452 void
5453 split_double (rtx value, rtx *first, rtx *second)
5455 if (CONST_INT_P (value))
5457 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5459 /* In this case the CONST_INT holds both target words.
5460 Extract the bits from it into two word-sized pieces.
5461 Sign extend each half to HOST_WIDE_INT. */
5462 unsigned HOST_WIDE_INT low, high;
5463 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5464 unsigned bits_per_word = BITS_PER_WORD;
5466 /* Set sign_bit to the most significant bit of a word. */
5467 sign_bit = 1;
5468 sign_bit <<= bits_per_word - 1;
5470 /* Set mask so that all bits of the word are set. We could
5471 have used 1 << BITS_PER_WORD instead of basing the
5472 calculation on sign_bit. However, on machines where
5473 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5474 compiler warning, even though the code would never be
5475 executed. */
5476 mask = sign_bit << 1;
5477 mask--;
5479 /* Set sign_extend as any remaining bits. */
5480 sign_extend = ~mask;
5482 /* Pick the lower word and sign-extend it. */
5483 low = INTVAL (value);
5484 low &= mask;
5485 if (low & sign_bit)
5486 low |= sign_extend;
5488 /* Pick the higher word, shifted to the least significant
5489 bits, and sign-extend it. */
5490 high = INTVAL (value);
5491 high >>= bits_per_word - 1;
5492 high >>= 1;
5493 high &= mask;
5494 if (high & sign_bit)
5495 high |= sign_extend;
5497 /* Store the words in the target machine order. */
5498 if (WORDS_BIG_ENDIAN)
5500 *first = GEN_INT (high);
5501 *second = GEN_INT (low);
5503 else
5505 *first = GEN_INT (low);
5506 *second = GEN_INT (high);
5509 else
5511 /* The rule for using CONST_INT for a wider mode
5512 is that we regard the value as signed.
5513 So sign-extend it. */
5514 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
5515 if (WORDS_BIG_ENDIAN)
5517 *first = high;
5518 *second = value;
5520 else
5522 *first = value;
5523 *second = high;
5527 else if (GET_CODE (value) == CONST_WIDE_INT)
5529 /* All of this is scary code and needs to be converted to
5530 properly work with any size integer. */
5531 gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
5532 if (WORDS_BIG_ENDIAN)
5534 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5535 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5537 else
5539 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5540 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5543 else if (!CONST_DOUBLE_P (value))
5545 if (WORDS_BIG_ENDIAN)
5547 *first = const0_rtx;
5548 *second = value;
5550 else
5552 *first = value;
5553 *second = const0_rtx;
5556 else if (GET_MODE (value) == VOIDmode
5557 /* This is the old way we did CONST_DOUBLE integers. */
5558 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
5560 /* In an integer, the words are defined as most and least significant.
5561 So order them by the target's convention. */
5562 if (WORDS_BIG_ENDIAN)
5564 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
5565 *second = GEN_INT (CONST_DOUBLE_LOW (value));
5567 else
5569 *first = GEN_INT (CONST_DOUBLE_LOW (value));
5570 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
5573 else
5575 REAL_VALUE_TYPE r;
5576 long l[2];
5577 REAL_VALUE_FROM_CONST_DOUBLE (r, value);
5579 /* Note, this converts the REAL_VALUE_TYPE to the target's
5580 format, splits up the floating point double and outputs
5581 exactly 32 bits of it into each of l[0] and l[1] --
5582 not necessarily BITS_PER_WORD bits. */
5583 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
5585 /* If 32 bits is an entire word for the target, but not for the host,
5586 then sign-extend on the host so that the number will look the same
5587 way on the host that it would on the target. See for instance
5588 simplify_unary_operation. The #if is needed to avoid compiler
5589 warnings. */
5591 #if HOST_BITS_PER_LONG > 32
5592 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
5594 if (l[0] & ((long) 1 << 31))
5595 l[0] |= ((long) (-1) << 32);
5596 if (l[1] & ((long) 1 << 31))
5597 l[1] |= ((long) (-1) << 32);
5599 #endif
5601 *first = GEN_INT (l[0]);
5602 *second = GEN_INT (l[1]);
5606 /* Return true if X is a sign_extract or zero_extract from the least
5607 significant bit. */
5609 static bool
5610 lsb_bitfield_op_p (rtx x)
5612 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
5614 machine_mode mode = GET_MODE (XEXP (x, 0));
5615 HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
5616 HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
5618 return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
5620 return false;
5623 /* Strip outer address "mutations" from LOC and return a pointer to the
5624 inner value. If OUTER_CODE is nonnull, store the code of the innermost
5625 stripped expression there.
5627 "Mutations" either convert between modes or apply some kind of
5628 extension, truncation or alignment. */
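/* For example, given (and (plus (reg R) (const_int 12)) (const_int -4)),
   an address aligned to 4 bytes, this returns a pointer to the inner
   PLUS and, if OUTER_CODE is nonnull, stores AND there.  */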
5630 rtx *
5631 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
5633 for (;;)
5635 enum rtx_code code = GET_CODE (*loc);
5636 if (GET_RTX_CLASS (code) == RTX_UNARY)
5637 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
5638 used to convert between pointer sizes. */
5639 loc = &XEXP (*loc, 0);
5640 else if (lsb_bitfield_op_p (*loc))
5641 /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
5642 acts as a combined truncation and extension. */
5643 loc = &XEXP (*loc, 0);
5644 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
5645 /* (and ... (const_int -X)) is used to align to X bytes. */
5646 loc = &XEXP (*loc, 0);
5647 else if (code == SUBREG
5648 && !OBJECT_P (SUBREG_REG (*loc))
5649 && subreg_lowpart_p (*loc))
5650 /* (subreg (operator ...) ...) inside an AND is used for mode
5651 conversion too. */
5652 loc = &SUBREG_REG (*loc);
5653 else
5654 return loc;
5655 if (outer_code)
5656 *outer_code = code;
5660 /* Return true if CODE applies some kind of scale. The scaled value is
5661 the first operand and the scale is the second. */
5663 static bool
5664 binary_scale_code_p (enum rtx_code code)
5666 return (code == MULT
5667 || code == ASHIFT
5668 /* Needed by ARM targets. */
5669 || code == ASHIFTRT
5670 || code == LSHIFTRT
5671 || code == ROTATE
5672 || code == ROTATERT);
5675 /* If *INNER can be interpreted as a base, return a pointer to the inner term
5676 (see address_info). Return null otherwise. */
5678 static rtx *
5679 get_base_term (rtx *inner)
5681 if (GET_CODE (*inner) == LO_SUM)
5682 inner = strip_address_mutations (&XEXP (*inner, 0));
5683 if (REG_P (*inner)
5684 || MEM_P (*inner)
5685 || GET_CODE (*inner) == SUBREG
5686 || GET_CODE (*inner) == SCRATCH)
5687 return inner;
5688 return 0;
5691 /* If *INNER can be interpreted as an index, return a pointer to the inner term
5692 (see address_info). Return null otherwise. */
5694 static rtx *
5695 get_index_term (rtx *inner)
5697 /* At present, only constant scales are allowed. */
5698 if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
5699 inner = strip_address_mutations (&XEXP (*inner, 0));
5700 if (REG_P (*inner)
5701 || MEM_P (*inner)
5702 || GET_CODE (*inner) == SUBREG
5703 || GET_CODE (*inner) == SCRATCH)
5704 return inner;
5705 return 0;
5708 /* Set the segment part of address INFO to LOC, given that INNER is the
5709 unmutated value. */
5711 static void
5712 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
5714 gcc_assert (!info->segment);
5715 info->segment = loc;
5716 info->segment_term = inner;
5719 /* Set the base part of address INFO to LOC, given that INNER is the
5720 unmutated value. */
5722 static void
5723 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
5725 gcc_assert (!info->base);
5726 info->base = loc;
5727 info->base_term = inner;
5730 /* Set the index part of address INFO to LOC, given that INNER is the
5731 unmutated value. */
5733 static void
5734 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
5736 gcc_assert (!info->index);
5737 info->index = loc;
5738 info->index_term = inner;
5741 /* Set the displacement part of address INFO to LOC, given that INNER
5742 is the constant term. */
5744 static void
5745 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
5747 gcc_assert (!info->disp);
5748 info->disp = loc;
5749 info->disp_term = inner;
5752 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
5753 rest of INFO accordingly. */
5755 static void
5756 decompose_incdec_address (struct address_info *info)
5758 info->autoinc_p = true;
5760 rtx *base = &XEXP (*info->inner, 0);
5761 set_address_base (info, base, base);
5762 gcc_checking_assert (info->base == info->base_term);
5764 /* These addresses are only valid when the size of the addressed
5765 value is known. */
5766 gcc_checking_assert (info->mode != VOIDmode);
5769 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
5770 of INFO accordingly. */
5772 static void
5773 decompose_automod_address (struct address_info *info)
5775 info->autoinc_p = true;
5777 rtx *base = &XEXP (*info->inner, 0);
5778 set_address_base (info, base, base);
5779 gcc_checking_assert (info->base == info->base_term);
5781 rtx plus = XEXP (*info->inner, 1);
5782 gcc_assert (GET_CODE (plus) == PLUS);
5784 info->base_term2 = &XEXP (plus, 0);
5785 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
5787 rtx *step = &XEXP (plus, 1);
5788 rtx *inner_step = strip_address_mutations (step);
5789 if (CONSTANT_P (*inner_step))
5790 set_address_disp (info, step, inner_step);
5791 else
5792 set_address_index (info, step, inner_step);
5795 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
5796 values in [PTR, END). Return a pointer to the end of the used array. */
5798 static rtx **
5799 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
5801 rtx x = *loc;
5802 if (GET_CODE (x) == PLUS)
5804 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
5805 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
5807 else
5809 gcc_assert (ptr != end);
5810 *ptr++ = loc;
5812 return ptr;
5815 /* Evaluate the likelihood of X being a base or index value, returning
5816 positive if it is likely to be a base, negative if it is likely to be
5817 an index, and 0 if we can't tell. Make the magnitude of the return
5818 value reflect the amount of confidence we have in the answer.
5820 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
5822 static int
5823 baseness (rtx x, machine_mode mode, addr_space_t as,
5824 enum rtx_code outer_code, enum rtx_code index_code)
5826 /* Believe *_POINTER unless the address shape requires otherwise. */
5827 if (REG_P (x) && REG_POINTER (x))
5828 return 2;
5829 if (MEM_P (x) && MEM_POINTER (x))
5830 return 2;
5832 if (REG_P (x) && HARD_REGISTER_P (x))
5834 /* X is a hard register. If it only fits one of the base
5835 or index classes, choose that interpretation. */
5836 int regno = REGNO (x);
5837 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
5838 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
5839 if (base_p != index_p)
5840 return base_p ? 1 : -1;
5842 return 0;
5845 /* INFO->INNER describes a normal, non-automodified address.
5846 Fill in the rest of INFO accordingly. */
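/* For example, (plus (plus (reg B) (mult (reg I) (const_int 4)))
   (const_int 8)) is decomposed into base (reg B), index
   (mult (reg I) (const_int 4)) and displacement (const_int 8).  */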
5848 static void
5849 decompose_normal_address (struct address_info *info)
5851 /* Treat the address as the sum of up to four values. */
5852 rtx *ops[4];
5853 size_t n_ops = extract_plus_operands (info->inner, ops,
5854 ops + ARRAY_SIZE (ops)) - ops;
5856 /* If there is more than one component, any base component is in a PLUS. */
5857 if (n_ops > 1)
5858 info->base_outer_code = PLUS;
5860 /* Try to classify each sum operand now. Leave those that could be
5861 either a base or an index in OPS. */
5862 rtx *inner_ops[4];
5863 size_t out = 0;
5864 for (size_t in = 0; in < n_ops; ++in)
5866 rtx *loc = ops[in];
5867 rtx *inner = strip_address_mutations (loc);
5868 if (CONSTANT_P (*inner))
5869 set_address_disp (info, loc, inner);
5870 else if (GET_CODE (*inner) == UNSPEC)
5871 set_address_segment (info, loc, inner);
5872 else
5874 /* The only other possibilities are a base or an index. */
5875 rtx *base_term = get_base_term (inner);
5876 rtx *index_term = get_index_term (inner);
5877 gcc_assert (base_term || index_term);
5878 if (!base_term)
5879 set_address_index (info, loc, index_term);
5880 else if (!index_term)
5881 set_address_base (info, loc, base_term);
5882 else
5884 gcc_assert (base_term == index_term);
5885 ops[out] = loc;
5886 inner_ops[out] = base_term;
5887 ++out;
5892 /* Classify the remaining OPS members as bases and indexes. */
5893 if (out == 1)
5895 /* If we haven't seen a base or an index yet, assume that this is
5896 the base. If we were confident that another term was the base
5897 or index, treat the remaining operand as the other kind. */
5898 if (!info->base)
5899 set_address_base (info, ops[0], inner_ops[0]);
5900 else
5901 set_address_index (info, ops[0], inner_ops[0]);
5903 else if (out == 2)
5905 /* In the event of a tie, assume the base comes first. */
5906 if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
5907 GET_CODE (*ops[1]))
5908 >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
5909 GET_CODE (*ops[0])))
5911 set_address_base (info, ops[0], inner_ops[0]);
5912 set_address_index (info, ops[1], inner_ops[1]);
5914 else
5916 set_address_base (info, ops[1], inner_ops[1]);
5917 set_address_index (info, ops[0], inner_ops[0]);
5920 else
5921 gcc_assert (out == 0);
5924 /* Describe address *LOC in *INFO. MODE is the mode of the addressed value,
5925 or VOIDmode if not known. AS is the address space associated with LOC.
5926 OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise. */
5928 void
5929 decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
5930 addr_space_t as, enum rtx_code outer_code)
5932 memset (info, 0, sizeof (*info));
5933 info->mode = mode;
5934 info->as = as;
5935 info->addr_outer_code = outer_code;
5936 info->outer = loc;
5937 info->inner = strip_address_mutations (loc, &outer_code);
5938 info->base_outer_code = outer_code;
5939 switch (GET_CODE (*info->inner))
5941 case PRE_DEC:
5942 case PRE_INC:
5943 case POST_DEC:
5944 case POST_INC:
5945 decompose_incdec_address (info);
5946 break;
5948 case PRE_MODIFY:
5949 case POST_MODIFY:
5950 decompose_automod_address (info);
5951 break;
5953 default:
5954 decompose_normal_address (info);
5955 break;
5959 /* Describe address operand LOC in INFO. */
5961 void
5962 decompose_lea_address (struct address_info *info, rtx *loc)
5964 decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
5967 /* Describe the address of MEM X in INFO. */
5969 void
5970 decompose_mem_address (struct address_info *info, rtx x)
5972 gcc_assert (MEM_P (x));
5973 decompose_address (info, &XEXP (x, 0), GET_MODE (x),
5974 MEM_ADDR_SPACE (x), MEM);
5977 /* Update INFO after a change to the address it describes. */
5979 void
5980 update_address (struct address_info *info)
5982 decompose_address (info, info->outer, info->mode, info->as,
5983 info->addr_outer_code);
5986 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
5987 more complicated than that. */
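/* E.g. both (mult (reg R) (const_int 4)) and
   (ashift (reg R) (const_int 2)) yield a scale of 4.  */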
5989 HOST_WIDE_INT
5990 get_index_scale (const struct address_info *info)
5992 rtx index = *info->index;
5993 if (GET_CODE (index) == MULT
5994 && CONST_INT_P (XEXP (index, 1))
5995 && info->index_term == &XEXP (index, 0))
5996 return INTVAL (XEXP (index, 1));
5998 if (GET_CODE (index) == ASHIFT
5999 && CONST_INT_P (XEXP (index, 1))
6000 && info->index_term == &XEXP (index, 0))
6001 return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));
6003 if (info->index == info->index_term)
6004 return 1;
6006 return 0;
6009 /* Return the "index code" of INFO, in the form required by
6010 ok_for_base_p_1. */
6012 enum rtx_code
6013 get_index_code (const struct address_info *info)
6015 if (info->index)
6016 return GET_CODE (*info->index);
6018 if (info->disp)
6019 return GET_CODE (*info->disp);
6021 return SCRATCH;
6024 /* Return true if X contains a thread-local symbol. */
6026 bool
6027 tls_referenced_p (const_rtx x)
6029 if (!targetm.have_tls)
6030 return false;
6032 subrtx_iterator::array_type array;
6033 FOR_EACH_SUBRTX (iter, array, x, ALL)
6034 if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
6035 return true;
6036 return false;