/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "recog.h"
#include "addresses.h"
#include "rtl-iter.h"

/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, machine_mode,
                                                   const_rtx, machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, machine_mode,
                                             const_rtx, machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, machine_mode,
                                                const_rtx, machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, machine_mode, const_rtx,
                                          machine_mode, unsigned int);

rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];

/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];

/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  value_type *base,
                                                  size_t i, value_type x)
{
  if (base == array.stack)
    {
      if (i < LOCAL_ELEMS)
        {
          base[i] = x;
          return base;
        }
      gcc_checking_assert (i == LOCAL_ELEMS);
      /* A previous iteration might also have moved from the stack to the
         heap, in which case the heap array will already be big enough.  */
      if (vec_safe_length (array.heap) <= i)
        vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }
  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}

/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    value_type *base,
                                                    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
         we're likely to want most.  It also allows for the SEQUENCE
         code below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
        if (format[i] == 'e')
          {
            value_type subx = T::get_value (x->u.fld[i].rt_rtx);
            if (__builtin_expect (end < LOCAL_ELEMS, true))
              base[end++] = subx;
            else
              base = add_single_to_queue (array, base, end++, subx);
          }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
        {
          value_type subx = T::get_value (x->u.fld[i].rt_rtx);
          if (__builtin_expect (end < LOCAL_ELEMS, true))
            base[end++] = subx;
          else
            base = add_single_to_queue (array, base, end++, subx);
        }
      else if (format[i] == 'E')
        {
          unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
          rtx *vec = x->u.fld[i].rt_rtvec->elem;
          if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
            for (unsigned int j = 0; j < length; j++)
              base[end++] = T::get_value (vec[j]);
          else
            for (unsigned int j = 0; j < length; j++)
              base = add_single_to_queue (array, base, end++,
                                          T::get_value (vec[j]));
          if (code == SEQUENCE && end == length)
            /* If the subrtxes of the sequence fill the entire array then
               we know that no other parts of a containing insn are queued.
               The caller is therefore iterating over the sequence as a
               PATTERN (...), so we also want the patterns of the
               subinstructions.  */
            for (unsigned int j = 0; j < length; j++)
              {
                typename T::rtx_type x = T::get_rtx (base[j]);
                if (INSN_P (x))
                  base[j] = T::get_value (PATTERN (x));
              }
        }
  return end - orig_end;
}

template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;

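/* Illustrative sketch (hypothetical, for exposition only): the iterator
   classes instantiated above are normally used through the FOR_EACH_SUBRTX
   family of macros from rtl-iter.h, as find_all_hard_regs does later in
   this file.  A minimal helper that counts the REG subexpressions of X
   might look like this.  */
#if 0
static int
count_reg_subrtxes (const_rtx x)
{
  int count = 0;
  subrtx_iterator::array_type array;
  /* Visit X itself and every subexpression, including constants.  */
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (REG_P (*iter))
      count++;
  return count;
}
#endif
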
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}

/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}

/* Compute an approximation for the offset between the register
   FROM and TO for the current function, as it was at the start
   of the routine.  */

static HOST_WIDE_INT
get_initial_register_offset (int from, int to)
{
#ifdef ELIMINABLE_REGS
  static const struct elim_table_t
  {
    const int from;
    const int to;
  } table[] = ELIMINABLE_REGS;
  HOST_WIDE_INT offset1, offset2;
  unsigned int i, j;

  if (to == from)
    return 0;

  /* It is not safe to call INITIAL_ELIMINATION_OFFSET
     before the reload pass.  We need to give at least
     an estimation for the resulting frame size.  */
  if (! reload_completed)
    {
      offset1 = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset1 = - offset1;
#endif
      if (to == STACK_POINTER_REGNUM)
        return offset1;
      else if (from == STACK_POINTER_REGNUM)
        return - offset1;
      else
        return 0;
    }

  for (i = 0; i < ARRAY_SIZE (table); i++)
      if (table[i].from == from)
        {
          if (table[i].to == to)
            {
              INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                          offset1);
              return offset1;
            }
          for (j = 0; j < ARRAY_SIZE (table); j++)
            {
              if (table[j].to == to
                  && table[j].from == table[i].to)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return offset1 + offset2;
                }
              if (table[j].from == to
                  && table[j].to == table[i].to)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return offset1 - offset2;
                }
            }
        }
      else if (table[i].to == from)
        {
          if (table[i].from == to)
            {
              INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                          offset1);
              return - offset1;
            }
          for (j = 0; j < ARRAY_SIZE (table); j++)
            {
              if (table[j].to == to
                  && table[j].from == table[i].from)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return - offset1 + offset2;
                }
              if (table[j].from == to
                  && table[j].to == table[i].from)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return - offset1 - offset2;
                }
            }
        }

  /* If the requested register combination was not found,
     try a different more simple combination.  */
  if (from == ARG_POINTER_REGNUM)
    return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
  else if (to == ARG_POINTER_REGNUM)
    return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
  else if (from == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
  else if (to == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
  else
    return 0;

#else
  HOST_WIDE_INT offset;

  if (to == from)
    return 0;

  if (reload_completed)
    {
      INITIAL_FRAME_POINTER_OFFSET (offset);
    }
  else
    {
      offset = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset = - offset;
#endif
    }

  if (to == STACK_POINTER_REGNUM)
    return offset;
  else if (from == STACK_POINTER_REGNUM)
    return - offset;
  else
    return 0;
#endif
}

/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
                         ? tree_to_shwi (DECL_SIZE_UNIT (decl))
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        {
#ifdef RED_ZONE_SIZE
          HOST_WIDE_INT red_zone_size = RED_ZONE_SIZE;
#else
          HOST_WIDE_INT red_zone_size = 0;
#endif
          HOST_WIDE_INT stack_boundary = PREFERRED_STACK_BOUNDARY
                                         / BITS_PER_UNIT;
          HOST_WIDE_INT low_bound, high_bound;

          if (size == 0)
            size = GET_MODE_SIZE (mode);

          if (x == frame_pointer_rtx)
            {
              if (FRAME_GROWS_DOWNWARD)
                {
                  high_bound = STARTING_FRAME_OFFSET;
                  low_bound  = high_bound - get_frame_size ();
                }
              else
                {
                  low_bound  = STARTING_FRAME_OFFSET;
                  high_bound = low_bound + get_frame_size ();
                }
            }
          else if (x == hard_frame_pointer_rtx)
            {
              HOST_WIDE_INT sp_offset
                = get_initial_register_offset (STACK_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);
              HOST_WIDE_INT ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = sp_offset - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = sp_offset + red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else if (x == stack_pointer_rtx)
            {
              HOST_WIDE_INT ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               STACK_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else
            {
              /* We assume that accesses are safe to at least the
                 next stack boundary.
                 Examples are varargs and __builtin_return_address.  */
#if ARGS_GROW_DOWNWARD
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + stack_boundary;
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - crtl->args.size - stack_boundary;
#else
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - stack_boundary;
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + crtl->args.size + stack_boundary;
#endif
            }

          if (offset >= low_bound && offset <= high_bound - size)
            return 0;
          return 1;
        }
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}

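/* Illustrative sketch (hypothetical, for exposition only): a caller that
   wants to know whether dereferencing a MEM may fault asks about the MEM's
   address operand.  */
#if 0
static bool
mem_may_trap_p (const_rtx mem)
{
  /* Only the address is inspected here; volatility and the like are
     separate questions.  */
  return MEM_P (mem) && rtx_addr_can_trap_p (XEXP (mem, 0)) != 0;
}
#endif
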
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}

/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}

/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}

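/* Illustrative sketch (hypothetical, for exposition only): extracting the
   called address from a CALL_INSN.  For a direct call this is typically a
   SYMBOL_REF.  */
#if 0
static rtx
call_insn_target (rtx_insn *insn)
{
  rtx call = get_call_rtx_from (insn);
  /* Operand 0 of a CALL is a MEM wrapping the function address.  */
  return call ? XEXP (XEXP (call, 0), 0) : NULL_RTX;
}
#endif
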
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}

/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}

/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}

/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}

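/* Illustrative sketch (hypothetical, for exposition only): split_const
   turns (const (plus (symbol_ref "x") (const_int 8))) into the symbol_ref
   and the const_int 8; for anything else it returns X itself and
   const0_rtx.  */
#if 0
static void
dump_base_and_offset (rtx x)
{
  rtx base, offset;
  split_const (x, &base, &offset);
  /* OFFSET is always a CONST_INT, so INTVAL is safe.  */
  debug_rtx (base);
  fprintf (stderr, "offset = " HOST_WIDE_INT_PRINT_DEC "\n",
           INTVAL (offset));
}
#endif
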
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}

/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}

/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == LABEL_REF_LABEL (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

    CASE_CONST_ANY:
      /* These are kept unique for a given value.  */
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}

/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}

/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
                    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}

/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}

/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
                   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}

/* Return true if REG is set or clobbered inside INSN.  */

int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
        if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
          return true;

      return false;
    }

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return true;

  return set_of (reg, insn) != NULL_RTX;
}

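/* Illustrative sketch (hypothetical, for exposition only): the *_between_p
   predicates above combine naturally when deciding whether a simple
   register move could be hoisted to an earlier point in the stream.  */
#if 0
static bool
can_hoist_simple_move_p (rtx_insn *insn, rtx_insn *before)
{
  rtx set = single_set (insn);
  /* The move can go up to BEFORE only if its source is not clobbered
     and its destination is neither set nor used in between.  */
  return (set
          && REG_P (SET_DEST (set))
          && !modified_between_p (SET_SRC (set), before, insn)
          && !reg_set_between_p (SET_DEST (set), before, insn)
          && !reg_used_between_p (SET_DEST (set), before, insn));
}
#endif
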
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;
      break;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}

/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;
      break;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}

/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}

/* Add all hard registers in X to *PSET.  */
void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
        add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}

/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *)data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}

/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
        IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}

/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}

/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead, as single set insns.  In the common
                 case only a single set is present in the pattern, so we
                 want to avoid checking for REG_UNUSED notes unless
                 necessary.

                 When we reach a set for the first time, we just expect
                 it to be the single set we are looking for; only when
                 more sets are found in the insn do we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL_RTX;
              break;

            default:
              return NULL_RTX;
            }
        }
    }
  return set;
}

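/* Illustrative sketch (hypothetical, for exposition only): passes normally
   reach this code through the single_set wrapper in rtl.h, which returns
   null for insns with several live SETs.  A typical use rewrites the
   source of a simple move and lets validate_change (from recog.h) verify
   the result still matches the insn's pattern.  */
#if 0
static bool
try_replace_move_src (rtx_insn *insn, rtx new_src)
{
  rtx set = single_set (insn);
  if (!set || !REG_P (SET_DEST (set)))
    return false;
  return validate_change (insn, &SET_SRC (set), new_src, false);
}
#endif
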
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}

/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      int c0 = INTVAL (XVECEXP (par, 0, 0));
      HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
          return 0;
      return
        simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                               offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}

/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const rtx_insn *insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}

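/* Illustrative sketch (hypothetical, for exposition only): a cleanup loop
   that removes no-op moves, along the lines of what delete_noop_moves in
   cfgcleanup.c does per basic block.  */
#if 0
static void
remove_noop_moves_sketch (void)
{
  rtx_insn *insn, *next;
  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);
      if (NONJUMP_INSN_P (insn) && noop_move_p (insn))
        delete_insn (insn);
    }
}
#endif
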
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

bool
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return false;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
           || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
               && x_regno == ARG_POINTER_REGNUM)
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return true;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return true;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return false;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return true;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return true;
        }
    }

  return false;
}

/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx*) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}

/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }
  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}

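/* Illustrative sketch (hypothetical, for exposition only): a minimal
   note_stores callback.  record_hard_reg_sets above is the canonical
   in-file example; this variant just counts how many destinations a
   pattern writes.  */
#if 0
static void
count_stores_1 (rtx x ATTRIBUTE_UNUSED, const_rtx pat ATTRIBUTE_UNUSED,
                void *data)
{
  /* DATA is the pointer that was handed to note_stores.  */
  (*(int *) data)++;
}

static int
count_stores (const_rtx pat)
{
  int n = 0;
  note_stores (pat, count_stores_1, &n);
  return n;
}
#endif
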
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}

/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}

/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}

/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}

/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}

/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}

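/* Illustrative sketch (hypothetical, for exposition only): fetching the
   value recorded by a REG_EQUAL note, which is the most common use of
   find_reg_note.  */
#if 0
static rtx
reg_equal_value (rtx_insn *insn)
{
  rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
  /* The datum of an EXPR_LIST note is its first operand.  */
  return note ? XEXP (note, 0) : NULL_RTX;
}
#endif
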
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}

/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}

2169 /* Check whether INSN is a single_set whose source is known to be
2170 equivalent to a constant. Return that constant if so, otherwise
2171 return null. */
2174 find_constant_src (const rtx_insn *insn)
2176 rtx note, set, x;
2178 set = single_set (insn);
2179 if (set)
2181 x = avoid_constant_pool_reference (SET_SRC (set));
2182 if (CONSTANT_P (x))
2183 return x;
2186 note = find_reg_equal_equiv_note (insn);
2187 if (note && CONSTANT_P (XEXP (note, 0)))
2188 return XEXP (note, 0);
2190 return NULL_RTX;
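/* Usage sketch (illustrative, not part of the original source):

     rtx c = find_constant_src (insn);
     if (c != NULL_RTX && CONST_INT_P (c))
       ... INSN is known to load the value INTVAL (c) ...

   Both the SET_SRC itself and any REG_EQUAL/REG_EQUIV note are
   consulted, so this also catches sources that are merely known to be
   equivalent to a constant. */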
2193 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
2194 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2197 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
2199 /* If it's not a CALL_INSN, it can't possibly have a
2200 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
2201 if (!CALL_P (insn))
2202 return 0;
2204 gcc_assert (datum);
2206 if (!REG_P (datum))
2208 rtx link;
2210 for (link = CALL_INSN_FUNCTION_USAGE (insn);
2211 link;
2212 link = XEXP (link, 1))
2213 if (GET_CODE (XEXP (link, 0)) == code
2214 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2215 return 1;
2217 else
2219 unsigned int regno = REGNO (datum);
2221 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2222 to pseudo registers, so don't bother checking. */
2224 if (regno < FIRST_PSEUDO_REGISTER)
2226 unsigned int end_regno = END_REGNO (datum);
2227 unsigned int i;
2229 for (i = regno; i < end_regno; i++)
2230 if (find_regno_fusage (insn, code, i))
2231 return 1;
2235 return 0;
2238 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2239 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2242 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2244 rtx link;
2246 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2247 to pseudo registers, so don't bother checking. */
2249 if (regno >= FIRST_PSEUDO_REGISTER
2250 || !CALL_P (insn))
2251 return 0;
2253 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2255 rtx op, reg;
2257 if (GET_CODE (op = XEXP (link, 0)) == code
2258 && REG_P (reg = XEXP (op, 0))
2259 && REGNO (reg) <= regno
2260 && END_REGNO (reg) > regno)
2261 return 1;
2264 return 0;
2268 /* Return true if KIND is an integer REG_NOTE. */
2270 static bool
2271 int_reg_note_p (enum reg_note kind)
2273 return kind == REG_BR_PROB;
2276 /* Allocate a register note with kind KIND and datum DATUM. LIST is
2277 stored as the pointer to the next register note. */
2280 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2282 rtx note;
2284 gcc_checking_assert (!int_reg_note_p (kind));
2285 switch (kind)
2287 case REG_CC_SETTER:
2288 case REG_CC_USER:
2289 case REG_LABEL_TARGET:
2290 case REG_LABEL_OPERAND:
2291 case REG_TM:
2292 /* These types of register notes use an INSN_LIST rather than an
2293 EXPR_LIST, so that copying is done right and dumps look
2294 better. */
2295 note = alloc_INSN_LIST (datum, list);
2296 PUT_REG_NOTE_KIND (note, kind);
2297 break;
2299 default:
2300 note = alloc_EXPR_LIST (kind, datum, list);
2301 break;
2304 return note;
2307 /* Add register note with kind KIND and datum DATUM to INSN. */
2309 void
2310 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2312 REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
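/* Usage sketch (illustrative, not part of the original source): to
   record that INSN computes the same value as some constant C, one
   might write

     add_reg_note (insn, REG_EQUAL, c);

   which simply prepends the newly allocated note to REG_NOTES (insn). */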
2315 /* Add an integer register note with kind KIND and datum DATUM to INSN. */
2317 void
2318 add_int_reg_note (rtx insn, enum reg_note kind, int datum)
2320 gcc_checking_assert (int_reg_note_p (kind));
2321 REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2322 datum, REG_NOTES (insn));
2325 /* Add a register note like NOTE to INSN. */
2327 void
2328 add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2330 if (GET_CODE (note) == INT_LIST)
2331 add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2332 else
2333 add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2336 /* Remove register note NOTE from the REG_NOTES of INSN. */
2338 void
2339 remove_note (rtx insn, const_rtx note)
2341 rtx link;
2343 if (note == NULL_RTX)
2344 return;
2346 if (REG_NOTES (insn) == note)
2347 REG_NOTES (insn) = XEXP (note, 1);
2348 else
2349 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2350 if (XEXP (link, 1) == note)
2352 XEXP (link, 1) = XEXP (note, 1);
2353 break;
2356 switch (REG_NOTE_KIND (note))
2358 case REG_EQUAL:
2359 case REG_EQUIV:
2360 df_notes_rescan (as_a <rtx_insn *> (insn));
2361 break;
2362 default:
2363 break;
2367 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes. */
2369 void
2370 remove_reg_equal_equiv_notes (rtx_insn *insn)
2372 rtx *loc;
2374 loc = &REG_NOTES (insn);
2375 while (*loc)
2377 enum reg_note kind = REG_NOTE_KIND (*loc);
2378 if (kind == REG_EQUAL || kind == REG_EQUIV)
2379 *loc = XEXP (*loc, 1);
2380 else
2381 loc = &XEXP (*loc, 1);
2385 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO. */
2387 void
2388 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2390 df_ref eq_use;
2392 if (!df)
2393 return;
2395 /* This loop is a little tricky. We cannot just go down the chain because
2396 it is being modified by some actions in the loop. So we just iterate
2397 over the head. We plan to drain the list anyway. */
2398 while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2400 rtx_insn *insn = DF_REF_INSN (eq_use);
2401 rtx note = find_reg_equal_equiv_note (insn);
2403 /* This assert is generally triggered when someone deletes a REG_EQUAL
2404 or REG_EQUIV note by hacking the list manually rather than calling
2405 remove_note. */
2406 gcc_assert (note);
2408 remove_note (insn, note);
2412 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2413 return 1 if it is found. A simple equality test is used to determine if
2414 NODE matches. */
2416 bool
2417 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2419 const_rtx x;
2421 for (x = listp; x; x = XEXP (x, 1))
2422 if (node == XEXP (x, 0))
2423 return true;
2425 return false;
2428 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2429 remove that entry from the list if it is found.
2431 A simple equality test is used to determine if NODE matches. */
2433 void
2434 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2436 rtx_expr_list *temp = *listp;
2437 rtx_expr_list *prev = NULL;
2439 while (temp)
2441 if (node == temp->element ())
2443 /* Splice the node out of the list. */
2444 if (prev)
2445 XEXP (prev, 1) = temp->next ();
2446 else
2447 *listp = temp->next ();
2449 return;
2452 prev = temp;
2453 temp = temp->next ();
2457 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2458 remove that entry from the list if it is found.
2460 A simple equality test is used to determine if NODE matches. */
2462 void
2463 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2465 rtx_insn_list *temp = *listp;
2466 rtx_insn_list *prev = NULL;
2468 while (temp)
2470 if (node == temp->insn ())
2472 /* Splice the node out of the list. */
2473 if (prev)
2474 XEXP (prev, 1) = temp->next ();
2475 else
2476 *listp = temp->next ();
2478 return;
2481 prev = temp;
2482 temp = temp->next ();
2486 /* Nonzero if X contains any volatile instructions. These are instructions
2487 which may cause unpredictable machine state, and thus no
2488 instructions or register uses should be moved or combined across them.
2489 This includes only volatile asms and UNSPEC_VOLATILE instructions. */
2492 volatile_insn_p (const_rtx x)
2494 const RTX_CODE code = GET_CODE (x);
2495 switch (code)
2497 case LABEL_REF:
2498 case SYMBOL_REF:
2499 case CONST:
2500 CASE_CONST_ANY:
2501 case CC0:
2502 case PC:
2503 case REG:
2504 case SCRATCH:
2505 case CLOBBER:
2506 case ADDR_VEC:
2507 case ADDR_DIFF_VEC:
2508 case CALL:
2509 case MEM:
2510 return 0;
2512 case UNSPEC_VOLATILE:
2513 return 1;
2515 case ASM_INPUT:
2516 case ASM_OPERANDS:
2517 if (MEM_VOLATILE_P (x))
2518 return 1;
2520 default:
2521 break;
2524 /* Recursively scan the operands of this expression. */
2527 const char *const fmt = GET_RTX_FORMAT (code);
2528 int i;
2530 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2532 if (fmt[i] == 'e')
2534 if (volatile_insn_p (XEXP (x, i)))
2535 return 1;
2537 else if (fmt[i] == 'E')
2539 int j;
2540 for (j = 0; j < XVECLEN (x, i); j++)
2541 if (volatile_insn_p (XVECEXP (x, i, j)))
2542 return 1;
2546 return 0;
2549 /* Nonzero if X contains any volatile memory references
2550 UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions. */
2553 volatile_refs_p (const_rtx x)
2555 const RTX_CODE code = GET_CODE (x);
2556 switch (code)
2558 case LABEL_REF:
2559 case SYMBOL_REF:
2560 case CONST:
2561 CASE_CONST_ANY:
2562 case CC0:
2563 case PC:
2564 case REG:
2565 case SCRATCH:
2566 case CLOBBER:
2567 case ADDR_VEC:
2568 case ADDR_DIFF_VEC:
2569 return 0;
2571 case UNSPEC_VOLATILE:
2572 return 1;
2574 case MEM:
2575 case ASM_INPUT:
2576 case ASM_OPERANDS:
2577 if (MEM_VOLATILE_P (x))
2578 return 1;
2580 default:
2581 break;
2584 /* Recursively scan the operands of this expression. */
2587 const char *const fmt = GET_RTX_FORMAT (code);
2588 int i;
2590 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2592 if (fmt[i] == 'e')
2594 if (volatile_refs_p (XEXP (x, i)))
2595 return 1;
2597 else if (fmt[i] == 'E')
2599 int j;
2600 for (j = 0; j < XVECLEN (x, i); j++)
2601 if (volatile_refs_p (XVECEXP (x, i, j)))
2602 return 1;
2606 return 0;
2609 /* Similar to above, except that it also rejects register pre- and post-
2610 incrementing. */
2613 side_effects_p (const_rtx x)
2615 const RTX_CODE code = GET_CODE (x);
2616 switch (code)
2618 case LABEL_REF:
2619 case SYMBOL_REF:
2620 case CONST:
2621 CASE_CONST_ANY:
2622 case CC0:
2623 case PC:
2624 case REG:
2625 case SCRATCH:
2626 case ADDR_VEC:
2627 case ADDR_DIFF_VEC:
2628 case VAR_LOCATION:
2629 return 0;
2631 case CLOBBER:
2632 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2633 when some combination can't be done. If we see one, don't think
2634 that we can simplify the expression. */
2635 return (GET_MODE (x) != VOIDmode);
2637 case PRE_INC:
2638 case PRE_DEC:
2639 case POST_INC:
2640 case POST_DEC:
2641 case PRE_MODIFY:
2642 case POST_MODIFY:
2643 case CALL:
2644 case UNSPEC_VOLATILE:
2645 return 1;
2647 case MEM:
2648 case ASM_INPUT:
2649 case ASM_OPERANDS:
2650 if (MEM_VOLATILE_P (x))
2651 return 1;
2653 default:
2654 break;
2657 /* Recursively scan the operands of this expression. */
2660 const char *fmt = GET_RTX_FORMAT (code);
2661 int i;
2663 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2665 if (fmt[i] == 'e')
2667 if (side_effects_p (XEXP (x, i)))
2668 return 1;
2670 else if (fmt[i] == 'E')
2672 int j;
2673 for (j = 0; j < XVECLEN (x, i); j++)
2674 if (side_effects_p (XVECEXP (x, i, j)))
2675 return 1;
2679 return 0;
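/* Example (illustrative, not part of the original source) contrasting
   the three predicates above: for (post_inc:SI (reg:SI 1)),
   volatile_insn_p and volatile_refs_p both return 0, but side_effects_p
   returns 1, since the auto-increment modifies register 1. For a
   volatile MEM, volatile_refs_p and side_effects_p return 1 while
   volatile_insn_p still returns 0, as it only looks for volatile asms
   and UNSPEC_VOLATILE. */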
2682 /* Return nonzero if evaluating rtx X might cause a trap.
2683 FLAGS controls how to consider MEMs. A nonzero value means the context
2684 of the access may have changed from the original, such that the
2685 address may have become invalid. */
2688 may_trap_p_1 (const_rtx x, unsigned flags)
2690 int i;
2691 enum rtx_code code;
2692 const char *fmt;
2694 /* We make no distinction currently, but this function is part of
2695 the internal target-hooks ABI so we keep the parameter as
2696 "unsigned flags". */
2697 bool code_changed = flags != 0;
2699 if (x == 0)
2700 return 0;
2701 code = GET_CODE (x);
2702 switch (code)
2704 /* Handle these cases quickly. */
2705 CASE_CONST_ANY:
2706 case SYMBOL_REF:
2707 case LABEL_REF:
2708 case CONST:
2709 case PC:
2710 case CC0:
2711 case REG:
2712 case SCRATCH:
2713 return 0;
2715 case UNSPEC:
2716 return targetm.unspec_may_trap_p (x, flags);
2718 case UNSPEC_VOLATILE:
2719 case ASM_INPUT:
2720 case TRAP_IF:
2721 return 1;
2723 case ASM_OPERANDS:
2724 return MEM_VOLATILE_P (x);
2726 /* Memory ref can trap unless it's a static var or a stack slot. */
2727 case MEM:
2728 /* Recognize specific pattern of stack checking probes. */
2729 if (flag_stack_check
2730 && MEM_VOLATILE_P (x)
2731 && XEXP (x, 0) == stack_pointer_rtx)
2732 return 1;
2733 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2734 reference; moving it out of context such as when moving code
2735 when optimizing, might cause its address to become invalid. */
2736 code_changed
2737 || !MEM_NOTRAP_P (x))
2739 HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
2740 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2741 GET_MODE (x), code_changed);
2744 return 0;
2746 /* Division by a non-constant might trap. */
2747 case DIV:
2748 case MOD:
2749 case UDIV:
2750 case UMOD:
2751 if (HONOR_SNANS (x))
2752 return 1;
2753 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2754 return flag_trapping_math;
2755 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2756 return 1;
2757 break;
2759 case EXPR_LIST:
2760 /* An EXPR_LIST is used to represent a function call. This
2761 certainly may trap. */
2762 return 1;
2764 case GE:
2765 case GT:
2766 case LE:
2767 case LT:
2768 case LTGT:
2769 case COMPARE:
2770 /* Some floating point comparisons may trap. */
2771 if (!flag_trapping_math)
2772 break;
2773 /* ??? There is no machine independent way to check for tests that trap
2774 when COMPARE is used, though many targets do make this distinction.
2775 For instance, sparc uses CCFPE for compares which generate exceptions
2776 and CCFP for compares which do not generate exceptions. */
2777 if (HONOR_NANS (x))
2778 return 1;
2779 /* But often the compare has some CC mode, so check operand
2780 modes as well. */
2781 if (HONOR_NANS (XEXP (x, 0))
2782 || HONOR_NANS (XEXP (x, 1)))
2783 return 1;
2784 break;
2786 case EQ:
2787 case NE:
2788 if (HONOR_SNANS (x))
2789 return 1;
2790 /* Often comparison is CC mode, so check operand modes. */
2791 if (HONOR_SNANS (XEXP (x, 0))
2792 || HONOR_SNANS (XEXP (x, 1)))
2793 return 1;
2794 break;
2796 case FIX:
2797 /* Conversion of floating point might trap. */
2798 if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
2799 return 1;
2800 break;
2802 case NEG:
2803 case ABS:
2804 case SUBREG:
2805 /* These operations don't trap even with floating point. */
2806 break;
2808 default:
2809 /* Any floating arithmetic may trap. */
2810 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2811 return 1;
2814 fmt = GET_RTX_FORMAT (code);
2815 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2817 if (fmt[i] == 'e')
2819 if (may_trap_p_1 (XEXP (x, i), flags))
2820 return 1;
2822 else if (fmt[i] == 'E')
2824 int j;
2825 for (j = 0; j < XVECLEN (x, i); j++)
2826 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2827 return 1;
2830 return 0;
2833 /* Return nonzero if evaluating rtx X might cause a trap. */
2836 may_trap_p (const_rtx x)
2838 return may_trap_p_1 (x, 0);
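/* Example (illustrative, not part of the original source):
   (div:SI (reg:SI 1) (reg:SI 2)) may trap because the divisor is not
   a constant, whereas (div:SI (reg:SI 1) (const_int 4)) does not.
   Floating-point comparisons such as (gt ...) are considered trapping
   only when flag_trapping_math is set and NaNs are honored. */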
2841 /* Same as above, but additionally return nonzero if evaluating rtx X might
2842 cause a fault. We define a fault for the purpose of this function as an
2843 erroneous execution condition that cannot be encountered during the normal
2844 execution of a valid program; the typical example is an unaligned memory
2845 access on a strict alignment machine. The compiler guarantees that it
2846 doesn't generate code that will fault from a valid program, but this
2847 guarantee doesn't mean anything for individual instructions. Consider
2848 the following example:
2850 struct S { int d; union { char *cp; int *ip; }; };
2852 int foo(struct S *s)
2854 if (s->d == 1)
2855 return *s->ip;
2856 else
2857 return *s->cp;
2860 on a strict alignment machine. In a valid program, foo will never be
2861 invoked on a structure for which d is equal to 1 and the underlying
2862 unique field of the union not aligned on a 4-byte boundary, but the
2863 expression *s->ip might cause a fault if considered individually.
2865 At the RTL level, potentially problematic expressions will almost always
2866 satisfy may_trap_p; for example, the above dereference can be emitted as
2867 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2868 However, suppose that foo is inlined in a caller that causes s->cp to
2869 point to a local character variable and guarantees that s->d is not set
2870 to 1; foo may have been effectively translated into pseudo-RTL as:
2872 if ((reg:SI) == 1)
2873 (set (reg:SI) (mem:SI (%fp - 7)))
2874 else
2875 (set (reg:QI) (mem:QI (%fp - 7)))
2877 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2878 memory reference to a stack slot, but it will certainly cause a fault
2879 on a strict alignment machine. */
2882 may_trap_or_fault_p (const_rtx x)
2884 return may_trap_p_1 (x, 1);
2887 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2888 i.e., an inequality. */
2891 inequality_comparisons_p (const_rtx x)
2893 const char *fmt;
2894 int len, i;
2895 const enum rtx_code code = GET_CODE (x);
2897 switch (code)
2899 case REG:
2900 case SCRATCH:
2901 case PC:
2902 case CC0:
2903 CASE_CONST_ANY:
2904 case CONST:
2905 case LABEL_REF:
2906 case SYMBOL_REF:
2907 return 0;
2909 case LT:
2910 case LTU:
2911 case GT:
2912 case GTU:
2913 case LE:
2914 case LEU:
2915 case GE:
2916 case GEU:
2917 return 1;
2919 default:
2920 break;
2923 len = GET_RTX_LENGTH (code);
2924 fmt = GET_RTX_FORMAT (code);
2926 for (i = 0; i < len; i++)
2928 if (fmt[i] == 'e')
2930 if (inequality_comparisons_p (XEXP (x, i)))
2931 return 1;
2933 else if (fmt[i] == 'E')
2935 int j;
2936 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2937 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2938 return 1;
2942 return 0;
2945 /* Replace any occurrence of FROM in X with TO. The function does
2946 not recurse into CONST_DOUBLE expressions.
2948 Note that copying is not done so X must not be shared unless all copies
2949 are to be modified. */
2952 replace_rtx (rtx x, rtx from, rtx to)
2954 int i, j;
2955 const char *fmt;
2957 if (x == from)
2958 return to;
2960 /* Allow this function to make replacements in EXPR_LISTs. */
2961 if (x == 0)
2962 return 0;
2964 if (GET_CODE (x) == SUBREG)
2966 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);
2968 if (CONST_INT_P (new_rtx))
2970 x = simplify_subreg (GET_MODE (x), new_rtx,
2971 GET_MODE (SUBREG_REG (x)),
2972 SUBREG_BYTE (x));
2973 gcc_assert (x);
2975 else
2976 SUBREG_REG (x) = new_rtx;
2978 return x;
2980 else if (GET_CODE (x) == ZERO_EXTEND)
2982 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);
2984 if (CONST_INT_P (new_rtx))
2986 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2987 new_rtx, GET_MODE (XEXP (x, 0)));
2988 gcc_assert (x);
2990 else
2991 XEXP (x, 0) = new_rtx;
2993 return x;
2996 fmt = GET_RTX_FORMAT (GET_CODE (x));
2997 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2999 if (fmt[i] == 'e')
3000 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
3001 else if (fmt[i] == 'E')
3002 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3003 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
3006 return x;
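/* Usage sketch (illustrative, not part of the original source): since
   replace_rtx modifies X in place, a shared expression must be
   unshared first, e.g.

     rtx new_pat = replace_rtx (copy_rtx (pat), from_reg, to_reg);

   where FROM_REG and TO_REG are hypothetical registers supplied by
   the caller. */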
3009 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL. Also track
3010 the change in LABEL_NUSES if UPDATE_LABEL_NUSES. */
3012 void
3013 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3015 /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long. */
3016 rtx x = *loc;
3017 if (JUMP_TABLE_DATA_P (x))
3019 x = PATTERN (x);
3020 rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3021 int len = GET_NUM_ELEM (vec);
3022 for (int i = 0; i < len; ++i)
3024 rtx ref = RTVEC_ELT (vec, i);
3025 if (XEXP (ref, 0) == old_label)
3027 XEXP (ref, 0) = new_label;
3028 if (update_label_nuses)
3030 ++LABEL_NUSES (new_label);
3031 --LABEL_NUSES (old_label);
3035 return;
3038 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3039 field. This is not handled by the iterator because it doesn't
3040 handle unprinted ('0') fields. */
3041 if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3042 JUMP_LABEL (x) = new_label;
3044 subrtx_ptr_iterator::array_type array;
3045 FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3047 rtx *loc = *iter;
3048 if (rtx x = *loc)
3050 if (GET_CODE (x) == SYMBOL_REF
3051 && CONSTANT_POOL_ADDRESS_P (x))
3053 rtx c = get_pool_constant (x);
3054 if (rtx_referenced_p (old_label, c))
3056 /* Create a copy of constant C; replace the label inside
3057 but do not update LABEL_NUSES because uses in constant pool
3058 are not counted. */
3059 rtx new_c = copy_rtx (c);
3060 replace_label (&new_c, old_label, new_label, false);
3062 /* Add the new constant NEW_C to constant pool and replace
3063 the old reference to constant by new reference. */
3064 rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3065 *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3069 if ((GET_CODE (x) == LABEL_REF
3070 || GET_CODE (x) == INSN_LIST)
3071 && XEXP (x, 0) == old_label)
3073 XEXP (x, 0) = new_label;
3074 if (update_label_nuses)
3076 ++LABEL_NUSES (new_label);
3077 --LABEL_NUSES (old_label);
3084 void
3085 replace_label_in_insn (rtx_insn *insn, rtx old_label, rtx new_label,
3086 bool update_label_nuses)
3088 rtx insn_as_rtx = insn;
3089 replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3090 gcc_checking_assert (insn_as_rtx == insn);
3093 /* Return true if X is referenced in BODY. */
3095 bool
3096 rtx_referenced_p (const_rtx x, const_rtx body)
3098 subrtx_iterator::array_type array;
3099 FOR_EACH_SUBRTX (iter, array, body, ALL)
3100 if (const_rtx y = *iter)
3102 /* Check if a label_ref Y refers to label X. */
3103 if (GET_CODE (y) == LABEL_REF
3104 && LABEL_P (x)
3105 && LABEL_REF_LABEL (y) == x)
3106 return true;
3108 if (rtx_equal_p (x, y))
3109 return true;
3111 /* If Y is a reference to a pool constant, traverse the constant. */
3112 if (GET_CODE (y) == SYMBOL_REF
3113 && CONSTANT_POOL_ADDRESS_P (y))
3114 iter.substitute (get_pool_constant (y));
3116 return false;
3119 /* If INSN is a tablejump return true and store the label (before jump table) to
3120 *LABELP and the jump table to *TABLEP. LABELP and TABLEP may be NULL. */
3122 bool
3123 tablejump_p (const rtx_insn *insn, rtx *labelp, rtx_jump_table_data **tablep)
3125 rtx label;
3126 rtx_insn *table;
3128 if (!JUMP_P (insn))
3129 return false;
3131 label = JUMP_LABEL (insn);
3132 if (label != NULL_RTX && !ANY_RETURN_P (label)
3133 && (table = NEXT_INSN (as_a <rtx_insn *> (label))) != NULL_RTX
3134 && JUMP_TABLE_DATA_P (table))
3136 if (labelp)
3137 *labelp = label;
3138 if (tablep)
3139 *tablep = as_a <rtx_jump_table_data *> (table);
3140 return true;
3142 return false;
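/* Usage sketch (illustrative, not part of the original source):

     rtx_jump_table_data *table;
     if (tablejump_p (insn, NULL, &table))
       ... walk table->get_labels () ...

   Passing NULL for LABELP (or TABLEP) is fine when only the predicate
   or one of the outputs is needed. */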
3145 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
3146 constant that is not in the constant pool and not in the condition
3147 of an IF_THEN_ELSE. */
3149 static int
3150 computed_jump_p_1 (const_rtx x)
3152 const enum rtx_code code = GET_CODE (x);
3153 int i, j;
3154 const char *fmt;
3156 switch (code)
3158 case LABEL_REF:
3159 case PC:
3160 return 0;
3162 case CONST:
3163 CASE_CONST_ANY:
3164 case SYMBOL_REF:
3165 case REG:
3166 return 1;
3168 case MEM:
3169 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3170 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3172 case IF_THEN_ELSE:
3173 return (computed_jump_p_1 (XEXP (x, 1))
3174 || computed_jump_p_1 (XEXP (x, 2)));
3176 default:
3177 break;
3180 fmt = GET_RTX_FORMAT (code);
3181 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3183 if (fmt[i] == 'e'
3184 && computed_jump_p_1 (XEXP (x, i)))
3185 return 1;
3187 else if (fmt[i] == 'E')
3188 for (j = 0; j < XVECLEN (x, i); j++)
3189 if (computed_jump_p_1 (XVECEXP (x, i, j)))
3190 return 1;
3193 return 0;
3196 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3198 Tablejumps and casesi insns are not considered indirect jumps;
3199 we can recognize them by a (use (label_ref)). */
3202 computed_jump_p (const rtx_insn *insn)
3204 int i;
3205 if (JUMP_P (insn))
3207 rtx pat = PATTERN (insn);
3209 /* If we have a JUMP_LABEL set, we're not a computed jump. */
3210 if (JUMP_LABEL (insn) != NULL)
3211 return 0;
3213 if (GET_CODE (pat) == PARALLEL)
3215 int len = XVECLEN (pat, 0);
3216 int has_use_labelref = 0;
3218 for (i = len - 1; i >= 0; i--)
3219 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3220 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3221 == LABEL_REF))
3223 has_use_labelref = 1;
3224 break;
3227 if (! has_use_labelref)
3228 for (i = len - 1; i >= 0; i--)
3229 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3230 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3231 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3232 return 1;
3234 else if (GET_CODE (pat) == SET
3235 && SET_DEST (pat) == pc_rtx
3236 && computed_jump_p_1 (SET_SRC (pat)))
3237 return 1;
3239 return 0;
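/* Example (illustrative, not part of the original source): a jump whose
   pattern is (set (pc) (reg:DI 1)) and whose JUMP_LABEL is null is a
   computed jump; a tablejump is not, because it has a JUMP_LABEL and
   its PARALLEL contains a (use (label_ref ...)). */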
3244 /* MEM has a PRE/POST-INC/DEC/MODIFY address X. Extract the operands of
3245 the equivalent add insn and pass the result to FN, using DATA as the
3246 final argument. */
3248 static int
3249 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3251 rtx x = XEXP (mem, 0);
3252 switch (GET_CODE (x))
3254 case PRE_INC:
3255 case POST_INC:
3257 int size = GET_MODE_SIZE (GET_MODE (mem));
3258 rtx r1 = XEXP (x, 0);
3259 rtx c = gen_int_mode (size, GET_MODE (r1));
3260 return fn (mem, x, r1, r1, c, data);
3263 case PRE_DEC:
3264 case POST_DEC:
3266 int size = GET_MODE_SIZE (GET_MODE (mem));
3267 rtx r1 = XEXP (x, 0);
3268 rtx c = gen_int_mode (-size, GET_MODE (r1));
3269 return fn (mem, x, r1, r1, c, data);
3272 case PRE_MODIFY:
3273 case POST_MODIFY:
3275 rtx r1 = XEXP (x, 0);
3276 rtx add = XEXP (x, 1);
3277 return fn (mem, x, r1, add, NULL, data);
3280 default:
3281 gcc_unreachable ();
3285 /* Traverse *LOC looking for MEMs that have autoinc addresses.
3286 For each such autoinc operation found, call FN, passing it
3287 the innermost enclosing MEM, the operation itself, the RTX modified
3288 by the operation, two RTXs (the second may be NULL) that, once
3289 added, represent the value to be held by the modified RTX
3290 afterwards, and DATA. FN is to return 0 to continue the
3291 traversal or any other value to have it returned to the caller of
3292 for_each_inc_dec. */
3295 for_each_inc_dec (rtx x,
3296 for_each_inc_dec_fn fn,
3297 void *data)
3299 subrtx_var_iterator::array_type array;
3300 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3302 rtx mem = *iter;
3303 if (mem
3304 && MEM_P (mem)
3305 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3307 int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3308 if (res != 0)
3309 return res;
3310 iter.skip_subrtxes ();
3313 return 0;
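/* Usage sketch (illustrative, not part of the original source): a
   callback that merely counts auto-increment addresses might be

     static int
     count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx x ATTRIBUTE_UNUSED,
                    rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
                    rtx srcoff ATTRIBUTE_UNUSED, void *data)
     {
       ++*(int *) data;
       return 0;
     }

   Returning 0 continues the traversal; any other value stops it and
   is returned by for_each_inc_dec. */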
3317 /* Searches X for any reference to REGNO, returning the rtx of the
3318 reference found if any. Otherwise, returns NULL_RTX. */
3321 regno_use_in (unsigned int regno, rtx x)
3323 const char *fmt;
3324 int i, j;
3325 rtx tem;
3327 if (REG_P (x) && REGNO (x) == regno)
3328 return x;
3330 fmt = GET_RTX_FORMAT (GET_CODE (x));
3331 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3333 if (fmt[i] == 'e')
3335 if ((tem = regno_use_in (regno, XEXP (x, i))))
3336 return tem;
3338 else if (fmt[i] == 'E')
3339 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3340 if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
3341 return tem;
3344 return NULL_RTX;
3347 /* Return a value indicating whether OP, an operand of a commutative
3348 operation, is preferred as the first or second operand. The more
3349 positive the value, the stronger the preference for being the first
3350 operand. */
3353 commutative_operand_precedence (rtx op)
3355 enum rtx_code code = GET_CODE (op);
3357 /* Constants always become the second operand. Prefer "nice" constants. */
3358 if (code == CONST_INT)
3359 return -8;
3360 if (code == CONST_WIDE_INT)
3361 return -8;
3362 if (code == CONST_DOUBLE)
3363 return -7;
3364 if (code == CONST_FIXED)
3365 return -7;
3366 op = avoid_constant_pool_reference (op);
3367 code = GET_CODE (op);
3369 switch (GET_RTX_CLASS (code))
3371 case RTX_CONST_OBJ:
3372 if (code == CONST_INT)
3373 return -6;
3374 if (code == CONST_WIDE_INT)
3375 return -6;
3376 if (code == CONST_DOUBLE)
3377 return -5;
3378 if (code == CONST_FIXED)
3379 return -5;
3380 return -4;
3382 case RTX_EXTRA:
3383 /* SUBREGs of objects should come second. */
3384 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3385 return -3;
3386 return 0;
3388 case RTX_OBJ:
3389 /* Complex expressions should be the first, so decrease priority
3390 of objects. Prefer pointer objects over non-pointer objects. */
3391 if ((REG_P (op) && REG_POINTER (op))
3392 || (MEM_P (op) && MEM_POINTER (op)))
3393 return -1;
3394 return -2;
3396 case RTX_COMM_ARITH:
3397 /* Prefer operands that are themselves commutative to be first.
3398 This helps to make things linear. In particular,
3399 (and (and (reg) (reg)) (not (reg))) is canonical. */
3400 return 4;
3402 case RTX_BIN_ARITH:
3403 /* If only one operand is a binary expression, it will be the first
3404 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3405 is canonical, although it will usually be further simplified. */
3406 return 2;
3408 case RTX_UNARY:
3409 /* Then prefer NEG and NOT. */
3410 if (code == NEG || code == NOT)
3411 return 1;
3413 default:
3414 return 0;
3418 /* Return 1 iff it is necessary to swap the operands of a commutative
3419 operation in order to canonicalize the expression. */
3421 bool
3422 swap_commutative_operands_p (rtx x, rtx y)
3424 return (commutative_operand_precedence (x)
3425 < commutative_operand_precedence (y));
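/* Example (illustrative, not part of the original source): for
   (plus (const_int 4) (reg:SI 1)) the constant scores -8 and the
   register -2 (-1 if REG_POINTER), so swap_commutative_operands_p
   returns true and canonicalization puts the register first,
   yielding (plus (reg:SI 1) (const_int 4)). */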
3428 /* Return 1 if X is an autoincrement side effect and the register is
3429 not the stack pointer. */
3431 auto_inc_p (const_rtx x)
3433 switch (GET_CODE (x))
3435 case PRE_INC:
3436 case POST_INC:
3437 case PRE_DEC:
3438 case POST_DEC:
3439 case PRE_MODIFY:
3440 case POST_MODIFY:
3441 /* There are no REG_INC notes for SP. */
3442 if (XEXP (x, 0) != stack_pointer_rtx)
3443 return 1;
3444 default:
3445 break;
3447 return 0;
3450 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3452 loc_mentioned_in_p (rtx *loc, const_rtx in)
3454 enum rtx_code code;
3455 const char *fmt;
3456 int i, j;
3458 if (!in)
3459 return 0;
3461 code = GET_CODE (in);
3462 fmt = GET_RTX_FORMAT (code);
3463 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3465 if (fmt[i] == 'e')
3467 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3468 return 1;
3470 else if (fmt[i] == 'E')
3471 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3472 if (loc == &XVECEXP (in, i, j)
3473 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3474 return 1;
3476 return 0;
3479 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3480 and SUBREG_BYTE, return the bit offset where the subreg begins
3481 (counting from the least significant bit of the operand). */
3483 unsigned int
3484 subreg_lsb_1 (machine_mode outer_mode,
3485 machine_mode inner_mode,
3486 unsigned int subreg_byte)
3488 unsigned int bitpos;
3489 unsigned int byte;
3490 unsigned int word;
3492 /* A paradoxical subreg begins at bit position 0. */
3493 if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
3494 return 0;
3496 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3497 /* If the subreg crosses a word boundary ensure that
3498 it also begins and ends on a word boundary. */
3499 gcc_assert (!((subreg_byte % UNITS_PER_WORD
3500 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3501 && (subreg_byte % UNITS_PER_WORD
3502 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3504 if (WORDS_BIG_ENDIAN)
3505 word = (GET_MODE_SIZE (inner_mode)
3506 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3507 else
3508 word = subreg_byte / UNITS_PER_WORD;
3509 bitpos = word * BITS_PER_WORD;
3511 if (BYTES_BIG_ENDIAN)
3512 byte = (GET_MODE_SIZE (inner_mode)
3513 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3514 else
3515 byte = subreg_byte % UNITS_PER_WORD;
3516 bitpos += byte * BITS_PER_UNIT;
3518 return bitpos;
3521 /* Given a subreg X, return the bit offset where the subreg begins
3522 (counting from the least significant bit of the reg). */
3524 unsigned int
3525 subreg_lsb (const_rtx x)
3527 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3528 SUBREG_BYTE (x));
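/* Example (illustrative, not part of the original source): on a
   little-endian target, (subreg:SI (reg:DI 0) 4) refers to the high
   half and subreg_lsb returns 32; on a big-endian target the same
   byte offset names the least significant half, so subreg_lsb
   returns 0. */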
3531 /* Fill in information about a subreg of a hard register.
3532 xregno - A regno of an inner hard subreg_reg (or what will become one).
3533 xmode - The mode of xregno.
3534 offset - The byte offset.
3535 ymode - The mode of a top level SUBREG (or what may become one).
3536 info - Pointer to structure to fill in.
3538 Rather than considering one particular inner register (and thus one
3539 particular "outer" register) in isolation, this function really uses
3540 XREGNO as a model for a sequence of isomorphic hard registers. Thus the
3541 function does not check whether adding INFO->offset to XREGNO gives
3542 a valid hard register; even if INFO->offset + XREGNO is out of range,
3543 there might be another register of the same type that is in range.
3544 Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
3545 register, since that can depend on things like whether the final
3546 register number is even or odd. Callers that want to check whether
3547 this particular subreg can be replaced by a simple (reg ...) should
3548 use simplify_subreg_regno. */
3550 void
3551 subreg_get_info (unsigned int xregno, machine_mode xmode,
3552 unsigned int offset, machine_mode ymode,
3553 struct subreg_info *info)
3555 int nregs_xmode, nregs_ymode;
3556 int mode_multiple, nregs_multiple;
3557 int offset_adj, y_offset, y_offset_adj;
3558 int regsize_xmode, regsize_ymode;
3559 bool rknown;
3561 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3563 rknown = false;
3565 /* If there are holes in a non-scalar mode in registers, we expect
3566 that it is made up of its units concatenated together. */
3567 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3569 machine_mode xmode_unit;
3571 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3572 xmode_unit = GET_MODE_INNER (xmode);
3573 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3574 gcc_assert (nregs_xmode
3575 == (GET_MODE_NUNITS (xmode)
3576 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3577 gcc_assert (hard_regno_nregs[xregno][xmode]
3578 == (hard_regno_nregs[xregno][xmode_unit]
3579 * GET_MODE_NUNITS (xmode)));
3581 /* You can only ask for a SUBREG of a value with holes in the middle
3582 if you don't cross the holes. (Such a SUBREG should be done by
3583 picking a different register class, or doing it in memory if
3584 necessary.) An example of a value with holes is XCmode on 32-bit
3585 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3586 3 for each part, but in memory it's two 128-bit parts.
3587 Padding is assumed to be at the end (not necessarily the 'high part')
3588 of each unit. */
3589 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3590 < GET_MODE_NUNITS (xmode))
3591 && (offset / GET_MODE_SIZE (xmode_unit)
3592 != ((offset + GET_MODE_SIZE (ymode) - 1)
3593 / GET_MODE_SIZE (xmode_unit))))
3595 info->representable_p = false;
3596 rknown = true;
3599 else
3600 nregs_xmode = hard_regno_nregs[xregno][xmode];
3602 nregs_ymode = hard_regno_nregs[xregno][ymode];
3604 /* Paradoxical subregs are otherwise valid. */
3605 if (!rknown
3606 && offset == 0
3607 && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
3609 info->representable_p = true;
3610 /* If this is a big endian paradoxical subreg, which uses more
3611 actual hard registers than the original register, we must
3612 return a negative offset so that we find the proper highpart
3613 of the register. */
3614 if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3615 ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
3616 info->offset = nregs_xmode - nregs_ymode;
3617 else
3618 info->offset = 0;
3619 info->nregs = nregs_ymode;
3620 return;
3623 /* If registers store different numbers of bits in the different
3624 modes, we cannot generally form this subreg. */
3625 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3626 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3627 && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
3628 && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
3630 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3631 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3632 if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
3634 info->representable_p = false;
3635 info->nregs
3636 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3637 info->offset = offset / regsize_xmode;
3638 return;
3640 if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
3642 info->representable_p = false;
3643 info->nregs
3644 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3645 info->offset = offset / regsize_xmode;
3646 return;
3648 /* Quick exit for the simple and common case of extracting whole
3649 subregisters from a multiregister value. */
3650 /* ??? It would be better to integrate this into the code below,
3651 if we can generalize the concept enough and figure out how
3652 odd-sized modes can coexist with the other weird cases we support. */
3653 if (!rknown
3654 && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
3655 && regsize_xmode == regsize_ymode
3656 && (offset % regsize_ymode) == 0)
3658 info->representable_p = true;
3659 info->nregs = nregs_ymode;
3660 info->offset = offset / regsize_ymode;
3661 gcc_assert (info->offset + info->nregs <= nregs_xmode);
3662 return;
3666 /* Lowpart subregs are otherwise valid. */
3667 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3669 info->representable_p = true;
3670 rknown = true;
3672 if (offset == 0 || nregs_xmode == nregs_ymode)
3674 info->offset = 0;
3675 info->nregs = nregs_ymode;
3676 return;
3680 /* This should always pass, otherwise we don't know how to verify
3681 the constraint. These conditions may be relaxed but
3682 subreg_regno_offset would need to be redesigned. */
3683 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3684 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3686 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
3687 && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
3689 HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
3690 HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
3691 HOST_WIDE_INT off_low = offset & (ysize - 1);
3692 HOST_WIDE_INT off_high = offset & ~(ysize - 1);
3693 offset = (xsize - ysize - off_high) | off_low;
3695 /* The XMODE value can be seen as a vector of NREGS_XMODE
3696 values. The subreg must represent a lowpart of given field.
3697 Compute what field it is. */
3698 offset_adj = offset;
3699 offset_adj -= subreg_lowpart_offset (ymode,
3700 mode_for_size (GET_MODE_BITSIZE (xmode)
3701 / nregs_xmode,
3702 MODE_INT, 0));
3704 /* Size of ymode must not be greater than the size of xmode. */
3705 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3706 gcc_assert (mode_multiple != 0);
3708 y_offset = offset / GET_MODE_SIZE (ymode);
3709 y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
3710 nregs_multiple = nregs_xmode / nregs_ymode;
3712 gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
3713 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3715 if (!rknown)
3717 info->representable_p = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
3718 rknown = true;
3720 info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
3721 info->nregs = nregs_ymode;
3724 /* This function returns the regno offset of a subreg expression.
3725 xregno - A regno of an inner hard subreg_reg (or what will become one).
3726 xmode - The mode of xregno.
3727 offset - The byte offset.
3728 ymode - The mode of a top level SUBREG (or what may become one).
3729 RETURN - The regno offset which would be used. */
3730 unsigned int
3731 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
3732 unsigned int offset, machine_mode ymode)
3734 struct subreg_info info;
3735 subreg_get_info (xregno, xmode, offset, ymode, &info);
3736 return info.offset;
3739 /* This function returns true when the offset is representable via
3740 subreg_offset in the given regno.
3741 xregno - A regno of an inner hard subreg_reg (or what will become one).
3742 xmode - The mode of xregno.
3743 offset - The byte offset.
3744 ymode - The mode of a top level SUBREG (or what may become one).
3745 RETURN - Whether the offset is representable. */
3746 bool
3747 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
3748 unsigned int offset, machine_mode ymode)
3750 struct subreg_info info;
3751 subreg_get_info (xregno, xmode, offset, ymode, &info);
3752 return info.representable_p;
3755 /* Return the number of a YMODE register to which
3757 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3759 can be simplified. Return -1 if the subreg can't be simplified.
3761 XREGNO is a hard register number. */
3764 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
3765 unsigned int offset, machine_mode ymode)
3767 struct subreg_info info;
3768 unsigned int yregno;
3770 #ifdef CANNOT_CHANGE_MODE_CLASS
3771 /* Give the backend a chance to disallow the mode change. */
3772 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3773 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3774 && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
3775 /* We can use mode change in LRA for some transformations. */
3776 && ! lra_in_progress)
3777 return -1;
3778 #endif
3780 /* We shouldn't simplify stack-related registers. */
3781 if ((!reload_completed || frame_pointer_needed)
3782 && xregno == FRAME_POINTER_REGNUM)
3783 return -1;
3785 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3786 && xregno == ARG_POINTER_REGNUM)
3787 return -1;
3789 if (xregno == STACK_POINTER_REGNUM
3790 /* We should convert hard stack register in LRA if it is
3791 possible. */
3792 && ! lra_in_progress)
3793 return -1;
3795 /* Try to get the register offset. */
3796 subreg_get_info (xregno, xmode, offset, ymode, &info);
3797 if (!info.representable_p)
3798 return -1;
3800 /* Make sure that the offsetted register value is in range. */
3801 yregno = xregno + info.offset;
3802 if (!HARD_REGISTER_NUM_P (yregno))
3803 return -1;
3805 /* See whether (reg:YMODE YREGNO) is valid.
3807 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3808 This is a kludge to work around how complex FP arguments are passed
3809 on IA-64 and should be fixed. See PR target/49226. */
3810 if (!HARD_REGNO_MODE_OK (yregno, ymode)
3811 && HARD_REGNO_MODE_OK (xregno, xmode))
3812 return -1;
3814 return (int) yregno;
3817 /* Return the final regno that a subreg expression refers to. */
3818 unsigned int
3819 subreg_regno (const_rtx x)
3821 unsigned int ret;
3822 rtx subreg = SUBREG_REG (x);
3823 int regno = REGNO (subreg);
3825 ret = regno + subreg_regno_offset (regno,
3826 GET_MODE (subreg),
3827 SUBREG_BYTE (x),
3828 GET_MODE (x));
3829 return ret;
3833 /* Return the number of registers that a subreg expression refers
3834 to. */
3835 unsigned int
3836 subreg_nregs (const_rtx x)
3838 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3841 /* Return the number of registers that a subreg REG with REGNO
3842 expression refers to. This is a copy of the rtlanal.c:subreg_nregs
3843 changed so that the regno can be passed in. */
3845 unsigned int
3846 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3848 struct subreg_info info;
3849 rtx subreg = SUBREG_REG (x);
3851 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3852 &info);
3853 return info.nregs;
3857 struct parms_set_data
3859 int nregs;
3860 HARD_REG_SET regs;
3863 /* Helper function for noticing stores to parameter registers. */
3864 static void
3865 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3867 struct parms_set_data *const d = (struct parms_set_data *) data;
3868 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3869 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3871 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3872 d->nregs--;
3876 /* Look backward for the first parameter to be loaded.
3877 Note that loads of all parameters will not necessarily be
3878 found if CSE has eliminated some of them (e.g., an argument
3879 to the outer function is passed down as a parameter).
3880 Do not skip BOUNDARY. */
3881 rtx_insn *
3882 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
3884 struct parms_set_data parm;
3885 rtx p;
3886 rtx_insn *before, *first_set;
3888 /* Since different machines initialize their parameter registers
3889 in different orders, assume nothing. Collect the set of all
3890 parameter registers. */
3891 CLEAR_HARD_REG_SET (parm.regs);
3892 parm.nregs = 0;
3893 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3894 if (GET_CODE (XEXP (p, 0)) == USE
3895 && REG_P (XEXP (XEXP (p, 0), 0)))
3897 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3899 /* We only care about registers which can hold function
3900 arguments. */
3901 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3902 continue;
3904 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3905 parm.nregs++;
3907 before = call_insn;
3908 first_set = call_insn;
3910 /* Search backward for the first set of a register in this set. */
3911 while (parm.nregs && before != boundary)
3913 before = PREV_INSN (before);
3915 /* It is possible that some loads got CSEed from one call to
3916 another. Stop in that case. */
3917 if (CALL_P (before))
3918 break;
3920 /* Our caller must either ensure that we will find all sets
3921 (in case code has not been optimized yet), or take care of
3922 possible labels by setting BOUNDARY to the preceding
3923 CODE_LABEL. */
3924 if (LABEL_P (before))
3926 gcc_assert (before == boundary);
3927 break;
3930 if (INSN_P (before))
3932 int nregs_old = parm.nregs;
3933 note_stores (PATTERN (before), parms_set, &parm);
3934 /* If we found something that did not set a parameter reg,
3935 we're done. Do not keep going, as that might result
3936 in hoisting an insn before the setting of a pseudo
3937 that is used by the hoisted insn. */
3938 if (nregs_old != parm.nregs)
3939 first_set = before;
3940 else
3941 break;
3944 return first_set;
3947 /* Return true if we should avoid inserting code between INSN and preceding
3948 call instruction. */
3950 bool
3951 keep_with_call_p (const rtx_insn *insn)
3953 rtx set;
3955 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3957 if (REG_P (SET_DEST (set))
3958 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3959 && fixed_regs[REGNO (SET_DEST (set))]
3960 && general_operand (SET_SRC (set), VOIDmode))
3961 return true;
3962 if (REG_P (SET_SRC (set))
3963 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
3964 && REG_P (SET_DEST (set))
3965 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3966 return true;
3967 /* There may be a stack pop just after the call and before the store
3968 of the return register. Search for the actual store when deciding
3969 if we can break or not. */
3970 if (SET_DEST (set) == stack_pointer_rtx)
3972 /* This CONST_CAST is okay because next_nonnote_insn just
3973 returns its argument and we assign it to a const_rtx
3974 variable. */
3975 const rtx_insn *i2
3976 = next_nonnote_insn (const_cast<rtx_insn *> (insn));
3977 if (i2 && keep_with_call_p (i2))
3978 return true;
3981 return false;
3984 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3985 to non-complex jumps. That is, direct unconditional, conditional,
3986 and tablejumps, but not computed jumps or returns. It also does
3987 not apply to the fallthru case of a conditional jump. */
3989 bool
3990 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
3992 rtx tmp = JUMP_LABEL (jump_insn);
3993 rtx_jump_table_data *table;
3995 if (label == tmp)
3996 return true;
3998 if (tablejump_p (jump_insn, NULL, &table))
4000 rtvec vec = table->get_labels ();
4001 int i, veclen = GET_NUM_ELEM (vec);
4003 for (i = 0; i < veclen; ++i)
4004 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4005 return true;
4008 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4009 return true;
4011 return false;
4015 /* Return an estimate of the cost of computing rtx X.
4016 One use is in cse, to decide which expression to keep in the hash table.
4017 Another is in rtl generation, to pick the cheapest way to multiply.
4018 Other uses like the latter are expected in the future.
4020 X appears as operand OPNO in an expression with code OUTER_CODE.
4021 SPEED specifies whether costs optimized for speed or size should
4022 be returned. */
4025 rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
4026 int opno, bool speed)
4028 int i, j;
4029 enum rtx_code code;
4030 const char *fmt;
4031 int total;
4032 int factor;
4034 if (x == 0)
4035 return 0;
4037 if (GET_MODE (x) != VOIDmode)
4038 mode = GET_MODE (x);
4040 /* A size N times larger than UNITS_PER_WORD likely needs N times as
4041 many insns, taking N times as long. */
4042 factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
4043 if (factor == 0)
4044 factor = 1;
4046 /* Compute the default costs of certain things.
4047 Note that targetm.rtx_costs can override the defaults. */
4049 code = GET_CODE (x);
4050 switch (code)
4052 case MULT:
4053 /* Multiplication has time-complexity O(N*N), where N is the
4054 number of units (translated from digits) when using
4055 schoolbook long multiplication. */
4056 total = factor * factor * COSTS_N_INSNS (5);
4057 break;
4058 case DIV:
4059 case UDIV:
4060 case MOD:
4061 case UMOD:
4062 /* Similarly, complexity for schoolbook long division. */
4063 total = factor * factor * COSTS_N_INSNS (7);
4064 break;
4065 case USE:
4066 /* Used in combine.c as a marker. */
4067 total = 0;
4068 break;
4069 case SET:
4070 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4071 the mode for the factor. */
4072 mode = GET_MODE (SET_DEST (x));
4073 factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
4074 if (factor == 0)
4075 factor = 1;
4076 /* Fall through. */
4077 default:
4078 total = factor * COSTS_N_INSNS (1);
4081 switch (code)
4083 case REG:
4084 return 0;
4086 case SUBREG:
4087 total = 0;
4088 /* If we can't tie these modes, make this expensive. The larger
4089 the mode, the more expensive it is. */
4090 if (! MODES_TIEABLE_P (mode, GET_MODE (SUBREG_REG (x))))
4091 return COSTS_N_INSNS (2 + factor);
4092 break;
4094 default:
4095 if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
4096 return total;
4097 break;
4100 /* Sum the costs of the sub-rtx's, plus cost of this operation,
4101 which is already in total. */
4103 fmt = GET_RTX_FORMAT (code);
4104 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4105 if (fmt[i] == 'e')
4106 total += rtx_cost (XEXP (x, i), mode, code, i, speed);
4107 else if (fmt[i] == 'E')
4108 for (j = 0; j < XVECLEN (x, i); j++)
4109 total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);
4111 return total;
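/* Usage sketch (illustrative, not part of the original source):
   comparing two candidate expressions for speed might look like

     bool mult_cheaper
       = (rtx_cost (mult_rtx, mode, SET, 1, true)
          < rtx_cost (shift_rtx, mode, SET, 1, true));

   where MULT_RTX and SHIFT_RTX are hypothetical rtxes built by the
   caller, each appearing as operand 1 of a SET. */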
4114 /* Fill in the structure C with information about both speed and size rtx
4115 costs for X, which is operand OPNO in an expression with code OUTER. */
4117 void
4118 get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
4119 struct full_rtx_costs *c)
4121 c->speed = rtx_cost (x, mode, outer, opno, true);
4122 c->size = rtx_cost (x, mode, outer, opno, false);
4126 /* Return the cost of address expression X.
4127 Expect that X is a properly formed address reference.
4129 The SPEED parameter specifies whether costs optimized for speed or size
4130 should be returned. */
4133 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4135 /* We may be asked for the cost of various unusual addresses, such as the
4136 operands of a push instruction. It is not worthwhile to complicate
4137 the target hook with such cases. */
4139 if (!memory_address_addr_space_p (mode, x, as))
4140 return 1000;
4142 return targetm.address_cost (x, mode, as, speed);
4145 /* If the target doesn't override, compute the cost as with arithmetic. */
4148 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4150 return rtx_cost (x, Pmode, MEM, 0, speed);
4154 unsigned HOST_WIDE_INT
4155 nonzero_bits (const_rtx x, machine_mode mode)
4157 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
4160 unsigned int
4161 num_sign_bit_copies (const_rtx x, machine_mode mode)
4163 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
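/* Example (illustrative, not part of the original source): for
   (sign_extend:DI (reg:QI 1)), the top 57 bits of the DImode value
   are copies of the sign bit, so num_sign_bit_copies is expected to
   return 64 - 8 + 1 = 57 when asked about DImode. */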
4166 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4167 It avoids exponential behavior in nonzero_bits1 when X has
4168 identical subexpressions on the first or the second level. */
4170 static unsigned HOST_WIDE_INT
4171 cached_nonzero_bits (const_rtx x, machine_mode mode, const_rtx known_x,
4172 machine_mode known_mode,
4173 unsigned HOST_WIDE_INT known_ret)
4175 if (x == known_x && mode == known_mode)
4176 return known_ret;
4178 /* Try to find identical subexpressions. If found call
4179 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4180 precomputed value for the subexpression as KNOWN_RET. */
4182 if (ARITHMETIC_P (x))
4184 rtx x0 = XEXP (x, 0);
4185 rtx x1 = XEXP (x, 1);
4187 /* Check the first level. */
4188 if (x0 == x1)
4189 return nonzero_bits1 (x, mode, x0, mode,
4190 cached_nonzero_bits (x0, mode, known_x,
4191 known_mode, known_ret));
4193 /* Check the second level. */
4194 if (ARITHMETIC_P (x0)
4195 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4196 return nonzero_bits1 (x, mode, x1, mode,
4197 cached_nonzero_bits (x1, mode, known_x,
4198 known_mode, known_ret));
4200 if (ARITHMETIC_P (x1)
4201 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4202 return nonzero_bits1 (x, mode, x0, mode,
4203 cached_nonzero_bits (x0, mode, known_x,
4204 known_mode, known_ret));
4207 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4210 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4211 We don't let nonzero_bits recur into num_sign_bit_copies, because that
4212 is less useful. We can't allow both, because that results in exponential
4213 run time recursion. There is a nullstone testcase that triggered
4214 this. This macro avoids accidental uses of num_sign_bit_copies. */
4215 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4217 /* Given an expression, X, compute which bits in X can be nonzero.
4218 We don't care about bits outside of those defined in MODE.
4220 For most X this is simply GET_MODE_MASK (MODE), but if X is
4221 an arithmetic operation, we can do better. */
4223 static unsigned HOST_WIDE_INT
4224 nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
4225 machine_mode known_mode,
4226 unsigned HOST_WIDE_INT known_ret)
4228 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4229 unsigned HOST_WIDE_INT inner_nz;
4230 enum rtx_code code;
4231 machine_mode inner_mode;
4232 unsigned int mode_width = GET_MODE_PRECISION (mode);
4234 /* For floating-point and vector values, assume all bits are needed. */
4235 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
4236 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4237 return nonzero;
4239 /* If X is wider than MODE, use its mode instead. */
4240 if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
4242 mode = GET_MODE (x);
4243 nonzero = GET_MODE_MASK (mode);
4244 mode_width = GET_MODE_PRECISION (mode);
4247 if (mode_width > HOST_BITS_PER_WIDE_INT)
4248 /* Our only callers in this case look for single bit values. So
4249 just return the mode mask. Those tests will then be false. */
4250 return nonzero;
4252 /* If MODE is wider than X, but both are a single word for both the host
4253 and target machines, we can compute this from which bits of the
4254 object might be nonzero in its own mode, taking into account the fact
4255 that on many CISC machines, accessing an object in a wider mode
4256 causes the high-order bits to become undefined. So they are
4257 not known to be zero. */
4259 if (!WORD_REGISTER_OPERATIONS
4260 && GET_MODE (x) != VOIDmode
4261 && GET_MODE (x) != mode
4262 && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
4263 && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
4264 && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
4266 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
4267 known_x, known_mode, known_ret);
4268 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
4269 return nonzero;
4272 code = GET_CODE (x);
4273 switch (code)
4275 case REG:
4276 #if defined(POINTERS_EXTEND_UNSIGNED)
4277 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4278 all the bits above ptr_mode are known to be zero. */
4279 /* As we do not know which address space the pointer is referring to,
4280 we can do this only if the target does not support different pointer
4281 or address modes depending on the address space. */
4282 if (target_default_pointer_address_modes_p ()
4283 && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4284 && REG_POINTER (x)
4285 && !targetm.have_ptr_extend ())
4286 nonzero &= GET_MODE_MASK (ptr_mode);
4287 #endif
4289 /* Include declared information about alignment of pointers. */
4290 /* ??? We don't properly preserve REG_POINTER changes across
4291 pointer-to-integer casts, so we can't trust it except for
4292 things that we know must be pointers. See execute/960116-1.c. */
4293 if ((x == stack_pointer_rtx
4294 || x == frame_pointer_rtx
4295 || x == arg_pointer_rtx)
4296 && REGNO_POINTER_ALIGN (REGNO (x)))
4298 unsigned HOST_WIDE_INT alignment
4299 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4301 #ifdef PUSH_ROUNDING
4302 /* If PUSH_ROUNDING is defined, it is possible for the
4303 stack to be momentarily aligned only to that amount,
4304 so we pick the least alignment. */
4305 if (x == stack_pointer_rtx && PUSH_ARGS)
4306 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
4307 alignment);
4308 #endif
4310 nonzero &= ~(alignment - 1);
4314 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4315 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
4316 known_mode, known_ret,
4317 &nonzero_for_hook);
4319 if (new_rtx)
4320 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4321 known_mode, known_ret);
4323 return nonzero_for_hook;
4326 case CONST_INT:
4327 /* If X is negative in MODE, sign-extend the value. */
4328 if (SHORT_IMMEDIATES_SIGN_EXTEND && INTVAL (x) > 0
4329 && mode_width < BITS_PER_WORD
4330 && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
4331 != 0)
4332 return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4334 return UINTVAL (x);
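/* Illustrative example (made-up value): on a target with
SHORT_IMMEDIATES_SIGN_EXTEND, asking for the nonzero bits of
(const_int 0x80) in QImode finds the QImode sign bit set, so the
constant is treated as already sign-extended and
0xffffffffffffff80 is returned rather than plain 0x80. */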
4336 case MEM:
4337 #ifdef LOAD_EXTEND_OP
4338 /* On many, if not most, RISC machines, reading a byte from memory
4339 zeros the rest of the register. Noticing that fact saves a lot
4340 of extra zero-extends. */
4341 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
4342 nonzero &= GET_MODE_MASK (GET_MODE (x));
4343 #endif
4344 break;
4346 case EQ: case NE:
4347 case UNEQ: case LTGT:
4348 case GT: case GTU: case UNGT:
4349 case LT: case LTU: case UNLT:
4350 case GE: case GEU: case UNGE:
4351 case LE: case LEU: case UNLE:
4352 case UNORDERED: case ORDERED:
4353 /* If this produces an integer result, we know which bits are set.
4354 Code here used to clear bits outside the mode of X, but that is
4355 now done above. */
4356 /* Mind that MODE is the mode the caller wants to look at this
4357 operation in, and not the actual operation mode. We can wind
4358 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4359 that describes the results of a vector compare. */
4360 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
4361 && mode_width <= HOST_BITS_PER_WIDE_INT)
4362 nonzero = STORE_FLAG_VALUE;
4363 break;
4365 case NEG:
4366 #if 0
4367 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4368 and num_sign_bit_copies. */
4369 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4370 == GET_MODE_PRECISION (GET_MODE (x)))
4371 nonzero = 1;
4372 #endif
4374 if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
4375 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
4376 break;
4378 case ABS:
4379 #if 0
4380 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4381 and num_sign_bit_copies. */
4382 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4383 == GET_MODE_PRECISION (GET_MODE (x)))
4384 nonzero = 1;
4385 #endif
4386 break;
4388 case TRUNCATE:
4389 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4390 known_x, known_mode, known_ret)
4391 & GET_MODE_MASK (mode));
4392 break;
4394 case ZERO_EXTEND:
4395 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4396 known_x, known_mode, known_ret);
4397 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4398 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4399 break;
4401 case SIGN_EXTEND:
4402 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4403 Otherwise, show that all the bits in the outer mode but not in the
4404 inner mode may be nonzero. */
4405 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4406 known_x, known_mode, known_ret);
4407 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4409 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4410 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4411 inner_nz |= (GET_MODE_MASK (mode)
4412 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4415 nonzero &= inner_nz;
4416 break;
4418 case AND:
4419 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4420 known_x, known_mode, known_ret)
4421 & cached_nonzero_bits (XEXP (x, 1), mode,
4422 known_x, known_mode, known_ret);
4423 break;
4425 case XOR: case IOR:
4426 case UMIN: case UMAX: case SMIN: case SMAX:
4428 unsigned HOST_WIDE_INT nonzero0
4429 = cached_nonzero_bits (XEXP (x, 0), mode,
4430 known_x, known_mode, known_ret);
4432 /* Don't call nonzero_bits a second time if it cannot change
4433 anything. */
4434 if ((nonzero & nonzero0) != nonzero)
4435 nonzero &= nonzero0
4436 | cached_nonzero_bits (XEXP (x, 1), mode,
4437 known_x, known_mode, known_ret);
4439 break;
4441 case PLUS: case MINUS:
4442 case MULT:
4443 case DIV: case UDIV:
4444 case MOD: case UMOD:
4445 /* We can apply the rules of arithmetic to compute the number of
4446 high- and low-order zero bits of these operations. We start by
4447 computing the width (position of the highest-order nonzero bit)
4448 and the number of low-order zero bits for each value. */
4450 unsigned HOST_WIDE_INT nz0
4451 = cached_nonzero_bits (XEXP (x, 0), mode,
4452 known_x, known_mode, known_ret);
4453 unsigned HOST_WIDE_INT nz1
4454 = cached_nonzero_bits (XEXP (x, 1), mode,
4455 known_x, known_mode, known_ret);
4456 int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
4457 int width0 = floor_log2 (nz0) + 1;
4458 int width1 = floor_log2 (nz1) + 1;
4459 int low0 = floor_log2 (nz0 & -nz0);
4460 int low1 = floor_log2 (nz1 & -nz1);
4461 unsigned HOST_WIDE_INT op0_maybe_minusp
4462 = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4463 unsigned HOST_WIDE_INT op1_maybe_minusp
4464 = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4465 unsigned int result_width = mode_width;
4466 int result_low = 0;
4468 switch (code)
4470 case PLUS:
4471 result_width = MAX (width0, width1) + 1;
4472 result_low = MIN (low0, low1);
4473 break;
4474 case MINUS:
4475 result_low = MIN (low0, low1);
4476 break;
4477 case MULT:
4478 result_width = width0 + width1;
4479 result_low = low0 + low1;
4480 break;
4481 case DIV:
4482 if (width1 == 0)
4483 break;
4484 if (!op0_maybe_minusp && !op1_maybe_minusp)
4485 result_width = width0;
4486 break;
4487 case UDIV:
4488 if (width1 == 0)
4489 break;
4490 result_width = width0;
4491 break;
4492 case MOD:
4493 if (width1 == 0)
4494 break;
4495 if (!op0_maybe_minusp && !op1_maybe_minusp)
4496 result_width = MIN (width0, width1);
4497 result_low = MIN (low0, low1);
4498 break;
4499 case UMOD:
4500 if (width1 == 0)
4501 break;
4502 result_width = MIN (width0, width1);
4503 result_low = MIN (low0, low1);
4504 break;
4505 default:
4506 gcc_unreachable ();
4509 if (result_width < mode_width)
4510 nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;
4512 if (result_low > 0)
4513 nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
4515 break;
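/* Worked example with made-up masks: for (mult:SI x y) where the
operands have nonzero bits nz0 == 0x06 (width0 3, low0 1) and
nz1 == 0x0a (width1 4, low1 1), we get result_width == 7 and
result_low == 2, so a NONZERO of 0xffffffff is narrowed to 0x7f and
then to 0x7c. */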
4517 case ZERO_EXTRACT:
4518 if (CONST_INT_P (XEXP (x, 1))
4519 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4520 nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
4521 break;
4523 case SUBREG:
4524 /* If this is a SUBREG formed for a promoted variable that has
4525 been zero-extended, we know that at least the high-order bits
4526 are zero, though others might be too. */
4528 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
4529 nonzero = GET_MODE_MASK (GET_MODE (x))
4530 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
4531 known_x, known_mode, known_ret);
4533 inner_mode = GET_MODE (SUBREG_REG (x));
4534 /* If the inner mode is a single word for both the host and target
4535 machines, we can compute this from which bits of the inner
4536 object might be nonzero. */
4537 if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
4538 && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
4540 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4541 known_x, known_mode, known_ret);
4543 #if WORD_REGISTER_OPERATIONS && defined (LOAD_EXTEND_OP)
4544 /* If this is a typical RISC machine, we only have to worry
4545 about the way loads are extended. */
4546 if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
4547 ? val_signbit_known_set_p (inner_mode, nonzero)
4548 : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
4549 || !MEM_P (SUBREG_REG (x)))
4550 #endif
4552 /* On many CISC machines, accessing an object in a wider mode
4553 causes the high-order bits to become undefined. So they are
4554 not known to be zero. */
4555 if (GET_MODE_PRECISION (GET_MODE (x))
4556 > GET_MODE_PRECISION (inner_mode))
4557 nonzero |= (GET_MODE_MASK (GET_MODE (x))
4558 & ~GET_MODE_MASK (inner_mode));
4561 break;
4563 case ASHIFTRT:
4564 case LSHIFTRT:
4565 case ASHIFT:
4566 case ROTATE:
4567 /* The nonzero bits are in two classes: any bits within MODE
4568 that aren't in GET_MODE (x) are always significant. The rest of the
4569 nonzero bits are those that are significant in the operand of
4570 the shift when shifted the appropriate number of bits. This
4571 shows that high-order bits are cleared by the right shift and
4572 low-order bits by left shifts. */
4573 if (CONST_INT_P (XEXP (x, 1))
4574 && INTVAL (XEXP (x, 1)) >= 0
4575 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4576 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4578 machine_mode inner_mode = GET_MODE (x);
4579 unsigned int width = GET_MODE_PRECISION (inner_mode);
4580 int count = INTVAL (XEXP (x, 1));
4581 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
4582 unsigned HOST_WIDE_INT op_nonzero
4583 = cached_nonzero_bits (XEXP (x, 0), mode,
4584 known_x, known_mode, known_ret);
4585 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4586 unsigned HOST_WIDE_INT outer = 0;
4588 if (mode_width > width)
4589 outer = (op_nonzero & nonzero & ~mode_mask);
4591 if (code == LSHIFTRT)
4592 inner >>= count;
4593 else if (code == ASHIFTRT)
4595 inner >>= count;
4597 /* If the sign bit may have been nonzero before the shift, we
4598 need to mark all the places it could have been copied to
4599 by the shift as possibly nonzero. */
4600 if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
4601 inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
4602 << (width - count);
4604 else if (code == ASHIFT)
4605 inner <<= count;
4606 else
4607 inner = ((inner << (count % width)
4608 | (inner >> (width - (count % width)))) & mode_mask);
4610 nonzero &= (outer | inner);
4612 break;
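/* Worked example with a made-up operand: for
(lshiftrt:SI x (const_int 4)) where the operand's nonzero bits are
0xff00, INNER becomes 0xff00 >> 4 == 0x0ff0, so only bits 4..11 of
the result can be nonzero. */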
4614 case FFS:
4615 case POPCOUNT:
4616 /* This is at most the number of bits in the mode. */
4617 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4618 break;
4620 case CLZ:
4621 /* If CLZ has a known value at zero, then the nonzero bits are
4622 that value, plus the number of bits in the mode minus one. */
4623 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4624 nonzero
4625 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4626 else
4627 nonzero = -1;
4628 break;
4630 case CTZ:
4631 /* If CTZ has a known value at zero, then the nonzero bits are
4632 that value, plus the number of bits in the mode minus one. */
4633 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4634 nonzero
4635 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4636 else
4637 nonzero = -1;
4638 break;
4640 case CLRSB:
4641 /* This is at most the number of bits in the mode minus 1. */
4642 nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4643 break;
4645 case PARITY:
4646 nonzero = 1;
4647 break;
4649 case IF_THEN_ELSE:
4651 unsigned HOST_WIDE_INT nonzero_true
4652 = cached_nonzero_bits (XEXP (x, 1), mode,
4653 known_x, known_mode, known_ret);
4655 /* Don't call nonzero_bits a second time if it cannot change
4656 anything. */
4657 if ((nonzero & nonzero_true) != nonzero)
4658 nonzero &= nonzero_true
4659 | cached_nonzero_bits (XEXP (x, 2), mode,
4660 known_x, known_mode, known_ret);
4662 break;
4664 default:
4665 break;
4668 return nonzero;
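/* A sketch of typical use (the caller and simplify_as_boolean are
hypothetical): passes such as combine call the public nonzero_bits
wrapper to prove properties of a value. */
#if 0
if (nonzero_bits (x, mode) == 1)
/* Only bit 0 can ever be set, so X is known to be 0 or 1. */
simplify_as_boolean (x);
#endif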
4671 /* See the macro definition above. */
4672 #undef cached_num_sign_bit_copies
4675 /* The function cached_num_sign_bit_copies is a wrapper around
4676 num_sign_bit_copies1. It avoids exponential behavior in
4677 num_sign_bit_copies1 when X has identical subexpressions on the
4678 first or the second level. */
4680 static unsigned int
4681 cached_num_sign_bit_copies (const_rtx x, machine_mode mode, const_rtx known_x,
4682 machine_mode known_mode,
4683 unsigned int known_ret)
4685 if (x == known_x && mode == known_mode)
4686 return known_ret;
4688 /* Try to find identical subexpressions. If found call
4689 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4690 the precomputed value for the subexpression as KNOWN_RET. */
4692 if (ARITHMETIC_P (x))
4694 rtx x0 = XEXP (x, 0);
4695 rtx x1 = XEXP (x, 1);
4697 /* Check the first level. */
4698 if (x0 == x1)
4699 return
4700 num_sign_bit_copies1 (x, mode, x0, mode,
4701 cached_num_sign_bit_copies (x0, mode, known_x,
4702 known_mode,
4703 known_ret));
4705 /* Check the second level. */
4706 if (ARITHMETIC_P (x0)
4707 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4708 return
4709 num_sign_bit_copies1 (x, mode, x1, mode,
4710 cached_num_sign_bit_copies (x1, mode, known_x,
4711 known_mode,
4712 known_ret));
4714 if (ARITHMETIC_P (x1)
4715 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4716 return
4717 num_sign_bit_copies1 (x, mode, x0, mode,
4718 cached_num_sign_bit_copies (x0, mode, known_x,
4719 known_mode,
4720 known_ret));
4723 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4726 /* Return the number of bits at the high-order end of X that are known to
4727 be equal to the sign bit. X will be used in mode MODE; if MODE is
4728 VOIDmode, X will be used in its own mode. The returned value will always
4729 be between 1 and the number of bits in MODE. */
4731 static unsigned int
4732 num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x,
4733 machine_mode known_mode,
4734 unsigned int known_ret)
4736 enum rtx_code code = GET_CODE (x);
4737 unsigned int bitwidth = GET_MODE_PRECISION (mode);
4738 int num0, num1, result;
4739 unsigned HOST_WIDE_INT nonzero;
4741 /* If we weren't given a mode, use the mode of X. If the mode is still
4742 VOIDmode, we don't know anything. Likewise if one of the modes is
4743 floating-point. */
4745 if (mode == VOIDmode)
4746 mode = GET_MODE (x);
4748 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
4749 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4750 return 1;
4752 /* For a smaller object, just ignore the high bits. */
4753 if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
4755 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4756 known_x, known_mode, known_ret);
4757 return MAX (1,
4758 num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
4761 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
4763 /* If this machine does not do all register operations on the entire
4764 register and MODE is wider than the mode of X, we can say nothing
4765 at all about the high-order bits. */
4766 if (!WORD_REGISTER_OPERATIONS)
4767 return 1;
4769 /* Likewise on machines that do, if the mode of the object is smaller
4770 than a word and loads of that size don't sign extend, we can say
4771 nothing about the high order bits. */
4772 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
4773 #ifdef LOAD_EXTEND_OP
4774 && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
4775 #endif
4777 return 1;
4780 switch (code)
4782 case REG:
4784 #if defined(POINTERS_EXTEND_UNSIGNED)
4785 /* If pointers extend signed and this is a pointer in Pmode, say that
4786 all the bits above ptr_mode are known to be sign bit copies. */
4787 /* As we do not know which address space the pointer is referring to,
4788 we can do this only if the target does not support different pointer
4789 or address modes depending on the address space. */
4790 if (target_default_pointer_address_modes_p ()
4791 && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4792 && mode == Pmode && REG_POINTER (x)
4793 && !targetm.have_ptr_extend ())
4794 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
4795 #endif
4798 unsigned int copies_for_hook = 1, copies = 1;
4799 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4800 known_mode, known_ret,
4801 &copies_for_hook);
4803 if (new_rtx)
4804 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
4805 known_mode, known_ret);
4807 if (copies > 1 || copies_for_hook > 1)
4808 return MAX (copies, copies_for_hook);
4810 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4812 break;
4814 case MEM:
4815 #ifdef LOAD_EXTEND_OP
4816 /* Some RISC machines sign-extend all loads smaller than a word. */
4817 if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
4818 return MAX (1, ((int) bitwidth
4819 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
4820 #endif
4821 break;
4823 case CONST_INT:
4824 /* If the constant is negative, take its 1's complement and remask.
4825 Then see how many zero bits we have. */
4826 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
4827 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4828 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4829 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4831 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
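/* Worked example (made-up constant): for (const_int -4) in SImode,
NONZERO is 0xfffffffc; the sign bit is set, so we complement to 0x3
and return 32 - floor_log2 (3) - 1 == 30, matching the 30 leading
one bits of ...11111100. */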
4833 case SUBREG:
4834 /* If this is a SUBREG for a promoted object that is sign-extended
4835 and we are looking at it in a wider mode, we know that at least the
4836 high-order bits are known to be sign bit copies. */
4838 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
4840 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4841 known_x, known_mode, known_ret);
4842 return MAX ((int) bitwidth
4843 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
4844 num0);
4847 /* For a smaller object, just ignore the high bits. */
4848 if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
4850 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4851 known_x, known_mode, known_ret);
4852 return MAX (1, (num0
4853 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
4854 - bitwidth)));
4857 #ifdef LOAD_EXTEND_OP
4858 /* For paradoxical SUBREGs on machines where all register operations
4859 affect the entire register, just look inside. Note that we are
4860 passing MODE to the recursive call, so the number of sign bit copies
4861 will remain relative to that mode, not the inner mode. */
4863 /* This works only if loads sign extend. Otherwise, if we get a
4864 reload for the inner part, it may be loaded from the stack, and
4865 then we lose all sign bit copies that existed before the store
4866 to the stack. */
4868 if (WORD_REGISTER_OPERATIONS
4869 && paradoxical_subreg_p (x)
4870 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
4871 && MEM_P (SUBREG_REG (x)))
4872 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4873 known_x, known_mode, known_ret);
4874 #endif
4875 break;
4877 case SIGN_EXTRACT:
4878 if (CONST_INT_P (XEXP (x, 1)))
4879 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4880 break;
4882 case SIGN_EXTEND:
4883 return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4884 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4885 known_x, known_mode, known_ret));
4887 case TRUNCATE:
4888 /* For a smaller object, just ignore the high bits. */
4889 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4890 known_x, known_mode, known_ret);
4891 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4892 - bitwidth)));
4894 case NOT:
4895 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4896 known_x, known_mode, known_ret);
4898 case ROTATE: case ROTATERT:
4899 /* If we are rotating left by a number of bits less than the number
4900 of sign bit copies, we can just subtract that amount from the
4901 number. */
4902 if (CONST_INT_P (XEXP (x, 1))
4903 && INTVAL (XEXP (x, 1)) >= 0
4904 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4906 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4907 known_x, known_mode, known_ret);
4908 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
4909 : (int) bitwidth - INTVAL (XEXP (x, 1))));
4911 break;
4913 case NEG:
4914 /* In general, this subtracts one sign bit copy. But if the value
4915 is known to be positive, the number of sign bit copies is the
4916 same as that of the input. Finally, if the input has just one bit
4917 that might be nonzero, all the bits are copies of the sign bit. */
4918 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4919 known_x, known_mode, known_ret);
4920 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4921 return num0 > 1 ? num0 - 1 : 1;
4923 nonzero = nonzero_bits (XEXP (x, 0), mode);
4924 if (nonzero == 1)
4925 return bitwidth;
4927 if (num0 > 1
4928 && (((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
4929 num0--;
4931 return num0;
4933 case IOR: case AND: case XOR:
4934 case SMIN: case SMAX: case UMIN: case UMAX:
4935 /* Logical operations will preserve the number of sign-bit copies.
4936 MIN and MAX operations always return one of the operands. */
4937 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4938 known_x, known_mode, known_ret);
4939 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4940 known_x, known_mode, known_ret);
4942 /* If the second operand is a constant whose high bits are clear, then
4943 regardless of the other term we are guaranteed at least NUM1
4944 high-order zero bits. */
4945 if (code == AND
4946 && num1 > 1
4947 && bitwidth <= HOST_BITS_PER_WIDE_INT
4948 && CONST_INT_P (XEXP (x, 1))
4949 && (UINTVAL (XEXP (x, 1))
4950 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0)
4951 return num1;
4953 /* Similarly for IOR when setting high-order bits. */
4954 if (code == IOR
4955 && num1 > 1
4956 && bitwidth <= HOST_BITS_PER_WIDE_INT
4957 && CONST_INT_P (XEXP (x, 1))
4958 && (UINTVAL (XEXP (x, 1))
4959 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4960 return num1;
4962 return MIN (num0, num1);
4964 case PLUS: case MINUS:
4965 /* For addition and subtraction, we can have a 1-bit carry. However,
4966 if we are subtracting 1 from a positive number, there will not
4967 be such a carry. Furthermore, if the positive number is known to
4968 be 0 or 1, we know the result is either -1 or 0. */
4970 if (code == PLUS && XEXP (x, 1) == constm1_rtx
4971 && bitwidth <= HOST_BITS_PER_WIDE_INT)
4973 nonzero = nonzero_bits (XEXP (x, 0), mode);
4974 if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
4975 return (nonzero == 1 || nonzero == 0 ? bitwidth
4976 : bitwidth - floor_log2 (nonzero) - 1);
4979 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4980 known_x, known_mode, known_ret);
4981 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4982 known_x, known_mode, known_ret);
4983 result = MAX (1, MIN (num0, num1) - 1);
4985 return result;
4987 case MULT:
4988 /* The number of bits of the product is the sum of the number of
4989 bits of both terms. However, unless one of the terms is known
4990 to be positive, we must allow for an additional bit since negating
4991 a negative number can remove one sign bit copy. */
4993 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4994 known_x, known_mode, known_ret);
4995 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4996 known_x, known_mode, known_ret);
4998 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
4999 if (result > 0
5000 && (bitwidth > HOST_BITS_PER_WIDE_INT
5001 || (((nonzero_bits (XEXP (x, 0), mode)
5002 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
5003 && ((nonzero_bits (XEXP (x, 1), mode)
5004 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)))
5005 != 0))))
5006 result--;
5008 return MAX (1, result);
5010 case UDIV:
5011 /* The result must be <= the first operand. If the first operand
5012 has the high bit set, we know nothing about the number of sign
5013 bit copies. */
5014 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5015 return 1;
5016 else if ((nonzero_bits (XEXP (x, 0), mode)
5017 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
5018 return 1;
5019 else
5020 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5021 known_x, known_mode, known_ret);
5023 case UMOD:
5024 /* The result must be <= the second operand. If the second operand
5025 has (or just might have) the high bit set, we know nothing about
5026 the number of sign bit copies. */
5027 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5028 return 1;
5029 else if ((nonzero_bits (XEXP (x, 1), mode)
5030 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
5031 return 1;
5032 else
5033 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
5034 known_x, known_mode, known_ret);
5036 case DIV:
5037 /* Similar to unsigned division, except that we have to worry about
5038 the case where the divisor is negative, in which case we have
5039 to add 1. */
5040 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5041 known_x, known_mode, known_ret);
5042 if (result > 1
5043 && (bitwidth > HOST_BITS_PER_WIDE_INT
5044 || (nonzero_bits (XEXP (x, 1), mode)
5045 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
5046 result--;
5048 return result;
5050 case MOD:
5051 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5052 known_x, known_mode, known_ret);
5053 if (result > 1
5054 && (bitwidth > HOST_BITS_PER_WIDE_INT
5055 || (nonzero_bits (XEXP (x, 1), mode)
5056 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
5057 result--;
5059 return result;
5061 case ASHIFTRT:
5062 /* Shifts by a constant add to the number of bits equal to the
5063 sign bit. */
5064 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5065 known_x, known_mode, known_ret);
5066 if (CONST_INT_P (XEXP (x, 1))
5067 && INTVAL (XEXP (x, 1)) > 0
5068 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
5069 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
5071 return num0;
5073 case ASHIFT:
5074 /* Left shifts destroy copies. */
5075 if (!CONST_INT_P (XEXP (x, 1))
5076 || INTVAL (XEXP (x, 1)) < 0
5077 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
5078 || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
5079 return 1;
5081 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5082 known_x, known_mode, known_ret);
5083 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
5085 case IF_THEN_ELSE:
5086 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5087 known_x, known_mode, known_ret);
5088 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
5089 known_x, known_mode, known_ret);
5090 return MIN (num0, num1);
5092 case EQ: case NE: case GE: case GT: case LE: case LT:
5093 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
5094 case GEU: case GTU: case LEU: case LTU:
5095 case UNORDERED: case ORDERED:
5096 /* If STORE_FLAG_VALUE is negative, take its 1's complement and remask.
5097 Then see how many zero bits we have. */
5098 nonzero = STORE_FLAG_VALUE;
5099 if (bitwidth <= HOST_BITS_PER_WIDE_INT
5100 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
5101 nonzero = (~nonzero) & GET_MODE_MASK (mode);
5103 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5105 default:
5106 break;
5109 /* If we haven't been able to figure it out by one of the above rules,
5110 see if some of the high-order bits are known to be zero. If so,
5111 count those bits and return one less than that amount. If we can't
5112 safely compute the mask for this mode, return the safe minimum of 1. */
5114 bitwidth = GET_MODE_PRECISION (mode);
5115 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5116 return 1;
5118 nonzero = nonzero_bits (x, mode);
5119 return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
5120 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
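/* Illustrative fallback (made-up mask): if none of the cases above
matched but nonzero_bits shows that only the low 8 bits of a 32-bit
value can be set (nonzero == 0xff), bits 31..8 are known zero, so
32 - floor_log2 (0xff) - 1 == 24 sign bit copies are returned. */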
5123 /* Calculate the rtx_cost of a single instruction. A return value of
5124 zero indicates an instruction pattern without a known cost. */
5126 int
5127 insn_rtx_cost (rtx pat, bool speed)
5129 int i, cost;
5130 rtx set;
5132 /* Extract the single set rtx from the instruction pattern.
5133 We can't use single_set since we only have the pattern. */
5134 if (GET_CODE (pat) == SET)
5135 set = pat;
5136 else if (GET_CODE (pat) == PARALLEL)
5138 set = NULL_RTX;
5139 for (i = 0; i < XVECLEN (pat, 0); i++)
5141 rtx x = XVECEXP (pat, 0, i);
5142 if (GET_CODE (x) == SET)
5144 if (set)
5145 return 0;
5146 set = x;
5149 if (!set)
5150 return 0;
5152 else
5153 return 0;
5155 cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
5156 return cost > 0 ? cost : COSTS_N_INSNS (1);
5159 /* Return an estimate of the cost of computing SEQ. */
5161 unsigned
5162 seq_cost (const rtx_insn *seq, bool speed)
5164 unsigned cost = 0;
5165 rtx set;
5167 for (; seq; seq = NEXT_INSN (seq))
5169 set = single_set (seq);
5170 if (set)
5171 cost += set_rtx_cost (set, speed);
5172 else
5173 cost++;
5176 return cost;
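/* A sketch of typical use (emit_candidate_insns and max_cost are
hypothetical): a caller builds a candidate sequence and keeps it only
if it is cheap enough. */
#if 0
start_sequence ();
emit_candidate_insns ();
rtx_insn *seq = get_insns ();
end_sequence ();
if (seq_cost (seq, optimize_insn_for_speed_p ()) <= max_cost)
emit_insn (seq);
#endif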
5179 /* Given an insn INSN and condition COND, return the condition in a
5180 canonical form to simplify testing by callers. Specifically:
5182 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
5183 (2) Both operands will be machine operands; (cc0) will have been replaced.
5184 (3) If an operand is a constant, it will be the second operand.
5185 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
5186 for GE, GEU, and LEU.
5188 If the condition cannot be understood, or is an inequality floating-point
5189 comparison which needs to be reversed, 0 will be returned.
5191 If REVERSE is nonzero, then reverse the condition prior to canonizing it.
5193 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5194 insn used in locating the condition was found. If a replacement test
5195 of the condition is desired, it should be placed in front of that
5196 insn and we will be sure that the inputs are still valid.
5198 If WANT_REG is nonzero, we wish the condition to be relative to that
5199 register, if possible. Therefore, do not canonicalize the condition
5200 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
5201 to be a compare to a CC mode register.
5203 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
5204 and at INSN. */
5206 rtx
5207 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
5208 rtx_insn **earliest,
5209 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5211 enum rtx_code code;
5212 rtx_insn *prev = insn;
5213 const_rtx set;
5214 rtx tem;
5215 rtx op0, op1;
5216 int reverse_code = 0;
5217 machine_mode mode;
5218 basic_block bb = BLOCK_FOR_INSN (insn);
5220 code = GET_CODE (cond);
5221 mode = GET_MODE (cond);
5222 op0 = XEXP (cond, 0);
5223 op1 = XEXP (cond, 1);
5225 if (reverse)
5226 code = reversed_comparison_code (cond, insn);
5227 if (code == UNKNOWN)
5228 return 0;
5230 if (earliest)
5231 *earliest = insn;
5233 /* If we are comparing a register with zero, see if the register is set
5234 in the previous insn to a COMPARE or a comparison operation. Perform
5235 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
5236 in cse.c */
5238 while ((GET_RTX_CLASS (code) == RTX_COMPARE
5239 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5240 && op1 == CONST0_RTX (GET_MODE (op0))
5241 && op0 != want_reg)
5243 /* Set nonzero when we find something of interest. */
5244 rtx x = 0;
5246 /* If comparison with cc0, import actual comparison from compare
5247 insn. */
5248 if (op0 == cc0_rtx)
5250 if ((prev = prev_nonnote_insn (prev)) == 0
5251 || !NONJUMP_INSN_P (prev)
5252 || (set = single_set (prev)) == 0
5253 || SET_DEST (set) != cc0_rtx)
5254 return 0;
5256 op0 = SET_SRC (set);
5257 op1 = CONST0_RTX (GET_MODE (op0));
5258 if (earliest)
5259 *earliest = prev;
5262 /* If this is a COMPARE, pick up the two things being compared. */
5263 if (GET_CODE (op0) == COMPARE)
5265 op1 = XEXP (op0, 1);
5266 op0 = XEXP (op0, 0);
5267 continue;
5269 else if (!REG_P (op0))
5270 break;
5272 /* Go back to the previous insn. Stop if it is not an INSN. We also
5273 stop if it isn't a single set or if it has a REG_INC note because
5274 we don't want to bother dealing with it. */
5276 prev = prev_nonnote_nondebug_insn (prev);
5278 if (prev == 0
5279 || !NONJUMP_INSN_P (prev)
5280 || FIND_REG_INC_NOTE (prev, NULL_RTX)
5281 /* In cfglayout mode, there do not have to be labels at the
5282 beginning of a block, or jumps at the end, so the previous
5283 conditions would not stop us when we reach bb boundary. */
5284 || BLOCK_FOR_INSN (prev) != bb)
5285 break;
5287 set = set_of (op0, prev);
5289 if (set
5290 && (GET_CODE (set) != SET
5291 || !rtx_equal_p (SET_DEST (set), op0)))
5292 break;
5294 /* If this is setting OP0, get what it sets it to if it looks
5295 relevant. */
5296 if (set)
5298 machine_mode inner_mode = GET_MODE (SET_DEST (set));
5299 #ifdef FLOAT_STORE_FLAG_VALUE
5300 REAL_VALUE_TYPE fsfv;
5301 #endif
5303 /* ??? We may not combine comparisons done in a CCmode with
5304 comparisons not done in a CCmode. This is to aid targets
5305 like Alpha that have an IEEE compliant EQ instruction, and
5306 a non-IEEE compliant BEQ instruction. The use of CCmode is
5307 actually artificial, simply to prevent the combination, but
5308 should not affect other platforms.
5310 However, we must allow VOIDmode comparisons to match either
5311 CCmode or non-CCmode comparison, because some ports have
5312 modeless comparisons inside branch patterns.
5314 ??? This mode check should perhaps look more like the mode check
5315 in simplify_comparison in combine. */
5316 if (((GET_MODE_CLASS (mode) == MODE_CC)
5317 != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5318 && mode != VOIDmode
5319 && inner_mode != VOIDmode)
5320 break;
5321 if (GET_CODE (SET_SRC (set)) == COMPARE
5322 || (((code == NE
5323 || (code == LT
5324 && val_signbit_known_set_p (inner_mode,
5325 STORE_FLAG_VALUE))
5326 #ifdef FLOAT_STORE_FLAG_VALUE
5327 || (code == LT
5328 && SCALAR_FLOAT_MODE_P (inner_mode)
5329 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5330 REAL_VALUE_NEGATIVE (fsfv)))
5331 #endif
5333 && COMPARISON_P (SET_SRC (set))))
5334 x = SET_SRC (set);
5335 else if (((code == EQ
5336 || (code == GE
5337 && val_signbit_known_set_p (inner_mode,
5338 STORE_FLAG_VALUE))
5339 #ifdef FLOAT_STORE_FLAG_VALUE
5340 || (code == GE
5341 && SCALAR_FLOAT_MODE_P (inner_mode)
5342 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5343 REAL_VALUE_NEGATIVE (fsfv)))
5344 #endif
5346 && COMPARISON_P (SET_SRC (set)))
5348 reverse_code = 1;
5349 x = SET_SRC (set);
5351 else if ((code == EQ || code == NE)
5352 && GET_CODE (SET_SRC (set)) == XOR)
5353 /* Handle sequences like:
5355 (set op0 (xor X Y))
5356 ...(eq|ne op0 (const_int 0))...
5358 in which case:
5360 (eq op0 (const_int 0)) reduces to (eq X Y)
5361 (ne op0 (const_int 0)) reduces to (ne X Y)
5363 This is the form used by MIPS16, for example. */
5364 x = SET_SRC (set);
5365 else
5366 break;
5369 else if (reg_set_p (op0, prev))
5370 /* If this sets OP0, but not directly, we have to give up. */
5371 break;
5373 if (x)
5375 /* If the caller is expecting the condition to be valid at INSN,
5376 make sure X doesn't change before INSN. */
5377 if (valid_at_insn_p)
5378 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5379 break;
5380 if (COMPARISON_P (x))
5381 code = GET_CODE (x);
5382 if (reverse_code)
5384 code = reversed_comparison_code (x, prev);
5385 if (code == UNKNOWN)
5386 return 0;
5387 reverse_code = 0;
5390 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5391 if (earliest)
5392 *earliest = prev;
5396 /* If constant is first, put it last. */
5397 if (CONSTANT_P (op0))
5398 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5400 /* If OP0 is the result of a comparison, we weren't able to find what
5401 was really being compared, so fail. */
5402 if (!allow_cc_mode
5403 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5404 return 0;
5406 /* Canonicalize any ordered comparison with an integer constant that
5407 includes equality (LE, GE, LEU, GEU) into a strict comparison, if we
5408 can do the computation in the relevant mode without overflow. */
5410 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
5411 && CONST_INT_P (op1)
5412 && GET_MODE (op0) != VOIDmode
5413 && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
5415 HOST_WIDE_INT const_val = INTVAL (op1);
5416 unsigned HOST_WIDE_INT uconst_val = const_val;
5417 unsigned HOST_WIDE_INT max_val
5418 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
5420 switch (code)
5422 case LE:
5423 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5424 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
5425 break;
5427 /* When cross-compiling, const_val might be sign-extended from
5428 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
5429 case GE:
5430 if ((const_val & max_val)
5431 != ((unsigned HOST_WIDE_INT) 1
5432 << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
5433 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
5434 break;
5436 case LEU:
5437 if (uconst_val < max_val)
5438 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
5439 break;
5441 case GEU:
5442 if (uconst_val != 0)
5443 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
5444 break;
5446 default:
5447 break;
5451 /* Never return CC0; return zero instead. */
5452 if (CC0_P (op0))
5453 return 0;
5455 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
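/* Illustrative canonicalizations (register R, SImode):
(le R (const_int 4)) becomes (lt R (const_int 5)) by rule (4), and
(gtu (const_int 7) R) becomes (ltu R (const_int 7)) by rule (3). */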
5458 /* Given a jump insn JUMP, return the condition that will cause it to branch
5459 to its JUMP_LABEL. If the condition cannot be understood, or is an
5460 inequality floating-point comparison which needs to be reversed, 0 will
5461 be returned.
5463 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5464 insn used in locating the condition was found. If a replacement test
5465 of the condition is desired, it should be placed in front of that
5466 insn and we will be sure that the inputs are still valid. If EARLIEST
5467 is null, the returned condition will be valid at INSN.
5469 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5470 compare CC mode register.
5472 VALID_AT_INSN_P is the same as for canonicalize_condition. */
5474 rtx
5475 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
5476 int valid_at_insn_p)
5478 rtx cond;
5479 int reverse;
5480 rtx set;
5482 /* If this is not a standard conditional jump, we can't parse it. */
5483 if (!JUMP_P (jump)
5484 || ! any_condjump_p (jump))
5485 return 0;
5486 set = pc_set (jump);
5488 cond = XEXP (SET_SRC (set), 0);
5490 /* If this branches to JUMP_LABEL when the condition is false, reverse
5491 the condition. */
5492 reverse
5493 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5494 && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
5496 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5497 allow_cc_mode, valid_at_insn_p);
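/* A sketch of typical use (process_condition is hypothetical):
if-conversion style passes call this on a conditional jump. */
#if 0
rtx_insn *earliest;
rtx cond = get_condition (jump, &earliest, /*allow_cc_mode=*/0,
/*valid_at_insn_p=*/1);
if (cond != NULL_RTX)
/* GET_CODE (cond) is now a canonical comparison such as LT. */
process_condition (cond, earliest);
#endif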
5500 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5501 TARGET_MODE_REP_EXTENDED.
5503 Note that we assume that the property of
5504 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5505 narrower than mode B. I.e., if A is a mode narrower than B then in
5506 order to be able to operate on it in mode B, mode A needs to
5507 satisfy the requirements set by the representation of mode B. */
5509 static void
5510 init_num_sign_bit_copies_in_rep (void)
5512 machine_mode mode, in_mode;
5514 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
5515 in_mode = GET_MODE_WIDER_MODE (in_mode))
5516 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
5517 mode = GET_MODE_WIDER_MODE (mode))
5519 machine_mode i;
5521 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5522 extends to the next widest mode. */
5523 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5524 || GET_MODE_WIDER_MODE (mode) == in_mode);
5526 /* We are in in_mode. Count how many bits outside of mode
5527 have to be copies of the sign-bit. */
5528 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
5530 machine_mode wider = GET_MODE_WIDER_MODE (i);
5532 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5533 /* We can only check sign-bit copies starting from the
5534 top-bit. In order to be able to check the bits we
5535 have already seen we pretend that subsequent bits
5536 have to be sign-bit copies too. */
5537 || num_sign_bit_copies_in_rep [in_mode][mode])
5538 num_sign_bit_copies_in_rep [in_mode][mode]
5539 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5544 /* Suppose that truncation from the machine mode of X to MODE is not a
5545 no-op. See if there is anything special about X so that we can
5546 assume it already contains a truncated value of MODE. */
5548 bool
5549 truncated_to_mode (machine_mode mode, const_rtx x)
5551 /* This register has already been used in MODE without explicit
5552 truncation. */
5553 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5554 return true;
5556 /* See if we already satisfy the requirements of MODE. If yes we
5557 can just switch to MODE. */
5558 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5559 && (num_sign_bit_copies (x, GET_MODE (x))
5560 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5561 return true;
5563 return false;
5566 /* Return true if RTX code CODE has a single sequence of zero or more
5567 "e" operands and no rtvec operands. Initialize its rtx_all_subrtx_bounds
5568 entry in that case. */
5570 static bool
5571 setup_reg_subrtx_bounds (unsigned int code)
5573 const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
5574 unsigned int i = 0;
5575 for (; format[i] != 'e'; ++i)
5577 if (!format[i])
5578 /* No subrtxes. Leave start and count as 0. */
5579 return true;
5580 if (format[i] == 'E' || format[i] == 'V')
5581 return false;
5584 /* Record the sequence of 'e's. */
5585 rtx_all_subrtx_bounds[code].start = i;
5586 do
5587 ++i;
5588 while (format[i] == 'e');
5589 rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
5590 /* rtl-iter.h relies on this. */
5591 gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
5593 for (; format[i]; ++i)
5594 if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
5595 return false;
5597 return true;
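/* Illustrative formats (see rtl.def): PLUS is "ee", giving start 0 and
count 2; SIGN_EXTEND is "e" (start 0, count 1); CONST_INT is "w" and
has no subrtxes; ASM_OPERANDS contains "E" vectors, so it is rejected
here and init_rtlanal gives it a count of UCHAR_MAX. */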
5600 /* Initialize rtx_all_subrtx_bounds. */
5601 void
5602 init_rtlanal (void)
5604 int i;
5605 for (i = 0; i < NUM_RTX_CODE; i++)
5607 if (!setup_reg_subrtx_bounds (i))
5608 rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
5609 if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
5610 rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
5613 init_num_sign_bit_copies_in_rep ();
5616 /* Check whether this is a constant pool constant. */
5617 bool
5618 constant_pool_constant_p (rtx x)
5620 x = avoid_constant_pool_reference (x);
5621 return CONST_DOUBLE_P (x);
5624 /* If M is a bitmask that selects a field of low-order bits within an item but
5625 not the entire word, return the length of the field. Return -1 otherwise.
5626 M is used in machine mode MODE. */
5628 int
5629 low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
5631 if (mode != VOIDmode)
5633 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
5634 return -1;
5635 m &= GET_MODE_MASK (mode);
5638 return exact_log2 (m + 1);
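/* Illustrative values in SImode: m == 0x3f gives exact_log2 (0x40) == 6;
m == 0xff gives 8; m == 0x60 gives exact_log2 (0x61) == -1 because the
field does not start at bit 0. */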
5641 /* Return the mode of MEM's address. */
5643 machine_mode
5644 get_address_mode (rtx mem)
5646 machine_mode mode;
5648 gcc_assert (MEM_P (mem));
5649 mode = GET_MODE (XEXP (mem, 0));
5650 if (mode != VOIDmode)
5651 return mode;
5652 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5655 /* Split up a CONST_DOUBLE or integer constant rtx
5656 into two rtx's for single words,
5657 storing in *FIRST the word that comes first in memory in the target
5658 and in *SECOND the other.
5660 TODO: This function needs to be rewritten to work on any size
5661 integer. */
5663 void
5664 split_double (rtx value, rtx *first, rtx *second)
5666 if (CONST_INT_P (value))
5668 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5670 /* In this case the CONST_INT holds both target words.
5671 Extract the bits from it into two word-sized pieces.
5672 Sign extend each half to HOST_WIDE_INT. */
5673 unsigned HOST_WIDE_INT low, high;
5674 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5675 unsigned bits_per_word = BITS_PER_WORD;
5677 /* Set sign_bit to the most significant bit of a word. */
5678 sign_bit = 1;
5679 sign_bit <<= bits_per_word - 1;
5681 /* Set mask so that all bits of the word are set. We could
5682 have used 1 << BITS_PER_WORD instead of basing the
5683 calculation on sign_bit. However, on machines where
5684 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5685 compiler warning, even though the code would never be
5686 executed. */
5687 mask = sign_bit << 1;
5688 mask--;
5690 /* Set sign_extend as any remaining bits. */
5691 sign_extend = ~mask;
5693 /* Pick the lower word and sign-extend it. */
5694 low = INTVAL (value);
5695 low &= mask;
5696 if (low & sign_bit)
5697 low |= sign_extend;
5699 /* Pick the higher word, shifted to the least significant
5700 bits, and sign-extend it. */
5701 high = INTVAL (value);
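/* The shift is done in two steps, apparently for the same reason as
the mask computation above: a single shift by BITS_PER_WORD would be
undefined on hosts where HOST_BITS_PER_WIDE_INT == BITS_PER_WORD,
even though this branch is never executed there. */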
5702 high >>= bits_per_word - 1;
5703 high >>= 1;
5704 high &= mask;
5705 if (high & sign_bit)
5706 high |= sign_extend;
5708 /* Store the words in the target machine order. */
5709 if (WORDS_BIG_ENDIAN)
5711 *first = GEN_INT (high);
5712 *second = GEN_INT (low);
5714 else
5716 *first = GEN_INT (low);
5717 *second = GEN_INT (high);
5720 else
5722 /* The rule for using CONST_INT for a wider mode
5723 is that we regard the value as signed.
5724 So sign-extend it. */
5725 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
5726 if (WORDS_BIG_ENDIAN)
5728 *first = high;
5729 *second = value;
5731 else
5733 *first = value;
5734 *second = high;
5738 else if (GET_CODE (value) == CONST_WIDE_INT)
5740 /* All of this is scary code and needs to be converted to
5741 properly work with any size integer. */
5742 gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
5743 if (WORDS_BIG_ENDIAN)
5745 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5746 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5748 else
5750 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5751 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5754 else if (!CONST_DOUBLE_P (value))
5756 if (WORDS_BIG_ENDIAN)
5758 *first = const0_rtx;
5759 *second = value;
5761 else
5763 *first = value;
5764 *second = const0_rtx;
5767 else if (GET_MODE (value) == VOIDmode
5768 /* This is the old way we did CONST_DOUBLE integers. */
5769 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
5771 /* In an integer, the words are defined as most and least significant.
5772 So order them by the target's convention. */
5773 if (WORDS_BIG_ENDIAN)
5775 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
5776 *second = GEN_INT (CONST_DOUBLE_LOW (value));
5778 else
5780 *first = GEN_INT (CONST_DOUBLE_LOW (value));
5781 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
5784 else
5786 long l[2];
5788 /* Note, this converts the REAL_VALUE_TYPE to the target's
5789 format, splits up the floating point double and outputs
5790 exactly 32 bits of it into each of l[0] and l[1] --
5791 not necessarily BITS_PER_WORD bits. */
5792 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);
5794 /* If 32 bits is an entire word for the target, but not for the host,
5795 then sign-extend on the host so that the number will look the same
5796 way on the host that it would on the target. See for instance
5797 simplify_unary_operation. The #if is needed to avoid compiler
5798 warnings. */
5800 #if HOST_BITS_PER_LONG > 32
5801 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
5803 if (l[0] & ((long) 1 << 31))
5804 l[0] |= ((unsigned long) (-1) << 32);
5805 if (l[1] & ((long) 1 << 31))
5806 l[1] |= ((unsigned long) (-1) << 32);
5808 #endif
5810 *first = GEN_INT (l[0]);
5811 *second = GEN_INT (l[1]);
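/* Worked example (made-up value): splitting (const_int 0x100000002) for
a target with 32-bit words gives low == 2 and high == 1; with
!WORDS_BIG_ENDIAN that is *first == (const_int 2) and
*second == (const_int 1), and the opposite order otherwise. */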
5815 /* Return true if X is a sign_extract or zero_extract from the least
5816 significant bit. */
5818 static bool
5819 lsb_bitfield_op_p (rtx x)
5821 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
5823 machine_mode mode = GET_MODE (XEXP (x, 0));
5824 HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
5825 HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
5827 return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
5829 return false;
5832 /* Strip outer address "mutations" from LOC and return a pointer to the
5833 inner value. If OUTER_CODE is nonnull, store the code of the innermost
5834 stripped expression there.
5836 "Mutations" either convert between modes or apply some kind of
5837 extension, truncation or alignment. */
5839 rtx *
5840 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
5842 for (;;)
5844 enum rtx_code code = GET_CODE (*loc);
5845 if (GET_RTX_CLASS (code) == RTX_UNARY)
5846 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
5847 used to convert between pointer sizes. */
5848 loc = &XEXP (*loc, 0);
5849 else if (lsb_bitfield_op_p (*loc))
5850 /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
5851 acts as a combined truncation and extension. */
5852 loc = &XEXP (*loc, 0);
5853 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
5854 /* (and ... (const_int -X)) is used to align to X bytes. */
5855 loc = &XEXP (*loc, 0);
5856 else if (code == SUBREG
5857 && !OBJECT_P (SUBREG_REG (*loc))
5858 && subreg_lowpart_p (*loc))
5859 /* A lowpart SUBREG of an operation, (subreg (operator ...) ...), is
5860 used for mode conversion too. */
5861 loc = &SUBREG_REG (*loc);
5862 else
5863 return loc;
5864 if (outer_code)
5865 *outer_code = code;
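/* Illustrative stripping (made-up address): for
(and:DI (mult:DI (reg:DI i) (const_int 4)) (const_int -8))
the alignment AND is stripped, a pointer to the MULT is returned,
and *OUTER_CODE is set to AND. */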
5869 /* Return true if CODE applies some kind of scale. The scaled value is
5870 the first operand and the scale is the second. */
5872 static bool
5873 binary_scale_code_p (enum rtx_code code)
5875 return (code == MULT
5876 || code == ASHIFT
5877 /* Needed by ARM targets. */
5878 || code == ASHIFTRT
5879 || code == LSHIFTRT
5880 || code == ROTATE
5881 || code == ROTATERT);
5884 /* If *INNER can be interpreted as a base, return a pointer to the inner term
5885 (see address_info). Return null otherwise. */
5887 static rtx *
5888 get_base_term (rtx *inner)
5890 if (GET_CODE (*inner) == LO_SUM)
5891 inner = strip_address_mutations (&XEXP (*inner, 0));
5892 if (REG_P (*inner)
5893 || MEM_P (*inner)
5894 || GET_CODE (*inner) == SUBREG
5895 || GET_CODE (*inner) == SCRATCH)
5896 return inner;
5897 return 0;
5900 /* If *INNER can be interpreted as an index, return a pointer to the inner term
5901 (see address_info). Return null otherwise. */
5903 static rtx *
5904 get_index_term (rtx *inner)
5906 /* At present, only constant scales are allowed. */
5907 if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
5908 inner = strip_address_mutations (&XEXP (*inner, 0));
5909 if (REG_P (*inner)
5910 || MEM_P (*inner)
5911 || GET_CODE (*inner) == SUBREG
5912 || GET_CODE (*inner) == SCRATCH)
5913 return inner;
5914 return 0;
5917 /* Set the segment part of address INFO to LOC, given that INNER is the
5918 unmutated value. */
5920 static void
5921 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
5923 gcc_assert (!info->segment);
5924 info->segment = loc;
5925 info->segment_term = inner;
5928 /* Set the base part of address INFO to LOC, given that INNER is the
5929 unmutated value. */
5931 static void
5932 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
5934 gcc_assert (!info->base);
5935 info->base = loc;
5936 info->base_term = inner;
5939 /* Set the index part of address INFO to LOC, given that INNER is the
5940 unmutated value. */
5942 static void
5943 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
5945 gcc_assert (!info->index);
5946 info->index = loc;
5947 info->index_term = inner;
5950 /* Set the displacement part of address INFO to LOC, given that INNER
5951 is the constant term. */
5953 static void
5954 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
5956 gcc_assert (!info->disp);
5957 info->disp = loc;
5958 info->disp_term = inner;
5961 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
5962 rest of INFO accordingly. */
5964 static void
5965 decompose_incdec_address (struct address_info *info)
5967 info->autoinc_p = true;
5969 rtx *base = &XEXP (*info->inner, 0);
5970 set_address_base (info, base, base);
5971 gcc_checking_assert (info->base == info->base_term);
5973 /* These addresses are only valid when the size of the addressed
5974 value is known. */
5975 gcc_checking_assert (info->mode != VOIDmode);
5978 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
5979 of INFO accordingly. */
5981 static void
5982 decompose_automod_address (struct address_info *info)
5984 info->autoinc_p = true;
5986 rtx *base = &XEXP (*info->inner, 0);
5987 set_address_base (info, base, base);
5988 gcc_checking_assert (info->base == info->base_term);
5990 rtx plus = XEXP (*info->inner, 1);
5991 gcc_assert (GET_CODE (plus) == PLUS);
5993 info->base_term2 = &XEXP (plus, 0);
5994 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
5996 rtx *step = &XEXP (plus, 1);
5997 rtx *inner_step = strip_address_mutations (step);
5998 if (CONSTANT_P (*inner_step))
5999 set_address_disp (info, step, inner_step);
6000 else
6001 set_address_index (info, step, inner_step);
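/* Illustrative decomposition (made-up address): for
(pre_modify (reg sp) (plus (reg sp) (const_int -16))),
BASE and BASE_TERM point at the first SP, BASE_TERM2 at the SP inside
the PLUS, and DISP/DISP_TERM at (const_int -16). */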
6004 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
6005 values in [PTR, END). Return a pointer to the end of the used array. */
6007 static rtx **
6008 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
6010 rtx x = *loc;
6011 if (GET_CODE (x) == PLUS)
6013 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
6014 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
6016 else
6018 gcc_assert (ptr != end);
6019 *ptr++ = loc;
6021 return ptr;
6024 /* Evaluate the likelihood of X being a base or index value, returning
6025 positive if it is likely to be a base, negative if it is likely to be
6026 an index, and 0 if we can't tell. Make the magnitude of the return
6027 value reflect the amount of confidence we have in the answer.
6029 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
6031 static int
6032 baseness (rtx x, machine_mode mode, addr_space_t as,
6033 enum rtx_code outer_code, enum rtx_code index_code)
6035 /* Believe *_POINTER unless the address shape requires otherwise. */
6036 if (REG_P (x) && REG_POINTER (x))
6037 return 2;
6038 if (MEM_P (x) && MEM_POINTER (x))
6039 return 2;
6041 if (REG_P (x) && HARD_REGISTER_P (x))
6043 /* X is a hard register. If it only fits one of the base
6044 or index classes, choose that interpretation. */
6045 int regno = REGNO (x);
6046 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
6047 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
6048 if (base_p != index_p)
6049 return base_p ? 1 : -1;
6051 return 0;
/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
                                        ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
        set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
        set_address_segment (info, loc, inner);
      else
        {
          /* The only other possibilities are a base or an index.  */
          rtx *base_term = get_base_term (inner);
          rtx *index_term = get_index_term (inner);
          gcc_assert (base_term || index_term);
          if (!base_term)
            set_address_index (info, loc, index_term);
          else if (!index_term)
            set_address_base (info, loc, base_term);
          else
            {
              gcc_assert (base_term == index_term);
              ops[out] = loc;
              inner_ops[out] = base_term;
              ++out;
            }
        }
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
         the base.  If we were confident that another term was the base
         or index, treat the remaining operand as the other kind.  */
      if (!info->base)
        set_address_base (info, ops[0], inner_ops[0]);
      else
        set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
                    GET_CODE (*ops[1]))
          >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
                       GET_CODE (*ops[0])))
        {
          set_address_base (info, ops[0], inner_ops[0]);
          set_address_index (info, ops[1], inner_ops[1]);
        }
      else
        {
          set_address_base (info, ops[1], inner_ops[1]);
          set_address_index (info, ops[0], inner_ops[0]);
        }
    }
  else
    gcc_assert (out == 0);
}
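/* Illustrative example (editor's sketch, not part of the original file):
   for the scaled-index address

     (plus (mult (reg:SI r1) (const_int 4)) (plus (reg:SI r0) (const_int 8)))

   extract_plus_operands yields three operands.  (const_int 8) is
   CONSTANT_P and becomes the displacement; the MULT has an index term
   but no base term, so it becomes the index; (reg:SI r0) could be
   either, so it survives into OPS, reaches the OUT == 1 case and, no
   base having been chosen yet, becomes the base.  */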
/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
                   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}
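/* Illustrative example (editor's sketch, not part of the original file):
   for (mem:SI (post_inc (reg:SI sp))), the caller passes OUTER_CODE ==
   MEM, strip_address_mutations leaves the POST_INC as INFO->INNER, and
   the switch dispatches to decompose_incdec_address.  A plain
   (mem:SI (plus (reg:SI r0) (const_int 4))) falls through to
   decompose_normal_address.  */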
/* Describe address operand LOC in INFO.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}

/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
                     MEM_ADDR_SPACE (x), MEM);
}
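/* Illustrative sketch (editor's addition, not part of the original
   file): a hypothetical caller combining decompose_mem_address with
   get_index_scale (declared in rtl.h, defined below) to query the
   scale applied to the index of MEM, or 0 if the address has no index.
   The function name is invented for illustration.  */

static HOST_WIDE_INT ATTRIBUTE_UNUSED
example_mem_index_scale (rtx mem)
{
  struct address_info info;
  decompose_mem_address (&info, mem);

  /* INFO.INDEX is null when the decomposed address has no index
     component; get_index_scale would dereference it.  */
  return info.index ? get_index_scale (&info) : 0;
}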
/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
                     info->addr_outer_code);
}
/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}
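/* Illustrative examples (editor's sketch, not part of the original
   file): for an index of (mult (reg:SI r1) (const_int 4)) the scale is
   4; for (ashift (reg:SI r1) (const_int 2)) it is 1 << 2 == 4; for a
   bare (reg:SI r1), where INDEX and INDEX_TERM coincide, it is 1;
   anything more complicated yields 0.  */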
/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}
/* Return true if RTL X contains a SYMBOL_REF.  */

bool
contains_symbol_ref_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter))
      return true;

  return false;
}
/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (const_rtx x)
{
  if (!targetm.have_tls)
    return false;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
      return true;
  return false;
}
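/* Illustrative sketch (editor's addition, not part of the original
   file): a hypothetical predicate in the same style as the two
   functions above, using FOR_EACH_SUBRTX to check whether X mentions
   any LABEL_REF.  The function name is invented for illustration.  */

static bool ATTRIBUTE_UNUSED
example_contains_label_ref_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_CODE (*iter) == LABEL_REF)
      return true;
  return false;
}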