gcc/rtlanal.c
1 /* Analyze RTL for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "diagnostic-core.h"
26 #include "hard-reg-set.h"
27 #include "rtl.h"
28 #include "insn-config.h"
29 #include "recog.h"
30 #include "target.h"
31 #include "output.h"
32 #include "tm_p.h"
33 #include "flags.h"
34 #include "regs.h"
35 #include "function.h"
36 #include "predict.h"
37 #include "basic-block.h"
38 #include "df.h"
39 #include "symtab.h"
40 #include "tree.h"
41 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
42 #include "addresses.h"
43 #include "rtl-iter.h"
45 /* Forward declarations */
46 static void set_of_1 (rtx, const_rtx, void *);
47 static bool covers_regno_p (const_rtx, unsigned int);
48 static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
49 static int computed_jump_p_1 (const_rtx);
50 static void parms_set (rtx, const_rtx, void *);
52 static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, machine_mode,
53 const_rtx, machine_mode,
54 unsigned HOST_WIDE_INT);
55 static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, machine_mode,
56 const_rtx, machine_mode,
57 unsigned HOST_WIDE_INT);
58 static unsigned int cached_num_sign_bit_copies (const_rtx, machine_mode, const_rtx,
59 machine_mode,
60 unsigned int);
61 static unsigned int num_sign_bit_copies1 (const_rtx, machine_mode, const_rtx,
62 machine_mode, unsigned int);
64 rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
65 rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
67 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
68 If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
69 SIGN_EXTEND then while narrowing we also have to enforce the
70 representation and sign-extend the value to mode DESTINATION_REP.
72 If the value is already sign-extended to DESTINATION_REP mode we
73 can just switch to DESTINATION mode on it. For each pair of
74 integral modes SOURCE and DESTINATION, when truncating from SOURCE
75 to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
76 contains the number of high-order bits in SOURCE that have to be
77 copies of the sign-bit so that we can do this mode-switch to
78 DESTINATION. */
80 static unsigned int
81 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
83 /* Store X into index I of ARRAY. ARRAY is known to have at least I
84 elements. Return the new base of ARRAY. */
86 template <typename T>
87 typename T::value_type *
88 generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
89 value_type *base,
90 size_t i, value_type x)
92 if (base == array.stack)
94 if (i < LOCAL_ELEMS)
96 base[i] = x;
97 return base;
99 gcc_checking_assert (i == LOCAL_ELEMS);
100 /* A previous iteration might also have moved from the stack to the
101 heap, in which case the heap array will already be big enough. */
102 if (vec_safe_length (array.heap) <= i)
103 vec_safe_grow (array.heap, i + 1);
104 base = array.heap->address ();
105 memcpy (base, array.stack, sizeof (array.stack));
106 base[LOCAL_ELEMS] = x;
107 return base;
109 unsigned int length = array.heap->length ();
110 if (length > i)
112 gcc_checking_assert (base == array.heap->address ());
113 base[i] = x;
114 return base;
116 else
118 gcc_checking_assert (i == length);
119 vec_safe_push (array.heap, x);
120 return array.heap->address ();
124 /* Add the subrtxes of X to worklist ARRAY, starting at END. Return the
125 number of elements added to the worklist. */
127 template <typename T>
128 size_t
129 generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
130 value_type *base,
131 size_t end, rtx_type x)
133 enum rtx_code code = GET_CODE (x);
134 const char *format = GET_RTX_FORMAT (code);
135 size_t orig_end = end;
136 if (__builtin_expect (INSN_P (x), false))
138 /* Put the pattern at the top of the queue, since that's what
139 we're likely to want most. It also allows for the SEQUENCE
140 code below. */
141 for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
142 if (format[i] == 'e')
144 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
145 if (__builtin_expect (end < LOCAL_ELEMS, true))
146 base[end++] = subx;
147 else
148 base = add_single_to_queue (array, base, end++, subx);
151 else
152 for (int i = 0; format[i]; ++i)
153 if (format[i] == 'e')
155 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
156 if (__builtin_expect (end < LOCAL_ELEMS, true))
157 base[end++] = subx;
158 else
159 base = add_single_to_queue (array, base, end++, subx);
161 else if (format[i] == 'E')
163 unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
164 rtx *vec = x->u.fld[i].rt_rtvec->elem;
165 if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
166 for (unsigned int j = 0; j < length; j++)
167 base[end++] = T::get_value (vec[j]);
168 else
169 for (unsigned int j = 0; j < length; j++)
170 base = add_single_to_queue (array, base, end++,
171 T::get_value (vec[j]));
172 if (code == SEQUENCE && end == length)
173 /* If the subrtxes of the sequence fill the entire array then
174 we know that no other parts of a containing insn are queued.
175 The caller is therefore iterating over the sequence as a
176 PATTERN (...), so we also want the patterns of the
177 subinstructions. */
178 for (unsigned int j = 0; j < length; j++)
180 typename T::rtx_type x = T::get_rtx (base[j]);
181 if (INSN_P (x))
182 base[j] = T::get_value (PATTERN (x));
185 return end - orig_end;
188 template <typename T>
189 void
190 generic_subrtx_iterator <T>::free_array (array_type &array)
192 vec_free (array.heap);
195 template <typename T>
196 const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;
198 template class generic_subrtx_iterator <const_rtx_accessor>;
199 template class generic_subrtx_iterator <rtx_var_accessor>;
200 template class generic_subrtx_iterator <rtx_ptr_accessor>;
202 /* Return 1 if the value of X is unstable
203 (would be different at a different point in the program).
204 The frame pointer, arg pointer, etc. are considered stable
205 (within one function) and so is anything marked `unchanging'. */
208 rtx_unstable_p (const_rtx x)
210 const RTX_CODE code = GET_CODE (x);
211 int i;
212 const char *fmt;
214 switch (code)
216 case MEM:
217 return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
219 case CONST:
220 CASE_CONST_ANY:
221 case SYMBOL_REF:
222 case LABEL_REF:
223 return 0;
225 case REG:
226 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
227 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
228 /* The arg pointer varies if it is not a fixed register. */
229 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
230 return 0;
231 /* ??? When call-clobbered, the value is stable modulo the restore
232 that must happen after a call. This currently screws up local-alloc
233 into believing that the restore is not needed. */
234 if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
235 return 0;
236 return 1;
238 case ASM_OPERANDS:
239 if (MEM_VOLATILE_P (x))
240 return 1;
242 /* Fall through. */
244 default:
245 break;
248 fmt = GET_RTX_FORMAT (code);
249 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
250 if (fmt[i] == 'e')
252 if (rtx_unstable_p (XEXP (x, i)))
253 return 1;
255 else if (fmt[i] == 'E')
257 int j;
258 for (j = 0; j < XVECLEN (x, i); j++)
259 if (rtx_unstable_p (XVECEXP (x, i, j)))
260 return 1;
263 return 0;
266 /* Return 1 if X has a value that can vary even between two
267 executions of the program. 0 means X can be compared reliably
268 against certain constants or near-constants.
269 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
270 zero, we are slightly more conservative.
271 The frame pointer and the arg pointer are considered constant. */
273 bool
274 rtx_varies_p (const_rtx x, bool for_alias)
276 RTX_CODE code;
277 int i;
278 const char *fmt;
280 if (!x)
281 return 0;
283 code = GET_CODE (x);
284 switch (code)
286 case MEM:
287 return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
289 case CONST:
290 CASE_CONST_ANY:
291 case SYMBOL_REF:
292 case LABEL_REF:
293 return 0;
295 case REG:
296 /* Note that we have to test for the actual rtx used for the frame
297 and arg pointers and not just the register number in case we have
298 eliminated the frame and/or arg pointer and are using it
299 for pseudos. */
300 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
301 /* The arg pointer varies if it is not a fixed register. */
302 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
303 return 0;
304 if (x == pic_offset_table_rtx
305 /* ??? When call-clobbered, the value is stable modulo the restore
306 that must happen after a call. This currently screws up
307 local-alloc into believing that the restore is not needed, so we
308 must return 0 only if we are called from alias analysis. */
309 && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
310 return 0;
311 return 1;
313 case LO_SUM:
314 /* The operand 0 of a LO_SUM is considered constant
315 (in fact it is related specifically to operand 1)
316 during alias analysis. */
317 return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
318 || rtx_varies_p (XEXP (x, 1), for_alias);
320 case ASM_OPERANDS:
321 if (MEM_VOLATILE_P (x))
322 return 1;
324 /* Fall through. */
326 default:
327 break;
330 fmt = GET_RTX_FORMAT (code);
331 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
332 if (fmt[i] == 'e')
334 if (rtx_varies_p (XEXP (x, i), for_alias))
335 return 1;
337 else if (fmt[i] == 'E')
339 int j;
340 for (j = 0; j < XVECLEN (x, i); j++)
341 if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
342 return 1;
345 return 0;
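/* Illustrative usage sketch (editorial addition, not part of the original
   file): a caller that wants to know whether a memory address can be
   compared reliably against constants might combine the two predicates
   above.  The helper name below is hypothetical.  */
#if 0
static bool
example_address_is_stable (const_rtx mem)
{
  /* Only MEMs carry an address to inspect.  */
  if (!MEM_P (mem))
    return false;
  /* rtx_unstable_p: the value may differ between two points in one run.
     rtx_varies_p: the value may differ between two runs; FOR_ALIAS
     selects the slightly less conservative alias-analysis behavior.  */
  return !rtx_unstable_p (XEXP (mem, 0))
	 && !rtx_varies_p (XEXP (mem, 0), /*for_alias=*/false);
}
#endif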
348 /* Compute an approximation for the offset between registers
349 FROM and TO for the current function, as it was at the start
350 of the routine. */
352 static HOST_WIDE_INT
353 get_initial_register_offset (int from, int to)
355 #ifdef ELIMINABLE_REGS
356 static const struct elim_table_t
358 const int from;
359 const int to;
360 } table[] = ELIMINABLE_REGS;
361 HOST_WIDE_INT offset1, offset2;
362 unsigned int i, j;
364 if (to == from)
365 return 0;
367 /* It is not safe to call INITIAL_ELIMINATION_OFFSET
368 before the reload pass. We need to give at least
369 an estimate of the resulting frame size. */
370 if (! reload_completed)
372 offset1 = crtl->outgoing_args_size + get_frame_size ();
373 #if !STACK_GROWS_DOWNWARD
374 offset1 = - offset1;
375 #endif
376 if (to == STACK_POINTER_REGNUM)
377 return offset1;
378 else if (from == STACK_POINTER_REGNUM)
379 return - offset1;
380 else
381 return 0;
384 for (i = 0; i < ARRAY_SIZE (table); i++)
385 if (table[i].from == from)
387 if (table[i].to == to)
389 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
390 offset1);
391 return offset1;
393 for (j = 0; j < ARRAY_SIZE (table); j++)
395 if (table[j].to == to
396 && table[j].from == table[i].to)
398 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
399 offset1);
400 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
401 offset2);
402 return offset1 + offset2;
404 if (table[j].from == to
405 && table[j].to == table[i].to)
407 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
408 offset1);
409 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
410 offset2);
411 return offset1 - offset2;
415 else if (table[i].to == from)
417 if (table[i].from == to)
419 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
420 offset1);
421 return - offset1;
423 for (j = 0; j < ARRAY_SIZE (table); j++)
425 if (table[j].to == to
426 && table[j].from == table[i].from)
428 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
429 offset1);
430 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
431 offset2);
432 return - offset1 + offset2;
434 if (table[j].from == to
435 && table[j].to == table[i].from)
437 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
438 offset1);
439 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
440 offset2);
441 return - offset1 - offset2;
446 /* If the requested register combination was not found,
447 try a different, simpler combination. */
448 if (from == ARG_POINTER_REGNUM)
449 return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
450 else if (to == ARG_POINTER_REGNUM)
451 return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
452 else if (from == HARD_FRAME_POINTER_REGNUM)
453 return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
454 else if (to == HARD_FRAME_POINTER_REGNUM)
455 return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
456 else
457 return 0;
459 #else
460 HOST_WIDE_INT offset;
462 if (to == from)
463 return 0;
465 if (reload_completed)
467 INITIAL_FRAME_POINTER_OFFSET (offset);
469 else
471 offset = crtl->outgoing_args_size + get_frame_size ();
472 #if !STACK_GROWS_DOWNWARD
473 offset = - offset;
474 #endif
477 if (to == STACK_POINTER_REGNUM)
478 return offset;
479 else if (from == STACK_POINTER_REGNUM)
480 return - offset;
481 else
482 return 0;
484 #endif
487 /* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
488 bytes can cause a trap. MODE is the mode of the MEM (not that of X) and
489 UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
490 references on strict alignment machines. */
492 static int
493 rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
494 machine_mode mode, bool unaligned_mems)
496 enum rtx_code code = GET_CODE (x);
498 /* The offset must be a multiple of the mode size if we are considering
499 unaligned memory references on strict alignment machines. */
500 if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
502 HOST_WIDE_INT actual_offset = offset;
504 #ifdef SPARC_STACK_BOUNDARY_HACK
505 /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
506 the real alignment of %sp. However, when it does this, the
507 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
508 if (SPARC_STACK_BOUNDARY_HACK
509 && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
510 actual_offset -= STACK_POINTER_OFFSET;
511 #endif
513 if (actual_offset % GET_MODE_SIZE (mode) != 0)
514 return 1;
517 switch (code)
519 case SYMBOL_REF:
520 if (SYMBOL_REF_WEAK (x))
521 return 1;
522 if (!CONSTANT_POOL_ADDRESS_P (x))
524 tree decl;
525 HOST_WIDE_INT decl_size;
527 if (offset < 0)
528 return 1;
529 if (size == 0)
530 size = GET_MODE_SIZE (mode);
531 if (size == 0)
532 return offset != 0;
534 /* If the size of the access or of the symbol is unknown,
535 assume the worst. */
536 decl = SYMBOL_REF_DECL (x);
538 /* Else check that the access is in bounds. TODO: restructure
539 expr_size/tree_expr_size/int_expr_size and just use the latter. */
540 if (!decl)
541 decl_size = -1;
542 else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
543 decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
544 ? tree_to_shwi (DECL_SIZE_UNIT (decl))
545 : -1);
546 else if (TREE_CODE (decl) == STRING_CST)
547 decl_size = TREE_STRING_LENGTH (decl);
548 else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
549 decl_size = int_size_in_bytes (TREE_TYPE (decl));
550 else
551 decl_size = -1;
553 return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
556 return 0;
558 case LABEL_REF:
559 return 0;
561 case REG:
562 /* Stack references are assumed not to trap, but we need to deal with
563 nonsensical offsets. */
564 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
565 || x == stack_pointer_rtx
566 /* The arg pointer varies if it is not a fixed register. */
567 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
569 #ifdef RED_ZONE_SIZE
570 HOST_WIDE_INT red_zone_size = RED_ZONE_SIZE;
571 #else
572 HOST_WIDE_INT red_zone_size = 0;
573 #endif
574 HOST_WIDE_INT stack_boundary = PREFERRED_STACK_BOUNDARY
575 / BITS_PER_UNIT;
576 HOST_WIDE_INT low_bound, high_bound;
578 if (size == 0)
579 size = GET_MODE_SIZE (mode);
581 if (x == frame_pointer_rtx)
583 if (FRAME_GROWS_DOWNWARD)
585 high_bound = STARTING_FRAME_OFFSET;
586 low_bound = high_bound - get_frame_size ();
588 else
590 low_bound = STARTING_FRAME_OFFSET;
591 high_bound = low_bound + get_frame_size ();
594 else if (x == hard_frame_pointer_rtx)
596 HOST_WIDE_INT sp_offset
597 = get_initial_register_offset (STACK_POINTER_REGNUM,
598 HARD_FRAME_POINTER_REGNUM);
599 HOST_WIDE_INT ap_offset
600 = get_initial_register_offset (ARG_POINTER_REGNUM,
601 HARD_FRAME_POINTER_REGNUM);
603 #if STACK_GROWS_DOWNWARD
604 low_bound = sp_offset - red_zone_size - stack_boundary;
605 high_bound = ap_offset
606 + FIRST_PARM_OFFSET (current_function_decl)
607 #if !ARGS_GROW_DOWNWARD
608 + crtl->args.size
609 #endif
610 + stack_boundary;
611 #else
612 high_bound = sp_offset + red_zone_size + stack_boundary;
613 low_bound = ap_offset
614 + FIRST_PARM_OFFSET (current_function_decl)
615 #if ARGS_GROW_DOWNWARD
616 - crtl->args.size
617 #endif
618 - stack_boundary;
619 #endif
621 else if (x == stack_pointer_rtx)
623 HOST_WIDE_INT ap_offset
624 = get_initial_register_offset (ARG_POINTER_REGNUM,
625 STACK_POINTER_REGNUM);
627 #if STACK_GROWS_DOWNWARD
628 low_bound = - red_zone_size - stack_boundary;
629 high_bound = ap_offset
630 + FIRST_PARM_OFFSET (current_function_decl)
631 #if !ARGS_GROW_DOWNWARD
632 + crtl->args.size
633 #endif
634 + stack_boundary;
635 #else
636 high_bound = red_zone_size + stack_boundary;
637 low_bound = ap_offset
638 + FIRST_PARM_OFFSET (current_function_decl)
639 #if ARGS_GROW_DOWNWARD
640 - crtl->args.size
641 #endif
642 - stack_boundary;
643 #endif
645 else
647 /* We assume that accesses are safe to at least the
648 next stack boundary.
649 Examples are varargs and __builtin_return_address. */
650 #if ARGS_GROW_DOWNWARD
651 high_bound = FIRST_PARM_OFFSET (current_function_decl)
652 + stack_boundary;
653 low_bound = FIRST_PARM_OFFSET (current_function_decl)
654 - crtl->args.size - stack_boundary;
655 #else
656 low_bound = FIRST_PARM_OFFSET (current_function_decl)
657 - stack_boundary;
658 high_bound = FIRST_PARM_OFFSET (current_function_decl)
659 + crtl->args.size + stack_boundary;
660 #endif
663 if (offset >= low_bound && offset <= high_bound - size)
664 return 0;
665 return 1;
667 /* All of the virtual frame registers are stack references. */
668 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
669 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
670 return 0;
671 return 1;
673 case CONST:
674 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
675 mode, unaligned_mems);
677 case PLUS:
678 /* An address is assumed not to trap if:
679 - it is the pic register plus a constant. */
680 if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
681 return 0;
683 /* - or it is an address that can't trap plus a constant integer. */
684 if (CONST_INT_P (XEXP (x, 1))
685 && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
686 size, mode, unaligned_mems))
687 return 0;
689 return 1;
691 case LO_SUM:
692 case PRE_MODIFY:
693 return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
694 mode, unaligned_mems);
696 case PRE_DEC:
697 case PRE_INC:
698 case POST_DEC:
699 case POST_INC:
700 case POST_MODIFY:
701 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
702 mode, unaligned_mems);
704 default:
705 break;
708 /* If it isn't one of the cases above, it can cause a trap. */
709 return 1;
712 /* Return nonzero if the use of X as an address in a MEM can cause a trap. */
715 rtx_addr_can_trap_p (const_rtx x)
717 return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
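/* Illustrative usage sketch (editorial addition): before speculatively
   hoisting a load, a pass might check that evaluating the address cannot
   fault.  The helper name is hypothetical.  */
#if 0
static bool
example_safe_to_speculate_load (const_rtx mem)
{
  /* A read-only MEM whose address cannot trap may be evaluated early.  */
  return MEM_P (mem)
	 && MEM_READONLY_P (mem)
	 && !rtx_addr_can_trap_p (XEXP (mem, 0));
}
#endif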
720 /* Return true if X is an address that is known to not be zero. */
722 bool
723 nonzero_address_p (const_rtx x)
725 const enum rtx_code code = GET_CODE (x);
727 switch (code)
729 case SYMBOL_REF:
730 return !SYMBOL_REF_WEAK (x);
732 case LABEL_REF:
733 return true;
735 case REG:
736 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
737 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
738 || x == stack_pointer_rtx
739 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
740 return true;
741 /* All of the virtual frame registers are stack references. */
742 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
743 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
744 return true;
745 return false;
747 case CONST:
748 return nonzero_address_p (XEXP (x, 0));
750 case PLUS:
751 /* Handle PIC references. */
752 if (XEXP (x, 0) == pic_offset_table_rtx
753 && CONSTANT_P (XEXP (x, 1)))
754 return true;
755 return false;
757 case PRE_MODIFY:
758 /* Similar to the above; allow positive offsets. Further, since
759 auto-inc is only allowed in memories, the register must be a
760 pointer. */
761 if (CONST_INT_P (XEXP (x, 1))
762 && INTVAL (XEXP (x, 1)) > 0)
763 return true;
764 return nonzero_address_p (XEXP (x, 0));
766 case PRE_INC:
767 /* Similarly. Further, the offset is always positive. */
768 return true;
770 case PRE_DEC:
771 case POST_DEC:
772 case POST_INC:
773 case POST_MODIFY:
774 return nonzero_address_p (XEXP (x, 0));
776 case LO_SUM:
777 return nonzero_address_p (XEXP (x, 1));
779 default:
780 break;
783 /* If it isn't one of the cases above, it might be zero. */
784 return false;
787 /* Return 1 if X refers to a memory location whose address
788 cannot be compared reliably with constant addresses,
789 or if X refers to a BLKmode memory object.
790 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
791 zero, we are slightly more conservative. */
793 bool
794 rtx_addr_varies_p (const_rtx x, bool for_alias)
796 enum rtx_code code;
797 int i;
798 const char *fmt;
800 if (x == 0)
801 return 0;
803 code = GET_CODE (x);
804 if (code == MEM)
805 return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
807 fmt = GET_RTX_FORMAT (code);
808 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
809 if (fmt[i] == 'e')
811 if (rtx_addr_varies_p (XEXP (x, i), for_alias))
812 return 1;
814 else if (fmt[i] == 'E')
816 int j;
817 for (j = 0; j < XVECLEN (x, i); j++)
818 if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
819 return 1;
821 return 0;
824 /* Return the CALL in X if there is one. */
827 get_call_rtx_from (rtx x)
829 if (INSN_P (x))
830 x = PATTERN (x);
831 if (GET_CODE (x) == PARALLEL)
832 x = XVECEXP (x, 0, 0);
833 if (GET_CODE (x) == SET)
834 x = SET_SRC (x);
835 if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
836 return x;
837 return NULL_RTX;
840 /* Return the value of the integer term in X, if one is apparent;
841 otherwise return 0.
842 Only obvious integer terms are detected.
843 This is used in cse.c with the `related_value' field. */
845 HOST_WIDE_INT
846 get_integer_term (const_rtx x)
848 if (GET_CODE (x) == CONST)
849 x = XEXP (x, 0);
851 if (GET_CODE (x) == MINUS
852 && CONST_INT_P (XEXP (x, 1)))
853 return - INTVAL (XEXP (x, 1));
854 if (GET_CODE (x) == PLUS
855 && CONST_INT_P (XEXP (x, 1)))
856 return INTVAL (XEXP (x, 1));
857 return 0;
860 /* If X is a constant, return the value sans apparent integer term;
861 otherwise return 0.
862 Only obvious integer terms are detected. */
865 get_related_value (const_rtx x)
867 if (GET_CODE (x) != CONST)
868 return 0;
869 x = XEXP (x, 0);
870 if (GET_CODE (x) == PLUS
871 && CONST_INT_P (XEXP (x, 1)))
872 return XEXP (x, 0);
873 else if (GET_CODE (x) == MINUS
874 && CONST_INT_P (XEXP (x, 1)))
875 return XEXP (x, 0);
876 return 0;
879 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
880 to somewhere in the same object or object_block as SYMBOL. */
882 bool
883 offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
885 tree decl;
887 if (GET_CODE (symbol) != SYMBOL_REF)
888 return false;
890 if (offset == 0)
891 return true;
893 if (offset > 0)
895 if (CONSTANT_POOL_ADDRESS_P (symbol)
896 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
897 return true;
899 decl = SYMBOL_REF_DECL (symbol);
900 if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
901 return true;
904 if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
905 && SYMBOL_REF_BLOCK (symbol)
906 && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
907 && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
908 < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
909 return true;
911 return false;
914 /* Split X into a base and a constant offset, storing them in *BASE_OUT
915 and *OFFSET_OUT respectively. */
917 void
918 split_const (rtx x, rtx *base_out, rtx *offset_out)
920 if (GET_CODE (x) == CONST)
922 x = XEXP (x, 0);
923 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
925 *base_out = XEXP (x, 0);
926 *offset_out = XEXP (x, 1);
927 return;
930 *base_out = x;
931 *offset_out = const0_rtx;
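/* Illustrative usage sketch (editorial addition): split_const never fails,
   so (const (plus (symbol_ref "x") (const_int 8))) yields the SYMBOL_REF
   as the base and (const_int 8) as the offset, while a plain REG yields
   the REG itself and const0_rtx.  */
#if 0
static void
example_split (rtx addr)
{
  rtx base, offset;
  split_const (addr, &base, &offset);
  /* OFFSET is always a CONST_INT here, possibly const0_rtx.  */
  gcc_assert (CONST_INT_P (offset));
}
#endif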
934 /* Return the number of places FIND appears within X. If COUNT_DEST is
935 zero, we do not count occurrences inside the destination of a SET. */
938 count_occurrences (const_rtx x, const_rtx find, int count_dest)
940 int i, j;
941 enum rtx_code code;
942 const char *format_ptr;
943 int count;
945 if (x == find)
946 return 1;
948 code = GET_CODE (x);
950 switch (code)
952 case REG:
953 CASE_CONST_ANY:
954 case SYMBOL_REF:
955 case CODE_LABEL:
956 case PC:
957 case CC0:
958 return 0;
960 case EXPR_LIST:
961 count = count_occurrences (XEXP (x, 0), find, count_dest);
962 if (XEXP (x, 1))
963 count += count_occurrences (XEXP (x, 1), find, count_dest);
964 return count;
966 case MEM:
967 if (MEM_P (find) && rtx_equal_p (x, find))
968 return 1;
969 break;
971 case SET:
972 if (SET_DEST (x) == find && ! count_dest)
973 return count_occurrences (SET_SRC (x), find, count_dest);
974 break;
976 default:
977 break;
980 format_ptr = GET_RTX_FORMAT (code);
981 count = 0;
983 for (i = 0; i < GET_RTX_LENGTH (code); i++)
985 switch (*format_ptr++)
987 case 'e':
988 count += count_occurrences (XEXP (x, i), find, count_dest);
989 break;
991 case 'E':
992 for (j = 0; j < XVECLEN (x, i); j++)
993 count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
994 break;
997 return count;
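/* Illustrative usage sketch (editorial addition): counting how many times
   a register is read inside an insn pattern, ignoring occurrences that
   appear only as the destination of a SET.  The helper is hypothetical.  */
#if 0
static int
example_count_reads (const rtx_insn *insn, const_rtx reg)
{
  /* A zero COUNT_DEST excludes SET destinations from the count.  */
  return count_occurrences (PATTERN (insn), reg, 0);
}
#endif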
1001 /* Return TRUE if OP is a register or subreg of a register that
1002 holds an unsigned quantity. Otherwise, return FALSE. */
1004 bool
1005 unsigned_reg_p (rtx op)
1007 if (REG_P (op)
1008 && REG_EXPR (op)
1009 && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
1010 return true;
1012 if (GET_CODE (op) == SUBREG
1013 && SUBREG_PROMOTED_SIGN (op))
1014 return true;
1016 return false;
1020 /* Nonzero if register REG appears somewhere within IN.
1021 Also works if REG is not a register; in this case it checks
1022 for a subexpression of IN that is Lisp "equal" to REG. */
1025 reg_mentioned_p (const_rtx reg, const_rtx in)
1027 const char *fmt;
1028 int i;
1029 enum rtx_code code;
1031 if (in == 0)
1032 return 0;
1034 if (reg == in)
1035 return 1;
1037 if (GET_CODE (in) == LABEL_REF)
1038 return reg == LABEL_REF_LABEL (in);
1040 code = GET_CODE (in);
1042 switch (code)
1044 /* Compare registers by number. */
1045 case REG:
1046 return REG_P (reg) && REGNO (in) == REGNO (reg);
1048 /* These codes have no constituent expressions
1049 and are unique. */
1050 case SCRATCH:
1051 case CC0:
1052 case PC:
1053 return 0;
1055 CASE_CONST_ANY:
1056 /* These are kept unique for a given value. */
1057 return 0;
1059 default:
1060 break;
1063 if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
1064 return 1;
1066 fmt = GET_RTX_FORMAT (code);
1068 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1070 if (fmt[i] == 'E')
1072 int j;
1073 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
1074 if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
1075 return 1;
1077 else if (fmt[i] == 'e'
1078 && reg_mentioned_p (reg, XEXP (in, i)))
1079 return 1;
1081 return 0;
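/* Illustrative usage sketch (editorial addition): reg_mentioned_p compares
   registers by number, so it is the usual way to ask whether an insn reads
   or writes a particular hard or pseudo register anywhere in its pattern.  */
#if 0
static bool
example_insn_mentions_reg (const rtx_insn *insn, const_rtx reg)
{
  return reg_mentioned_p (reg, PATTERN (insn)) != 0;
}
#endif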
1084 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
1085 no CODE_LABEL insn. */
1088 no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
1090 rtx_insn *p;
1091 if (beg == end)
1092 return 0;
1093 for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
1094 if (LABEL_P (p))
1095 return 0;
1096 return 1;
1099 /* Nonzero if register REG is used in an insn between
1100 FROM_INSN and TO_INSN (exclusive of those two). */
1103 reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
1104 const rtx_insn *to_insn)
1106 rtx_insn *insn;
1108 if (from_insn == to_insn)
1109 return 0;
1111 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1112 if (NONDEBUG_INSN_P (insn)
1113 && (reg_overlap_mentioned_p (reg, PATTERN (insn))
1114 || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
1115 return 1;
1116 return 0;
1119 /* Nonzero if the old value of X, a register, is referenced in BODY. If X
1120 is entirely replaced by a new value and the only use is as a SET_DEST,
1121 we do not consider it a reference. */
1124 reg_referenced_p (const_rtx x, const_rtx body)
1126 int i;
1128 switch (GET_CODE (body))
1130 case SET:
1131 if (reg_overlap_mentioned_p (x, SET_SRC (body)))
1132 return 1;
1134 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
1135 of a REG that occupies all of the REG, the insn references X if
1136 it is mentioned in the destination. */
1137 if (GET_CODE (SET_DEST (body)) != CC0
1138 && GET_CODE (SET_DEST (body)) != PC
1139 && !REG_P (SET_DEST (body))
1140 && ! (GET_CODE (SET_DEST (body)) == SUBREG
1141 && REG_P (SUBREG_REG (SET_DEST (body)))
1142 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
1143 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
1144 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
1145 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
1146 && reg_overlap_mentioned_p (x, SET_DEST (body)))
1147 return 1;
1148 return 0;
1150 case ASM_OPERANDS:
1151 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1152 if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
1153 return 1;
1154 return 0;
1156 case CALL:
1157 case USE:
1158 case IF_THEN_ELSE:
1159 return reg_overlap_mentioned_p (x, body);
1161 case TRAP_IF:
1162 return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
1164 case PREFETCH:
1165 return reg_overlap_mentioned_p (x, XEXP (body, 0));
1167 case UNSPEC:
1168 case UNSPEC_VOLATILE:
1169 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1170 if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
1171 return 1;
1172 return 0;
1174 case PARALLEL:
1175 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1176 if (reg_referenced_p (x, XVECEXP (body, 0, i)))
1177 return 1;
1178 return 0;
1180 case CLOBBER:
1181 if (MEM_P (XEXP (body, 0)))
1182 if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
1183 return 1;
1184 return 0;
1186 case COND_EXEC:
1187 if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
1188 return 1;
1189 return reg_referenced_p (x, COND_EXEC_CODE (body));
1191 default:
1192 return 0;
1196 /* Nonzero if register REG is set or clobbered in an insn between
1197 FROM_INSN and TO_INSN (exclusive of those two). */
1200 reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
1201 const rtx_insn *to_insn)
1203 const rtx_insn *insn;
1205 if (from_insn == to_insn)
1206 return 0;
1208 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1209 if (INSN_P (insn) && reg_set_p (reg, insn))
1210 return 1;
1211 return 0;
1214 /* Internals of reg_set_between_p. */
1216 reg_set_p (const_rtx reg, const_rtx insn)
1218 /* After delay slot handling, call and branch insns might be in a
1219 sequence. Check all the elements there. */
1220 if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
1222 for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
1223 if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
1224 return true;
1226 return false;
1229 /* We can be passed an insn or part of one. If we are passed an insn,
1230 check if a side-effect of the insn clobbers REG. */
1231 if (INSN_P (insn)
1232 && (FIND_REG_INC_NOTE (insn, reg)
1233 || (CALL_P (insn)
1234 && ((REG_P (reg)
1235 && REGNO (reg) < FIRST_PSEUDO_REGISTER
1236 && overlaps_hard_reg_set_p (regs_invalidated_by_call,
1237 GET_MODE (reg), REGNO (reg)))
1238 || MEM_P (reg)
1239 || find_reg_fusage (insn, CLOBBER, reg)))))
1240 return true;
1242 return set_of (reg, insn) != NULL_RTX;
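/* Illustrative usage sketch (editorial addition): a transformation that
   wants to move an insn that reads and writes REG from one place to
   another must make sure no intervening insn sets or uses it;
   reg_set_between_p and reg_used_between_p express exactly that.  Both
   walks exclude FROM and TO themselves.  The helper is hypothetical.  */
#if 0
static bool
example_reg_untouched_between (const_rtx reg, const rtx_insn *from,
			       const rtx_insn *to)
{
  return !reg_set_between_p (reg, from, to)
	 && !reg_used_between_p (reg, from, to);
}
#endif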
1245 /* Similar to reg_set_between_p, but check all registers in X. Return 0
1246 only if none of them are modified between START and END. Return 1 if
1247 X contains a MEM; this routine does use memory aliasing. */
1250 modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
1252 const enum rtx_code code = GET_CODE (x);
1253 const char *fmt;
1254 int i, j;
1255 rtx_insn *insn;
1257 if (start == end)
1258 return 0;
1260 switch (code)
1262 CASE_CONST_ANY:
1263 case CONST:
1264 case SYMBOL_REF:
1265 case LABEL_REF:
1266 return 0;
1268 case PC:
1269 case CC0:
1270 return 1;
1272 case MEM:
1273 if (modified_between_p (XEXP (x, 0), start, end))
1274 return 1;
1275 if (MEM_READONLY_P (x))
1276 return 0;
1277 for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
1278 if (memory_modified_in_insn_p (x, insn))
1279 return 1;
1280 return 0;
1281 break;
1283 case REG:
1284 return reg_set_between_p (x, start, end);
1286 default:
1287 break;
1290 fmt = GET_RTX_FORMAT (code);
1291 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1293 if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
1294 return 1;
1296 else if (fmt[i] == 'E')
1297 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1298 if (modified_between_p (XVECEXP (x, i, j), start, end))
1299 return 1;
1302 return 0;
1305 /* Similar to reg_set_p, but check all registers in X. Return 0 only if none
1306 of them are modified in INSN. Return 1 if X contains a MEM; this routine
1307 does use memory aliasing. */
1310 modified_in_p (const_rtx x, const_rtx insn)
1312 const enum rtx_code code = GET_CODE (x);
1313 const char *fmt;
1314 int i, j;
1316 switch (code)
1318 CASE_CONST_ANY:
1319 case CONST:
1320 case SYMBOL_REF:
1321 case LABEL_REF:
1322 return 0;
1324 case PC:
1325 case CC0:
1326 return 1;
1328 case MEM:
1329 if (modified_in_p (XEXP (x, 0), insn))
1330 return 1;
1331 if (MEM_READONLY_P (x))
1332 return 0;
1333 if (memory_modified_in_insn_p (x, insn))
1334 return 1;
1335 return 0;
1336 break;
1338 case REG:
1339 return reg_set_p (x, insn);
1341 default:
1342 break;
1345 fmt = GET_RTX_FORMAT (code);
1346 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1348 if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
1349 return 1;
1351 else if (fmt[i] == 'E')
1352 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1353 if (modified_in_p (XVECEXP (x, i, j), insn))
1354 return 1;
1357 return 0;
1360 /* Helper function for set_of. */
1361 struct set_of_data
1363 const_rtx found;
1364 const_rtx pat;
1367 static void
1368 set_of_1 (rtx x, const_rtx pat, void *data1)
1370 struct set_of_data *const data = (struct set_of_data *) (data1);
1371 if (rtx_equal_p (x, data->pat)
1372 || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
1373 data->found = pat;
1376 /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
1377 (either directly or via STRICT_LOW_PART and similar modifiers). */
1378 const_rtx
1379 set_of (const_rtx pat, const_rtx insn)
1381 struct set_of_data data;
1382 data.found = NULL_RTX;
1383 data.pat = pat;
1384 note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1385 return data.found;
1388 /* Add all hard registers in X to *PSET. */
1389 void
1390 find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
1392 subrtx_iterator::array_type array;
1393 FOR_EACH_SUBRTX (iter, array, x, NONCONST)
1395 const_rtx x = *iter;
1396 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
1397 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1401 /* This function, called through note_stores, collects sets and
1402 clobbers of hard registers in a HARD_REG_SET, which is pointed to
1403 by DATA. */
1404 void
1405 record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
1407 HARD_REG_SET *pset = (HARD_REG_SET *)data;
1408 if (REG_P (x) && HARD_REGISTER_P (x))
1409 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1412 /* Examine INSN, and compute the set of hard registers written by it.
1413 Store it in *PSET. Should only be called after reload. */
1414 void
1415 find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
1417 rtx link;
1419 CLEAR_HARD_REG_SET (*pset);
1420 note_stores (PATTERN (insn), record_hard_reg_sets, pset);
1421 if (CALL_P (insn))
1423 if (implicit)
1424 IOR_HARD_REG_SET (*pset, call_used_reg_set);
1426 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1427 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1429 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1430 if (REG_NOTE_KIND (link) == REG_INC)
1431 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1434 /* Like record_hard_reg_sets, but called through note_uses. */
1435 void
1436 record_hard_reg_uses (rtx *px, void *data)
1438 find_all_hard_regs (*px, (HARD_REG_SET *) data);
1441 /* Given an INSN, return a SET expression if this insn has only a single SET.
1442 It may also have CLOBBERs, USEs, or SETs whose output
1443 will not be used, which we ignore. */
1446 single_set_2 (const rtx_insn *insn, const_rtx pat)
1448 rtx set = NULL;
1449 int set_verified = 1;
1450 int i;
1452 if (GET_CODE (pat) == PARALLEL)
1454 for (i = 0; i < XVECLEN (pat, 0); i++)
1456 rtx sub = XVECEXP (pat, 0, i);
1457 switch (GET_CODE (sub))
1459 case USE:
1460 case CLOBBER:
1461 break;
1463 case SET:
1464 /* We can consider insns having multiple sets, where all
1465 but one are dead as single set insns. In common case
1466 only single set is present in the pattern so we want
1467 to avoid checking for REG_UNUSED notes unless necessary.
1469 When we reach set first time, we just expect this is
1470 the single set we are looking for and only when more
1471 sets are found in the insn, we check them. */
1472 if (!set_verified)
1474 if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1475 && !side_effects_p (set))
1476 set = NULL;
1477 else
1478 set_verified = 1;
1480 if (!set)
1481 set = sub, set_verified = 0;
1482 else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1483 || side_effects_p (sub))
1484 return NULL_RTX;
1485 break;
1487 default:
1488 return NULL_RTX;
1492 return set;
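/* Illustrative usage sketch (editorial addition): most callers reach this
   function through the single_set wrapper in rtl.h, which handles the
   common one-SET pattern inline and falls back to single_set_2 for more
   complex patterns.  A typical use is recognizing a plain register copy.  */
#if 0
static bool
example_is_reg_copy (const rtx_insn *insn)
{
  rtx set = single_set (insn);
  return set != NULL_RTX
	 && REG_P (SET_DEST (set))
	 && REG_P (SET_SRC (set));
}
#endif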
1495 /* Given an INSN, return nonzero if it has more than one SET, else return
1496 zero. */
1499 multiple_sets (const_rtx insn)
1501 int found;
1502 int i;
1504 /* INSN must be an insn. */
1505 if (! INSN_P (insn))
1506 return 0;
1508 /* Only a PARALLEL can have multiple SETs. */
1509 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1511 for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1512 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1514 /* If we have already found a SET, then return now. */
1515 if (found)
1516 return 1;
1517 else
1518 found = 1;
1522 /* Either zero or one SET. */
1523 return 0;
1526 /* Return nonzero if the destination of SET equals the source
1527 and there are no side effects. */
1530 set_noop_p (const_rtx set)
1532 rtx src = SET_SRC (set);
1533 rtx dst = SET_DEST (set);
1535 if (dst == pc_rtx && src == pc_rtx)
1536 return 1;
1538 if (MEM_P (dst) && MEM_P (src))
1539 return rtx_equal_p (dst, src) && !side_effects_p (dst);
1541 if (GET_CODE (dst) == ZERO_EXTRACT)
1542 return rtx_equal_p (XEXP (dst, 0), src)
1543 && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1544 && !side_effects_p (src);
1546 if (GET_CODE (dst) == STRICT_LOW_PART)
1547 dst = XEXP (dst, 0);
1549 if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1551 if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
1552 return 0;
1553 src = SUBREG_REG (src);
1554 dst = SUBREG_REG (dst);
1557 /* It is a NOOP if destination overlaps with selected src vector
1558 elements. */
1559 if (GET_CODE (src) == VEC_SELECT
1560 && REG_P (XEXP (src, 0)) && REG_P (dst)
1561 && HARD_REGISTER_P (XEXP (src, 0))
1562 && HARD_REGISTER_P (dst))
1564 int i;
1565 rtx par = XEXP (src, 1);
1566 rtx src0 = XEXP (src, 0);
1567 int c0 = INTVAL (XVECEXP (par, 0, 0));
1568 HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
1570 for (i = 1; i < XVECLEN (par, 0); i++)
1571 if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
1572 return 0;
1573 return
1574 simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
1575 offset, GET_MODE (dst)) == (int) REGNO (dst);
1578 return (REG_P (src) && REG_P (dst)
1579 && REGNO (src) == REGNO (dst));
1582 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1583 value to itself. */
1586 noop_move_p (const rtx_insn *insn)
1588 rtx pat = PATTERN (insn);
1590 if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1591 return 1;
1593 /* Insns carrying these notes are useful later on. */
1594 if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1595 return 0;
1597 /* Check the code to be executed for COND_EXEC. */
1598 if (GET_CODE (pat) == COND_EXEC)
1599 pat = COND_EXEC_CODE (pat);
1601 if (GET_CODE (pat) == SET && set_noop_p (pat))
1602 return 1;
1604 if (GET_CODE (pat) == PARALLEL)
1606 int i;
1607 /* If nothing but SETs of registers to themselves,
1608 this insn can also be deleted. */
1609 for (i = 0; i < XVECLEN (pat, 0); i++)
1611 rtx tem = XVECEXP (pat, 0, i);
1613 if (GET_CODE (tem) == USE
1614 || GET_CODE (tem) == CLOBBER)
1615 continue;
1617 if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1618 return 0;
1621 return 1;
1623 return 0;
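/* Illustrative usage sketch (editorial addition): cleanup passes can drop
   insns that noop_move_p accepts, e.g. (set (reg 1) (reg 1)).  delete_insn
   is assumed to be available from the cfg layer.  */
#if 0
static void
example_delete_noop_moves (rtx_insn *first)
{
  rtx_insn *next;
  for (rtx_insn *insn = first; insn; insn = next)
    {
      next = NEXT_INSN (insn);
      if (NONDEBUG_INSN_P (insn) && noop_move_p (insn))
	delete_insn (insn);
    }
}
#endif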
1627 /* Return nonzero if register in range [REGNO, ENDREGNO)
1628 appears either explicitly or implicitly in X
1629 other than being stored into.
1631 References contained within the substructure at LOC do not count.
1632 LOC may be zero, meaning don't ignore anything. */
1634 bool
1635 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1636 rtx *loc)
1638 int i;
1639 unsigned int x_regno;
1640 RTX_CODE code;
1641 const char *fmt;
1643 repeat:
1644 /* The contents of a REG_NONNEG note are always zero, so we must come here
1645 upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
1646 if (x == 0)
1647 return false;
1649 code = GET_CODE (x);
1651 switch (code)
1653 case REG:
1654 x_regno = REGNO (x);
1656 /* If we are modifying the stack, frame, or argument pointer, it will
1657 clobber a virtual register. In fact, we could be more precise,
1658 but it isn't worth it. */
1659 if ((x_regno == STACK_POINTER_REGNUM
1660 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1661 && x_regno == ARG_POINTER_REGNUM)
1662 || x_regno == FRAME_POINTER_REGNUM)
1663 && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1664 return true;
1666 return endregno > x_regno && regno < END_REGNO (x);
1668 case SUBREG:
1669 /* If this is a SUBREG of a hard reg, we can see exactly which
1670 registers are being modified. Otherwise, handle normally. */
1671 if (REG_P (SUBREG_REG (x))
1672 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1674 unsigned int inner_regno = subreg_regno (x);
1675 unsigned int inner_endregno
1676 = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1677 ? subreg_nregs (x) : 1);
1679 return endregno > inner_regno && regno < inner_endregno;
1681 break;
1683 case CLOBBER:
1684 case SET:
1685 if (&SET_DEST (x) != loc
1686 /* Note setting a SUBREG counts as referring to the REG it is in for
1687 a pseudo but not for hard registers since we can
1688 treat each word individually. */
1689 && ((GET_CODE (SET_DEST (x)) == SUBREG
1690 && loc != &SUBREG_REG (SET_DEST (x))
1691 && REG_P (SUBREG_REG (SET_DEST (x)))
1692 && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1693 && refers_to_regno_p (regno, endregno,
1694 SUBREG_REG (SET_DEST (x)), loc))
1695 || (!REG_P (SET_DEST (x))
1696 && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1697 return true;
1699 if (code == CLOBBER || loc == &SET_SRC (x))
1700 return false;
1701 x = SET_SRC (x);
1702 goto repeat;
1704 default:
1705 break;
1708 /* X does not match, so try its subexpressions. */
1710 fmt = GET_RTX_FORMAT (code);
1711 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1713 if (fmt[i] == 'e' && loc != &XEXP (x, i))
1715 if (i == 0)
1717 x = XEXP (x, 0);
1718 goto repeat;
1720 else
1721 if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1722 return true;
1724 else if (fmt[i] == 'E')
1726 int j;
1727 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1728 if (loc != &XVECEXP (x, i, j)
1729 && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1730 return true;
1733 return false;
1736 /* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
1737 we check if any register number in X conflicts with the relevant register
1738 numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
1739 contains a MEM (we don't bother checking for memory addresses that can't
1740 conflict because we expect this to be a rare case). */
1743 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1745 unsigned int regno, endregno;
1747 /* If either argument is a constant, then modifying X can not
1748 affect IN. Here we look at IN, we can profitably combine
1749 CONSTANT_P (x) with the switch statement below. */
1750 if (CONSTANT_P (in))
1751 return 0;
1753 recurse:
1754 switch (GET_CODE (x))
1756 case STRICT_LOW_PART:
1757 case ZERO_EXTRACT:
1758 case SIGN_EXTRACT:
1759 /* Overly conservative. */
1760 x = XEXP (x, 0);
1761 goto recurse;
1763 case SUBREG:
1764 regno = REGNO (SUBREG_REG (x));
1765 if (regno < FIRST_PSEUDO_REGISTER)
1766 regno = subreg_regno (x);
1767 endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1768 ? subreg_nregs (x) : 1);
1769 goto do_reg;
1771 case REG:
1772 regno = REGNO (x);
1773 endregno = END_REGNO (x);
1774 do_reg:
1775 return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1777 case MEM:
1779 const char *fmt;
1780 int i;
1782 if (MEM_P (in))
1783 return 1;
1785 fmt = GET_RTX_FORMAT (GET_CODE (in));
1786 for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1787 if (fmt[i] == 'e')
1789 if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1790 return 1;
1792 else if (fmt[i] == 'E')
1794 int j;
1795 for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1796 if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1797 return 1;
1800 return 0;
1803 case SCRATCH:
1804 case PC:
1805 case CC0:
1806 return reg_mentioned_p (x, in);
1808 case PARALLEL:
1810 int i;
1812 /* If any register in here refers to it we return true. */
1813 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1814 if (XEXP (XVECEXP (x, 0, i), 0) != 0
1815 && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1816 return 1;
1817 return 0;
1820 default:
1821 gcc_assert (CONSTANT_P (x));
1822 return 0;
1826 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1827 (X would be the pattern of an insn). DATA is an arbitrary pointer,
1828 ignored by note_stores, but passed to FUN.
1830 FUN receives three arguments:
1831 1. the REG, MEM, CC0 or PC being stored in or clobbered,
1832 2. the SET or CLOBBER rtx that does the store,
1833 3. the pointer DATA provided to note_stores.
1835 If the item being stored in or clobbered is a SUBREG of a hard register,
1836 the SUBREG will be passed. */
1838 void
1839 note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
1841 int i;
1843 if (GET_CODE (x) == COND_EXEC)
1844 x = COND_EXEC_CODE (x);
1846 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1848 rtx dest = SET_DEST (x);
1850 while ((GET_CODE (dest) == SUBREG
1851 && (!REG_P (SUBREG_REG (dest))
1852 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1853 || GET_CODE (dest) == ZERO_EXTRACT
1854 || GET_CODE (dest) == STRICT_LOW_PART)
1855 dest = XEXP (dest, 0);
1857 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1858 each of whose first operand is a register. */
1859 if (GET_CODE (dest) == PARALLEL)
1861 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1862 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1863 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1865 else
1866 (*fun) (dest, x, data);
1869 else if (GET_CODE (x) == PARALLEL)
1870 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1871 note_stores (XVECEXP (x, 0, i), fun, data);
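/* Illustrative usage sketch (editorial addition): the usual pattern is to
   pass a small callback plus a pointer to the state it fills in.
   record_hard_reg_sets above is such a callback; this mirrors what
   find_all_hard_reg_sets does for the non-call case.  */
#if 0
static void
example_collect_hard_reg_stores (const rtx_insn *insn, HARD_REG_SET *set)
{
  CLEAR_HARD_REG_SET (*set);
  note_stores (PATTERN (insn), record_hard_reg_sets, set);
}
#endif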
1874 /* Like note_stores, but call FUN for each expression that is being
1875 referenced in PBODY, a pointer to the PATTERN of an insn. We only call
1876 FUN for each expression, not any interior subexpressions. FUN receives a
1877 pointer to the expression and the DATA passed to this function.
1879 Note that this is not quite the same test as that done in reg_referenced_p
1880 since that considers something as being referenced if it is being
1881 partially set, while we do not. */
1883 void
1884 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1886 rtx body = *pbody;
1887 int i;
1889 switch (GET_CODE (body))
1891 case COND_EXEC:
1892 (*fun) (&COND_EXEC_TEST (body), data);
1893 note_uses (&COND_EXEC_CODE (body), fun, data);
1894 return;
1896 case PARALLEL:
1897 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1898 note_uses (&XVECEXP (body, 0, i), fun, data);
1899 return;
1901 case SEQUENCE:
1902 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1903 note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1904 return;
1906 case USE:
1907 (*fun) (&XEXP (body, 0), data);
1908 return;
1910 case ASM_OPERANDS:
1911 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1912 (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
1913 return;
1915 case TRAP_IF:
1916 (*fun) (&TRAP_CONDITION (body), data);
1917 return;
1919 case PREFETCH:
1920 (*fun) (&XEXP (body, 0), data);
1921 return;
1923 case UNSPEC:
1924 case UNSPEC_VOLATILE:
1925 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1926 (*fun) (&XVECEXP (body, 0, i), data);
1927 return;
1929 case CLOBBER:
1930 if (MEM_P (XEXP (body, 0)))
1931 (*fun) (&XEXP (XEXP (body, 0), 0), data);
1932 return;
1934 case SET:
1936 rtx dest = SET_DEST (body);
1938 /* For sets we replace everything in source plus registers in memory
1939 expression in store and operands of a ZERO_EXTRACT. */
1940 (*fun) (&SET_SRC (body), data);
1942 if (GET_CODE (dest) == ZERO_EXTRACT)
1944 (*fun) (&XEXP (dest, 1), data);
1945 (*fun) (&XEXP (dest, 2), data);
1948 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
1949 dest = XEXP (dest, 0);
1951 if (MEM_P (dest))
1952 (*fun) (&XEXP (dest, 0), data);
1954 return;
1956 default:
1957 /* All the other possibilities never store. */
1958 (*fun) (pbody, data);
1959 return;
1963 /* Return nonzero if X's old contents don't survive after INSN.
1964 This will be true if X is (cc0) or if X is a register and
1965 X dies in INSN or because INSN entirely sets X.
1967 "Entirely set" means set directly and not through a SUBREG, or
1968 ZERO_EXTRACT, so no trace of the old contents remains.
1969 Likewise, REG_INC does not count.
1971 REG may be a hard or pseudo reg. Renumbering is not taken into account,
1972 but for this use that makes no difference, since regs don't overlap
1973 during their lifetimes. Therefore, this function may be used
1974 at any time after deaths have been computed.
1976 If REG is a hard reg that occupies multiple machine registers, this
1977 function will only return 1 if each of those registers will be replaced
1978 by INSN. */
1981 dead_or_set_p (const_rtx insn, const_rtx x)
1983 unsigned int regno, end_regno;
1984 unsigned int i;
1986 /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
1987 if (GET_CODE (x) == CC0)
1988 return 1;
1990 gcc_assert (REG_P (x));
1992 regno = REGNO (x);
1993 end_regno = END_REGNO (x);
1994 for (i = regno; i < end_regno; i++)
1995 if (! dead_or_set_regno_p (insn, i))
1996 return 0;
1998 return 1;
2001 /* Return TRUE iff DEST is a register or subreg of a register and
2002 doesn't change the number of words of the inner register, and any
2003 part of the register is TEST_REGNO. */
2005 static bool
2006 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
2008 unsigned int regno, endregno;
2010 if (GET_CODE (dest) == SUBREG
2011 && (((GET_MODE_SIZE (GET_MODE (dest))
2012 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
2013 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
2014 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
2015 dest = SUBREG_REG (dest);
2017 if (!REG_P (dest))
2018 return false;
2020 regno = REGNO (dest);
2021 endregno = END_REGNO (dest);
2022 return (test_regno >= regno && test_regno < endregno);
2025 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
2026 any member matches the covers_regno_no_parallel_p criteria. */
2028 static bool
2029 covers_regno_p (const_rtx dest, unsigned int test_regno)
2031 if (GET_CODE (dest) == PARALLEL)
2033 /* Some targets place small structures in registers for return
2034 values of functions, and those registers are wrapped in
2035 PARALLELs that we may see as the destination of a SET. */
2036 int i;
2038 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2040 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
2041 if (inner != NULL_RTX
2042 && covers_regno_no_parallel_p (inner, test_regno))
2043 return true;
2046 return false;
2048 else
2049 return covers_regno_no_parallel_p (dest, test_regno);
2052 /* Utility function for dead_or_set_p to check an individual register. */
2055 dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
2057 const_rtx pattern;
2059 /* See if there is a death note for something that includes TEST_REGNO. */
2060 if (find_regno_note (insn, REG_DEAD, test_regno))
2061 return 1;
2063 if (CALL_P (insn)
2064 && find_regno_fusage (insn, CLOBBER, test_regno))
2065 return 1;
2067 pattern = PATTERN (insn);
2069 /* If a COND_EXEC is not executed, the value survives. */
2070 if (GET_CODE (pattern) == COND_EXEC)
2071 return 0;
2073 if (GET_CODE (pattern) == SET)
2074 return covers_regno_p (SET_DEST (pattern), test_regno);
2075 else if (GET_CODE (pattern) == PARALLEL)
2077 int i;
2079 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
2081 rtx body = XVECEXP (pattern, 0, i);
2083 if (GET_CODE (body) == COND_EXEC)
2084 body = COND_EXEC_CODE (body);
2086 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
2087 && covers_regno_p (SET_DEST (body), test_regno))
2088 return 1;
2092 return 0;
2095 /* Return the reg-note of kind KIND in insn INSN, if there is one.
2096 If DATUM is nonzero, look for one whose datum is DATUM. */
2099 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
2101 rtx link;
2103 gcc_checking_assert (insn);
2105 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
2106 if (! INSN_P (insn))
2107 return 0;
2108 if (datum == 0)
2110 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2111 if (REG_NOTE_KIND (link) == kind)
2112 return link;
2113 return 0;
2116 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2117 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
2118 return link;
2119 return 0;
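/* Illustrative usage sketch (editorial addition): a REG_EQUAL note records
   a value the SET destination is known to equal at the end of the insn;
   passing a null DATUM matches any note of that kind, as noop_move_p does
   above.  The helper is hypothetical.  */
#if 0
static rtx
example_known_equal_value (const_rtx insn)
{
  rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
  return note ? XEXP (note, 0) : NULL_RTX;
}
#endif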
2122 /* Return the reg-note of kind KIND in insn INSN which applies to register
2123 number REGNO, if any. Return 0 if there is no such reg-note. Note that
2124 the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
2125 it might be the case that the note overlaps REGNO. */
2128 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
2130 rtx link;
2132 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
2133 if (! INSN_P (insn))
2134 return 0;
2136 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2137 if (REG_NOTE_KIND (link) == kind
2138 /* Verify that it is a register, so that scratch and MEM won't cause a
2139 problem here. */
2140 && REG_P (XEXP (link, 0))
2141 && REGNO (XEXP (link, 0)) <= regno
2142 && END_REGNO (XEXP (link, 0)) > regno)
2143 return link;
2144 return 0;
2147 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
2148 has such a note. */
2151 find_reg_equal_equiv_note (const_rtx insn)
2153 rtx link;
2155 if (!INSN_P (insn))
2156 return 0;
2158 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2159 if (REG_NOTE_KIND (link) == REG_EQUAL
2160 || REG_NOTE_KIND (link) == REG_EQUIV)
2162 /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
2163 insns that have multiple sets. Checking single_set to
2164 make sure of this is not the proper check, as explained
2165 in the comment in set_unique_reg_note.
2167 This should be changed into an assert. */
2168 if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
2169 return 0;
2170 return link;
2172 return NULL;
2175 /* Check whether INSN is a single_set whose source is known to be
2176 equivalent to a constant. Return that constant if so, otherwise
2177 return null. */
2180 find_constant_src (const rtx_insn *insn)
2182 rtx note, set, x;
2184 set = single_set (insn);
2185 if (set)
2187 x = avoid_constant_pool_reference (SET_SRC (set));
2188 if (CONSTANT_P (x))
2189 return x;
2192 note = find_reg_equal_equiv_note (insn);
2193 if (note && CONSTANT_P (XEXP (note, 0)))
2194 return XEXP (note, 0);
2196 return NULL_RTX;
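
/* Illustrative sketch, not part of the original rtlanal.c: find_constant_src
   answers "does this insn simply load a constant?".  Hypothetical helper;
   assumes rtl.h is in scope.  */

static bool
example_loads_constant_p (const rtx_insn *insn)
{
  return find_constant_src (insn) != NULL_RTX;
}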
2199 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
2200 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2203 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
2205 /* If it's not a CALL_INSN, it can't possibly have a
2206 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
2207 if (!CALL_P (insn))
2208 return 0;
2210 gcc_assert (datum);
2212 if (!REG_P (datum))
2214 rtx link;
2216 for (link = CALL_INSN_FUNCTION_USAGE (insn);
2217 link;
2218 link = XEXP (link, 1))
2219 if (GET_CODE (XEXP (link, 0)) == code
2220 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2221 return 1;
2223 else
2225 unsigned int regno = REGNO (datum);
2227 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2228 to pseudo registers, so don't bother checking. */
2230 if (regno < FIRST_PSEUDO_REGISTER)
2232 unsigned int end_regno = END_REGNO (datum);
2233 unsigned int i;
2235 for (i = regno; i < end_regno; i++)
2236 if (find_regno_fusage (insn, code, i))
2237 return 1;
2241 return 0;
2244 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2245 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2248 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2250 rtx link;
2252 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2253 to pseudo registers, so don't bother checking. */
2255 if (regno >= FIRST_PSEUDO_REGISTER
2256 || !CALL_P (insn) )
2257 return 0;
2259 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2261 rtx op, reg;
2263 if (GET_CODE (op = XEXP (link, 0)) == code
2264 && REG_P (reg = XEXP (op, 0))
2265 && REGNO (reg) <= regno
2266 && END_REGNO (reg) > regno)
2267 return 1;
2270 return 0;
2274 /* Return true if KIND is an integer REG_NOTE. */
2276 static bool
2277 int_reg_note_p (enum reg_note kind)
2279 return kind == REG_BR_PROB;
2282 /* Allocate a register note with kind KIND and datum DATUM. LIST is
2283 stored as the pointer to the next register note. */
2286 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2288 rtx note;
2290 gcc_checking_assert (!int_reg_note_p (kind));
2291 switch (kind)
2293 case REG_CC_SETTER:
2294 case REG_CC_USER:
2295 case REG_LABEL_TARGET:
2296 case REG_LABEL_OPERAND:
2297 case REG_TM:
2298 /* These types of register notes use an INSN_LIST rather than an
2299 EXPR_LIST, so that copying is done right and dumps look
2300 better. */
2301 note = alloc_INSN_LIST (datum, list);
2302 PUT_REG_NOTE_KIND (note, kind);
2303 break;
2305 default:
2306 note = alloc_EXPR_LIST (kind, datum, list);
2307 break;
2310 return note;
2313 /* Add register note with kind KIND and datum DATUM to INSN. */
2315 void
2316 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2318 REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
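
/* Illustrative sketch, not part of the original rtlanal.c: attaching a
   REG_EQUAL note recording that the destination of INSN is known to equal
   the constant 42.  Hypothetical helper; assumes rtl.h and emit-rtl.h are
   in scope.  */

static void
example_note_constant_result (rtx_insn *insn)
{
  add_reg_note (insn, REG_EQUAL, GEN_INT (42));
}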
2321 /* Add an integer register note with kind KIND and datum DATUM to INSN. */
2323 void
2324 add_int_reg_note (rtx insn, enum reg_note kind, int datum)
2326 gcc_checking_assert (int_reg_note_p (kind));
2327 REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2328 datum, REG_NOTES (insn));
2331 /* Add a register note like NOTE to INSN. */
2333 void
2334 add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2336 if (GET_CODE (note) == INT_LIST)
2337 add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2338 else
2339 add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2342 /* Remove register note NOTE from the REG_NOTES of INSN. */
2344 void
2345 remove_note (rtx insn, const_rtx note)
2347 rtx link;
2349 if (note == NULL_RTX)
2350 return;
2352 if (REG_NOTES (insn) == note)
2353 REG_NOTES (insn) = XEXP (note, 1);
2354 else
2355 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2356 if (XEXP (link, 1) == note)
2358 XEXP (link, 1) = XEXP (note, 1);
2359 break;
2362 switch (REG_NOTE_KIND (note))
2364 case REG_EQUAL:
2365 case REG_EQUIV:
2366 df_notes_rescan (as_a <rtx_insn *> (insn));
2367 break;
2368 default:
2369 break;
2373 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes. */
2375 void
2376 remove_reg_equal_equiv_notes (rtx_insn *insn)
2378 rtx *loc;
2380 loc = &REG_NOTES (insn);
2381 while (*loc)
2383 enum reg_note kind = REG_NOTE_KIND (*loc);
2384 if (kind == REG_EQUAL || kind == REG_EQUIV)
2385 *loc = XEXP (*loc, 1);
2386 else
2387 loc = &XEXP (*loc, 1);
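
/* Illustrative sketch, not part of the original rtlanal.c: after a pass
   rewrites the source of a single_set insn, any REG_EQUAL/REG_EQUIV
   information may no longer describe the insn and is usually dropped.
   Hypothetical helper; assumes rtl.h and recog.h (for validate_change)
   are in scope.  */

static void
example_replace_src (rtx_insn *insn, rtx new_src)
{
  rtx set = single_set (insn);
  if (set && validate_change (insn, &SET_SRC (set), new_src, false))
    remove_reg_equal_equiv_notes (insn);
}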
2391 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO. */
2393 void
2394 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2396 df_ref eq_use;
2398 if (!df)
2399 return;
2401 /* This loop is a little tricky. We cannot just go down the chain because
2402 it is being modified by some actions in the loop. So we just iterate
2403 over the head. We plan to drain the list anyway. */
2404 while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2406 rtx_insn *insn = DF_REF_INSN (eq_use);
2407 rtx note = find_reg_equal_equiv_note (insn);
2409 /* This assert is generally triggered when someone deletes a REG_EQUAL
2410 or REG_EQUIV note by hacking the list manually rather than calling
2411 remove_note. */
2412 gcc_assert (note);
2414 remove_note (insn, note);
2418 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2419 return 1 if it is found. A simple equality test is used to determine if
2420 NODE matches. */
2422 bool
2423 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2425 const_rtx x;
2427 for (x = listp; x; x = XEXP (x, 1))
2428 if (node == XEXP (x, 0))
2429 return true;
2431 return false;
2434 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2435 remove that entry from the list if it is found.
2437 A simple equality test is used to determine if NODE matches. */
2439 void
2440 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2442 rtx_expr_list *temp = *listp;
2443 rtx_expr_list *prev = NULL;
2445 while (temp)
2447 if (node == temp->element ())
2449 /* Splice the node out of the list. */
2450 if (prev)
2451 XEXP (prev, 1) = temp->next ();
2452 else
2453 *listp = temp->next ();
2455 return;
2458 prev = temp;
2459 temp = temp->next ();
2463 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2464 remove that entry from the list if it is found.
2466 A simple equality test is used to determine if NODE matches. */
2468 void
2469 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2471 rtx_insn_list *temp = *listp;
2472 rtx_insn_list *prev = NULL;
2474 while (temp)
2476 if (node == temp->insn ())
2478 /* Splice the node out of the list. */
2479 if (prev)
2480 XEXP (prev, 1) = temp->next ();
2481 else
2482 *listp = temp->next ();
2484 return;
2487 prev = temp;
2488 temp = temp->next ();
2492 /* Nonzero if X contains any volatile instructions. These are instructions
2493 which may cause unpredictable changes to machine state, and thus no
2494 instructions or register uses should be moved or combined across them.
2495 This includes only volatile asms and UNSPEC_VOLATILE instructions. */
2498 volatile_insn_p (const_rtx x)
2500 const RTX_CODE code = GET_CODE (x);
2501 switch (code)
2503 case LABEL_REF:
2504 case SYMBOL_REF:
2505 case CONST:
2506 CASE_CONST_ANY:
2507 case CC0:
2508 case PC:
2509 case REG:
2510 case SCRATCH:
2511 case CLOBBER:
2512 case ADDR_VEC:
2513 case ADDR_DIFF_VEC:
2514 case CALL:
2515 case MEM:
2516 return 0;
2518 case UNSPEC_VOLATILE:
2519 return 1;
2521 case ASM_INPUT:
2522 case ASM_OPERANDS:
2523 if (MEM_VOLATILE_P (x))
2524 return 1;
2526 default:
2527 break;
2530 /* Recursively scan the operands of this expression. */
2533 const char *const fmt = GET_RTX_FORMAT (code);
2534 int i;
2536 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2538 if (fmt[i] == 'e')
2540 if (volatile_insn_p (XEXP (x, i)))
2541 return 1;
2543 else if (fmt[i] == 'E')
2545 int j;
2546 for (j = 0; j < XVECLEN (x, i); j++)
2547 if (volatile_insn_p (XVECEXP (x, i, j)))
2548 return 1;
2552 return 0;
2555 /* Nonzero if X contains any volatile memory references
2556 UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions. */
2559 volatile_refs_p (const_rtx x)
2561 const RTX_CODE code = GET_CODE (x);
2562 switch (code)
2564 case LABEL_REF:
2565 case SYMBOL_REF:
2566 case CONST:
2567 CASE_CONST_ANY:
2568 case CC0:
2569 case PC:
2570 case REG:
2571 case SCRATCH:
2572 case CLOBBER:
2573 case ADDR_VEC:
2574 case ADDR_DIFF_VEC:
2575 return 0;
2577 case UNSPEC_VOLATILE:
2578 return 1;
2580 case MEM:
2581 case ASM_INPUT:
2582 case ASM_OPERANDS:
2583 if (MEM_VOLATILE_P (x))
2584 return 1;
2586 default:
2587 break;
2590 /* Recursively scan the operands of this expression. */
2593 const char *const fmt = GET_RTX_FORMAT (code);
2594 int i;
2596 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2598 if (fmt[i] == 'e')
2600 if (volatile_refs_p (XEXP (x, i)))
2601 return 1;
2603 else if (fmt[i] == 'E')
2605 int j;
2606 for (j = 0; j < XVECLEN (x, i); j++)
2607 if (volatile_refs_p (XVECEXP (x, i, j)))
2608 return 1;
2612 return 0;
2615 /* Similar to above, except that it also rejects register pre- and post-
2616 incrementing. */
2619 side_effects_p (const_rtx x)
2621 const RTX_CODE code = GET_CODE (x);
2622 switch (code)
2624 case LABEL_REF:
2625 case SYMBOL_REF:
2626 case CONST:
2627 CASE_CONST_ANY:
2628 case CC0:
2629 case PC:
2630 case REG:
2631 case SCRATCH:
2632 case ADDR_VEC:
2633 case ADDR_DIFF_VEC:
2634 case VAR_LOCATION:
2635 return 0;
2637 case CLOBBER:
2638 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2639 when some combination can't be done. If we see one, don't think
2640 that we can simplify the expression. */
2641 return (GET_MODE (x) != VOIDmode);
2643 case PRE_INC:
2644 case PRE_DEC:
2645 case POST_INC:
2646 case POST_DEC:
2647 case PRE_MODIFY:
2648 case POST_MODIFY:
2649 case CALL:
2650 case UNSPEC_VOLATILE:
2651 return 1;
2653 case MEM:
2654 case ASM_INPUT:
2655 case ASM_OPERANDS:
2656 if (MEM_VOLATILE_P (x))
2657 return 1;
2659 default:
2660 break;
2663 /* Recursively scan the operands of this expression. */
2666 const char *fmt = GET_RTX_FORMAT (code);
2667 int i;
2669 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2671 if (fmt[i] == 'e')
2673 if (side_effects_p (XEXP (x, i)))
2674 return 1;
2676 else if (fmt[i] == 'E')
2678 int j;
2679 for (j = 0; j < XVECLEN (x, i); j++)
2680 if (side_effects_p (XVECEXP (x, i, j)))
2681 return 1;
2685 return 0;
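
/* Illustrative sketch, not part of the original rtlanal.c, showing how the
   three predicates above differ on a plain auto-increment load.  The helper
   is hypothetical and assumes rtl.h is in scope and that a function is
   currently being compiled (for gen_reg_rtx).  */

static void
example_side_effect_predicates (void)
{
  rtx reg = gen_reg_rtx (Pmode);
  rtx mem = gen_rtx_MEM (SImode, gen_rtx_POST_INC (Pmode, reg));

  /* A non-volatile MEM with a POST_INC address:
       volatile_insn_p (mem) == 0  (no volatile asm or UNSPEC_VOLATILE),
       volatile_refs_p (mem) == 0  (the MEM itself is not volatile),
       side_effects_p (mem)  != 0  (the POST_INC modifies REG).  */
  gcc_checking_assert (!volatile_insn_p (mem)
		       && !volatile_refs_p (mem)
		       && side_effects_p (mem));
}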
2688 /* Return nonzero if evaluating rtx X might cause a trap.
2689 FLAGS controls how to consider MEMs. A nonzero value means the context
2690 of the access may have changed from the original, such that the
2691 address may have become invalid. */
2694 may_trap_p_1 (const_rtx x, unsigned flags)
2696 int i;
2697 enum rtx_code code;
2698 const char *fmt;
2700 /* We make no distinction currently, but this function is part of
2701 the internal target-hooks ABI so we keep the parameter as
2702 "unsigned flags". */
2703 bool code_changed = flags != 0;
2705 if (x == 0)
2706 return 0;
2707 code = GET_CODE (x);
2708 switch (code)
2710 /* Handle these cases quickly. */
2711 CASE_CONST_ANY:
2712 case SYMBOL_REF:
2713 case LABEL_REF:
2714 case CONST:
2715 case PC:
2716 case CC0:
2717 case REG:
2718 case SCRATCH:
2719 return 0;
2721 case UNSPEC:
2722 return targetm.unspec_may_trap_p (x, flags);
2724 case UNSPEC_VOLATILE:
2725 case ASM_INPUT:
2726 case TRAP_IF:
2727 return 1;
2729 case ASM_OPERANDS:
2730 return MEM_VOLATILE_P (x);
2732 /* Memory ref can trap unless it's a static var or a stack slot. */
2733 case MEM:
2734 /* Recognize specific pattern of stack checking probes. */
2735 if (flag_stack_check
2736 && MEM_VOLATILE_P (x)
2737 && XEXP (x, 0) == stack_pointer_rtx)
2738 return 1;
2739 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2740 reference; moving it out of context such as when moving code
2741 when optimizing, might cause its address to become invalid. */
2742 code_changed
2743 || !MEM_NOTRAP_P (x))
2745 HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
2746 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2747 GET_MODE (x), code_changed);
2750 return 0;
2752 /* Division by a non-constant might trap. */
2753 case DIV:
2754 case MOD:
2755 case UDIV:
2756 case UMOD:
2757 if (HONOR_SNANS (x))
2758 return 1;
2759 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2760 return flag_trapping_math;
2761 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2762 return 1;
2763 break;
2765 case EXPR_LIST:
2766 /* An EXPR_LIST is used to represent a function call. This
2767 certainly may trap. */
2768 return 1;
2770 case GE:
2771 case GT:
2772 case LE:
2773 case LT:
2774 case LTGT:
2775 case COMPARE:
2776 /* Some floating point comparisons may trap. */
2777 if (!flag_trapping_math)
2778 break;
2779 /* ??? There is no machine independent way to check for tests that trap
2780 when COMPARE is used, though many targets do make this distinction.
2781 For instance, sparc uses CCFPE for compares which generate exceptions
2782 and CCFP for compares which do not generate exceptions. */
2783 if (HONOR_NANS (x))
2784 return 1;
2785 /* But often the compare has some CC mode, so check operand
2786 modes as well. */
2787 if (HONOR_NANS (XEXP (x, 0))
2788 || HONOR_NANS (XEXP (x, 1)))
2789 return 1;
2790 break;
2792 case EQ:
2793 case NE:
2794 if (HONOR_SNANS (x))
2795 return 1;
2796 /* Often comparison is CC mode, so check operand modes. */
2797 if (HONOR_SNANS (XEXP (x, 0))
2798 || HONOR_SNANS (XEXP (x, 1)))
2799 return 1;
2800 break;
2802 case FIX:
2803 /* Conversion of floating point might trap. */
2804 if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
2805 return 1;
2806 break;
2808 case NEG:
2809 case ABS:
2810 case SUBREG:
2811 /* These operations don't trap even with floating point. */
2812 break;
2814 default:
2815 /* Any floating arithmetic may trap. */
2816 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2817 return 1;
2820 fmt = GET_RTX_FORMAT (code);
2821 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2823 if (fmt[i] == 'e')
2825 if (may_trap_p_1 (XEXP (x, i), flags))
2826 return 1;
2828 else if (fmt[i] == 'E')
2830 int j;
2831 for (j = 0; j < XVECLEN (x, i); j++)
2832 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2833 return 1;
2836 return 0;
2839 /* Return nonzero if evaluating rtx X might cause a trap. */
2842 may_trap_p (const_rtx x)
2844 return may_trap_p_1 (x, 0);
2847 /* Same as above, but additionally return nonzero if evaluating rtx X might
2848 cause a fault. We define a fault for the purpose of this function as an
2849 erroneous execution condition that cannot be encountered during the normal
2850 execution of a valid program; the typical example is an unaligned memory
2851 access on a strict alignment machine. The compiler guarantees that it
2852 doesn't generate code that will fault from a valid program, but this
2853 guarantee doesn't mean anything for individual instructions. Consider
2854 the following example:
2856 struct S { int d; union { char *cp; int *ip; }; };
2858 int foo(struct S *s)
2860 if (s->d == 1)
2861 return *s->ip;
2862 else
2863 return *s->cp;
2866 on a strict alignment machine. In a valid program, foo will never be
2867 invoked on a structure for which d is equal to 1 and the underlying
2868 unique field of the union is not aligned on a 4-byte boundary, but the
2869 expression *s->ip might cause a fault if considered individually.
2871 At the RTL level, potentially problematic expressions will almost always
2872 satisfy may_trap_p; for example, the above dereference can be emitted as
2873 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2874 However, suppose that foo is inlined in a caller that causes s->cp to
2875 point to a local character variable and guarantees that s->d is not set
2876 to 1; foo may have been effectively translated into pseudo-RTL as:
2878 if ((reg:SI) == 1)
2879 (set (reg:SI) (mem:SI (%fp - 7)))
2880 else
2881 (set (reg:QI) (mem:QI (%fp - 7)))
2883 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2884 memory reference to a stack slot, but it will certainly cause a fault
2885 on a strict alignment machine. */
2888 may_trap_or_fault_p (const_rtx x)
2890 return may_trap_p_1 (x, 1);
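
/* Illustrative sketch, not part of the original rtlanal.c: code that hoists
   or speculates an expression evaluates it in a context the original
   program never reached, so it should ask may_trap_or_fault_p rather than
   may_trap_p.  Hypothetical helper; assumes rtl.h is in scope.  */

static bool
example_safe_to_speculate_p (rtx x)
{
  return !side_effects_p (x) && !may_trap_or_fault_p (x);
}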
2893 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2894 i.e., an inequality. */
2897 inequality_comparisons_p (const_rtx x)
2899 const char *fmt;
2900 int len, i;
2901 const enum rtx_code code = GET_CODE (x);
2903 switch (code)
2905 case REG:
2906 case SCRATCH:
2907 case PC:
2908 case CC0:
2909 CASE_CONST_ANY:
2910 case CONST:
2911 case LABEL_REF:
2912 case SYMBOL_REF:
2913 return 0;
2915 case LT:
2916 case LTU:
2917 case GT:
2918 case GTU:
2919 case LE:
2920 case LEU:
2921 case GE:
2922 case GEU:
2923 return 1;
2925 default:
2926 break;
2929 len = GET_RTX_LENGTH (code);
2930 fmt = GET_RTX_FORMAT (code);
2932 for (i = 0; i < len; i++)
2934 if (fmt[i] == 'e')
2936 if (inequality_comparisons_p (XEXP (x, i)))
2937 return 1;
2939 else if (fmt[i] == 'E')
2941 int j;
2942 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2943 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2944 return 1;
2948 return 0;
2951 /* Replace any occurrence of FROM in X with TO. The function does
2952 not enter into CONST_DOUBLE for the replace.
2954 Note that copying is not done so X must not be shared unless all copies
2955 are to be modified. */
2958 replace_rtx (rtx x, rtx from, rtx to)
2960 int i, j;
2961 const char *fmt;
2963 if (x == from)
2964 return to;
2966 /* Allow this function to make replacements in EXPR_LISTs. */
2967 if (x == 0)
2968 return 0;
2970 if (GET_CODE (x) == SUBREG)
2972 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);
2974 if (CONST_INT_P (new_rtx))
2976 x = simplify_subreg (GET_MODE (x), new_rtx,
2977 GET_MODE (SUBREG_REG (x)),
2978 SUBREG_BYTE (x));
2979 gcc_assert (x);
2981 else
2982 SUBREG_REG (x) = new_rtx;
2984 return x;
2986 else if (GET_CODE (x) == ZERO_EXTEND)
2988 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);
2990 if (CONST_INT_P (new_rtx))
2992 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2993 new_rtx, GET_MODE (XEXP (x, 0)));
2994 gcc_assert (x);
2996 else
2997 XEXP (x, 0) = new_rtx;
2999 return x;
3002 fmt = GET_RTX_FORMAT (GET_CODE (x));
3003 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3005 if (fmt[i] == 'e')
3006 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
3007 else if (fmt[i] == 'E')
3008 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3009 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
3012 return x;
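
/* Illustrative sketch, not part of the original rtlanal.c: because
   replace_rtx modifies X in place, a caller that still needs the original
   pattern performs the substitution on a copy.  Hypothetical helper;
   assumes rtl.h is in scope.  */

static rtx
example_substitute_reg (rtx pattern, rtx old_reg, rtx new_reg)
{
  return replace_rtx (copy_rtx (pattern), old_reg, new_reg);
}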
3015 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL. Also track
3016 the change in LABEL_NUSES if UPDATE_LABEL_NUSES. */
3018 void
3019 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3021 /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long. */
3022 rtx x = *loc;
3023 if (JUMP_TABLE_DATA_P (x))
3025 x = PATTERN (x);
3026 rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3027 int len = GET_NUM_ELEM (vec);
3028 for (int i = 0; i < len; ++i)
3030 rtx ref = RTVEC_ELT (vec, i);
3031 if (XEXP (ref, 0) == old_label)
3033 XEXP (ref, 0) = new_label;
3034 if (update_label_nuses)
3036 ++LABEL_NUSES (new_label);
3037 --LABEL_NUSES (old_label);
3041 return;
3044 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3045 field. This is not handled by the iterator because it doesn't
3046 handle unprinted ('0') fields. */
3047 if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3048 JUMP_LABEL (x) = new_label;
3050 subrtx_ptr_iterator::array_type array;
3051 FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3053 rtx *loc = *iter;
3054 if (rtx x = *loc)
3056 if (GET_CODE (x) == SYMBOL_REF
3057 && CONSTANT_POOL_ADDRESS_P (x))
3059 rtx c = get_pool_constant (x);
3060 if (rtx_referenced_p (old_label, c))
3062 /* Create a copy of constant C; replace the label inside
3063 but do not update LABEL_NUSES because uses in constant pool
3064 are not counted. */
3065 rtx new_c = copy_rtx (c);
3066 replace_label (&new_c, old_label, new_label, false);
3068 /* Add the new constant NEW_C to constant pool and replace
3069 the old reference to constant by new reference. */
3070 rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3071 *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3075 if ((GET_CODE (x) == LABEL_REF
3076 || GET_CODE (x) == INSN_LIST)
3077 && XEXP (x, 0) == old_label)
3079 XEXP (x, 0) = new_label;
3080 if (update_label_nuses)
3082 ++LABEL_NUSES (new_label);
3083 --LABEL_NUSES (old_label);
3090 void
3091 replace_label_in_insn (rtx_insn *insn, rtx old_label, rtx new_label,
3092 bool update_label_nuses)
3094 rtx insn_as_rtx = insn;
3095 replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3096 gcc_checking_assert (insn_as_rtx == insn);
3099 /* Return true if X is referenced in BODY. */
3101 bool
3102 rtx_referenced_p (const_rtx x, const_rtx body)
3104 subrtx_iterator::array_type array;
3105 FOR_EACH_SUBRTX (iter, array, body, ALL)
3106 if (const_rtx y = *iter)
3108 /* Check if a label_ref Y refers to label X. */
3109 if (GET_CODE (y) == LABEL_REF
3110 && LABEL_P (x)
3111 && LABEL_REF_LABEL (y) == x)
3112 return true;
3114 if (rtx_equal_p (x, y))
3115 return true;
3117 /* If Y is a reference to a pool constant, traverse the constant. */
3118 if (GET_CODE (y) == SYMBOL_REF
3119 && CONSTANT_POOL_ADDRESS_P (y))
3120 iter.substitute (get_pool_constant (y));
3122 return false;
3125 /* If INSN is a tablejump return true and store the label (before jump table) to
3126 *LABELP and the jump table to *TABLEP. LABELP and TABLEP may be NULL. */
3128 bool
3129 tablejump_p (const rtx_insn *insn, rtx *labelp, rtx_jump_table_data **tablep)
3131 rtx label;
3132 rtx_insn *table;
3134 if (!JUMP_P (insn))
3135 return false;
3137 label = JUMP_LABEL (insn);
3138 if (label != NULL_RTX && !ANY_RETURN_P (label)
3139 && (table = NEXT_INSN (as_a <rtx_insn *> (label))) != NULL_RTX
3140 && JUMP_TABLE_DATA_P (table))
3142 if (labelp)
3143 *labelp = label;
3144 if (tablep)
3145 *tablep = as_a <rtx_jump_table_data *> (table);
3146 return true;
3148 return false;
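
/* Illustrative sketch, not part of the original rtlanal.c: walking the case
   labels of a tablejump located by tablejump_p.  Hypothetical helper;
   assumes rtl.h is in scope.  */

static void
example_walk_tablejump_labels (const rtx_insn *insn)
{
  rtx_jump_table_data *table;
  if (tablejump_p (insn, NULL, &table))
    {
      rtvec vec = table->get_labels ();
      for (int i = 0; i < GET_NUM_ELEM (vec); i++)
	/* Each element is a LABEL_REF; XEXP (..., 0) is the CODE_LABEL.  */
	gcc_checking_assert (LABEL_P (XEXP (RTVEC_ELT (vec, i), 0)));
    }
}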
3151 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
3152 constant that is not in the constant pool and not in the condition
3153 of an IF_THEN_ELSE. */
3155 static int
3156 computed_jump_p_1 (const_rtx x)
3158 const enum rtx_code code = GET_CODE (x);
3159 int i, j;
3160 const char *fmt;
3162 switch (code)
3164 case LABEL_REF:
3165 case PC:
3166 return 0;
3168 case CONST:
3169 CASE_CONST_ANY:
3170 case SYMBOL_REF:
3171 case REG:
3172 return 1;
3174 case MEM:
3175 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3176 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3178 case IF_THEN_ELSE:
3179 return (computed_jump_p_1 (XEXP (x, 1))
3180 || computed_jump_p_1 (XEXP (x, 2)));
3182 default:
3183 break;
3186 fmt = GET_RTX_FORMAT (code);
3187 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3189 if (fmt[i] == 'e'
3190 && computed_jump_p_1 (XEXP (x, i)))
3191 return 1;
3193 else if (fmt[i] == 'E')
3194 for (j = 0; j < XVECLEN (x, i); j++)
3195 if (computed_jump_p_1 (XVECEXP (x, i, j)))
3196 return 1;
3199 return 0;
3202 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3204 Tablejumps and casesi insns are not considered indirect jumps;
3205 we can recognize them by a (use (label_ref)). */
3208 computed_jump_p (const rtx_insn *insn)
3210 int i;
3211 if (JUMP_P (insn))
3213 rtx pat = PATTERN (insn);
3215 /* If we have a JUMP_LABEL set, we're not a computed jump. */
3216 if (JUMP_LABEL (insn) != NULL)
3217 return 0;
3219 if (GET_CODE (pat) == PARALLEL)
3221 int len = XVECLEN (pat, 0);
3222 int has_use_labelref = 0;
3224 for (i = len - 1; i >= 0; i--)
3225 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3226 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3227 == LABEL_REF))
3229 has_use_labelref = 1;
3230 break;
3233 if (! has_use_labelref)
3234 for (i = len - 1; i >= 0; i--)
3235 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3236 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3237 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3238 return 1;
3240 else if (GET_CODE (pat) == SET
3241 && SET_DEST (pat) == pc_rtx
3242 && computed_jump_p_1 (SET_SRC (pat)))
3243 return 1;
3245 return 0;
3250 /* MEM has a PRE/POST-INC/DEC/MODIFY address X. Extract the operands of
3251 the equivalent add insn and pass the result to FN, using DATA as the
3252 final argument. */
3254 static int
3255 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3257 rtx x = XEXP (mem, 0);
3258 switch (GET_CODE (x))
3260 case PRE_INC:
3261 case POST_INC:
3263 int size = GET_MODE_SIZE (GET_MODE (mem));
3264 rtx r1 = XEXP (x, 0);
3265 rtx c = gen_int_mode (size, GET_MODE (r1));
3266 return fn (mem, x, r1, r1, c, data);
3269 case PRE_DEC:
3270 case POST_DEC:
3272 int size = GET_MODE_SIZE (GET_MODE (mem));
3273 rtx r1 = XEXP (x, 0);
3274 rtx c = gen_int_mode (-size, GET_MODE (r1));
3275 return fn (mem, x, r1, r1, c, data);
3278 case PRE_MODIFY:
3279 case POST_MODIFY:
3281 rtx r1 = XEXP (x, 0);
3282 rtx add = XEXP (x, 1);
3283 return fn (mem, x, r1, add, NULL, data);
3286 default:
3287 gcc_unreachable ();
3291 /* Traverse X looking for MEMs that have autoinc addresses.
3292 For each such autoinc operation found, call FN, passing it
3293 the innermost enclosing MEM, the operation itself, the RTX modified
3294 by the operation, two RTXs (the second may be NULL) that, once
3295 added, represent the value to be held by the modified RTX
3296 afterwards, and DATA. FN is to return 0 to continue the
3297 traversal or any other value to have it returned to the caller of
3298 for_each_inc_dec. */
3301 for_each_inc_dec (rtx x,
3302 for_each_inc_dec_fn fn,
3303 void *data)
3305 subrtx_var_iterator::array_type array;
3306 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3308 rtx mem = *iter;
3309 if (mem
3310 && MEM_P (mem)
3311 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3313 int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3314 if (res != 0)
3315 return res;
3316 iter.skip_subrtxes ();
3319 return 0;
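
/* Illustrative sketch, not part of the original rtlanal.c: a callback that
   counts the auto-increment operations in an expression.  The helper and
   parameter names are hypothetical; the callback shape (five rtx arguments
   plus DATA, returning int) matches the calls made by
   for_each_inc_dec_find_inc_dec above.  */

static int
example_count_autoinc_1 (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
			 rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
			 rtx srcoff ATTRIBUTE_UNUSED, void *data)
{
  ++*(int *) data;
  return 0;	/* Zero continues the traversal.  */
}

static int
example_count_autoinc (rtx x)
{
  int count = 0;
  for_each_inc_dec (x, example_count_autoinc_1, &count);
  return count;
}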
3323 /* Searches X for any reference to REGNO, returning the rtx of the
3324 reference found if any. Otherwise, returns NULL_RTX. */
3327 regno_use_in (unsigned int regno, rtx x)
3329 const char *fmt;
3330 int i, j;
3331 rtx tem;
3333 if (REG_P (x) && REGNO (x) == regno)
3334 return x;
3336 fmt = GET_RTX_FORMAT (GET_CODE (x));
3337 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3339 if (fmt[i] == 'e')
3341 if ((tem = regno_use_in (regno, XEXP (x, i))))
3342 return tem;
3344 else if (fmt[i] == 'E')
3345 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3346 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3347 return tem;
3350 return NULL_RTX;
3353 /* Return a value indicating whether OP, an operand of a commutative
3354 operation, is preferred as the first or second operand. The more
3355 positive the value, the stronger the preference for being the first
3356 operand. */
3359 commutative_operand_precedence (rtx op)
3361 enum rtx_code code = GET_CODE (op);
3363 /* Constants always become the second operand. Prefer "nice" constants. */
3364 if (code == CONST_INT)
3365 return -8;
3366 if (code == CONST_WIDE_INT)
3367 return -8;
3368 if (code == CONST_DOUBLE)
3369 return -7;
3370 if (code == CONST_FIXED)
3371 return -7;
3372 op = avoid_constant_pool_reference (op);
3373 code = GET_CODE (op);
3375 switch (GET_RTX_CLASS (code))
3377 case RTX_CONST_OBJ:
3378 if (code == CONST_INT)
3379 return -6;
3380 if (code == CONST_WIDE_INT)
3381 return -6;
3382 if (code == CONST_DOUBLE)
3383 return -5;
3384 if (code == CONST_FIXED)
3385 return -5;
3386 return -4;
3388 case RTX_EXTRA:
3389 /* SUBREGs of objects should come second. */
3390 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3391 return -3;
3392 return 0;
3394 case RTX_OBJ:
3395 /* Complex expressions should come first, so decrease the priority
3396 of objects. Prefer pointer objects over non-pointer objects. */
3397 if ((REG_P (op) && REG_POINTER (op))
3398 || (MEM_P (op) && MEM_POINTER (op)))
3399 return -1;
3400 return -2;
3402 case RTX_COMM_ARITH:
3403 /* Prefer operands that are themselves commutative to be first.
3404 This helps to make things linear. In particular,
3405 (and (and (reg) (reg)) (not (reg))) is canonical. */
3406 return 4;
3408 case RTX_BIN_ARITH:
3409 /* If only one operand is a binary expression, it will be the first
3410 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3411 is canonical, although it will usually be further simplified. */
3412 return 2;
3414 case RTX_UNARY:
3415 /* Then prefer NEG and NOT. */
3416 if (code == NEG || code == NOT)
3417 return 1;
3419 default:
3420 return 0;
3424 /* Return 1 iff it is necessary to swap operands of commutative operation
3425 in order to canonicalize expression. */
3427 bool
3428 swap_commutative_operands_p (rtx x, rtx y)
3430 return (commutative_operand_precedence (x)
3431 < commutative_operand_precedence (y));
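
/* Illustrative sketch, not part of the original rtlanal.c: the usual way to
   canonicalize the operands of a commutative operation before building or
   matching rtl.  For instance, this turns (plus (const_int 4) (reg)) into
   (plus (reg) (const_int 4)).  Hypothetical helper; assumes std::swap is
   available, as it is elsewhere in GCC.  */

static void
example_canonicalize_operands (rtx *op0, rtx *op1)
{
  if (swap_commutative_operands_p (*op0, *op1))
    std::swap (*op0, *op1);
}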
3434 /* Return 1 if X is an autoincrement side effect and the register is
3435 not the stack pointer. */
3437 auto_inc_p (const_rtx x)
3439 switch (GET_CODE (x))
3441 case PRE_INC:
3442 case POST_INC:
3443 case PRE_DEC:
3444 case POST_DEC:
3445 case PRE_MODIFY:
3446 case POST_MODIFY:
3447 /* There are no REG_INC notes for SP. */
3448 if (XEXP (x, 0) != stack_pointer_rtx)
3449 return 1;
3450 default:
3451 break;
3453 return 0;
3456 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3458 loc_mentioned_in_p (rtx *loc, const_rtx in)
3460 enum rtx_code code;
3461 const char *fmt;
3462 int i, j;
3464 if (!in)
3465 return 0;
3467 code = GET_CODE (in);
3468 fmt = GET_RTX_FORMAT (code);
3469 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3471 if (fmt[i] == 'e')
3473 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3474 return 1;
3476 else if (fmt[i] == 'E')
3477 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3478 if (loc == &XVECEXP (in, i, j)
3479 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3480 return 1;
3482 return 0;
3485 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3486 and SUBREG_BYTE, return the bit offset where the subreg begins
3487 (counting from the least significant bit of the operand). */
3489 unsigned int
3490 subreg_lsb_1 (machine_mode outer_mode,
3491 machine_mode inner_mode,
3492 unsigned int subreg_byte)
3494 unsigned int bitpos;
3495 unsigned int byte;
3496 unsigned int word;
3498 /* A paradoxical subreg begins at bit position 0. */
3499 if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
3500 return 0;
3502 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3503 /* If the subreg crosses a word boundary ensure that
3504 it also begins and ends on a word boundary. */
3505 gcc_assert (!((subreg_byte % UNITS_PER_WORD
3506 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3507 && (subreg_byte % UNITS_PER_WORD
3508 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3510 if (WORDS_BIG_ENDIAN)
3511 word = (GET_MODE_SIZE (inner_mode)
3512 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3513 else
3514 word = subreg_byte / UNITS_PER_WORD;
3515 bitpos = word * BITS_PER_WORD;
3517 if (BYTES_BIG_ENDIAN)
3518 byte = (GET_MODE_SIZE (inner_mode)
3519 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3520 else
3521 byte = subreg_byte % UNITS_PER_WORD;
3522 bitpos += byte * BITS_PER_UNIT;
3524 return bitpos;
3527 /* Given a subreg X, return the bit offset where the subreg begins
3528 (counting from the least significant bit of the reg). */
3530 unsigned int
3531 subreg_lsb (const_rtx x)
3533 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3534 SUBREG_BYTE (x));
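
/* Worked example, not part of the original rtlanal.c.  On a little-endian
   target with 4-byte words (UNITS_PER_WORD == 4, !WORDS_BIG_ENDIAN,
   !BYTES_BIG_ENDIAN):

     subreg_lsb_1 (SImode, DImode, 4)
       word = 4 / 4 = 1   -> bitpos = 1 * BITS_PER_WORD = 32
       byte = 4 % 4 = 0   -> bitpos += 0 * BITS_PER_UNIT = 32

   so (subreg:SI (reg:DI R) 4) describes bits 32..63 of R.  On a fully
   big-endian target the same SUBREG_BYTE of 4 yields bit offset 0, i.e.
   the low half of R.  */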
3537 /* Fill in information about a subreg of a hard register.
3538 xregno - A regno of an inner hard subreg_reg (or what will become one).
3539 xmode - The mode of xregno.
3540 offset - The byte offset.
3541 ymode - The mode of a top level SUBREG (or what may become one).
3542 info - Pointer to structure to fill in.
3544 Rather than considering one particular inner register (and thus one
3545 particular "outer" register) in isolation, this function really uses
3546 XREGNO as a model for a sequence of isomorphic hard registers. Thus the
3547 function does not check whether adding INFO->offset to XREGNO gives
3548 a valid hard register; even if INFO->offset + XREGNO is out of range,
3549 there might be another register of the same type that is in range.
3550 Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
3551 register, since that can depend on things like whether the final
3552 register number is even or odd. Callers that want to check whether
3553 this particular subreg can be replaced by a simple (reg ...) should
3554 use simplify_subreg_regno. */
3556 void
3557 subreg_get_info (unsigned int xregno, machine_mode xmode,
3558 unsigned int offset, machine_mode ymode,
3559 struct subreg_info *info)
3561 int nregs_xmode, nregs_ymode;
3562 int mode_multiple, nregs_multiple;
3563 int offset_adj, y_offset, y_offset_adj;
3564 int regsize_xmode, regsize_ymode;
3565 bool rknown;
3567 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3569 rknown = false;
3571 /* If there are holes in a non-scalar mode in registers, we expect
3572 that it is made up of its units concatenated together. */
3573 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3575 machine_mode xmode_unit;
3577 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3578 if (GET_MODE_INNER (xmode) == VOIDmode)
3579 xmode_unit = xmode;
3580 else
3581 xmode_unit = GET_MODE_INNER (xmode);
3582 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3583 gcc_assert (nregs_xmode
3584 == (GET_MODE_NUNITS (xmode)
3585 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3586 gcc_assert (hard_regno_nregs[xregno][xmode]
3587 == (hard_regno_nregs[xregno][xmode_unit]
3588 * GET_MODE_NUNITS (xmode)));
3590 /* You can only ask for a SUBREG of a value with holes in the middle
3591 if you don't cross the holes. (Such a SUBREG should be done by
3592 picking a different register class, or doing it in memory if
3593 necessary.) An example of a value with holes is XCmode on 32-bit
3594 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3595 3 for each part, but in memory it's two 128-bit parts.
3596 Padding is assumed to be at the end (not necessarily the 'high part')
3597 of each unit. */
3598 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3599 < GET_MODE_NUNITS (xmode))
3600 && (offset / GET_MODE_SIZE (xmode_unit)
3601 != ((offset + GET_MODE_SIZE (ymode) - 1)
3602 / GET_MODE_SIZE (xmode_unit))))
3604 info->representable_p = false;
3605 rknown = true;
3608 else
3609 nregs_xmode = hard_regno_nregs[xregno][xmode];
3611 nregs_ymode = hard_regno_nregs[xregno][ymode];
3613 /* Paradoxical subregs are otherwise valid. */
3614 if (!rknown
3615 && offset == 0
3616 && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
3618 info->representable_p = true;
3619 /* If this is a big endian paradoxical subreg, which uses more
3620 actual hard registers than the original register, we must
3621 return a negative offset so that we find the proper highpart
3622 of the register. */
3623 if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3624 ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
3625 info->offset = nregs_xmode - nregs_ymode;
3626 else
3627 info->offset = 0;
3628 info->nregs = nregs_ymode;
3629 return;
3632 /* If registers store different numbers of bits in the different
3633 modes, we cannot generally form this subreg. */
3634 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3635 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3636 && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
3637 && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
3639 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3640 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3641 if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
3643 info->representable_p = false;
3644 info->nregs
3645 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3646 info->offset = offset / regsize_xmode;
3647 return;
3649 if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
3651 info->representable_p = false;
3652 info->nregs
3653 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3654 info->offset = offset / regsize_xmode;
3655 return;
3657 /* Quick exit for the simple and common case of extracting whole
3658 subregisters from a multiregister value. */
3659 /* ??? It would be better to integrate this into the code below,
3660 if we can generalize the concept enough and figure out how
3661 odd-sized modes can coexist with the other weird cases we support. */
3662 if (!rknown
3663 && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
3664 && regsize_xmode == regsize_ymode
3665 && (offset % regsize_ymode) == 0)
3667 info->representable_p = true;
3668 info->nregs = nregs_ymode;
3669 info->offset = offset / regsize_ymode;
3670 gcc_assert (info->offset + info->nregs <= nregs_xmode);
3671 return;
3675 /* Lowpart subregs are otherwise valid. */
3676 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3678 info->representable_p = true;
3679 rknown = true;
3681 if (offset == 0 || nregs_xmode == nregs_ymode)
3683 info->offset = 0;
3684 info->nregs = nregs_ymode;
3685 return;
3689 /* This should always pass, otherwise we don't know how to verify
3690 the constraint. These conditions may be relaxed but
3691 subreg_regno_offset would need to be redesigned. */
3692 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3693 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3695 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
3696 && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
3698 HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
3699 HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
3700 HOST_WIDE_INT off_low = offset & (ysize - 1);
3701 HOST_WIDE_INT off_high = offset & ~(ysize - 1);
3702 offset = (xsize - ysize - off_high) | off_low;
3704 /* The XMODE value can be seen as a vector of NREGS_XMODE
3705 values. The subreg must represent a lowpart of given field.
3706 Compute what field it is. */
3707 offset_adj = offset;
3708 offset_adj -= subreg_lowpart_offset (ymode,
3709 mode_for_size (GET_MODE_BITSIZE (xmode)
3710 / nregs_xmode,
3711 MODE_INT, 0));
3713 /* Size of ymode must not be greater than the size of xmode. */
3714 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3715 gcc_assert (mode_multiple != 0);
3717 y_offset = offset / GET_MODE_SIZE (ymode);
3718 y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
3719 nregs_multiple = nregs_xmode / nregs_ymode;
3721 gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
3722 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3724 if (!rknown)
3726 info->representable_p = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
3727 rknown = true;
3729 info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
3730 info->nregs = nregs_ymode;
3733 /* This function returns the regno offset of a subreg expression.
3734 xregno - A regno of an inner hard subreg_reg (or what will become one).
3735 xmode - The mode of xregno.
3736 offset - The byte offset.
3737 ymode - The mode of a top level SUBREG (or what may become one).
3738 RETURN - The regno offset which would be used. */
3739 unsigned int
3740 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
3741 unsigned int offset, machine_mode ymode)
3743 struct subreg_info info;
3744 subreg_get_info (xregno, xmode, offset, ymode, &info);
3745 return info.offset;
3748 /* This function returns true when the offset is representable via
3749 subreg_offset in the given regno.
3750 xregno - A regno of an inner hard subreg_reg (or what will become one).
3751 xmode - The mode of xregno.
3752 offset - The byte offset.
3753 ymode - The mode of a top level SUBREG (or what may become one).
3754 RETURN - Whether the offset is representable. */
3755 bool
3756 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
3757 unsigned int offset, machine_mode ymode)
3759 struct subreg_info info;
3760 subreg_get_info (xregno, xmode, offset, ymode, &info);
3761 return info.representable_p;
3764 /* Return the number of a YMODE register to which
3766 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3768 can be simplified. Return -1 if the subreg can't be simplified.
3770 XREGNO is a hard register number. */
3773 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
3774 unsigned int offset, machine_mode ymode)
3776 struct subreg_info info;
3777 unsigned int yregno;
3779 #ifdef CANNOT_CHANGE_MODE_CLASS
3780 /* Give the backend a chance to disallow the mode change. */
3781 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3782 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3783 && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
3784 /* We can use mode change in LRA for some transformations. */
3785 && ! lra_in_progress)
3786 return -1;
3787 #endif
3789 /* We shouldn't simplify stack-related registers. */
3790 if ((!reload_completed || frame_pointer_needed)
3791 && xregno == FRAME_POINTER_REGNUM)
3792 return -1;
3794 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3795 && xregno == ARG_POINTER_REGNUM)
3796 return -1;
3798 if (xregno == STACK_POINTER_REGNUM
3799 /* We should convert hard stack register in LRA if it is
3800 possible. */
3801 && ! lra_in_progress)
3802 return -1;
3804 /* Try to get the register offset. */
3805 subreg_get_info (xregno, xmode, offset, ymode, &info);
3806 if (!info.representable_p)
3807 return -1;
3809 /* Make sure that the offsetted register value is in range. */
3810 yregno = xregno + info.offset;
3811 if (!HARD_REGISTER_NUM_P (yregno))
3812 return -1;
3814 /* See whether (reg:YMODE YREGNO) is valid.
3816 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3817 This is a kludge to work around how complex FP arguments are passed
3818 on IA-64 and should be fixed. See PR target/49226. */
3819 if (!HARD_REGNO_MODE_OK (yregno, ymode)
3820 && HARD_REGNO_MODE_OK (xregno, xmode))
3821 return -1;
3823 return (int) yregno;
3826 /* Return the final regno that a subreg expression refers to. */
3827 unsigned int
3828 subreg_regno (const_rtx x)
3830 unsigned int ret;
3831 rtx subreg = SUBREG_REG (x);
3832 int regno = REGNO (subreg);
3834 ret = regno + subreg_regno_offset (regno,
3835 GET_MODE (subreg),
3836 SUBREG_BYTE (x),
3837 GET_MODE (x));
3838 return ret;
3842 /* Return the number of registers that a subreg expression refers
3843 to. */
3844 unsigned int
3845 subreg_nregs (const_rtx x)
3847 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3850 /* Return the number of registers that a subreg expression with register
3851 number REGNO refers to. This is a copy of subreg_nregs above, changed
3852 so that the regno can be passed in. */
3854 unsigned int
3855 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3857 struct subreg_info info;
3858 rtx subreg = SUBREG_REG (x);
3860 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3861 &info);
3862 return info.nregs;
3866 struct parms_set_data
3868 int nregs;
3869 HARD_REG_SET regs;
3872 /* Helper function for noticing stores to parameter registers. */
3873 static void
3874 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3876 struct parms_set_data *const d = (struct parms_set_data *) data;
3877 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3878 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3880 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3881 d->nregs--;
3885 /* Look backward for the first parameter to be loaded.
3886 Note that loads of all parameters will not necessarily be
3887 found if CSE has eliminated some of them (e.g., an argument
3888 to the outer function is passed down as a parameter).
3889 Do not skip BOUNDARY. */
3890 rtx_insn *
3891 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
3893 struct parms_set_data parm;
3894 rtx p;
3895 rtx_insn *before, *first_set;
3897 /* Since different machines initialize their parameter registers
3898 in different orders, assume nothing. Collect the set of all
3899 parameter registers. */
3900 CLEAR_HARD_REG_SET (parm.regs);
3901 parm.nregs = 0;
3902 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3903 if (GET_CODE (XEXP (p, 0)) == USE
3904 && REG_P (XEXP (XEXP (p, 0), 0)))
3906 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3908 /* We only care about registers which can hold function
3909 arguments. */
3910 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3911 continue;
3913 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3914 parm.nregs++;
3916 before = call_insn;
3917 first_set = call_insn;
3919 /* Search backward for the first set of a register in this set. */
3920 while (parm.nregs && before != boundary)
3922 before = PREV_INSN (before);
3924 /* It is possible that some loads got CSEed from one call to
3925 another. Stop in that case. */
3926 if (CALL_P (before))
3927 break;
3929 /* Our caller must either ensure that we will find all sets
3930 (in case the code has not been optimized yet), or take care
3931 of possible labels by setting BOUNDARY to the preceding
3932 CODE_LABEL. */
3933 if (LABEL_P (before))
3935 gcc_assert (before == boundary);
3936 break;
3939 if (INSN_P (before))
3941 int nregs_old = parm.nregs;
3942 note_stores (PATTERN (before), parms_set, &parm);
3943 /* If we found something that did not set a parameter reg,
3944 we're done. Do not keep going, as that might result
3945 in hoisting an insn before the setting of a pseudo
3946 that is used by the hoisted insn. */
3947 if (nregs_old != parm.nregs)
3948 first_set = before;
3949 else
3950 break;
3953 return first_set;
3956 /* Return true if we should avoid inserting code between INSN and preceding
3957 call instruction. */
3959 bool
3960 keep_with_call_p (const rtx_insn *insn)
3962 rtx set;
3964 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3966 if (REG_P (SET_DEST (set))
3967 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3968 && fixed_regs[REGNO (SET_DEST (set))]
3969 && general_operand (SET_SRC (set), VOIDmode))
3970 return true;
3971 if (REG_P (SET_SRC (set))
3972 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
3973 && REG_P (SET_DEST (set))
3974 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3975 return true;
3976 /* There may be a stack pop just after the call and before the store
3977 of the return register. Search for the actual store when deciding
3978 if we can break or not. */
3979 if (SET_DEST (set) == stack_pointer_rtx)
3981 /* This CONST_CAST is okay because next_nonnote_insn just
3982 returns its argument and we assign it to a const_rtx
3983 variable. */
3984 const rtx_insn *i2
3985 = next_nonnote_insn (const_cast<rtx_insn *> (insn));
3986 if (i2 && keep_with_call_p (i2))
3987 return true;
3990 return false;
3993 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3994 to non-complex jumps. That is, direct unconditional, conditional,
3995 and tablejumps, but not computed jumps or returns. It also does
3996 not apply to the fallthru case of a conditional jump. */
3998 bool
3999 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
4001 rtx tmp = JUMP_LABEL (jump_insn);
4002 rtx_jump_table_data *table;
4004 if (label == tmp)
4005 return true;
4007 if (tablejump_p (jump_insn, NULL, &table))
4009 rtvec vec = table->get_labels ();
4010 int i, veclen = GET_NUM_ELEM (vec);
4012 for (i = 0; i < veclen; ++i)
4013 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4014 return true;
4017 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4018 return true;
4020 return false;
4024 /* Return an estimate of the cost of computing rtx X.
4025 One use is in cse, to decide which expression to keep in the hash table.
4026 Another is in rtl generation, to pick the cheapest way to multiply.
4027 Other uses like the latter are expected in the future.
4029 X appears as operand OPNO in an expression with code OUTER_CODE.
4030 SPEED specifies whether costs optimized for speed or size should
4031 be returned. */
4034 rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
4036 int i, j;
4037 enum rtx_code code;
4038 const char *fmt;
4039 int total;
4040 int factor;
4042 if (x == 0)
4043 return 0;
4045 /* A size N times larger than UNITS_PER_WORD likely needs N times as
4046 many insns, taking N times as long. */
4047 factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
4048 if (factor == 0)
4049 factor = 1;
4051 /* Compute the default costs of certain things.
4052 Note that targetm.rtx_costs can override the defaults. */
4054 code = GET_CODE (x);
4055 switch (code)
4057 case MULT:
4058 /* Multiplication has time-complexity O(N*N), where N is the
4059 number of units (translated from digits) when using
4060 schoolbook long multiplication. */
4061 total = factor * factor * COSTS_N_INSNS (5);
4062 break;
4063 case DIV:
4064 case UDIV:
4065 case MOD:
4066 case UMOD:
4067 /* Similarly, complexity for schoolbook long division. */
4068 total = factor * factor * COSTS_N_INSNS (7);
4069 break;
4070 case USE:
4071 /* Used in combine.c as a marker. */
4072 total = 0;
4073 break;
4074 case SET:
4075 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4076 the mode for the factor. */
4077 factor = GET_MODE_SIZE (GET_MODE (SET_DEST (x))) / UNITS_PER_WORD;
4078 if (factor == 0)
4079 factor = 1;
4080 /* Pass through. */
4081 default:
4082 total = factor * COSTS_N_INSNS (1);
4085 switch (code)
4087 case REG:
4088 return 0;
4090 case SUBREG:
4091 total = 0;
4092 /* If we can't tie these modes, make this expensive. The larger
4093 the mode, the more expensive it is. */
4094 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
4095 return COSTS_N_INSNS (2 + factor);
4096 break;
4098 default:
4099 if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
4100 return total;
4101 break;
4104 /* Sum the costs of the sub-rtx's, plus cost of this operation,
4105 which is already in total. */
4107 fmt = GET_RTX_FORMAT (code);
4108 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4109 if (fmt[i] == 'e')
4110 total += rtx_cost (XEXP (x, i), code, i, speed);
4111 else if (fmt[i] == 'E')
4112 for (j = 0; j < XVECLEN (x, i); j++)
4113 total += rtx_cost (XVECEXP (x, i, j), code, i, speed);
4115 return total;
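
/* Illustrative sketch, not part of the original rtlanal.c: comparing two
   candidate expressions by cost, the way cse-style code decides which form
   to keep.  OUTER_CODE is SET and OPNO is 1 because each expression is
   costed as the source operand of a SET.  Hypothetical helper; assumes
   rtl.h is in scope.  */

static rtx
example_cheaper_set_src (rtx a, rtx b, bool speed)
{
  return (rtx_cost (a, SET, 1, speed) <= rtx_cost (b, SET, 1, speed)
	  ? a : b);
}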
4118 /* Fill in the structure C with information about both speed and size rtx
4119 costs for X, which is operand OPNO in an expression with code OUTER. */
4121 void
4122 get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
4123 struct full_rtx_costs *c)
4125 c->speed = rtx_cost (x, outer, opno, true);
4126 c->size = rtx_cost (x, outer, opno, false);
4130 /* Return cost of address expression X.
4131 Expect that X is a properly formed address reference.
4133 The SPEED parameter specifies whether costs optimized for speed or size should
4134 be returned. */
4137 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4139 /* We may be asked for the cost of various unusual addresses, such as the
4140 operands of a push instruction. It is not worthwhile to complicate
4141 the target hook for such cases.
4143 if (!memory_address_addr_space_p (mode, x, as))
4144 return 1000;
4146 return targetm.address_cost (x, mode, as, speed);
4149 /* If the target doesn't override, compute the cost as with arithmetic. */
4152 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4154 return rtx_cost (x, MEM, 0, speed);
4158 unsigned HOST_WIDE_INT
4159 nonzero_bits (const_rtx x, machine_mode mode)
4161 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
4164 unsigned int
4165 num_sign_bit_copies (const_rtx x, machine_mode mode)
4167 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
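
/* Worked examples, not part of the original rtlanal.c, assuming 32-bit
   SImode:

     nonzero_bits ((and:SI (reg:SI R) (const_int 255)), SImode)
       is a subset of 255: only the low 8 bits can possibly be set.

     num_sign_bit_copies ((sign_extend:SI (reg:QI R)), SImode) >= 25
       the top 24 bits duplicate bit 7, and bit 7 itself also counts.  */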
4170 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4171 It avoids exponential behavior in nonzero_bits1 when X has
4172 identical subexpressions on the first or the second level. */
4174 static unsigned HOST_WIDE_INT
4175 cached_nonzero_bits (const_rtx x, machine_mode mode, const_rtx known_x,
4176 machine_mode known_mode,
4177 unsigned HOST_WIDE_INT known_ret)
4179 if (x == known_x && mode == known_mode)
4180 return known_ret;
4182 /* Try to find identical subexpressions. If found call
4183 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4184 precomputed value for the subexpression as KNOWN_RET. */
4186 if (ARITHMETIC_P (x))
4188 rtx x0 = XEXP (x, 0);
4189 rtx x1 = XEXP (x, 1);
4191 /* Check the first level. */
4192 if (x0 == x1)
4193 return nonzero_bits1 (x, mode, x0, mode,
4194 cached_nonzero_bits (x0, mode, known_x,
4195 known_mode, known_ret));
4197 /* Check the second level. */
4198 if (ARITHMETIC_P (x0)
4199 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4200 return nonzero_bits1 (x, mode, x1, mode,
4201 cached_nonzero_bits (x1, mode, known_x,
4202 known_mode, known_ret));
4204 if (ARITHMETIC_P (x1)
4205 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4206 return nonzero_bits1 (x, mode, x0, mode,
4207 cached_nonzero_bits (x0, mode, known_x,
4208 known_mode, known_ret));
4211 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4214 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4215 We don't let nonzero_bits recur into num_sign_bit_copies, because that
4216 is less useful. We can't allow both, because that results in exponential
4217 run time recursion. There is a nullstone testcase that triggered
4218 this. This macro avoids accidental uses of num_sign_bit_copies. */
4219 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4221 /* Given an expression, X, compute which bits in X can be nonzero.
4222 We don't care about bits outside of those defined in MODE.
4224 For most X this is simply GET_MODE_MASK (MODE), but if X is
4225 an arithmetic operation, we can do better. */
4227 static unsigned HOST_WIDE_INT
4228 nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
4229 machine_mode known_mode,
4230 unsigned HOST_WIDE_INT known_ret)
4232 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4233 unsigned HOST_WIDE_INT inner_nz;
4234 enum rtx_code code;
4235 machine_mode inner_mode;
4236 unsigned int mode_width = GET_MODE_PRECISION (mode);
4238 /* For floating-point and vector values, assume all bits are needed. */
4239 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
4240 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4241 return nonzero;
4243 /* If X is wider than MODE, use its mode instead. */
4244 if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
4246 mode = GET_MODE (x);
4247 nonzero = GET_MODE_MASK (mode);
4248 mode_width = GET_MODE_PRECISION (mode);
4251 if (mode_width > HOST_BITS_PER_WIDE_INT)
4252 /* Our only callers in this case look for single bit values. So
4253 just return the mode mask. Those tests will then be false. */
4254 return nonzero;
4256 #ifndef WORD_REGISTER_OPERATIONS
4257 /* If MODE is wider than X, but both are a single word for both the host
4258 and target machines, we can compute this from which bits of the
4259 object might be nonzero in its own mode, taking into account the fact
4260 that on many CISC machines, accessing an object in a wider mode
4261 causes the high-order bits to become undefined. So they are
4262 not known to be zero. */
4264 if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
4265 && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
4266 && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
4267 && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
4269 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
4270 known_x, known_mode, known_ret);
4271 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
4272 return nonzero;
4274 #endif
4276 code = GET_CODE (x);
4277 switch (code)
4279 case REG:
4280 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4281 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4282 all the bits above ptr_mode are known to be zero. */
4283 /* As we do not know which address space the pointer is referring to,
4284 we can do this only if the target does not support different pointer
4285 or address modes depending on the address space. */
4286 if (target_default_pointer_address_modes_p ()
4287 && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4288 && REG_POINTER (x))
4289 nonzero &= GET_MODE_MASK (ptr_mode);
4290 #endif
4292 /* Include declared information about alignment of pointers. */
4293 /* ??? We don't properly preserve REG_POINTER changes across
4294 pointer-to-integer casts, so we can't trust it except for
4295 things that we know must be pointers. See execute/960116-1.c. */
4296 if ((x == stack_pointer_rtx
4297 || x == frame_pointer_rtx
4298 || x == arg_pointer_rtx)
4299 && REGNO_POINTER_ALIGN (REGNO (x)))
4301 unsigned HOST_WIDE_INT alignment
4302 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4304 #ifdef PUSH_ROUNDING
4305 /* If PUSH_ROUNDING is defined, it is possible for the
4306 stack to be momentarily aligned only to that amount,
4307 so we pick the least alignment. */
4308 if (x == stack_pointer_rtx && PUSH_ARGS)
4309 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
4310 alignment);
4311 #endif
4313 nonzero &= ~(alignment - 1);
4317 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4318 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
4319 known_mode, known_ret,
4320 &nonzero_for_hook);
4322 if (new_rtx)
4323 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4324 known_mode, known_ret);
4326 return nonzero_for_hook;
4329 case CONST_INT:
4330 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
4331 /* If X is negative in MODE, sign-extend the value. */
4332 if (INTVAL (x) > 0
4333 && mode_width < BITS_PER_WORD
4334 && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
4335 != 0)
4336 return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4337 #endif
4339 return UINTVAL (x);
4341 case MEM:
4342 #ifdef LOAD_EXTEND_OP
4343 /* In many, if not most, RISC machines, reading a byte from memory
4344 zeros the rest of the register. Noticing that fact saves a lot
4345 of extra zero-extends. */
4346 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
4347 nonzero &= GET_MODE_MASK (GET_MODE (x));
4348 #endif
4349 break;
4351 case EQ: case NE:
4352 case UNEQ: case LTGT:
4353 case GT: case GTU: case UNGT:
4354 case LT: case LTU: case UNLT:
4355 case GE: case GEU: case UNGE:
4356 case LE: case LEU: case UNLE:
4357 case UNORDERED: case ORDERED:
4358 /* If this produces an integer result, we know which bits are set.
4359 Code here used to clear bits outside the mode of X, but that is
4360 now done above. */
4361 /* Mind that MODE is the mode the caller wants to look at this
4362 operation in, and not the actual operation mode. We can wind
4363 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4364 that describes the results of a vector compare. */
4365 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
4366 && mode_width <= HOST_BITS_PER_WIDE_INT)
4367 nonzero = STORE_FLAG_VALUE;
4368 break;
4370 case NEG:
4371 #if 0
4372 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4373 and num_sign_bit_copies. */
4374 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4375 == GET_MODE_PRECISION (GET_MODE (x)))
4376 nonzero = 1;
4377 #endif
4379 if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
4380 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
4381 break;
4383 case ABS:
4384 #if 0
4385 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4386 and num_sign_bit_copies. */
4387 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4388 == GET_MODE_PRECISION (GET_MODE (x)))
4389 nonzero = 1;
4390 #endif
4391 break;
4393 case TRUNCATE:
4394 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4395 known_x, known_mode, known_ret)
4396 & GET_MODE_MASK (mode));
4397 break;
4399 case ZERO_EXTEND:
4400 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4401 known_x, known_mode, known_ret);
4402 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4403 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4404 break;
4406 case SIGN_EXTEND:
4407 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4408 Otherwise, show that all the bits in the outer mode but not in the
4409 inner mode may be nonzero. */
4410 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4411 known_x, known_mode, known_ret);
4412 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4414 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4415 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4416 inner_nz |= (GET_MODE_MASK (mode)
4417 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4420 nonzero &= inner_nz;
4421 break;
4423 case AND:
4424 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4425 known_x, known_mode, known_ret)
4426 & cached_nonzero_bits (XEXP (x, 1), mode,
4427 known_x, known_mode, known_ret);
4428 break;
4430 case XOR: case IOR:
4431 case UMIN: case UMAX: case SMIN: case SMAX:
4433 unsigned HOST_WIDE_INT nonzero0
4434 = cached_nonzero_bits (XEXP (x, 0), mode,
4435 known_x, known_mode, known_ret);
4437 /* Don't call nonzero_bits for the second time if it cannot change
4438 anything. */
4439 if ((nonzero & nonzero0) != nonzero)
4440 nonzero &= nonzero0
4441 | cached_nonzero_bits (XEXP (x, 1), mode,
4442 known_x, known_mode, known_ret);
4444 break;
4446 case PLUS: case MINUS:
4447 case MULT:
4448 case DIV: case UDIV:
4449 case MOD: case UMOD:
4450 /* We can apply the rules of arithmetic to compute the number of
4451 high- and low-order zero bits of these operations. We start by
4452 computing the width (position of the highest-order nonzero bit)
4453 and the number of low-order zero bits for each value. */
4455 unsigned HOST_WIDE_INT nz0
4456 = cached_nonzero_bits (XEXP (x, 0), mode,
4457 known_x, known_mode, known_ret);
4458 unsigned HOST_WIDE_INT nz1
4459 = cached_nonzero_bits (XEXP (x, 1), mode,
4460 known_x, known_mode, known_ret);
4461 int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
4462 int width0 = floor_log2 (nz0) + 1;
4463 int width1 = floor_log2 (nz1) + 1;
4464 int low0 = floor_log2 (nz0 & -nz0);
4465 int low1 = floor_log2 (nz1 & -nz1);
4466 unsigned HOST_WIDE_INT op0_maybe_minusp
4467 = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4468 unsigned HOST_WIDE_INT op1_maybe_minusp
4469 = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4470 unsigned int result_width = mode_width;
4471 int result_low = 0;
4473 switch (code)
4475 case PLUS:
4476 result_width = MAX (width0, width1) + 1;
4477 result_low = MIN (low0, low1);
4478 break;
4479 case MINUS:
4480 result_low = MIN (low0, low1);
4481 break;
4482 case MULT:
4483 result_width = width0 + width1;
4484 result_low = low0 + low1;
4485 break;
4486 case DIV:
4487 if (width1 == 0)
4488 break;
4489 if (!op0_maybe_minusp && !op1_maybe_minusp)
4490 result_width = width0;
4491 break;
4492 case UDIV:
4493 if (width1 == 0)
4494 break;
4495 result_width = width0;
4496 break;
4497 case MOD:
4498 if (width1 == 0)
4499 break;
4500 if (!op0_maybe_minusp && !op1_maybe_minusp)
4501 result_width = MIN (width0, width1);
4502 result_low = MIN (low0, low1);
4503 break;
4504 case UMOD:
4505 if (width1 == 0)
4506 break;
4507 result_width = MIN (width0, width1);
4508 result_low = MIN (low0, low1);
4509 break;
4510 default:
4511 gcc_unreachable ();
4514 if (result_width < mode_width)
4515 nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;
4517 if (result_low > 0)
4518 nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
4520 break;
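/* Illustrative worked example, not part of the original source: for
   (plus:SI X Y) where X's nonzero bits are 0xff (width0 = 8, low0 = 0)
   and Y is (const_int 4) (width1 = 3, low1 = 2), the code above gives
   result_width = MAX (8, 3) + 1 = 9 and result_low = MIN (0, 2) = 0,
   so NONZERO is masked down to 0x1ff.  */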
4522 case ZERO_EXTRACT:
4523 if (CONST_INT_P (XEXP (x, 1))
4524 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4525 nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
4526 break;
4528 case SUBREG:
4529 /* If this is a SUBREG formed for a promoted variable that has
4530 been zero-extended, we know that at least the high-order bits
4531 are zero, though others might be too. */
4533 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
4534 nonzero = GET_MODE_MASK (GET_MODE (x))
4535 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
4536 known_x, known_mode, known_ret);
4538 inner_mode = GET_MODE (SUBREG_REG (x));
4539 /* If the inner mode is a single word for both the host and target
4540 machines, we can compute this from which bits of the inner
4541 object might be nonzero. */
4542 if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
4543 && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
4545 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4546 known_x, known_mode, known_ret);
4548 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
4549 /* If this is a typical RISC machine, we only have to worry
4550 about the way loads are extended. */
4551 if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
4552 ? val_signbit_known_set_p (inner_mode, nonzero)
4553 : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
4554 || !MEM_P (SUBREG_REG (x)))
4555 #endif
4557 /* On many CISC machines, accessing an object in a wider mode
4558 causes the high-order bits to become undefined. So they are
4559 not known to be zero. */
4560 if (GET_MODE_PRECISION (GET_MODE (x))
4561 > GET_MODE_PRECISION (inner_mode))
4562 nonzero |= (GET_MODE_MASK (GET_MODE (x))
4563 & ~GET_MODE_MASK (inner_mode));
4566 break;
4568 case ASHIFTRT:
4569 case LSHIFTRT:
4570 case ASHIFT:
4571 case ROTATE:
4572 /* The nonzero bits are in two classes: any bits within MODE
4573 that aren't in GET_MODE (x) are always significant. The rest of the
4574 nonzero bits are those that are significant in the operand of
4575 the shift when shifted the appropriate number of bits. This
4576 shows that high-order bits are cleared by the right shift and
4577 low-order bits by left shifts. */
4578 if (CONST_INT_P (XEXP (x, 1))
4579 && INTVAL (XEXP (x, 1)) >= 0
4580 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4581 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4583 machine_mode inner_mode = GET_MODE (x);
4584 unsigned int width = GET_MODE_PRECISION (inner_mode);
4585 int count = INTVAL (XEXP (x, 1));
4586 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
4587 unsigned HOST_WIDE_INT op_nonzero
4588 = cached_nonzero_bits (XEXP (x, 0), mode,
4589 known_x, known_mode, known_ret);
4590 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4591 unsigned HOST_WIDE_INT outer = 0;
4593 if (mode_width > width)
4594 outer = (op_nonzero & nonzero & ~mode_mask);
4596 if (code == LSHIFTRT)
4597 inner >>= count;
4598 else if (code == ASHIFTRT)
4600 inner >>= count;
4602 /* If the sign bit may have been nonzero before the shift, we
4603 need to mark all the places it could have been copied to
4604 by the shift as possibly nonzero. */
4605 if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
4606 inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
4607 << (width - count);
4609 else if (code == ASHIFT)
4610 inner <<= count;
4611 else
4612 inner = ((inner << (count % width)
4613 | (inner >> (width - (count % width)))) & mode_mask);
4615 nonzero &= (outer | inner);
4617 break;
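/* Illustrative worked example, not part of the original source: for
   (lshiftrt:SI X (const_int 4)) where X's nonzero bits are 0xff,
   INNER becomes 0xff >> 4 = 0x0f, so only the low four bits of the
   result can be nonzero; for (ashift:SI X (const_int 4)) the same X
   gives INNER = 0xff << 4 = 0xff0.  */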
4619 case FFS:
4620 case POPCOUNT:
4621 /* This is at most the number of bits in the mode. */
4622 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4623 break;
4625 case CLZ:
4626 /* If CLZ has a known value at zero, then the nonzero bits are
4627 that value, plus the number of bits in the mode minus one. */
4628 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4629 nonzero
4630 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4631 else
4632 nonzero = -1;
4633 break;
4635 case CTZ:
4636 /* If CTZ has a known value at zero, then the nonzero bits are
4637 that value, plus the number of bits in the mode minus one. */
4638 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4639 nonzero
4640 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4641 else
4642 nonzero = -1;
4643 break;
4645 case CLRSB:
4646 /* This is at most the number of bits in the mode minus 1. */
4647 nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4648 break;
4650 case PARITY:
4651 nonzero = 1;
4652 break;
4654 case IF_THEN_ELSE:
4656 unsigned HOST_WIDE_INT nonzero_true
4657 = cached_nonzero_bits (XEXP (x, 1), mode,
4658 known_x, known_mode, known_ret);
4660 /* Don't call nonzero_bits for the second time if it cannot change
4661 anything. */
4662 if ((nonzero & nonzero_true) != nonzero)
4663 nonzero &= nonzero_true
4664 | cached_nonzero_bits (XEXP (x, 2), mode,
4665 known_x, known_mode, known_ret);
4667 break;
4669 default:
4670 break;
4673 return nonzero;
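/* Illustrative use of the nonzero_bits wrapper that feeds this routine
   (a sketch, not part of the original source): a caller that wants to
   prove a register value fits in a byte might test

     if ((nonzero_bits (reg, SImode) & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
       ... the value of REG is known to fit in 8 bits ...

   since any bit cleared in the returned mask is guaranteed to be zero.  */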
4676 /* See the macro definition above. */
4677 #undef cached_num_sign_bit_copies
4680 /* The function cached_num_sign_bit_copies is a wrapper around
4681 num_sign_bit_copies1. It avoids exponential behavior in
4682 num_sign_bit_copies1 when X has identical subexpressions on the
4683 first or the second level. */
4685 static unsigned int
4686 cached_num_sign_bit_copies (const_rtx x, machine_mode mode, const_rtx known_x,
4687 machine_mode known_mode,
4688 unsigned int known_ret)
4690 if (x == known_x && mode == known_mode)
4691 return known_ret;
4693 /* Try to find identical subexpressions. If found call
4694 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4695 the precomputed value for the subexpression as KNOWN_RET. */
4697 if (ARITHMETIC_P (x))
4699 rtx x0 = XEXP (x, 0);
4700 rtx x1 = XEXP (x, 1);
4702 /* Check the first level. */
4703 if (x0 == x1)
4704 return
4705 num_sign_bit_copies1 (x, mode, x0, mode,
4706 cached_num_sign_bit_copies (x0, mode, known_x,
4707 known_mode,
4708 known_ret));
4710 /* Check the second level. */
4711 if (ARITHMETIC_P (x0)
4712 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4713 return
4714 num_sign_bit_copies1 (x, mode, x1, mode,
4715 cached_num_sign_bit_copies (x1, mode, known_x,
4716 known_mode,
4717 known_ret));
4719 if (ARITHMETIC_P (x1)
4720 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4721 return
4722 num_sign_bit_copies1 (x, mode, x0, mode,
4723 cached_num_sign_bit_copies (x0, mode, known_x,
4724 known_mode,
4725 known_ret));
4728 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
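/* Illustrative example, not part of the original source: for
   (plus:SI (reg:SI R) (reg:SI R)) the two operands are the same rtx,
   so the X0 == X1 test above computes the count for R once and passes
   it down as KNOWN_X/KNOWN_RET; when num_sign_bit_copies1 recurses
   into the second operand, the "x == known_x && mode == known_mode"
   shortcut returns the cached value instead of walking R again.  */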
4731 /* Return the number of bits at the high-order end of X that are known to
4732 be equal to the sign bit. X will be used in mode MODE; if MODE is
4733 VOIDmode, X will be used in its own mode. The returned value will always
4734 be between 1 and the number of bits in MODE. */
4736 static unsigned int
4737 num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x,
4738 machine_mode known_mode,
4739 unsigned int known_ret)
4741 enum rtx_code code = GET_CODE (x);
4742 unsigned int bitwidth = GET_MODE_PRECISION (mode);
4743 int num0, num1, result;
4744 unsigned HOST_WIDE_INT nonzero;
4746 /* If we weren't given a mode, use the mode of X. If the mode is still
4747 VOIDmode, we don't know anything. Likewise if one of the modes is
4748 floating-point. */
4750 if (mode == VOIDmode)
4751 mode = GET_MODE (x);
4753 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
4754 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4755 return 1;
4757 /* For a smaller object, just ignore the high bits. */
4758 if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
4760 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4761 known_x, known_mode, known_ret);
4762 return MAX (1,
4763 num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
4766 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
4768 #ifndef WORD_REGISTER_OPERATIONS
4769 /* If this machine does not do all register operations on the entire
4770 register and MODE is wider than the mode of X, we can say nothing
4771 at all about the high-order bits. */
4772 return 1;
4773 #else
4774 /* Likewise on machines that do, if the mode of the object is smaller
4775 than a word and loads of that size don't sign extend, we can say
4776 nothing about the high order bits. */
4777 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
4778 #ifdef LOAD_EXTEND_OP
4779 && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
4780 #endif
4782 return 1;
4783 #endif
4786 switch (code)
4788 case REG:
4790 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4791 /* If pointers extend signed and this is a pointer in Pmode, say that
4792 all the bits above ptr_mode are known to be sign bit copies. */
4793 /* As we do not know which address space the pointer is referring to,
4794 we can do this only if the target does not support different pointer
4795 or address modes depending on the address space. */
4796 if (target_default_pointer_address_modes_p ()
4797 && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4798 && mode == Pmode && REG_POINTER (x))
4799 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
4800 #endif
4803 unsigned int copies_for_hook = 1, copies = 1;
4804 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4805 known_mode, known_ret,
4806 &copies_for_hook);
4808 if (new_rtx)
4809 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
4810 known_mode, known_ret);
4812 if (copies > 1 || copies_for_hook > 1)
4813 return MAX (copies, copies_for_hook);
4815 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4817 break;
4819 case MEM:
4820 #ifdef LOAD_EXTEND_OP
4821 /* Some RISC machines sign-extend all loads of smaller than a word. */
4822 if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
4823 return MAX (1, ((int) bitwidth
4824 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
4825 #endif
4826 break;
4828 case CONST_INT:
4829 /* If the constant is negative, take its 1's complement and remask.
4830 Then see how many zero bits we have. */
4831 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
4832 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4833 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4834 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4836 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
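/* Illustrative worked example, not part of the original source: for
   (const_int -4) looked at in a 32-bit MODE, NONZERO starts as
   0xfffffffc; the sign bit is set, so it is complemented to 3 and the
   result is 32 - floor_log2 (3) - 1 = 30, i.e. bits 31..2 are all
   copies of the sign bit.  */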
4838 case SUBREG:
4839 /* If this is a SUBREG for a promoted object that is sign-extended
4840 and we are looking at it in a wider mode, we know that at least the
4841 high-order bits are known to be sign bit copies. */
4843 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
4845 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4846 known_x, known_mode, known_ret);
4847 return MAX ((int) bitwidth
4848 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
4849 num0);
4852 /* For a smaller object, just ignore the high bits. */
4853 if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
4855 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4856 known_x, known_mode, known_ret);
4857 return MAX (1, (num0
4858 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
4859 - bitwidth)));
4862 #ifdef WORD_REGISTER_OPERATIONS
4863 #ifdef LOAD_EXTEND_OP
4864 /* For paradoxical SUBREGs on machines where all register operations
4865 affect the entire register, just look inside. Note that we are
4866 passing MODE to the recursive call, so the number of sign bit copies
4867 will remain relative to that mode, not the inner mode. */
4869 /* This works only if loads sign extend. Otherwise, if we get a
4870 reload for the inner part, it may be loaded from the stack, and
4871 then we lose all sign bit copies that existed before the store
4872 to the stack. */
4874 if (paradoxical_subreg_p (x)
4875 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
4876 && MEM_P (SUBREG_REG (x)))
4877 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4878 known_x, known_mode, known_ret);
4879 #endif
4880 #endif
4881 break;
4883 case SIGN_EXTRACT:
4884 if (CONST_INT_P (XEXP (x, 1)))
4885 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4886 break;
4888 case SIGN_EXTEND:
4889 return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4890 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4891 known_x, known_mode, known_ret));
4893 case TRUNCATE:
4894 /* For a smaller object, just ignore the high bits. */
4895 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4896 known_x, known_mode, known_ret);
4897 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4898 - bitwidth)));
4900 case NOT:
4901 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4902 known_x, known_mode, known_ret);
4904 case ROTATE: case ROTATERT:
4905 /* If we are rotating left by a number of bits less than the number
4906 of sign bit copies, we can just subtract that amount from the
4907 number. */
4908 if (CONST_INT_P (XEXP (x, 1))
4909 && INTVAL (XEXP (x, 1)) >= 0
4910 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4912 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4913 known_x, known_mode, known_ret);
4914 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
4915 : (int) bitwidth - INTVAL (XEXP (x, 1))));
4917 break;
4919 case NEG:
4920 /* In general, this subtracts one sign bit copy. But if the value
4921 is known to be positive, the number of sign bit copies is the
4922 same as that of the input. Finally, if the input has just one bit
4923 that might be nonzero, all the bits are copies of the sign bit. */
4924 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4925 known_x, known_mode, known_ret);
4926 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4927 return num0 > 1 ? num0 - 1 : 1;
4929 nonzero = nonzero_bits (XEXP (x, 0), mode);
4930 if (nonzero == 1)
4931 return bitwidth;
4933 if (num0 > 1
4934 && (((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
4935 num0--;
4937 return num0;
4939 case IOR: case AND: case XOR:
4940 case SMIN: case SMAX: case UMIN: case UMAX:
4941 /* Logical operations will preserve the number of sign-bit copies.
4942 MIN and MAX operations always return one of the operands. */
4943 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4944 known_x, known_mode, known_ret);
4945 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4946 known_x, known_mode, known_ret);
4948 /* If num1 is clearing some of the top bits then regardless of
4949 the other term, we are guaranteed to have at least that many
4950 high-order zero bits. */
4951 if (code == AND
4952 && num1 > 1
4953 && bitwidth <= HOST_BITS_PER_WIDE_INT
4954 && CONST_INT_P (XEXP (x, 1))
4955 && (UINTVAL (XEXP (x, 1))
4956 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0)
4957 return num1;
4959 /* Similarly for IOR when setting high-order bits. */
4960 if (code == IOR
4961 && num1 > 1
4962 && bitwidth <= HOST_BITS_PER_WIDE_INT
4963 && CONST_INT_P (XEXP (x, 1))
4964 && (UINTVAL (XEXP (x, 1))
4965 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4966 return num1;
4968 return MIN (num0, num1);
4970 case PLUS: case MINUS:
4971 /* For addition and subtraction, we can have a 1-bit carry. However,
4972 if we are subtracting 1 from a positive number, there will not
4973 be such a carry. Furthermore, if the positive number is known to
4974 be 0 or 1, we know the result is either -1 or 0. */
4976 if (code == PLUS && XEXP (x, 1) == constm1_rtx
4977 && bitwidth <= HOST_BITS_PER_WIDE_INT)
4979 nonzero = nonzero_bits (XEXP (x, 0), mode);
4980 if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
4981 return (nonzero == 1 || nonzero == 0 ? bitwidth
4982 : bitwidth - floor_log2 (nonzero) - 1);
4985 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4986 known_x, known_mode, known_ret);
4987 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4988 known_x, known_mode, known_ret);
4989 result = MAX (1, MIN (num0, num1) - 1);
4991 return result;
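/* Illustrative worked example, not part of the original source: if both
   operands of a 32-bit PLUS have 20 sign-bit copies, each operand lies
   in [-4096, 4095]; their sum lies in [-8192, 8190], which needs 14
   significant bits, so only MIN (20, 20) - 1 = 19 copies can be
   guaranteed.  The possible carry into bit 12 costs the extra bit.  */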
4993 case MULT:
4994 /* The number of bits of the product is the sum of the number of
4995 bits of both terms. However, unless one of the terms is known
4996 to be positive, we must allow for an additional bit since negating
4997 a negative number can remove one sign bit copy. */
4999 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5000 known_x, known_mode, known_ret);
5001 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5002 known_x, known_mode, known_ret);
5004 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
5005 if (result > 0
5006 && (bitwidth > HOST_BITS_PER_WIDE_INT
5007 || (((nonzero_bits (XEXP (x, 0), mode)
5008 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
5009 && ((nonzero_bits (XEXP (x, 1), mode)
5010 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)))
5011 != 0))))
5012 result--;
5014 return MAX (1, result);
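/* Illustrative worked example, not part of the original source: two
   32-bit operands with 20 sign-bit copies each occupy at most 13
   significant bits, so their product needs at most 26 bits; RESULT
   starts as 32 - 12 - 12 = 8, and if both operands might be negative
   one more bit is reserved, giving MAX (1, 7) = 7.  */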
5016 case UDIV:
5017 /* The result must be <= the first operand. If the first operand
5018 has the high bit set, we know nothing about the number of sign
5019 bit copies. */
5020 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5021 return 1;
5022 else if ((nonzero_bits (XEXP (x, 0), mode)
5023 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
5024 return 1;
5025 else
5026 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5027 known_x, known_mode, known_ret);
5029 case UMOD:
5030 /* The result must be <= the second operand. If the second operand
5031 has (or just might have) the high bit set, we know nothing about
5032 the number of sign bit copies. */
5033 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5034 return 1;
5035 else if ((nonzero_bits (XEXP (x, 1), mode)
5036 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
5037 return 1;
5038 else
5039 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
5040 known_x, known_mode, known_ret);
5042 case DIV:
5043 /* Similar to unsigned division, except that we have to worry about
5044 the case where the divisor is negative, in which case we have
5045 to add 1. */
5046 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5047 known_x, known_mode, known_ret);
5048 if (result > 1
5049 && (bitwidth > HOST_BITS_PER_WIDE_INT
5050 || (nonzero_bits (XEXP (x, 1), mode)
5051 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
5052 result--;
5054 return result;
5056 case MOD:
5057 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5058 known_x, known_mode, known_ret);
5059 if (result > 1
5060 && (bitwidth > HOST_BITS_PER_WIDE_INT
5061 || (nonzero_bits (XEXP (x, 1), mode)
5062 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
5063 result--;
5065 return result;
5067 case ASHIFTRT:
5068 /* Shifts by a constant add to the number of bits equal to the
5069 sign bit. */
5070 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5071 known_x, known_mode, known_ret);
5072 if (CONST_INT_P (XEXP (x, 1))
5073 && INTVAL (XEXP (x, 1)) > 0
5074 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
5075 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
5077 return num0;
5079 case ASHIFT:
5080 /* Left shifts destroy copies. */
5081 if (!CONST_INT_P (XEXP (x, 1))
5082 || INTVAL (XEXP (x, 1)) < 0
5083 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
5084 || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
5085 return 1;
5087 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5088 known_x, known_mode, known_ret);
5089 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
5091 case IF_THEN_ELSE:
5092 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5093 known_x, known_mode, known_ret);
5094 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
5095 known_x, known_mode, known_ret);
5096 return MIN (num0, num1);
5098 case EQ: case NE: case GE: case GT: case LE: case LT:
5099 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
5100 case GEU: case GTU: case LEU: case LTU:
5101 case UNORDERED: case ORDERED:
5102 /* If the constant is negative, take its 1's complement and remask.
5103 Then see how many zero bits we have. */
5104 nonzero = STORE_FLAG_VALUE;
5105 if (bitwidth <= HOST_BITS_PER_WIDE_INT
5106 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
5107 nonzero = (~nonzero) & GET_MODE_MASK (mode);
5109 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5111 default:
5112 break;
5115 /* If we haven't been able to figure it out by one of the above rules,
5116 see if some of the high-order bits are known to be zero. If so,
5117 count those bits and return one less than that amount. If we can't
5118 safely compute the mask for this mode, just return 1. */
5120 bitwidth = GET_MODE_PRECISION (mode);
5121 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5122 return 1;
5124 nonzero = nonzero_bits (x, mode);
5125 return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
5126 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
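/* Illustrative example of the final fallback above, not part of the
   original source: if nonzero_bits shows that only the low 8 bits of a
   32-bit value can be set, the value is known non-negative, so bits
   31..8 are copies of the (zero) sign bit and
   32 - floor_log2 (0xff) - 1 = 24 is returned.  */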
5129 /* Calculate the rtx_cost of a single instruction. A return value of
5130 zero indicates an instruction pattern without a known cost. */
5133 insn_rtx_cost (rtx pat, bool speed)
5135 int i, cost;
5136 rtx set;
5138 /* Extract the single set rtx from the instruction pattern.
5139 We can't use single_set since we only have the pattern. */
5140 if (GET_CODE (pat) == SET)
5141 set = pat;
5142 else if (GET_CODE (pat) == PARALLEL)
5144 set = NULL_RTX;
5145 for (i = 0; i < XVECLEN (pat, 0); i++)
5147 rtx x = XVECEXP (pat, 0, i);
5148 if (GET_CODE (x) == SET)
5150 if (set)
5151 return 0;
5152 set = x;
5155 if (!set)
5156 return 0;
5158 else
5159 return 0;
5161 cost = set_src_cost (SET_SRC (set), speed);
5162 return cost > 0 ? cost : COSTS_N_INSNS (1);
5165 /* Return an estimate of the cost of computing SEQ. */
5167 unsigned
5168 seq_cost (const rtx_insn *seq, bool speed)
5170 unsigned cost = 0;
5171 rtx set;
5173 for (; seq; seq = NEXT_INSN (seq))
5175 set = single_set (seq);
5176 if (set)
5177 cost += set_rtx_cost (set, speed);
5178 else
5179 cost++;
5182 return cost;
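/* Sketch of a typical use of seq_cost, illustrative rather than taken
   from this file: a caller that has just built a candidate expansion
   with the sequence machinery can compare its estimated cost against a
   budget of its own choosing (max_cost below is such an assumed
   caller-defined limit):

     start_sequence ();
     ... emit the candidate insns ...
     rtx_insn *seq = get_insns ();
     end_sequence ();
     if (seq_cost (seq, optimize_insn_for_speed_p ()) <= max_cost)
       emit_insn (seq);
*/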
5185 /* Given an insn INSN and condition COND, return the condition in a
5186 canonical form to simplify testing by callers. Specifically:
5188 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
5189 (2) Both operands will be machine operands; (cc0) will have been replaced.
5190 (3) If an operand is a constant, it will be the second operand.
5191 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
5192 for GE, GEU, and LEU.
5194 If the condition cannot be understood, or is an inequality floating-point
5195 comparison which needs to be reversed, 0 will be returned.
5197 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
5199 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5200 insn used in locating the condition was found. If a replacement test
5201 of the condition is desired, it should be placed in front of that
5202 insn and we will be sure that the inputs are still valid.
5204 If WANT_REG is nonzero, we wish the condition to be relative to that
5205 register, if possible. Therefore, do not canonicalize the condition
5206 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
5207 to be a compare to a CC mode register.
5209 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
5210 and at INSN. */
5213 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
5214 rtx_insn **earliest,
5215 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5217 enum rtx_code code;
5218 rtx_insn *prev = insn;
5219 const_rtx set;
5220 rtx tem;
5221 rtx op0, op1;
5222 int reverse_code = 0;
5223 machine_mode mode;
5224 basic_block bb = BLOCK_FOR_INSN (insn);
5226 code = GET_CODE (cond);
5227 mode = GET_MODE (cond);
5228 op0 = XEXP (cond, 0);
5229 op1 = XEXP (cond, 1);
5231 if (reverse)
5232 code = reversed_comparison_code (cond, insn);
5233 if (code == UNKNOWN)
5234 return 0;
5236 if (earliest)
5237 *earliest = insn;
5239 /* If we are comparing a register with zero, see if the register is set
5240 in the previous insn to a COMPARE or a comparison operation. Perform
5241 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
5242 in cse.c */
5244 while ((GET_RTX_CLASS (code) == RTX_COMPARE
5245 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5246 && op1 == CONST0_RTX (GET_MODE (op0))
5247 && op0 != want_reg)
5249 /* Set nonzero when we find something of interest. */
5250 rtx x = 0;
5252 /* If comparison with cc0, import actual comparison from compare
5253 insn. */
5254 if (op0 == cc0_rtx)
5256 if ((prev = prev_nonnote_insn (prev)) == 0
5257 || !NONJUMP_INSN_P (prev)
5258 || (set = single_set (prev)) == 0
5259 || SET_DEST (set) != cc0_rtx)
5260 return 0;
5262 op0 = SET_SRC (set);
5263 op1 = CONST0_RTX (GET_MODE (op0));
5264 if (earliest)
5265 *earliest = prev;
5268 /* If this is a COMPARE, pick up the two things being compared. */
5269 if (GET_CODE (op0) == COMPARE)
5271 op1 = XEXP (op0, 1);
5272 op0 = XEXP (op0, 0);
5273 continue;
5275 else if (!REG_P (op0))
5276 break;
5278 /* Go back to the previous insn. Stop if it is not an INSN. We also
5279 stop if it isn't a single set or if it has a REG_INC note because
5280 we don't want to bother dealing with it. */
5282 prev = prev_nonnote_nondebug_insn (prev);
5284 if (prev == 0
5285 || !NONJUMP_INSN_P (prev)
5286 || FIND_REG_INC_NOTE (prev, NULL_RTX)
5287 /* In cfglayout mode, there do not have to be labels at the
5288 beginning of a block, or jumps at the end, so the previous
5289 conditions would not stop us when we reach bb boundary. */
5290 || BLOCK_FOR_INSN (prev) != bb)
5291 break;
5293 set = set_of (op0, prev);
5295 if (set
5296 && (GET_CODE (set) != SET
5297 || !rtx_equal_p (SET_DEST (set), op0)))
5298 break;
5300 /* If this is setting OP0, get what it sets it to if it looks
5301 relevant. */
5302 if (set)
5304 machine_mode inner_mode = GET_MODE (SET_DEST (set));
5305 #ifdef FLOAT_STORE_FLAG_VALUE
5306 REAL_VALUE_TYPE fsfv;
5307 #endif
5309 /* ??? We may not combine comparisons done in a CCmode with
5310 comparisons not done in a CCmode. This is to aid targets
5311 like Alpha that have an IEEE compliant EQ instruction, and
5312 a non-IEEE compliant BEQ instruction. The use of CCmode is
5313 actually artificial, simply to prevent the combination, but
5314 should not affect other platforms.
5316 However, we must allow VOIDmode comparisons to match either
5317 CCmode or non-CCmode comparison, because some ports have
5318 modeless comparisons inside branch patterns.
5320 ??? This mode check should perhaps look more like the mode check
5321 in simplify_comparison in combine. */
5322 if (((GET_MODE_CLASS (mode) == MODE_CC)
5323 != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5324 && mode != VOIDmode
5325 && inner_mode != VOIDmode)
5326 break;
5327 if (GET_CODE (SET_SRC (set)) == COMPARE
5328 || (((code == NE
5329 || (code == LT
5330 && val_signbit_known_set_p (inner_mode,
5331 STORE_FLAG_VALUE))
5332 #ifdef FLOAT_STORE_FLAG_VALUE
5333 || (code == LT
5334 && SCALAR_FLOAT_MODE_P (inner_mode)
5335 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5336 REAL_VALUE_NEGATIVE (fsfv)))
5337 #endif
5339 && COMPARISON_P (SET_SRC (set))))
5340 x = SET_SRC (set);
5341 else if (((code == EQ
5342 || (code == GE
5343 && val_signbit_known_set_p (inner_mode,
5344 STORE_FLAG_VALUE))
5345 #ifdef FLOAT_STORE_FLAG_VALUE
5346 || (code == GE
5347 && SCALAR_FLOAT_MODE_P (inner_mode)
5348 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5349 REAL_VALUE_NEGATIVE (fsfv)))
5350 #endif
5352 && COMPARISON_P (SET_SRC (set)))
5354 reverse_code = 1;
5355 x = SET_SRC (set);
5357 else if ((code == EQ || code == NE)
5358 && GET_CODE (SET_SRC (set)) == XOR)
5359 /* Handle sequences like:
5361 (set op0 (xor X Y))
5362 ...(eq|ne op0 (const_int 0))...
5364 in which case:
5366 (eq op0 (const_int 0)) reduces to (eq X Y)
5367 (ne op0 (const_int 0)) reduces to (ne X Y)
5369 This is the form used by MIPS16, for example. */
5370 x = SET_SRC (set);
5371 else
5372 break;
5375 else if (reg_set_p (op0, prev))
5376 /* If this sets OP0, but not directly, we have to give up. */
5377 break;
5379 if (x)
5381 /* If the caller is expecting the condition to be valid at INSN,
5382 make sure X doesn't change before INSN. */
5383 if (valid_at_insn_p)
5384 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5385 break;
5386 if (COMPARISON_P (x))
5387 code = GET_CODE (x);
5388 if (reverse_code)
5390 code = reversed_comparison_code (x, prev);
5391 if (code == UNKNOWN)
5392 return 0;
5393 reverse_code = 0;
5396 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5397 if (earliest)
5398 *earliest = prev;
5402 /* If constant is first, put it last. */
5403 if (CONSTANT_P (op0))
5404 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5406 /* If OP0 is the result of a comparison, we weren't able to find what
5407 was really being compared, so fail. */
5408 if (!allow_cc_mode
5409 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5410 return 0;
5412 /* Canonicalize any ordered comparison with integers involving equality
5413 if we can do computations in the relevant mode and we do not
5414 overflow. */
5416 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
5417 && CONST_INT_P (op1)
5418 && GET_MODE (op0) != VOIDmode
5419 && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
5421 HOST_WIDE_INT const_val = INTVAL (op1);
5422 unsigned HOST_WIDE_INT uconst_val = const_val;
5423 unsigned HOST_WIDE_INT max_val
5424 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
5426 switch (code)
5428 case LE:
5429 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5430 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
5431 break;
5433 /* When cross-compiling, const_val might be sign-extended from
5434 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
5435 case GE:
5436 if ((const_val & max_val)
5437 != ((unsigned HOST_WIDE_INT) 1
5438 << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
5439 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
5440 break;
5442 case LEU:
5443 if (uconst_val < max_val)
5444 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
5445 break;
5447 case GEU:
5448 if (uconst_val != 0)
5449 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
5450 break;
5452 default:
5453 break;
5457 /* Never return CC0; return zero instead. */
5458 if (CC0_P (op0))
5459 return 0;
5461 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
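/* Illustrative examples of the canonicalization above, not part of the
   original source: (le (reg:SI R) (const_int 7)) is returned as
   (lt (reg:SI R) (const_int 8)), (geu (reg:SI R) (const_int 1)) becomes
   (gtu (reg:SI R) (const_int 0)), and a constant first operand is
   swapped into the second position with the code adjusted by
   swap_condition.  */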
5464 /* Given a jump insn JUMP, return the condition that will cause it to branch
5465 to its JUMP_LABEL. If the condition cannot be understood, or is an
5466 inequality floating-point comparison which needs to be reversed, 0 will
5467 be returned.
5469 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5470 insn used in locating the condition was found. If a replacement test
5471 of the condition is desired, it should be placed in front of that
5472 insn and we will be sure that the inputs are still valid. If EARLIEST
5473 is null, the returned condition will be valid at INSN.
5475 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5476 compare CC mode register.
5478 VALID_AT_INSN_P is the same as for canonicalize_condition. */
5481 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
5482 int valid_at_insn_p)
5484 rtx cond;
5485 int reverse;
5486 rtx set;
5488 /* If this is not a standard conditional jump, we can't parse it. */
5489 if (!JUMP_P (jump)
5490 || ! any_condjump_p (jump))
5491 return 0;
5492 set = pc_set (jump);
5494 cond = XEXP (SET_SRC (set), 0);
5496 /* If this branches to JUMP_LABEL when the condition is false, reverse
5497 the condition. */
5498 reverse
5499 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5500 && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
5502 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5503 allow_cc_mode, valid_at_insn_p);
5506 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5507 TARGET_MODE_REP_EXTENDED.
5509 Note that we assume that the property of
5510 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5511 narrower than mode B. I.e., if A is a mode narrower than B then in
5512 order to be able to operate on it in mode B, mode A needs to
5513 satisfy the requirements set by the representation of mode B. */
5515 static void
5516 init_num_sign_bit_copies_in_rep (void)
5518 machine_mode mode, in_mode;
5520 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
5521 in_mode = GET_MODE_WIDER_MODE (mode))
5522 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
5523 mode = GET_MODE_WIDER_MODE (mode))
5525 machine_mode i;
5527 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5528 extends to the next widest mode. */
5529 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5530 || GET_MODE_WIDER_MODE (mode) == in_mode);
5532 /* We are in in_mode. Count how many bits outside of mode
5533 have to be copies of the sign-bit. */
5534 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
5536 machine_mode wider = GET_MODE_WIDER_MODE (i);
5538 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5539 /* We can only check sign-bit copies starting from the
5540 top-bit. In order to be able to check the bits we
5541 have already seen we pretend that subsequent bits
5542 have to be sign-bit copies too. */
5543 || num_sign_bit_copies_in_rep [in_mode][mode])
5544 num_sign_bit_copies_in_rep [in_mode][mode]
5545 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5550 /* Suppose that truncation from the machine mode of X to MODE is not a
5551 no-op. See if there is anything special about X so that we can
5552 assume it already contains a truncated value of MODE. */
5554 bool
5555 truncated_to_mode (machine_mode mode, const_rtx x)
5557 /* This register has already been used in MODE without explicit
5558 truncation. */
5559 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5560 return true;
5562 /* See if we already satisfy the requirements of MODE. If yes we
5563 can just switch to MODE. */
5564 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5565 && (num_sign_bit_copies (x, GET_MODE (x))
5566 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5567 return true;
5569 return false;
5572 /* Return true if RTX code CODE has a single sequence of zero or more
5573 "e" operands and no rtvec operands. Initialize its rtx_all_subrtx_bounds
5574 entry in that case. */
5576 static bool
5577 setup_reg_subrtx_bounds (unsigned int code)
5579 const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
5580 unsigned int i = 0;
5581 for (; format[i] != 'e'; ++i)
5583 if (!format[i])
5584 /* No subrtxes. Leave start and count as 0. */
5585 return true;
5586 if (format[i] == 'E' || format[i] == 'V')
5587 return false;
5590 /* Record the sequence of 'e's. */
5591 rtx_all_subrtx_bounds[code].start = i;
5593 ++i;
5594 while (format[i] == 'e');
5595 rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
5596 /* rtl-iter.h relies on this. */
5597 gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
5599 for (; format[i]; ++i)
5600 if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
5601 return false;
5603 return true;
5606 /* Initialize rtx_all_subrtx_bounds. */
5607 void
5608 init_rtlanal (void)
5610 int i;
5611 for (i = 0; i < NUM_RTX_CODE; i++)
5613 if (!setup_reg_subrtx_bounds (i))
5614 rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
5615 if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
5616 rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
5619 init_num_sign_bit_copies_in_rep ();
5622 /* Check whether this is a constant pool constant. */
5623 bool
5624 constant_pool_constant_p (rtx x)
5626 x = avoid_constant_pool_reference (x);
5627 return CONST_DOUBLE_P (x);
5630 /* If M is a bitmask that selects a field of low-order bits within an item but
5631 not the entire word, return the length of the field. Return -1 otherwise.
5632 M is used in machine mode MODE. */
5635 low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
5637 if (mode != VOIDmode)
5639 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
5640 return -1;
5641 m &= GET_MODE_MASK (mode);
5644 return exact_log2 (m + 1);
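/* Illustrative examples, not part of the original source: M = 0x3f
   gives exact_log2 (0x40) = 6, while M = 0x5 is not a mask of
   low-order bits and gives exact_log2 (6) = -1.  */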
5647 /* Return the mode of MEM's address. */
5649 machine_mode
5650 get_address_mode (rtx mem)
5652 machine_mode mode;
5654 gcc_assert (MEM_P (mem));
5655 mode = GET_MODE (XEXP (mem, 0));
5656 if (mode != VOIDmode)
5657 return mode;
5658 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5661 /* Split up a CONST_DOUBLE or integer constant rtx
5662 into two rtx's for single words,
5663 storing in *FIRST the word that comes first in memory in the target
5664 and in *SECOND the other.
5666 TODO: This function needs to be rewritten to work on any size
5667 integer. */
5669 void
5670 split_double (rtx value, rtx *first, rtx *second)
5672 if (CONST_INT_P (value))
5674 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5676 /* In this case the CONST_INT holds both target words.
5677 Extract the bits from it into two word-sized pieces.
5678 Sign extend each half to HOST_WIDE_INT. */
5679 unsigned HOST_WIDE_INT low, high;
5680 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5681 unsigned bits_per_word = BITS_PER_WORD;
5683 /* Set sign_bit to the most significant bit of a word. */
5684 sign_bit = 1;
5685 sign_bit <<= bits_per_word - 1;
5687 /* Set mask so that all bits of the word are set. We could
5688 have used 1 << BITS_PER_WORD instead of basing the
5689 calculation on sign_bit. However, on machines where
5690 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5691 compiler warning, even though the code would never be
5692 executed. */
5693 mask = sign_bit << 1;
5694 mask--;
5696 /* Set sign_extend as any remaining bits. */
5697 sign_extend = ~mask;
5699 /* Pick the lower word and sign-extend it. */
5700 low = INTVAL (value);
5701 low &= mask;
5702 if (low & sign_bit)
5703 low |= sign_extend;
5705 /* Pick the higher word, shifted to the least significant
5706 bits, and sign-extend it. */
5707 high = INTVAL (value);
5708 high >>= bits_per_word - 1;
5709 high >>= 1;
5710 high &= mask;
5711 if (high & sign_bit)
5712 high |= sign_extend;
5714 /* Store the words in the target machine order. */
5715 if (WORDS_BIG_ENDIAN)
5717 *first = GEN_INT (high);
5718 *second = GEN_INT (low);
5720 else
5722 *first = GEN_INT (low);
5723 *second = GEN_INT (high);
5726 else
5728 /* The rule for using CONST_INT for a wider mode
5729 is that we regard the value as signed.
5730 So sign-extend it. */
5731 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
5732 if (WORDS_BIG_ENDIAN)
5734 *first = high;
5735 *second = value;
5737 else
5739 *first = value;
5740 *second = high;
5744 else if (GET_CODE (value) == CONST_WIDE_INT)
5746 /* All of this is scary code and needs to be converted to
5747 properly work with any size integer. */
5748 gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
5749 if (WORDS_BIG_ENDIAN)
5751 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5752 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5754 else
5756 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5757 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5760 else if (!CONST_DOUBLE_P (value))
5762 if (WORDS_BIG_ENDIAN)
5764 *first = const0_rtx;
5765 *second = value;
5767 else
5769 *first = value;
5770 *second = const0_rtx;
5773 else if (GET_MODE (value) == VOIDmode
5774 /* This is the old way we did CONST_DOUBLE integers. */
5775 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
5777 /* In an integer, the words are defined as most and least significant.
5778 So order them by the target's convention. */
5779 if (WORDS_BIG_ENDIAN)
5781 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
5782 *second = GEN_INT (CONST_DOUBLE_LOW (value));
5784 else
5786 *first = GEN_INT (CONST_DOUBLE_LOW (value));
5787 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
5790 else
5792 REAL_VALUE_TYPE r;
5793 long l[2];
5794 REAL_VALUE_FROM_CONST_DOUBLE (r, value);
5796 /* Note, this converts the REAL_VALUE_TYPE to the target's
5797 format, splits up the floating point double and outputs
5798 exactly 32 bits of it into each of l[0] and l[1] --
5799 not necessarily BITS_PER_WORD bits. */
5800 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
5802 /* If 32 bits is an entire word for the target, but not for the host,
5803 then sign-extend on the host so that the number will look the same
5804 way on the host that it would on the target. See for instance
5805 simplify_unary_operation. The #if is needed to avoid compiler
5806 warnings. */
5808 #if HOST_BITS_PER_LONG > 32
5809 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
5811 if (l[0] & ((long) 1 << 31))
5812 l[0] |= ((long) (-1) << 32);
5813 if (l[1] & ((long) 1 << 31))
5814 l[1] |= ((long) (-1) << 32);
5816 #endif
5818 *first = GEN_INT (l[0]);
5819 *second = GEN_INT (l[1]);
5823 /* Return true if X is a sign_extract or zero_extract from the least
5824 significant bit. */
5826 static bool
5827 lsb_bitfield_op_p (rtx x)
5829 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
5831 machine_mode mode = GET_MODE (XEXP (x, 0));
5832 HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
5833 HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
5835 return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
5837 return false;
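/* Illustrative example, not part of the original source: with
   !BITS_BIG_ENDIAN, (zero_extract:SI (reg:SI R) (const_int 8)
   (const_int 0)) extracts the least significant byte and satisfies
   this predicate, whereas the same extract at position 8 does not.  */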
5840 /* Strip outer address "mutations" from LOC and return a pointer to the
5841 inner value. If OUTER_CODE is nonnull, store the code of the innermost
5842 stripped expression there.
5844 "Mutations" either convert between modes or apply some kind of
5845 extension, truncation or alignment. */
5847 rtx *
5848 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
5850 for (;;)
5852 enum rtx_code code = GET_CODE (*loc);
5853 if (GET_RTX_CLASS (code) == RTX_UNARY)
5854 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
5855 used to convert between pointer sizes. */
5856 loc = &XEXP (*loc, 0);
5857 else if (lsb_bitfield_op_p (*loc))
5858 /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
5859 acts as a combined truncation and extension. */
5860 loc = &XEXP (*loc, 0);
5861 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
5862 /* (and ... (const_int -X)) is used to align to X bytes. */
5863 loc = &XEXP (*loc, 0);
5864 else if (code == SUBREG
5865 && !OBJECT_P (SUBREG_REG (*loc))
5866 && subreg_lowpart_p (*loc))
5867 /* (subreg (operator ...) ...) inside and is used for mode
5868 conversion too. */
5869 loc = &SUBREG_REG (*loc);
5870 else
5871 return loc;
5872 if (outer_code)
5873 *outer_code = code;
5877 /* Return true if CODE applies some kind of scale. The scaled value is
5878 the first operand and the scale is the second. */
5880 static bool
5881 binary_scale_code_p (enum rtx_code code)
5883 return (code == MULT
5884 || code == ASHIFT
5885 /* Needed by ARM targets. */
5886 || code == ASHIFTRT
5887 || code == LSHIFTRT
5888 || code == ROTATE
5889 || code == ROTATERT);
5892 /* If *INNER can be interpreted as a base, return a pointer to the inner term
5893 (see address_info). Return null otherwise. */
5895 static rtx *
5896 get_base_term (rtx *inner)
5898 if (GET_CODE (*inner) == LO_SUM)
5899 inner = strip_address_mutations (&XEXP (*inner, 0));
5900 if (REG_P (*inner)
5901 || MEM_P (*inner)
5902 || GET_CODE (*inner) == SUBREG
5903 || GET_CODE (*inner) == SCRATCH)
5904 return inner;
5905 return 0;
5908 /* If *INNER can be interpreted as an index, return a pointer to the inner term
5909 (see address_info). Return null otherwise. */
5911 static rtx *
5912 get_index_term (rtx *inner)
5914 /* At present, only constant scales are allowed. */
5915 if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
5916 inner = strip_address_mutations (&XEXP (*inner, 0));
5917 if (REG_P (*inner)
5918 || MEM_P (*inner)
5919 || GET_CODE (*inner) == SUBREG
5920 || GET_CODE (*inner) == SCRATCH)
5921 return inner;
5922 return 0;
5925 /* Set the segment part of address INFO to LOC, given that INNER is the
5926 unmutated value. */
5928 static void
5929 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
5931 gcc_assert (!info->segment);
5932 info->segment = loc;
5933 info->segment_term = inner;
5936 /* Set the base part of address INFO to LOC, given that INNER is the
5937 unmutated value. */
5939 static void
5940 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
5942 gcc_assert (!info->base);
5943 info->base = loc;
5944 info->base_term = inner;
5947 /* Set the index part of address INFO to LOC, given that INNER is the
5948 unmutated value. */
5950 static void
5951 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
5953 gcc_assert (!info->index);
5954 info->index = loc;
5955 info->index_term = inner;
5958 /* Set the displacement part of address INFO to LOC, given that INNER
5959 is the constant term. */
5961 static void
5962 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
5964 gcc_assert (!info->disp);
5965 info->disp = loc;
5966 info->disp_term = inner;
5969 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
5970 rest of INFO accordingly. */
5972 static void
5973 decompose_incdec_address (struct address_info *info)
5975 info->autoinc_p = true;
5977 rtx *base = &XEXP (*info->inner, 0);
5978 set_address_base (info, base, base);
5979 gcc_checking_assert (info->base == info->base_term);
5981 /* These addresses are only valid when the size of the addressed
5982 value is known. */
5983 gcc_checking_assert (info->mode != VOIDmode);
5986 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
5987 of INFO accordingly. */
5989 static void
5990 decompose_automod_address (struct address_info *info)
5992 info->autoinc_p = true;
5994 rtx *base = &XEXP (*info->inner, 0);
5995 set_address_base (info, base, base);
5996 gcc_checking_assert (info->base == info->base_term);
5998 rtx plus = XEXP (*info->inner, 1);
5999 gcc_assert (GET_CODE (plus) == PLUS);
6001 info->base_term2 = &XEXP (plus, 0);
6002 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
6004 rtx *step = &XEXP (plus, 1);
6005 rtx *inner_step = strip_address_mutations (step);
6006 if (CONSTANT_P (*inner_step))
6007 set_address_disp (info, step, inner_step);
6008 else
6009 set_address_index (info, step, inner_step);
6012 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
6013 values in [PTR, END). Return a pointer to the end of the used array. */
6015 static rtx **
6016 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
6018 rtx x = *loc;
6019 if (GET_CODE (x) == PLUS)
6021 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
6022 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
6024 else
6026 gcc_assert (ptr != end);
6027 *ptr++ = loc;
6029 return ptr;
6032 /* Evaluate the likelihood of X being a base or index value, returning
6033 positive if it is likely to be a base, negative if it is likely to be
6034 an index, and 0 if we can't tell. Make the magnitude of the return
6035 value reflect the amount of confidence we have in the answer.
6037 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
6039 static int
6040 baseness (rtx x, machine_mode mode, addr_space_t as,
6041 enum rtx_code outer_code, enum rtx_code index_code)
6043 /* Believe *_POINTER unless the address shape requires otherwise. */
6044 if (REG_P (x) && REG_POINTER (x))
6045 return 2;
6046 if (MEM_P (x) && MEM_POINTER (x))
6047 return 2;
6049 if (REG_P (x) && HARD_REGISTER_P (x))
6051 /* X is a hard register. If it only fits one of the base
6052 or index classes, choose that interpretation. */
6053 int regno = REGNO (x);
6054 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
6055 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
6056 if (base_p != index_p)
6057 return base_p ? 1 : -1;
6059 return 0;
6062 /* INFO->INNER describes a normal, non-automodified address.
6063 Fill in the rest of INFO accordingly. */
6065 static void
6066 decompose_normal_address (struct address_info *info)
6068 /* Treat the address as the sum of up to four values. */
6069 rtx *ops[4];
6070 size_t n_ops = extract_plus_operands (info->inner, ops,
6071 ops + ARRAY_SIZE (ops)) - ops;
6073 /* If there is more than one component, any base component is in a PLUS. */
6074 if (n_ops > 1)
6075 info->base_outer_code = PLUS;
6077 /* Try to classify each sum operand now. Leave those that could be
6078 either a base or an index in OPS. */
6079 rtx *inner_ops[4];
6080 size_t out = 0;
6081 for (size_t in = 0; in < n_ops; ++in)
6083 rtx *loc = ops[in];
6084 rtx *inner = strip_address_mutations (loc);
6085 if (CONSTANT_P (*inner))
6086 set_address_disp (info, loc, inner);
6087 else if (GET_CODE (*inner) == UNSPEC)
6088 set_address_segment (info, loc, inner);
6089 else
6091 /* The only other possibilities are a base or an index. */
6092 rtx *base_term = get_base_term (inner);
6093 rtx *index_term = get_index_term (inner);
6094 gcc_assert (base_term || index_term);
6095 if (!base_term)
6096 set_address_index (info, loc, index_term);
6097 else if (!index_term)
6098 set_address_base (info, loc, base_term);
6099 else
6101 gcc_assert (base_term == index_term);
6102 ops[out] = loc;
6103 inner_ops[out] = base_term;
6104 ++out;
6109 /* Classify the remaining OPS members as bases and indexes. */
6110 if (out == 1)
6112 /* If we haven't seen a base or an index yet, assume that this is
6113 the base. If we were confident that another term was the base
6114 or index, treat the remaining operand as the other kind. */
6115 if (!info->base)
6116 set_address_base (info, ops[0], inner_ops[0]);
6117 else
6118 set_address_index (info, ops[0], inner_ops[0]);
6120 else if (out == 2)
6122 /* In the event of a tie, assume the base comes first. */
6123 if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
6124 GET_CODE (*ops[1]))
6125 >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
6126 GET_CODE (*ops[0])))
6128 set_address_base (info, ops[0], inner_ops[0]);
6129 set_address_index (info, ops[1], inner_ops[1]);
6131 else
6133 set_address_base (info, ops[1], inner_ops[1]);
6134 set_address_index (info, ops[0], inner_ops[0]);
6137 else
6138 gcc_assert (out == 0);
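/* As an illustration, a scaled address of the form:

     (plus (plus (mult (reg r1) (const_int 4)) (reg r2)) (const_int 12))

   is flattened into three operands: the (mult ...) term can only be an
   index, so (reg r1) becomes INDEX_TERM; (const_int 12) becomes the
   displacement; and the remaining (reg r2), which could have been either,
   is then taken as the base.  (Register numbers and the scale of 4 are
   arbitrary; only the shape matters.)  */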
6141 /* Describe address *LOC in *INFO. MODE is the mode of the addressed value,
6142 or VOIDmode if not known. AS is the address space associated with LOC.
6143 OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise. */
6145 void
6146 decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
6147 addr_space_t as, enum rtx_code outer_code)
6149 memset (info, 0, sizeof (*info));
6150 info->mode = mode;
6151 info->as = as;
6152 info->addr_outer_code = outer_code;
6153 info->outer = loc;
6154 info->inner = strip_address_mutations (loc, &outer_code);
6155 info->base_outer_code = outer_code;
6156 switch (GET_CODE (*info->inner))
6158 case PRE_DEC:
6159 case PRE_INC:
6160 case POST_DEC:
6161 case POST_INC:
6162 decompose_incdec_address (info);
6163 break;
6165 case PRE_MODIFY:
6166 case POST_MODIFY:
6167 decompose_automod_address (info);
6168 break;
6170 default:
6171 decompose_normal_address (info);
6172 break;
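/* A minimal usage sketch (ADDR_LOC stands for a pointer to the address
   inside some MEM the caller is analyzing):

     struct address_info info;
     decompose_address (&info, addr_loc, SImode, ADDR_SPACE_GENERIC, MEM);
     if (info.autoinc_p)
       ...the address contains a side effect...

   Callers that start from a MEM or from a bare address operand would
   normally go through decompose_mem_address or decompose_lea_address
   below instead.  */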
6176 /* Describe address operand LOC in INFO. */
6178 void
6179 decompose_lea_address (struct address_info *info, rtx *loc)
6181 decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
6184 /* Describe the address of MEM X in INFO. */
6186 void
6187 decompose_mem_address (struct address_info *info, rtx x)
6189 gcc_assert (MEM_P (x));
6190 decompose_address (info, &XEXP (x, 0), GET_MODE (x),
6191 MEM_ADDR_SPACE (x), MEM);
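/* For instance, when scanning the body of an insn for memory references
   (INSN is just a placeholder for whatever insn the caller walks):

     subrtx_ptr_iterator::array_type array;
     FOR_EACH_SUBRTX_PTR (iter, array, &PATTERN (insn), NONCONST)
       if (MEM_P (**iter))
	 {
	   struct address_info info;
	   decompose_mem_address (&info, **iter);
	   ...
	 }
 */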
6194 /* Update INFO after a change to the address it describes. */
6196 void
6197 update_address (struct address_info *info)
6199 decompose_address (info, info->outer, info->mode, info->as,
6200 info->addr_outer_code);
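/* For example, after changing the displacement in place:

     *info->disp = GEN_INT (new_offset);
     update_address (info);

   the BASE, INDEX and DISP pointers are recomputed for the modified RTL.
   (NEW_OFFSET is a made-up variable; any in-place edit of the address
   calls for the same refresh.)  */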
6203 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
6204 more complicated than that. */
6206 HOST_WIDE_INT
6207 get_index_scale (const struct address_info *info)
6209 rtx index = *info->index;
6210 if (GET_CODE (index) == MULT
6211 && CONST_INT_P (XEXP (index, 1))
6212 && info->index_term == &XEXP (index, 0))
6213 return INTVAL (XEXP (index, 1));
6215 if (GET_CODE (index) == ASHIFT
6216 && CONST_INT_P (XEXP (index, 1))
6217 && info->index_term == &XEXP (index, 0))
6218 return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));
6220 if (info->index == info->index_term)
6221 return 1;
6223 return 0;
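/* So, for an index of the form (mult (reg r1) (const_int 4)) or
   (ashift (reg r1) (const_int 2)) the result is 4, for a bare (reg r1)
   it is 1, and for anything more involved it is 0.  (Register numbers
   are illustrative.)  */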
6226 /* Return the "index code" of INFO, in the form required by
6227 ok_for_base_p_1. */
6229 enum rtx_code
6230 get_index_code (const struct address_info *info)
6232 if (info->index)
6233 return GET_CODE (*info->index);
6235 if (info->disp)
6236 return GET_CODE (*info->disp);
6238 return SCRATCH;
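/* A typical pairing, when asking which register class could hold the base
   of the decomposed address, is something like:

     enum reg_class cl = base_reg_class (info->mode, info->as,
					 info->base_outer_code,
					 get_index_code (info));

   (base_reg_class comes from addresses.h; the variable CL is only
   illustrative.)  */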
6241 /* Return true if X contains a thread-local symbol. */
6243 bool
6244 tls_referenced_p (const_rtx x)
6246 if (!targetm.have_tls)
6247 return false;
6249 subrtx_iterator::array_type array;
6250 FOR_EACH_SUBRTX (iter, array, x, ALL)
6251 if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
6252 return true;
6253 return false;
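/* A typical use is in a target's legitimate-constant or
   cannot-force-const-mem hook, where TLS symbols usually need special
   handling:

     if (tls_referenced_p (x))
       return false;

   (Whether a TLS reference is acceptable in a given context is of course
   target-specific; this is only a sketch of a common call site.)  */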