1 /* Analyze RTL for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "predict.h"
26 #include "tree.h"
27 #include "rtl.h"
28 #include "df.h"
29 #include "diagnostic-core.h"
30 #include "insn-config.h"
31 #include "recog.h"
32 #include "target.h"
33 #include "output.h"
34 #include "tm_p.h"
35 #include "flags.h"
36 #include "regs.h"
37 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
38 #include "addresses.h"
39 #include "rtl-iter.h"
41 /* Forward declarations */
42 static void set_of_1 (rtx, const_rtx, void *);
43 static bool covers_regno_p (const_rtx, unsigned int);
44 static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
45 static int computed_jump_p_1 (const_rtx);
46 static void parms_set (rtx, const_rtx, void *);
48 static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, machine_mode,
49 const_rtx, machine_mode,
50 unsigned HOST_WIDE_INT);
51 static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, machine_mode,
52 const_rtx, machine_mode,
53 unsigned HOST_WIDE_INT);
54 static unsigned int cached_num_sign_bit_copies (const_rtx, machine_mode, const_rtx,
55 machine_mode,
56 unsigned int);
57 static unsigned int num_sign_bit_copies1 (const_rtx, machine_mode, const_rtx,
58 machine_mode, unsigned int);
60 rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
61 rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
63 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
64 If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
65 SIGN_EXTEND then while narrowing we also have to enforce the
66 representation and sign-extend the value to mode DESTINATION_REP.
68 If the value is already sign-extended to DESTINATION_REP mode we
69 can just switch to DESTINATION mode on it. For each pair of
70 integral modes SOURCE and DESTINATION, when truncating from SOURCE
71 to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
72 contains the number of high-order bits in SOURCE that have to be
73 copies of the sign-bit so that we can do this mode-switch to
74 DESTINATION. */
76 static unsigned int
77 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
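/* Illustrative example (not part of the original sources): assume
   32-bit SImode, 16-bit HImode, and a hypothetical target whose
   TARGET_MODE_REP_EXTENDED (HImode, SImode) is SIGN_EXTEND.  Then a
   truncation from SImode to HImode is a plain mode switch only when
   the 16 high-order bits of the SImode value are already copies of
   bit 15, so num_sign_bit_copies_in_rep[SImode][HImode] would be 16.  */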
79 /* Store X into index I of ARRAY. ARRAY is known to have at least I
80 elements. Return the new base of ARRAY. */
82 template <typename T>
83 typename T::value_type *
84 generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
85 value_type *base,
86 size_t i, value_type x)
88 if (base == array.stack)
90 if (i < LOCAL_ELEMS)
92 base[i] = x;
93 return base;
95 gcc_checking_assert (i == LOCAL_ELEMS);
96 /* A previous iteration might also have moved from the stack to the
97 heap, in which case the heap array will already be big enough. */
98 if (vec_safe_length (array.heap) <= i)
99 vec_safe_grow (array.heap, i + 1);
100 base = array.heap->address ();
101 memcpy (base, array.stack, sizeof (array.stack));
102 base[LOCAL_ELEMS] = x;
103 return base;
105 unsigned int length = array.heap->length ();
106 if (length > i)
108 gcc_checking_assert (base == array.heap->address ());
109 base[i] = x;
110 return base;
112 else
114 gcc_checking_assert (i == length);
115 vec_safe_push (array.heap, x);
116 return array.heap->address ();
120 /* Add the subrtxes of X to worklist ARRAY, starting at END. Return the
121 number of elements added to the worklist. */
123 template <typename T>
124 size_t
125 generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
126 value_type *base,
127 size_t end, rtx_type x)
129 enum rtx_code code = GET_CODE (x);
130 const char *format = GET_RTX_FORMAT (code);
131 size_t orig_end = end;
132 if (__builtin_expect (INSN_P (x), false))
134 /* Put the pattern at the top of the queue, since that's what
135 we're likely to want most. It also allows for the SEQUENCE
136 code below. */
137 for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
138 if (format[i] == 'e')
140 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
141 if (__builtin_expect (end < LOCAL_ELEMS, true))
142 base[end++] = subx;
143 else
144 base = add_single_to_queue (array, base, end++, subx);
147 else
148 for (int i = 0; format[i]; ++i)
149 if (format[i] == 'e')
151 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
152 if (__builtin_expect (end < LOCAL_ELEMS, true))
153 base[end++] = subx;
154 else
155 base = add_single_to_queue (array, base, end++, subx);
157 else if (format[i] == 'E')
159 unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
160 rtx *vec = x->u.fld[i].rt_rtvec->elem;
161 if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
162 for (unsigned int j = 0; j < length; j++)
163 base[end++] = T::get_value (vec[j]);
164 else
165 for (unsigned int j = 0; j < length; j++)
166 base = add_single_to_queue (array, base, end++,
167 T::get_value (vec[j]));
168 if (code == SEQUENCE && end == length)
169 /* If the subrtxes of the sequence fill the entire array then
170 we know that no other parts of a containing insn are queued.
171 The caller is therefore iterating over the sequence as a
172 PATTERN (...), so we also want the patterns of the
173 subinstructions. */
174 for (unsigned int j = 0; j < length; j++)
176 typename T::rtx_type x = T::get_rtx (base[j]);
177 if (INSN_P (x))
178 base[j] = T::get_value (PATTERN (x));
181 return end - orig_end;
184 template <typename T>
185 void
186 generic_subrtx_iterator <T>::free_array (array_type &array)
188 vec_free (array.heap);
191 template <typename T>
192 const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;
194 template class generic_subrtx_iterator <const_rtx_accessor>;
195 template class generic_subrtx_iterator <rtx_var_accessor>;
196 template class generic_subrtx_iterator <rtx_ptr_accessor>;
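/* Usage sketch for the iterator classes instantiated above, modelled
   on find_all_hard_regs later in this file ("n_mems" is a hypothetical
   counter):

     subrtx_iterator::array_type array;
     FOR_EACH_SUBRTX (iter, array, x, ALL)
       if (MEM_P (*iter))
         n_mems++;  */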
198 /* Return 1 if the value of X is unstable
199 (would be different at a different point in the program).
200 The frame pointer, arg pointer, etc. are considered stable
201 (within one function) and so is anything marked `unchanging'. */
 203 int
 204 rtx_unstable_p (const_rtx x)
206 const RTX_CODE code = GET_CODE (x);
207 int i;
208 const char *fmt;
210 switch (code)
212 case MEM:
213 return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
215 case CONST:
216 CASE_CONST_ANY:
217 case SYMBOL_REF:
218 case LABEL_REF:
219 return 0;
221 case REG:
222 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
223 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
224 /* The arg pointer varies if it is not a fixed register. */
225 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
226 return 0;
227 /* ??? When call-clobbered, the value is stable modulo the restore
228 that must happen after a call. This currently screws up local-alloc
229 into believing that the restore is not needed. */
230 if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
231 return 0;
232 return 1;
234 case ASM_OPERANDS:
235 if (MEM_VOLATILE_P (x))
236 return 1;
238 /* Fall through. */
240 default:
241 break;
244 fmt = GET_RTX_FORMAT (code);
245 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
246 if (fmt[i] == 'e')
248 if (rtx_unstable_p (XEXP (x, i)))
249 return 1;
251 else if (fmt[i] == 'E')
253 int j;
254 for (j = 0; j < XVECLEN (x, i); j++)
255 if (rtx_unstable_p (XVECEXP (x, i, j)))
256 return 1;
259 return 0;
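/* Usage sketch (hypothetical caller): a pass that wants to hoist a
   load would typically require a stable address first.

     if (MEM_P (x) && !rtx_unstable_p (XEXP (x, 0)))
       ... the address names the same location at every point in the
           function ...  */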
262 /* Return 1 if X has a value that can vary even between two
263 executions of the program. 0 means X can be compared reliably
264 against certain constants or near-constants.
265 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
266 zero, we are slightly more conservative.
267 The frame pointer and the arg pointer are considered constant. */
269 bool
270 rtx_varies_p (const_rtx x, bool for_alias)
272 RTX_CODE code;
273 int i;
274 const char *fmt;
276 if (!x)
277 return 0;
279 code = GET_CODE (x);
280 switch (code)
282 case MEM:
283 return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
285 case CONST:
286 CASE_CONST_ANY:
287 case SYMBOL_REF:
288 case LABEL_REF:
289 return 0;
291 case REG:
292 /* Note that we have to test for the actual rtx used for the frame
293 and arg pointers and not just the register number in case we have
294 eliminated the frame and/or arg pointer and are using it
295 for pseudos. */
296 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
297 /* The arg pointer varies if it is not a fixed register. */
298 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
299 return 0;
300 if (x == pic_offset_table_rtx
301 /* ??? When call-clobbered, the value is stable modulo the restore
302 that must happen after a call. This currently screws up
303 local-alloc into believing that the restore is not needed, so we
304 must return 0 only if we are called from alias analysis. */
305 && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
306 return 0;
307 return 1;
309 case LO_SUM:
310 /* The operand 0 of a LO_SUM is considered constant
311 (in fact it is related specifically to operand 1)
312 during alias analysis. */
313 return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
314 || rtx_varies_p (XEXP (x, 1), for_alias);
316 case ASM_OPERANDS:
317 if (MEM_VOLATILE_P (x))
318 return 1;
320 /* Fall through. */
322 default:
323 break;
326 fmt = GET_RTX_FORMAT (code);
327 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
328 if (fmt[i] == 'e')
330 if (rtx_varies_p (XEXP (x, i), for_alias))
331 return 1;
333 else if (fmt[i] == 'E')
335 int j;
336 for (j = 0; j < XVECLEN (x, i); j++)
337 if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
338 return 1;
341 return 0;
 344 /* Compute an approximation for the offset between the registers
345 FROM and TO for the current function, as it was at the start
346 of the routine. */
348 static HOST_WIDE_INT
349 get_initial_register_offset (int from, int to)
351 #ifdef ELIMINABLE_REGS
352 static const struct elim_table_t
354 const int from;
355 const int to;
356 } table[] = ELIMINABLE_REGS;
357 HOST_WIDE_INT offset1, offset2;
358 unsigned int i, j;
360 if (to == from)
361 return 0;
363 /* It is not safe to call INITIAL_ELIMINATION_OFFSET
364 before the reload pass. We need to give at least
 365 an estimate of the resulting frame size.  */
366 if (! reload_completed)
368 offset1 = crtl->outgoing_args_size + get_frame_size ();
369 #if !STACK_GROWS_DOWNWARD
370 offset1 = - offset1;
371 #endif
372 if (to == STACK_POINTER_REGNUM)
373 return offset1;
374 else if (from == STACK_POINTER_REGNUM)
375 return - offset1;
376 else
377 return 0;
380 for (i = 0; i < ARRAY_SIZE (table); i++)
381 if (table[i].from == from)
383 if (table[i].to == to)
385 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
386 offset1);
387 return offset1;
389 for (j = 0; j < ARRAY_SIZE (table); j++)
391 if (table[j].to == to
392 && table[j].from == table[i].to)
394 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
395 offset1);
396 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
397 offset2);
398 return offset1 + offset2;
400 if (table[j].from == to
401 && table[j].to == table[i].to)
403 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
404 offset1);
405 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
406 offset2);
407 return offset1 - offset2;
411 else if (table[i].to == from)
413 if (table[i].from == to)
415 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
416 offset1);
417 return - offset1;
419 for (j = 0; j < ARRAY_SIZE (table); j++)
421 if (table[j].to == to
422 && table[j].from == table[i].from)
424 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
425 offset1);
426 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
427 offset2);
428 return - offset1 + offset2;
430 if (table[j].from == to
431 && table[j].to == table[i].from)
433 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
434 offset1);
435 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
436 offset2);
437 return - offset1 - offset2;
442 /* If the requested register combination was not found,
 443 try a simpler combination.  */
444 if (from == ARG_POINTER_REGNUM)
445 return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
446 else if (to == ARG_POINTER_REGNUM)
447 return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
448 else if (from == HARD_FRAME_POINTER_REGNUM)
449 return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
450 else if (to == HARD_FRAME_POINTER_REGNUM)
451 return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
452 else
453 return 0;
455 #else
456 HOST_WIDE_INT offset;
458 if (to == from)
459 return 0;
461 if (reload_completed)
463 INITIAL_FRAME_POINTER_OFFSET (offset);
465 else
467 offset = crtl->outgoing_args_size + get_frame_size ();
468 #if !STACK_GROWS_DOWNWARD
469 offset = - offset;
470 #endif
473 if (to == STACK_POINTER_REGNUM)
474 return offset;
475 else if (from == STACK_POINTER_REGNUM)
476 return - offset;
477 else
478 return 0;
480 #endif
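/* Usage sketch: the entry-time offset from the soft frame pointer to
   the hard frame pointer, valid for the current function (an estimate
   before reload, exact afterwards).

     HOST_WIDE_INT off
       = get_initial_register_offset (FRAME_POINTER_REGNUM,
                                      HARD_FRAME_POINTER_REGNUM);  */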
483 /* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
484 bytes can cause a trap. MODE is the mode of the MEM (not that of X) and
485 UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
486 references on strict alignment machines. */
488 static int
489 rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
490 machine_mode mode, bool unaligned_mems)
492 enum rtx_code code = GET_CODE (x);
494 /* The offset must be a multiple of the mode size if we are considering
495 unaligned memory references on strict alignment machines. */
496 if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
498 HOST_WIDE_INT actual_offset = offset;
500 #ifdef SPARC_STACK_BOUNDARY_HACK
501 /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
502 the real alignment of %sp. However, when it does this, the
503 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
504 if (SPARC_STACK_BOUNDARY_HACK
505 && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
506 actual_offset -= STACK_POINTER_OFFSET;
507 #endif
509 if (actual_offset % GET_MODE_SIZE (mode) != 0)
510 return 1;
513 switch (code)
515 case SYMBOL_REF:
516 if (SYMBOL_REF_WEAK (x))
517 return 1;
518 if (!CONSTANT_POOL_ADDRESS_P (x))
520 tree decl;
521 HOST_WIDE_INT decl_size;
523 if (offset < 0)
524 return 1;
525 if (size == 0)
526 size = GET_MODE_SIZE (mode);
527 if (size == 0)
528 return offset != 0;
530 /* If the size of the access or of the symbol is unknown,
531 assume the worst. */
532 decl = SYMBOL_REF_DECL (x);
534 /* Else check that the access is in bounds. TODO: restructure
535 expr_size/tree_expr_size/int_expr_size and just use the latter. */
536 if (!decl)
537 decl_size = -1;
538 else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
539 decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
540 ? tree_to_shwi (DECL_SIZE_UNIT (decl))
541 : -1);
542 else if (TREE_CODE (decl) == STRING_CST)
543 decl_size = TREE_STRING_LENGTH (decl);
544 else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
545 decl_size = int_size_in_bytes (TREE_TYPE (decl));
546 else
547 decl_size = -1;
549 return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
552 return 0;
554 case LABEL_REF:
555 return 0;
557 case REG:
558 /* Stack references are assumed not to trap, but we need to deal with
559 nonsensical offsets. */
560 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
561 || x == stack_pointer_rtx
562 /* The arg pointer varies if it is not a fixed register. */
563 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
565 #ifdef RED_ZONE_SIZE
566 HOST_WIDE_INT red_zone_size = RED_ZONE_SIZE;
567 #else
568 HOST_WIDE_INT red_zone_size = 0;
569 #endif
570 HOST_WIDE_INT stack_boundary = PREFERRED_STACK_BOUNDARY
571 / BITS_PER_UNIT;
572 HOST_WIDE_INT low_bound, high_bound;
574 if (size == 0)
575 size = GET_MODE_SIZE (mode);
577 if (x == frame_pointer_rtx)
579 if (FRAME_GROWS_DOWNWARD)
581 high_bound = STARTING_FRAME_OFFSET;
582 low_bound = high_bound - get_frame_size ();
584 else
586 low_bound = STARTING_FRAME_OFFSET;
587 high_bound = low_bound + get_frame_size ();
590 else if (x == hard_frame_pointer_rtx)
592 HOST_WIDE_INT sp_offset
593 = get_initial_register_offset (STACK_POINTER_REGNUM,
594 HARD_FRAME_POINTER_REGNUM);
595 HOST_WIDE_INT ap_offset
596 = get_initial_register_offset (ARG_POINTER_REGNUM,
597 HARD_FRAME_POINTER_REGNUM);
599 #if STACK_GROWS_DOWNWARD
600 low_bound = sp_offset - red_zone_size - stack_boundary;
601 high_bound = ap_offset
602 + FIRST_PARM_OFFSET (current_function_decl)
603 #if !ARGS_GROW_DOWNWARD
604 + crtl->args.size
605 #endif
606 + stack_boundary;
607 #else
608 high_bound = sp_offset + red_zone_size + stack_boundary;
609 low_bound = ap_offset
610 + FIRST_PARM_OFFSET (current_function_decl)
611 #if ARGS_GROW_DOWNWARD
612 - crtl->args.size
613 #endif
614 - stack_boundary;
615 #endif
617 else if (x == stack_pointer_rtx)
619 HOST_WIDE_INT ap_offset
620 = get_initial_register_offset (ARG_POINTER_REGNUM,
621 STACK_POINTER_REGNUM);
623 #if STACK_GROWS_DOWNWARD
624 low_bound = - red_zone_size - stack_boundary;
625 high_bound = ap_offset
626 + FIRST_PARM_OFFSET (current_function_decl)
627 #if !ARGS_GROW_DOWNWARD
628 + crtl->args.size
629 #endif
630 + stack_boundary;
631 #else
632 high_bound = red_zone_size + stack_boundary;
633 low_bound = ap_offset
634 + FIRST_PARM_OFFSET (current_function_decl)
635 #if ARGS_GROW_DOWNWARD
636 - crtl->args.size
637 #endif
638 - stack_boundary;
639 #endif
641 else
643 /* We assume that accesses are safe to at least the
644 next stack boundary.
645 Examples are varargs and __builtin_return_address. */
646 #if ARGS_GROW_DOWNWARD
647 high_bound = FIRST_PARM_OFFSET (current_function_decl)
648 + stack_boundary;
649 low_bound = FIRST_PARM_OFFSET (current_function_decl)
650 - crtl->args.size - stack_boundary;
651 #else
652 low_bound = FIRST_PARM_OFFSET (current_function_decl)
653 - stack_boundary;
654 high_bound = FIRST_PARM_OFFSET (current_function_decl)
655 + crtl->args.size + stack_boundary;
656 #endif
659 if (offset >= low_bound && offset <= high_bound - size)
660 return 0;
661 return 1;
663 /* All of the virtual frame registers are stack references. */
664 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
665 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
666 return 0;
667 return 1;
669 case CONST:
670 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
671 mode, unaligned_mems);
673 case PLUS:
674 /* An address is assumed not to trap if:
675 - it is the pic register plus a constant. */
676 if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
677 return 0;
679 /* - or it is an address that can't trap plus a constant integer. */
680 if (CONST_INT_P (XEXP (x, 1))
681 && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
682 size, mode, unaligned_mems))
683 return 0;
685 return 1;
687 case LO_SUM:
688 case PRE_MODIFY:
689 return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
690 mode, unaligned_mems);
692 case PRE_DEC:
693 case PRE_INC:
694 case POST_DEC:
695 case POST_INC:
696 case POST_MODIFY:
697 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
698 mode, unaligned_mems);
700 default:
701 break;
 704 /* If it isn't one of the cases above, it can cause a trap.  */
705 return 1;
708 /* Return nonzero if the use of X as an address in a MEM can cause a trap. */
 710 int
 711 rtx_addr_can_trap_p (const_rtx x)
713 return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
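/* Usage sketch (hypothetical caller, in the style of speculative code
   motion): a load may only be executed ahead of its guarding branch
   when its address cannot fault.

     if (MEM_P (mem) && !rtx_addr_can_trap_p (XEXP (mem, 0)))
       ... the load is safe to speculate ...  */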
716 /* Return true if X is an address that is known to not be zero. */
718 bool
719 nonzero_address_p (const_rtx x)
721 const enum rtx_code code = GET_CODE (x);
723 switch (code)
725 case SYMBOL_REF:
726 return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);
728 case LABEL_REF:
729 return true;
731 case REG:
732 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
733 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
734 || x == stack_pointer_rtx
735 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
736 return true;
737 /* All of the virtual frame registers are stack references. */
738 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
739 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
740 return true;
741 return false;
743 case CONST:
744 return nonzero_address_p (XEXP (x, 0));
746 case PLUS:
747 /* Handle PIC references. */
748 if (XEXP (x, 0) == pic_offset_table_rtx
749 && CONSTANT_P (XEXP (x, 1)))
750 return true;
751 return false;
753 case PRE_MODIFY:
754 /* Similar to the above; allow positive offsets. Further, since
755 auto-inc is only allowed in memories, the register must be a
756 pointer. */
757 if (CONST_INT_P (XEXP (x, 1))
758 && INTVAL (XEXP (x, 1)) > 0)
759 return true;
760 return nonzero_address_p (XEXP (x, 0));
762 case PRE_INC:
763 /* Similarly. Further, the offset is always positive. */
764 return true;
766 case PRE_DEC:
767 case POST_DEC:
768 case POST_INC:
769 case POST_MODIFY:
770 return nonzero_address_p (XEXP (x, 0));
772 case LO_SUM:
773 return nonzero_address_p (XEXP (x, 1));
775 default:
776 break;
 779 /* If it isn't one of the cases above, it might be zero.  */
780 return false;
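/* Usage sketch: comparisons of a known-nonzero address against zero
   can be folded ("cond" is a hypothetical comparison rtx).

     if (XEXP (cond, 1) == const0_rtx
         && nonzero_address_p (XEXP (cond, 0)))
       ... an EQ test is known false, an NE test known true ...  */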
783 /* Return 1 if X refers to a memory location whose address
784 cannot be compared reliably with constant addresses,
785 or if X refers to a BLKmode memory object.
786 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
787 zero, we are slightly more conservative. */
789 bool
790 rtx_addr_varies_p (const_rtx x, bool for_alias)
792 enum rtx_code code;
793 int i;
794 const char *fmt;
796 if (x == 0)
797 return 0;
799 code = GET_CODE (x);
800 if (code == MEM)
801 return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
803 fmt = GET_RTX_FORMAT (code);
804 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
805 if (fmt[i] == 'e')
807 if (rtx_addr_varies_p (XEXP (x, i), for_alias))
808 return 1;
810 else if (fmt[i] == 'E')
812 int j;
813 for (j = 0; j < XVECLEN (x, i); j++)
814 if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
815 return 1;
817 return 0;
820 /* Return the CALL in X if there is one. */
 822 rtx
 823 get_call_rtx_from (rtx x)
825 if (INSN_P (x))
826 x = PATTERN (x);
827 if (GET_CODE (x) == PARALLEL)
828 x = XVECEXP (x, 0, 0);
829 if (GET_CODE (x) == SET)
830 x = SET_SRC (x);
831 if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
832 return x;
833 return NULL_RTX;
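/* Usage sketch: recovering the callee from a CALL_INSN.

     rtx call = get_call_rtx_from (insn);
     if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
       ... a direct call; the inner XEXP selects the MEM, the outer
           its address ...  */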
836 /* Return the value of the integer term in X, if one is apparent;
837 otherwise return 0.
838 Only obvious integer terms are detected.
839 This is used in cse.c with the `related_value' field. */
841 HOST_WIDE_INT
842 get_integer_term (const_rtx x)
844 if (GET_CODE (x) == CONST)
845 x = XEXP (x, 0);
847 if (GET_CODE (x) == MINUS
848 && CONST_INT_P (XEXP (x, 1)))
849 return - INTVAL (XEXP (x, 1));
850 if (GET_CODE (x) == PLUS
851 && CONST_INT_P (XEXP (x, 1)))
852 return INTVAL (XEXP (x, 1));
853 return 0;
856 /* If X is a constant, return the value sans apparent integer term;
857 otherwise return 0.
858 Only obvious integer terms are detected. */
 860 rtx
 861 get_related_value (const_rtx x)
863 if (GET_CODE (x) != CONST)
864 return 0;
865 x = XEXP (x, 0);
866 if (GET_CODE (x) == PLUS
867 && CONST_INT_P (XEXP (x, 1)))
868 return XEXP (x, 0);
869 else if (GET_CODE (x) == MINUS
870 && CONST_INT_P (XEXP (x, 1)))
871 return XEXP (x, 0);
872 return 0;
875 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
876 to somewhere in the same object or object_block as SYMBOL. */
878 bool
879 offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
881 tree decl;
883 if (GET_CODE (symbol) != SYMBOL_REF)
884 return false;
886 if (offset == 0)
887 return true;
889 if (offset > 0)
891 if (CONSTANT_POOL_ADDRESS_P (symbol)
892 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
893 return true;
895 decl = SYMBOL_REF_DECL (symbol);
896 if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
897 return true;
900 if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
901 && SYMBOL_REF_BLOCK (symbol)
902 && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
903 && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
904 < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
905 return true;
907 return false;
910 /* Split X into a base and a constant offset, storing them in *BASE_OUT
911 and *OFFSET_OUT respectively. */
913 void
914 split_const (rtx x, rtx *base_out, rtx *offset_out)
916 if (GET_CODE (x) == CONST)
918 x = XEXP (x, 0);
919 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
921 *base_out = XEXP (x, 0);
922 *offset_out = XEXP (x, 1);
923 return;
926 *base_out = x;
927 *offset_out = const0_rtx;
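/* Usage sketch: for X == (const (plus (symbol_ref "a") (const_int 12)))
   this stores the SYMBOL_REF in *BASE_OUT and (const_int 12) in
   *OFFSET_OUT; any other rtx comes back unchanged with a zero offset.

     rtx base, offset;
     split_const (x, &base, &offset);
     HOST_WIDE_INT delta = INTVAL (offset);  */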
930 /* Return the number of places FIND appears within X. If COUNT_DEST is
931 zero, we do not count occurrences inside the destination of a SET. */
 933 int
 934 count_occurrences (const_rtx x, const_rtx find, int count_dest)
936 int i, j;
937 enum rtx_code code;
938 const char *format_ptr;
939 int count;
941 if (x == find)
942 return 1;
944 code = GET_CODE (x);
946 switch (code)
948 case REG:
949 CASE_CONST_ANY:
950 case SYMBOL_REF:
951 case CODE_LABEL:
952 case PC:
953 case CC0:
954 return 0;
956 case EXPR_LIST:
957 count = count_occurrences (XEXP (x, 0), find, count_dest);
958 if (XEXP (x, 1))
959 count += count_occurrences (XEXP (x, 1), find, count_dest);
960 return count;
962 case MEM:
963 if (MEM_P (find) && rtx_equal_p (x, find))
964 return 1;
965 break;
967 case SET:
968 if (SET_DEST (x) == find && ! count_dest)
969 return count_occurrences (SET_SRC (x), find, count_dest);
970 break;
972 default:
973 break;
976 format_ptr = GET_RTX_FORMAT (code);
977 count = 0;
979 for (i = 0; i < GET_RTX_LENGTH (code); i++)
981 switch (*format_ptr++)
983 case 'e':
984 count += count_occurrences (XEXP (x, i), find, count_dest);
985 break;
987 case 'E':
988 for (j = 0; j < XVECLEN (x, i); j++)
989 count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
990 break;
993 return count;
997 /* Return TRUE if OP is a register or subreg of a register that
998 holds an unsigned quantity. Otherwise, return FALSE. */
1000 bool
1001 unsigned_reg_p (rtx op)
1003 if (REG_P (op)
1004 && REG_EXPR (op)
1005 && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
1006 return true;
1008 if (GET_CODE (op) == SUBREG
1009 && SUBREG_PROMOTED_SIGN (op))
1010 return true;
1012 return false;
1016 /* Nonzero if register REG appears somewhere within IN.
1017 Also works if REG is not a register; in this case it checks
1018 for a subexpression of IN that is Lisp "equal" to REG. */
 1020 int
 1021 reg_mentioned_p (const_rtx reg, const_rtx in)
1023 const char *fmt;
1024 int i;
1025 enum rtx_code code;
1027 if (in == 0)
1028 return 0;
1030 if (reg == in)
1031 return 1;
1033 if (GET_CODE (in) == LABEL_REF)
1034 return reg == LABEL_REF_LABEL (in);
1036 code = GET_CODE (in);
1038 switch (code)
1040 /* Compare registers by number. */
1041 case REG:
1042 return REG_P (reg) && REGNO (in) == REGNO (reg);
1044 /* These codes have no constituent expressions
1045 and are unique. */
1046 case SCRATCH:
1047 case CC0:
1048 case PC:
1049 return 0;
1051 CASE_CONST_ANY:
1052 /* These are kept unique for a given value. */
1053 return 0;
1055 default:
1056 break;
1059 if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
1060 return 1;
1062 fmt = GET_RTX_FORMAT (code);
1064 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1066 if (fmt[i] == 'E')
1068 int j;
1069 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
1070 if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
1071 return 1;
1073 else if (fmt[i] == 'e'
1074 && reg_mentioned_p (reg, XEXP (in, i)))
1075 return 1;
1077 return 0;
1080 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
1081 no CODE_LABEL insn. */
 1083 int
 1084 no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
1086 rtx_insn *p;
1087 if (beg == end)
1088 return 0;
1089 for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
1090 if (LABEL_P (p))
1091 return 0;
1092 return 1;
1095 /* Nonzero if register REG is used in an insn between
1096 FROM_INSN and TO_INSN (exclusive of those two). */
 1098 int
 1099 reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
1100 const rtx_insn *to_insn)
1102 rtx_insn *insn;
1104 if (from_insn == to_insn)
1105 return 0;
1107 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1108 if (NONDEBUG_INSN_P (insn)
1109 && (reg_overlap_mentioned_p (reg, PATTERN (insn))
1110 || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
1111 return 1;
1112 return 0;
1115 /* Nonzero if the old value of X, a register, is referenced in BODY. If X
1116 is entirely replaced by a new value and the only use is as a SET_DEST,
1117 we do not consider it a reference. */
 1119 int
 1120 reg_referenced_p (const_rtx x, const_rtx body)
1122 int i;
1124 switch (GET_CODE (body))
1126 case SET:
1127 if (reg_overlap_mentioned_p (x, SET_SRC (body)))
1128 return 1;
1130 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
1131 of a REG that occupies all of the REG, the insn references X if
1132 it is mentioned in the destination. */
1133 if (GET_CODE (SET_DEST (body)) != CC0
1134 && GET_CODE (SET_DEST (body)) != PC
1135 && !REG_P (SET_DEST (body))
1136 && ! (GET_CODE (SET_DEST (body)) == SUBREG
1137 && REG_P (SUBREG_REG (SET_DEST (body)))
1138 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
1139 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
1140 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
1141 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
1142 && reg_overlap_mentioned_p (x, SET_DEST (body)))
1143 return 1;
1144 return 0;
1146 case ASM_OPERANDS:
1147 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1148 if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
1149 return 1;
1150 return 0;
1152 case CALL:
1153 case USE:
1154 case IF_THEN_ELSE:
1155 return reg_overlap_mentioned_p (x, body);
1157 case TRAP_IF:
1158 return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
1160 case PREFETCH:
1161 return reg_overlap_mentioned_p (x, XEXP (body, 0));
1163 case UNSPEC:
1164 case UNSPEC_VOLATILE:
1165 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1166 if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
1167 return 1;
1168 return 0;
1170 case PARALLEL:
1171 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1172 if (reg_referenced_p (x, XVECEXP (body, 0, i)))
1173 return 1;
1174 return 0;
1176 case CLOBBER:
1177 if (MEM_P (XEXP (body, 0)))
1178 if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
1179 return 1;
1180 return 0;
1182 case COND_EXEC:
1183 if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
1184 return 1;
1185 return reg_referenced_p (x, COND_EXEC_CODE (body));
1187 default:
1188 return 0;
1192 /* Nonzero if register REG is set or clobbered in an insn between
1193 FROM_INSN and TO_INSN (exclusive of those two). */
 1195 int
 1196 reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
1197 const rtx_insn *to_insn)
1199 const rtx_insn *insn;
1201 if (from_insn == to_insn)
1202 return 0;
1204 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1205 if (INSN_P (insn) && reg_set_p (reg, insn))
1206 return 1;
1207 return 0;
1210 /* Internals of reg_set_between_p. */
 1211 bool
 1212 reg_set_p (const_rtx reg, const_rtx insn)
1214 /* After delay slot handling, call and branch insns might be in a
1215 sequence. Check all the elements there. */
1216 if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
1218 for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
1219 if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
1220 return true;
1222 return false;
1225 /* We can be passed an insn or part of one. If we are passed an insn,
1226 check if a side-effect of the insn clobbers REG. */
1227 if (INSN_P (insn)
1228 && (FIND_REG_INC_NOTE (insn, reg)
1229 || (CALL_P (insn)
1230 && ((REG_P (reg)
1231 && REGNO (reg) < FIRST_PSEUDO_REGISTER
1232 && overlaps_hard_reg_set_p (regs_invalidated_by_call,
1233 GET_MODE (reg), REGNO (reg)))
1234 || MEM_P (reg)
1235 || find_reg_fusage (insn, CLOBBER, reg)))))
1236 return true;
1238 return set_of (reg, insn) != NULL_RTX;
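/* Usage sketch: reg_set_p asks whether INSN could change REG
   (including call clobbers and auto-increment side effects), while
   reg_referenced_p above asks only whether the old value is read.

     if (!reg_set_p (reg, insn)
         && !reg_referenced_p (reg, PATTERN (insn)))
       ... INSN neither reads nor writes REG ...  */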
1241 /* Similar to reg_set_between_p, but check all registers in X. Return 0
1242 only if none of them are modified between START and END. Return 1 if
1243 X contains a MEM; this routine does use memory aliasing. */
 1245 int
 1246 modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
1248 const enum rtx_code code = GET_CODE (x);
1249 const char *fmt;
1250 int i, j;
1251 rtx_insn *insn;
1253 if (start == end)
1254 return 0;
1256 switch (code)
1258 CASE_CONST_ANY:
1259 case CONST:
1260 case SYMBOL_REF:
1261 case LABEL_REF:
1262 return 0;
1264 case PC:
1265 case CC0:
1266 return 1;
1268 case MEM:
1269 if (modified_between_p (XEXP (x, 0), start, end))
1270 return 1;
1271 if (MEM_READONLY_P (x))
1272 return 0;
1273 for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
1274 if (memory_modified_in_insn_p (x, insn))
1275 return 1;
1276 return 0;
1277 break;
1279 case REG:
1280 return reg_set_between_p (x, start, end);
1282 default:
1283 break;
1286 fmt = GET_RTX_FORMAT (code);
1287 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1289 if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
1290 return 1;
1292 else if (fmt[i] == 'E')
1293 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1294 if (modified_between_p (XVECEXP (x, i, j), start, end))
1295 return 1;
1298 return 0;
1301 /* Similar to reg_set_p, but check all registers in X. Return 0 only if none
1302 of them are modified in INSN. Return 1 if X contains a MEM; this routine
1303 does use memory aliasing. */
 1305 int
 1306 modified_in_p (const_rtx x, const_rtx insn)
1308 const enum rtx_code code = GET_CODE (x);
1309 const char *fmt;
1310 int i, j;
1312 switch (code)
1314 CASE_CONST_ANY:
1315 case CONST:
1316 case SYMBOL_REF:
1317 case LABEL_REF:
1318 return 0;
1320 case PC:
1321 case CC0:
1322 return 1;
1324 case MEM:
1325 if (modified_in_p (XEXP (x, 0), insn))
1326 return 1;
1327 if (MEM_READONLY_P (x))
1328 return 0;
1329 if (memory_modified_in_insn_p (x, insn))
1330 return 1;
1331 return 0;
1332 break;
1334 case REG:
1335 return reg_set_p (x, insn);
1337 default:
1338 break;
1341 fmt = GET_RTX_FORMAT (code);
1342 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1344 if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
1345 return 1;
1347 else if (fmt[i] == 'E')
1348 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1349 if (modified_in_p (XVECEXP (x, i, j), insn))
1350 return 1;
1353 return 0;
1356 /* Helper function for set_of. */
1357 struct set_of_data
1359 const_rtx found;
1360 const_rtx pat;
1363 static void
1364 set_of_1 (rtx x, const_rtx pat, void *data1)
1366 struct set_of_data *const data = (struct set_of_data *) (data1);
1367 if (rtx_equal_p (x, data->pat)
1368 || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
1369 data->found = pat;
 1372 /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
1373 (either directly or via STRICT_LOW_PART and similar modifiers). */
1374 const_rtx
1375 set_of (const_rtx pat, const_rtx insn)
1377 struct set_of_data data;
1378 data.found = NULL_RTX;
1379 data.pat = pat;
1380 note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1381 return data.found;
 1384 /* Add all hard registers in X to *PSET.  */
1385 void
1386 find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
1388 subrtx_iterator::array_type array;
1389 FOR_EACH_SUBRTX (iter, array, x, NONCONST)
1391 const_rtx x = *iter;
1392 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
1393 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1397 /* This function, called through note_stores, collects sets and
1398 clobbers of hard registers in a HARD_REG_SET, which is pointed to
1399 by DATA. */
1400 void
1401 record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
1403 HARD_REG_SET *pset = (HARD_REG_SET *)data;
1404 if (REG_P (x) && HARD_REGISTER_P (x))
1405 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1408 /* Examine INSN, and compute the set of hard registers written by it.
1409 Store it in *PSET. Should only be called after reload. */
1410 void
1411 find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
1413 rtx link;
1415 CLEAR_HARD_REG_SET (*pset);
1416 note_stores (PATTERN (insn), record_hard_reg_sets, pset);
1417 if (CALL_P (insn))
1419 if (implicit)
1420 IOR_HARD_REG_SET (*pset, call_used_reg_set);
1422 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1423 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1425 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1426 if (REG_NOTE_KIND (link) == REG_INC)
1427 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1430 /* Like record_hard_reg_sets, but called through note_uses. */
1431 void
1432 record_hard_reg_uses (rtx *px, void *data)
1434 find_all_hard_regs (*px, (HARD_REG_SET *) data);
1437 /* Given an INSN, return a SET expression if this insn has only a single SET.
 1438 It may also have CLOBBERs, USEs, or SETs whose output
1439 will not be used, which we ignore. */
 1441 rtx
 1442 single_set_2 (const rtx_insn *insn, const_rtx pat)
1444 rtx set = NULL;
1445 int set_verified = 1;
1446 int i;
1448 if (GET_CODE (pat) == PARALLEL)
1450 for (i = 0; i < XVECLEN (pat, 0); i++)
1452 rtx sub = XVECEXP (pat, 0, i);
1453 switch (GET_CODE (sub))
1455 case USE:
1456 case CLOBBER:
1457 break;
1459 case SET:
1460 /* We can consider insns having multiple sets, where all
1461 but one are dead as single set insns. In common case
1462 only single set is present in the pattern so we want
1463 to avoid checking for REG_UNUSED notes unless necessary.
1465 When we reach set first time, we just expect this is
1466 the single set we are looking for and only when more
1467 sets are found in the insn, we check them. */
1468 if (!set_verified)
1470 if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1471 && !side_effects_p (set))
1472 set = NULL;
1473 else
1474 set_verified = 1;
1476 if (!set)
1477 set = sub, set_verified = 0;
1478 else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1479 || side_effects_p (sub))
1480 return NULL_RTX;
1481 break;
1483 default:
1484 return NULL_RTX;
1488 return set;
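/* Usage sketch: most callers reach this through the single_set wrapper
   (an inline in rtl.h that fast-paths a bare SET pattern).

     rtx set = single_set (insn);
     if (set && REG_P (SET_DEST (set)))
       ... INSN is effectively one assignment, storing SET_SRC (set)
           into a single register ...  */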
1491 /* Given an INSN, return nonzero if it has more than one SET, else return
1492 zero. */
 1494 int
 1495 multiple_sets (const_rtx insn)
1497 int found;
1498 int i;
1500 /* INSN must be an insn. */
1501 if (! INSN_P (insn))
1502 return 0;
1504 /* Only a PARALLEL can have multiple SETs. */
1505 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1507 for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1508 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1510 /* If we have already found a SET, then return now. */
1511 if (found)
1512 return 1;
1513 else
1514 found = 1;
1518 /* Either zero or one SET. */
1519 return 0;
1522 /* Return nonzero if the destination of SET equals the source
1523 and there are no side effects. */
 1525 int
 1526 set_noop_p (const_rtx set)
1528 rtx src = SET_SRC (set);
1529 rtx dst = SET_DEST (set);
1531 if (dst == pc_rtx && src == pc_rtx)
1532 return 1;
1534 if (MEM_P (dst) && MEM_P (src))
1535 return rtx_equal_p (dst, src) && !side_effects_p (dst);
1537 if (GET_CODE (dst) == ZERO_EXTRACT)
1538 return rtx_equal_p (XEXP (dst, 0), src)
1539 && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1540 && !side_effects_p (src);
1542 if (GET_CODE (dst) == STRICT_LOW_PART)
1543 dst = XEXP (dst, 0);
1545 if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1547 if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
1548 return 0;
1549 src = SUBREG_REG (src);
1550 dst = SUBREG_REG (dst);
 1553 /* It is a NOOP if the destination overlaps the selected source
 1554 vector elements.  */
1555 if (GET_CODE (src) == VEC_SELECT
1556 && REG_P (XEXP (src, 0)) && REG_P (dst)
1557 && HARD_REGISTER_P (XEXP (src, 0))
1558 && HARD_REGISTER_P (dst))
1560 int i;
1561 rtx par = XEXP (src, 1);
1562 rtx src0 = XEXP (src, 0);
1563 int c0 = INTVAL (XVECEXP (par, 0, 0));
1564 HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
1566 for (i = 1; i < XVECLEN (par, 0); i++)
1567 if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
1568 return 0;
1569 return
1570 simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
1571 offset, GET_MODE (dst)) == (int) REGNO (dst);
1574 return (REG_P (src) && REG_P (dst)
1575 && REGNO (src) == REGNO (dst));
1578 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1579 value to itself. */
 1581 int
 1582 noop_move_p (const rtx_insn *insn)
1584 rtx pat = PATTERN (insn);
1586 if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1587 return 1;
1589 /* Insns carrying these notes are useful later on. */
1590 if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1591 return 0;
1593 /* Check the code to be executed for COND_EXEC. */
1594 if (GET_CODE (pat) == COND_EXEC)
1595 pat = COND_EXEC_CODE (pat);
1597 if (GET_CODE (pat) == SET && set_noop_p (pat))
1598 return 1;
1600 if (GET_CODE (pat) == PARALLEL)
1602 int i;
1603 /* If nothing but SETs of registers to themselves,
1604 this insn can also be deleted. */
1605 for (i = 0; i < XVECLEN (pat, 0); i++)
1607 rtx tem = XVECEXP (pat, 0, i);
1609 if (GET_CODE (tem) == USE
1610 || GET_CODE (tem) == CLOBBER)
1611 continue;
1613 if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1614 return 0;
1617 return 1;
1619 return 0;
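/* Usage sketch: cleanup passes delete insns for which noop_move_p
   holds, e.g. a (set (reg 3) (reg 3)) left behind by coalescing
   (delete_insn is from cfgrtl.c).

     if (noop_move_p (insn))
       delete_insn (insn);  */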
1623 /* Return nonzero if register in range [REGNO, ENDREGNO)
1624 appears either explicitly or implicitly in X
1625 other than being stored into.
1627 References contained within the substructure at LOC do not count.
1628 LOC may be zero, meaning don't ignore anything. */
1630 bool
1631 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1632 rtx *loc)
1634 int i;
1635 unsigned int x_regno;
1636 RTX_CODE code;
1637 const char *fmt;
1639 repeat:
 1640 /* The contents of a REG_NONNEG note are always zero, so we must come here
1641 upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
1642 if (x == 0)
1643 return false;
1645 code = GET_CODE (x);
1647 switch (code)
1649 case REG:
1650 x_regno = REGNO (x);
 1652 /* If we are modifying the stack, frame, or argument pointer, it will
1653 clobber a virtual register. In fact, we could be more precise,
1654 but it isn't worth it. */
1655 if ((x_regno == STACK_POINTER_REGNUM
1656 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1657 && x_regno == ARG_POINTER_REGNUM)
1658 || x_regno == FRAME_POINTER_REGNUM)
1659 && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1660 return true;
1662 return endregno > x_regno && regno < END_REGNO (x);
1664 case SUBREG:
1665 /* If this is a SUBREG of a hard reg, we can see exactly which
1666 registers are being modified. Otherwise, handle normally. */
1667 if (REG_P (SUBREG_REG (x))
1668 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1670 unsigned int inner_regno = subreg_regno (x);
1671 unsigned int inner_endregno
1672 = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1673 ? subreg_nregs (x) : 1);
1675 return endregno > inner_regno && regno < inner_endregno;
1677 break;
1679 case CLOBBER:
1680 case SET:
1681 if (&SET_DEST (x) != loc
1682 /* Note setting a SUBREG counts as referring to the REG it is in for
1683 a pseudo but not for hard registers since we can
1684 treat each word individually. */
1685 && ((GET_CODE (SET_DEST (x)) == SUBREG
1686 && loc != &SUBREG_REG (SET_DEST (x))
1687 && REG_P (SUBREG_REG (SET_DEST (x)))
1688 && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1689 && refers_to_regno_p (regno, endregno,
1690 SUBREG_REG (SET_DEST (x)), loc))
1691 || (!REG_P (SET_DEST (x))
1692 && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1693 return true;
1695 if (code == CLOBBER || loc == &SET_SRC (x))
1696 return false;
1697 x = SET_SRC (x);
1698 goto repeat;
1700 default:
1701 break;
1704 /* X does not match, so try its subexpressions. */
1706 fmt = GET_RTX_FORMAT (code);
1707 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1709 if (fmt[i] == 'e' && loc != &XEXP (x, i))
1711 if (i == 0)
1713 x = XEXP (x, 0);
1714 goto repeat;
1716 else
1717 if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1718 return true;
1720 else if (fmt[i] == 'E')
1722 int j;
1723 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1724 if (loc != &XVECEXP (x, i, j)
1725 && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1726 return true;
1729 return false;
1732 /* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
1733 we check if any register number in X conflicts with the relevant register
1734 numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
1735 contains a MEM (we don't bother checking for memory addresses that can't
 1736 conflict because we expect this to be a rare case).  */
 1738 int
 1739 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1741 unsigned int regno, endregno;
1743 /* If either argument is a constant, then modifying X can not
1744 affect IN. Here we look at IN, we can profitably combine
1745 CONSTANT_P (x) with the switch statement below. */
1746 if (CONSTANT_P (in))
1747 return 0;
1749 recurse:
1750 switch (GET_CODE (x))
1752 case STRICT_LOW_PART:
1753 case ZERO_EXTRACT:
1754 case SIGN_EXTRACT:
1755 /* Overly conservative. */
1756 x = XEXP (x, 0);
1757 goto recurse;
1759 case SUBREG:
1760 regno = REGNO (SUBREG_REG (x));
1761 if (regno < FIRST_PSEUDO_REGISTER)
1762 regno = subreg_regno (x);
1763 endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1764 ? subreg_nregs (x) : 1);
1765 goto do_reg;
1767 case REG:
1768 regno = REGNO (x);
1769 endregno = END_REGNO (x);
1770 do_reg:
1771 return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1773 case MEM:
1775 const char *fmt;
1776 int i;
1778 if (MEM_P (in))
1779 return 1;
1781 fmt = GET_RTX_FORMAT (GET_CODE (in));
1782 for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1783 if (fmt[i] == 'e')
1785 if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1786 return 1;
1788 else if (fmt[i] == 'E')
1790 int j;
1791 for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1792 if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1793 return 1;
1796 return 0;
1799 case SCRATCH:
1800 case PC:
1801 case CC0:
1802 return reg_mentioned_p (x, in);
1804 case PARALLEL:
1806 int i;
1808 /* If any register in here refers to it we return true. */
1809 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1810 if (XEXP (XVECEXP (x, 0, i), 0) != 0
1811 && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1812 return 1;
1813 return 0;
1816 default:
1817 gcc_assert (CONSTANT_P (x));
1818 return 0;
1822 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1823 (X would be the pattern of an insn). DATA is an arbitrary pointer,
1824 ignored by note_stores, but passed to FUN.
1826 FUN receives three arguments:
1827 1. the REG, MEM, CC0 or PC being stored in or clobbered,
1828 2. the SET or CLOBBER rtx that does the store,
1829 3. the pointer DATA provided to note_stores.
1831 If the item being stored in or clobbered is a SUBREG of a hard register,
1832 the SUBREG will be passed. */
1834 void
1835 note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
1837 int i;
1839 if (GET_CODE (x) == COND_EXEC)
1840 x = COND_EXEC_CODE (x);
1842 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1844 rtx dest = SET_DEST (x);
1846 while ((GET_CODE (dest) == SUBREG
1847 && (!REG_P (SUBREG_REG (dest))
1848 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1849 || GET_CODE (dest) == ZERO_EXTRACT
1850 || GET_CODE (dest) == STRICT_LOW_PART)
1851 dest = XEXP (dest, 0);
1853 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1854 each of whose first operand is a register. */
1855 if (GET_CODE (dest) == PARALLEL)
1857 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1858 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1859 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1861 else
1862 (*fun) (dest, x, data);
1865 else if (GET_CODE (x) == PARALLEL)
1866 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1867 note_stores (XVECEXP (x, 0, i), fun, data);
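/* Usage sketch: collecting the hard registers stored by an insn, in
   the style of record_hard_reg_sets earlier in this file ("mark_dest"
   is a hypothetical callback).

     static void
     mark_dest (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
     {
       if (REG_P (x) && HARD_REGISTER_P (x))
         add_to_hard_reg_set ((HARD_REG_SET *) data, GET_MODE (x),
                              REGNO (x));
     }

     HARD_REG_SET written;
     CLEAR_HARD_REG_SET (written);
     note_stores (PATTERN (insn), mark_dest, &written);  */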
 1870 /* Like note_stores, but call FUN for each expression that is being
1871 referenced in PBODY, a pointer to the PATTERN of an insn. We only call
1872 FUN for each expression, not any interior subexpressions. FUN receives a
1873 pointer to the expression and the DATA passed to this function.
1875 Note that this is not quite the same test as that done in reg_referenced_p
1876 since that considers something as being referenced if it is being
1877 partially set, while we do not. */
1879 void
1880 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1882 rtx body = *pbody;
1883 int i;
1885 switch (GET_CODE (body))
1887 case COND_EXEC:
1888 (*fun) (&COND_EXEC_TEST (body), data);
1889 note_uses (&COND_EXEC_CODE (body), fun, data);
1890 return;
1892 case PARALLEL:
1893 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1894 note_uses (&XVECEXP (body, 0, i), fun, data);
1895 return;
1897 case SEQUENCE:
1898 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1899 note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1900 return;
1902 case USE:
1903 (*fun) (&XEXP (body, 0), data);
1904 return;
1906 case ASM_OPERANDS:
1907 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1908 (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
1909 return;
1911 case TRAP_IF:
1912 (*fun) (&TRAP_CONDITION (body), data);
1913 return;
1915 case PREFETCH:
1916 (*fun) (&XEXP (body, 0), data);
1917 return;
1919 case UNSPEC:
1920 case UNSPEC_VOLATILE:
1921 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1922 (*fun) (&XVECEXP (body, 0, i), data);
1923 return;
1925 case CLOBBER:
1926 if (MEM_P (XEXP (body, 0)))
1927 (*fun) (&XEXP (XEXP (body, 0), 0), data);
1928 return;
1930 case SET:
1932 rtx dest = SET_DEST (body);
 1934 /* For a SET we visit everything in the source, the position and length
 1935 operands of a ZERO_EXTRACT destination, and the address of a MEM destination.  */
1936 (*fun) (&SET_SRC (body), data);
1938 if (GET_CODE (dest) == ZERO_EXTRACT)
1940 (*fun) (&XEXP (dest, 1), data);
1941 (*fun) (&XEXP (dest, 2), data);
1944 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
1945 dest = XEXP (dest, 0);
1947 if (MEM_P (dest))
1948 (*fun) (&XEXP (dest, 0), data);
1950 return;
1952 default:
1953 /* All the other possibilities never store. */
1954 (*fun) (pbody, data);
1955 return;
1959 /* Return nonzero if X's old contents don't survive after INSN.
1960 This will be true if X is (cc0) or if X is a register and
1961 X dies in INSN or because INSN entirely sets X.
1963 "Entirely set" means set directly and not through a SUBREG, or
1964 ZERO_EXTRACT, so no trace of the old contents remains.
1965 Likewise, REG_INC does not count.
1967 REG may be a hard or pseudo reg. Renumbering is not taken into account,
1968 but for this use that makes no difference, since regs don't overlap
1969 during their lifetimes. Therefore, this function may be used
1970 at any time after deaths have been computed.
1972 If REG is a hard reg that occupies multiple machine registers, this
1973 function will only return 1 if each of those registers will be replaced
1974 by INSN. */
 1976 int
 1977 dead_or_set_p (const_rtx insn, const_rtx x)
1979 unsigned int regno, end_regno;
1980 unsigned int i;
1982 /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
1983 if (GET_CODE (x) == CC0)
1984 return 1;
1986 gcc_assert (REG_P (x));
1988 regno = REGNO (x);
1989 end_regno = END_REGNO (x);
1990 for (i = regno; i < end_regno; i++)
1991 if (! dead_or_set_regno_p (insn, i))
1992 return 0;
1994 return 1;
1997 /* Return TRUE iff DEST is a register or subreg of a register and
1998 doesn't change the number of words of the inner register, and any
1999 part of the register is TEST_REGNO. */
2001 static bool
2002 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
2004 unsigned int regno, endregno;
2006 if (GET_CODE (dest) == SUBREG
2007 && (((GET_MODE_SIZE (GET_MODE (dest))
2008 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
2009 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
2010 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
2011 dest = SUBREG_REG (dest);
2013 if (!REG_P (dest))
2014 return false;
2016 regno = REGNO (dest);
2017 endregno = END_REGNO (dest);
2018 return (test_regno >= regno && test_regno < endregno);
2021 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
2022 any member matches the covers_regno_no_parallel_p criteria. */
2024 static bool
2025 covers_regno_p (const_rtx dest, unsigned int test_regno)
2027 if (GET_CODE (dest) == PARALLEL)
2029 /* Some targets place small structures in registers for return
2030 values of functions, and those registers are wrapped in
2031 PARALLELs that we may see as the destination of a SET. */
2032 int i;
2034 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2036 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
2037 if (inner != NULL_RTX
2038 && covers_regno_no_parallel_p (inner, test_regno))
2039 return true;
2042 return false;
2044 else
2045 return covers_regno_no_parallel_p (dest, test_regno);
2048 /* Utility function for dead_or_set_p to check an individual register. */
 2050 int
 2051 dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
2053 const_rtx pattern;
2055 /* See if there is a death note for something that includes TEST_REGNO. */
2056 if (find_regno_note (insn, REG_DEAD, test_regno))
2057 return 1;
2059 if (CALL_P (insn)
2060 && find_regno_fusage (insn, CLOBBER, test_regno))
2061 return 1;
2063 pattern = PATTERN (insn);
2065 /* If a COND_EXEC is not executed, the value survives. */
2066 if (GET_CODE (pattern) == COND_EXEC)
2067 return 0;
2069 if (GET_CODE (pattern) == SET)
2070 return covers_regno_p (SET_DEST (pattern), test_regno);
2071 else if (GET_CODE (pattern) == PARALLEL)
2073 int i;
2075 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
2077 rtx body = XVECEXP (pattern, 0, i);
2079 if (GET_CODE (body) == COND_EXEC)
2080 body = COND_EXEC_CODE (body);
2082 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
2083 && covers_regno_p (SET_DEST (body), test_regno))
2084 return 1;
2088 return 0;
2091 /* Return the reg-note of kind KIND in insn INSN, if there is one.
2092 If DATUM is nonzero, look for one whose datum is DATUM. */
 2094 rtx
 2095 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
2097 rtx link;
2099 gcc_checking_assert (insn);
2101 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
2102 if (! INSN_P (insn))
2103 return 0;
2104 if (datum == 0)
2106 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2107 if (REG_NOTE_KIND (link) == kind)
2108 return link;
2109 return 0;
2112 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2113 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
2114 return link;
2115 return 0;
2118 /* Return the reg-note of kind KIND in insn INSN which applies to register
2119 number REGNO, if any. Return 0 if there is no such reg-note. Note that
2120 the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
2121 it might be the case that the note overlaps REGNO. */
 2123 rtx
 2124 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
2126 rtx link;
2128 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
2129 if (! INSN_P (insn))
2130 return 0;
2132 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2133 if (REG_NOTE_KIND (link) == kind
2134 /* Verify that it is a register, so that scratch and MEM won't cause a
2135 problem here. */
2136 && REG_P (XEXP (link, 0))
2137 && REGNO (XEXP (link, 0)) <= regno
2138 && END_REGNO (XEXP (link, 0)) > regno)
2139 return link;
2140 return 0;
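/* Usage sketch: testing whether INSN's result is documented to equal
   a constant via a REG_EQUAL note (compare find_constant_src below).

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note && CONST_INT_P (XEXP (note, 0)))
       ... the destination of INSN's single set is known to equal
           INTVAL (XEXP (note, 0)) ...  */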
2143 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
2144 has such a note. */
 2146 rtx
 2147 find_reg_equal_equiv_note (const_rtx insn)
2149 rtx link;
2151 if (!INSN_P (insn))
2152 return 0;
2154 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2155 if (REG_NOTE_KIND (link) == REG_EQUAL
2156 || REG_NOTE_KIND (link) == REG_EQUIV)
2158 /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
2159 insns that have multiple sets. Checking single_set to
2160 make sure of this is not the proper check, as explained
2161 in the comment in set_unique_reg_note.
2163 This should be changed into an assert. */
2164 if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
2165 return 0;
2166 return link;
2168 return NULL;
2171 /* Check whether INSN is a single_set whose source is known to be
2172 equivalent to a constant. Return that constant if so, otherwise
2173 return null. */
2176 find_constant_src (const rtx_insn *insn)
2178 rtx note, set, x;
2180 set = single_set (insn);
2181 if (set)
2183 x = avoid_constant_pool_reference (SET_SRC (set));
2184 if (CONSTANT_P (x))
2185 return x;
2188 note = find_reg_equal_equiv_note (insn);
2189 if (note && CONSTANT_P (XEXP (note, 0)))
2190 return XEXP (note, 0);
2192 return NULL_RTX;
2195 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
2196 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2199 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
2201 /* If it's not a CALL_INSN, it can't possibly have a
2202 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
2203 if (!CALL_P (insn))
2204 return 0;
2206 gcc_assert (datum);
2208 if (!REG_P (datum))
2210 rtx link;
2212 for (link = CALL_INSN_FUNCTION_USAGE (insn);
2213 link;
2214 link = XEXP (link, 1))
2215 if (GET_CODE (XEXP (link, 0)) == code
2216 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2217 return 1;
2219 else
2221 unsigned int regno = REGNO (datum);
2223 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2224 to pseudo registers, so don't bother checking. */
2226 if (regno < FIRST_PSEUDO_REGISTER)
2228 unsigned int end_regno = END_REGNO (datum);
2229 unsigned int i;
2231 for (i = regno; i < end_regno; i++)
2232 if (find_regno_fusage (insn, code, i))
2233 return 1;
2237 return 0;
2240 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2241 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2244 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2246 rtx link;
2248 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2249 to pseudo registers, so don't bother checking. */
2251 if (regno >= FIRST_PSEUDO_REGISTER
2252 || !CALL_P (insn))
2253 return 0;
2255 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2257 rtx op, reg;
2259 if (GET_CODE (op = XEXP (link, 0)) == code
2260 && REG_P (reg = XEXP (op, 0))
2261 && REGNO (reg) <= regno
2262 && END_REGNO (reg) > regno)
2263 return 1;
2266 return 0;
2270 /* Return true if KIND is an integer REG_NOTE. */
2272 static bool
2273 int_reg_note_p (enum reg_note kind)
2275 return kind == REG_BR_PROB;
2278 /* Allocate a register note with kind KIND and datum DATUM. LIST is
2279 stored as the pointer to the next register note. */
2282 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2284 rtx note;
2286 gcc_checking_assert (!int_reg_note_p (kind));
2287 switch (kind)
2289 case REG_CC_SETTER:
2290 case REG_CC_USER:
2291 case REG_LABEL_TARGET:
2292 case REG_LABEL_OPERAND:
2293 case REG_TM:
2294 /* These types of register notes use an INSN_LIST rather than an
2295 EXPR_LIST, so that copying is done right and dumps look
2296 better. */
2297 note = alloc_INSN_LIST (datum, list);
2298 PUT_REG_NOTE_KIND (note, kind);
2299 break;
2301 default:
2302 note = alloc_EXPR_LIST (kind, datum, list);
2303 break;
2306 return note;
2309 /* Add register note with kind KIND and datum DATUM to INSN. */
2311 void
2312 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2314 REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
2317 /* Add an integer register note with kind KIND and datum DATUM to INSN. */
2319 void
2320 add_int_reg_note (rtx insn, enum reg_note kind, int datum)
2322 gcc_checking_assert (int_reg_note_p (kind));
2323 REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2324 datum, REG_NOTES (insn));
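/* Illustrative usage (an addition, not in the original source; INSN and
   JUMP_INSN are placeholders): rtx-valued kinds such as REG_EQUAL go
   through add_reg_note, while integer kinds such as REG_BR_PROB must use
   add_int_reg_note:

     add_reg_note (insn, REG_EQUAL, const0_rtx);
     add_int_reg_note (jump_insn, REG_BR_PROB, 5000);

   Passing an integer kind to alloc_reg_note would trip the
   gcc_checking_assert above.  */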
2327 /* Add a register note like NOTE to INSN. */
2329 void
2330 add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2332 if (GET_CODE (note) == INT_LIST)
2333 add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2334 else
2335 add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2338 /* Remove register note NOTE from the REG_NOTES of INSN. */
2340 void
2341 remove_note (rtx insn, const_rtx note)
2343 rtx link;
2345 if (note == NULL_RTX)
2346 return;
2348 if (REG_NOTES (insn) == note)
2349 REG_NOTES (insn) = XEXP (note, 1);
2350 else
2351 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2352 if (XEXP (link, 1) == note)
2354 XEXP (link, 1) = XEXP (note, 1);
2355 break;
2358 switch (REG_NOTE_KIND (note))
2360 case REG_EQUAL:
2361 case REG_EQUIV:
2362 df_notes_rescan (as_a <rtx_insn *> (insn));
2363 break;
2364 default:
2365 break;
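/* Illustrative usage (an addition, not in the original source):
   remove_note tolerates a null NOTE, so deleting a possibly-absent
   REG_EQUAL note is a single call:

     remove_note (insn, find_reg_note (insn, REG_EQUAL, NULL_RTX));

   and df_notes_rescan runs only if an equal/equiv note was actually
   present.  */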
2369 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes. */
2371 void
2372 remove_reg_equal_equiv_notes (rtx_insn *insn)
2374 rtx *loc;
2376 loc = &REG_NOTES (insn);
2377 while (*loc)
2379 enum reg_note kind = REG_NOTE_KIND (*loc);
2380 if (kind == REG_EQUAL || kind == REG_EQUIV)
2381 *loc = XEXP (*loc, 1);
2382 else
2383 loc = &XEXP (*loc, 1);
2387 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO. */
2389 void
2390 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2392 df_ref eq_use;
2394 if (!df)
2395 return;
2397 /* This loop is a little tricky. We cannot just go down the chain because
2398 it is being modified by some actions in the loop. So we just iterate
2399 over the head. We plan to drain the list anyway. */
2400 while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2402 rtx_insn *insn = DF_REF_INSN (eq_use);
2403 rtx note = find_reg_equal_equiv_note (insn);
2405 /* This assert is generally triggered when someone deletes a REG_EQUAL
2406 or REG_EQUIV note by hacking the list manually rather than calling
2407 remove_note. */
2408 gcc_assert (note);
2410 remove_note (insn, note);
2414 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2415 return true if it is found.  A simple equality test is used to determine if
2416 NODE matches. */
2418 bool
2419 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2421 const_rtx x;
2423 for (x = listp; x; x = XEXP (x, 1))
2424 if (node == XEXP (x, 0))
2425 return true;
2427 return false;
2430 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2431 remove that entry from the list if it is found.
2433 A simple equality test is used to determine if NODE matches. */
2435 void
2436 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2438 rtx_expr_list *temp = *listp;
2439 rtx_expr_list *prev = NULL;
2441 while (temp)
2443 if (node == temp->element ())
2445 /* Splice the node out of the list. */
2446 if (prev)
2447 XEXP (prev, 1) = temp->next ();
2448 else
2449 *listp = temp->next ();
2451 return;
2454 prev = temp;
2455 temp = temp->next ();
2459 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2460 remove that entry from the list if it is found.
2462 A simple equality test is used to determine if NODE matches. */
2464 void
2465 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2467 rtx_insn_list *temp = *listp;
2468 rtx_insn_list *prev = NULL;
2470 while (temp)
2472 if (node == temp->insn ())
2474 /* Splice the node out of the list. */
2475 if (prev)
2476 XEXP (prev, 1) = temp->next ();
2477 else
2478 *listp = temp->next ();
2480 return;
2483 prev = temp;
2484 temp = temp->next ();
2488 /* Nonzero if X contains any volatile instructions. These are instructions
2489 which may cause unpredictable machine state, and thus no
2490 instructions or register uses should be moved or combined across them.
2491 This includes only volatile asms and UNSPEC_VOLATILE instructions. */
2494 volatile_insn_p (const_rtx x)
2496 const RTX_CODE code = GET_CODE (x);
2497 switch (code)
2499 case LABEL_REF:
2500 case SYMBOL_REF:
2501 case CONST:
2502 CASE_CONST_ANY:
2503 case CC0:
2504 case PC:
2505 case REG:
2506 case SCRATCH:
2507 case CLOBBER:
2508 case ADDR_VEC:
2509 case ADDR_DIFF_VEC:
2510 case CALL:
2511 case MEM:
2512 return 0;
2514 case UNSPEC_VOLATILE:
2515 return 1;
2517 case ASM_INPUT:
2518 case ASM_OPERANDS:
2519 if (MEM_VOLATILE_P (x))
2520 return 1;
2522 default:
2523 break;
2526 /* Recursively scan the operands of this expression. */
2529 const char *const fmt = GET_RTX_FORMAT (code);
2530 int i;
2532 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2534 if (fmt[i] == 'e')
2536 if (volatile_insn_p (XEXP (x, i)))
2537 return 1;
2539 else if (fmt[i] == 'E')
2541 int j;
2542 for (j = 0; j < XVECLEN (x, i); j++)
2543 if (volatile_insn_p (XVECEXP (x, i, j)))
2544 return 1;
2548 return 0;
2551 /* Nonzero if X contains any volatile memory references
2552 UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions. */
2555 volatile_refs_p (const_rtx x)
2557 const RTX_CODE code = GET_CODE (x);
2558 switch (code)
2560 case LABEL_REF:
2561 case SYMBOL_REF:
2562 case CONST:
2563 CASE_CONST_ANY:
2564 case CC0:
2565 case PC:
2566 case REG:
2567 case SCRATCH:
2568 case CLOBBER:
2569 case ADDR_VEC:
2570 case ADDR_DIFF_VEC:
2571 return 0;
2573 case UNSPEC_VOLATILE:
2574 return 1;
2576 case MEM:
2577 case ASM_INPUT:
2578 case ASM_OPERANDS:
2579 if (MEM_VOLATILE_P (x))
2580 return 1;
2582 default:
2583 break;
2586 /* Recursively scan the operands of this expression. */
2589 const char *const fmt = GET_RTX_FORMAT (code);
2590 int i;
2592 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2594 if (fmt[i] == 'e')
2596 if (volatile_refs_p (XEXP (x, i)))
2597 return 1;
2599 else if (fmt[i] == 'E')
2601 int j;
2602 for (j = 0; j < XVECLEN (x, i); j++)
2603 if (volatile_refs_p (XVECEXP (x, i, j)))
2604 return 1;
2608 return 0;
2611 /* Similar to above, except that it also rejects register pre- and post-
2612 incrementing. */
2615 side_effects_p (const_rtx x)
2617 const RTX_CODE code = GET_CODE (x);
2618 switch (code)
2620 case LABEL_REF:
2621 case SYMBOL_REF:
2622 case CONST:
2623 CASE_CONST_ANY:
2624 case CC0:
2625 case PC:
2626 case REG:
2627 case SCRATCH:
2628 case ADDR_VEC:
2629 case ADDR_DIFF_VEC:
2630 case VAR_LOCATION:
2631 return 0;
2633 case CLOBBER:
2634 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2635 when some combination can't be done. If we see one, don't think
2636 that we can simplify the expression. */
2637 return (GET_MODE (x) != VOIDmode);
2639 case PRE_INC:
2640 case PRE_DEC:
2641 case POST_INC:
2642 case POST_DEC:
2643 case PRE_MODIFY:
2644 case POST_MODIFY:
2645 case CALL:
2646 case UNSPEC_VOLATILE:
2647 return 1;
2649 case MEM:
2650 case ASM_INPUT:
2651 case ASM_OPERANDS:
2652 if (MEM_VOLATILE_P (x))
2653 return 1;
2655 default:
2656 break;
2659 /* Recursively scan the operands of this expression. */
2662 const char *fmt = GET_RTX_FORMAT (code);
2663 int i;
2665 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2667 if (fmt[i] == 'e')
2669 if (side_effects_p (XEXP (x, i)))
2670 return 1;
2672 else if (fmt[i] == 'E')
2674 int j;
2675 for (j = 0; j < XVECLEN (x, i); j++)
2676 if (side_effects_p (XVECEXP (x, i, j)))
2677 return 1;
2681 return 0;
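/* Example (illustrative) contrasting the three predicates above: for a
   volatile memory load x = (mem/v:SI (reg:SI 1)), volatile_insn_p (x)
   is 0 but volatile_refs_p (x) and side_effects_p (x) are 1; for
   x = (post_inc:SI (reg:SI 1)), only side_effects_p returns 1.  */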
2684 /* Return nonzero if evaluating rtx X might cause a trap.
2685 FLAGS controls how to consider MEMs.  A nonzero value means the context
2686 of the access may have changed from the original, such that the
2687 address may have become invalid. */
2690 may_trap_p_1 (const_rtx x, unsigned flags)
2692 int i;
2693 enum rtx_code code;
2694 const char *fmt;
2696 /* We make no distinction currently, but this function is part of
2697 the internal target-hooks ABI so we keep the parameter as
2698 "unsigned flags". */
2699 bool code_changed = flags != 0;
2701 if (x == 0)
2702 return 0;
2703 code = GET_CODE (x);
2704 switch (code)
2706 /* Handle these cases quickly. */
2707 CASE_CONST_ANY:
2708 case SYMBOL_REF:
2709 case LABEL_REF:
2710 case CONST:
2711 case PC:
2712 case CC0:
2713 case REG:
2714 case SCRATCH:
2715 return 0;
2717 case UNSPEC:
2718 return targetm.unspec_may_trap_p (x, flags);
2720 case UNSPEC_VOLATILE:
2721 case ASM_INPUT:
2722 case TRAP_IF:
2723 return 1;
2725 case ASM_OPERANDS:
2726 return MEM_VOLATILE_P (x);
2728 /* Memory ref can trap unless it's a static var or a stack slot. */
2729 case MEM:
2730 /* Recognize specific pattern of stack checking probes. */
2731 if (flag_stack_check
2732 && MEM_VOLATILE_P (x)
2733 && XEXP (x, 0) == stack_pointer_rtx)
2734 return 1;
2735 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2736 reference; moving it out of context such as when moving code
2737 when optimizing, might cause its address to become invalid. */
2738 code_changed
2739 || !MEM_NOTRAP_P (x))
2741 HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
2742 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2743 GET_MODE (x), code_changed);
2746 return 0;
2748 /* Division by a non-constant might trap. */
2749 case DIV:
2750 case MOD:
2751 case UDIV:
2752 case UMOD:
2753 if (HONOR_SNANS (x))
2754 return 1;
2755 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2756 return flag_trapping_math;
2757 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2758 return 1;
2759 break;
2761 case EXPR_LIST:
2762 /* An EXPR_LIST is used to represent a function call. This
2763 certainly may trap. */
2764 return 1;
2766 case GE:
2767 case GT:
2768 case LE:
2769 case LT:
2770 case LTGT:
2771 case COMPARE:
2772 /* Some floating point comparisons may trap. */
2773 if (!flag_trapping_math)
2774 break;
2775 /* ??? There is no machine independent way to check for tests that trap
2776 when COMPARE is used, though many targets do make this distinction.
2777 For instance, sparc uses CCFPE for compares which generate exceptions
2778 and CCFP for compares which do not generate exceptions. */
2779 if (HONOR_NANS (x))
2780 return 1;
2781 /* But often the compare has some CC mode, so check operand
2782 modes as well. */
2783 if (HONOR_NANS (XEXP (x, 0))
2784 || HONOR_NANS (XEXP (x, 1)))
2785 return 1;
2786 break;
2788 case EQ:
2789 case NE:
2790 if (HONOR_SNANS (x))
2791 return 1;
2792 /* Often comparison is CC mode, so check operand modes. */
2793 if (HONOR_SNANS (XEXP (x, 0))
2794 || HONOR_SNANS (XEXP (x, 1)))
2795 return 1;
2796 break;
2798 case FIX:
2799 /* Conversion of floating point might trap. */
2800 if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
2801 return 1;
2802 break;
2804 case NEG:
2805 case ABS:
2806 case SUBREG:
2807 /* These operations don't trap even with floating point. */
2808 break;
2810 default:
2811 /* Any floating arithmetic may trap. */
2812 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2813 return 1;
2816 fmt = GET_RTX_FORMAT (code);
2817 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2819 if (fmt[i] == 'e')
2821 if (may_trap_p_1 (XEXP (x, i), flags))
2822 return 1;
2824 else if (fmt[i] == 'E')
2826 int j;
2827 for (j = 0; j < XVECLEN (x, i); j++)
2828 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2829 return 1;
2832 return 0;
2835 /* Return nonzero if evaluating rtx X might cause a trap. */
2838 may_trap_p (const_rtx x)
2840 return may_trap_p_1 (x, 0);
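/* Example (illustrative): (div:SI (reg:SI 1) (reg:SI 2)) is may_trap_p
   because the divisor is not constant, and (div:SI (reg:SI 1)
   (const_int 0)) traps outright, whereas (plus:SI (reg:SI 1)
   (reg:SI 2)) is not may_trap_p.  */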
2843 /* Same as above, but additionally return nonzero if evaluating rtx X might
2844 cause a fault.  We define a fault for the purpose of this function as an
2845 erroneous execution condition that cannot be encountered during the normal
2846 execution of a valid program; the typical example is an unaligned memory
2847 access on a strict alignment machine. The compiler guarantees that it
2848 doesn't generate code that will fault from a valid program, but this
2849 guarantee doesn't mean anything for individual instructions. Consider
2850 the following example:
2852 struct S { int d; union { char *cp; int *ip; }; };
2854 int foo(struct S *s)
2856 if (s->d == 1)
2857 return *s->ip;
2858 else
2859 return *s->cp;
2862 on a strict alignment machine. In a valid program, foo will never be
2863 invoked on a structure for which d is equal to 1 and the underlying
2864 unique field of the union is not aligned on a 4-byte boundary, but the
2865 expression *s->ip might cause a fault if considered individually.
2867 At the RTL level, potentially problematic expressions will almost always
2868 verify may_trap_p; for example, the above dereference can be emitted as
2869 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2870 However, suppose that foo is inlined in a caller that causes s->cp to
2871 point to a local character variable and guarantees that s->d is not set
2872 to 1; foo may have been effectively translated into pseudo-RTL as:
2874 if ((reg:SI) == 1)
2875 (set (reg:SI) (mem:SI (%fp - 7)))
2876 else
2877 (set (reg:QI) (mem:QI (%fp - 7)))
2879 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2880 memory reference to a stack slot, but it will certainly cause a fault
2881 on a strict alignment machine. */
2884 may_trap_or_fault_p (const_rtx x)
2886 return may_trap_p_1 (x, 1);
2889 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2890 i.e., an inequality. */
2893 inequality_comparisons_p (const_rtx x)
2895 const char *fmt;
2896 int len, i;
2897 const enum rtx_code code = GET_CODE (x);
2899 switch (code)
2901 case REG:
2902 case SCRATCH:
2903 case PC:
2904 case CC0:
2905 CASE_CONST_ANY:
2906 case CONST:
2907 case LABEL_REF:
2908 case SYMBOL_REF:
2909 return 0;
2911 case LT:
2912 case LTU:
2913 case GT:
2914 case GTU:
2915 case LE:
2916 case LEU:
2917 case GE:
2918 case GEU:
2919 return 1;
2921 default:
2922 break;
2925 len = GET_RTX_LENGTH (code);
2926 fmt = GET_RTX_FORMAT (code);
2928 for (i = 0; i < len; i++)
2930 if (fmt[i] == 'e')
2932 if (inequality_comparisons_p (XEXP (x, i)))
2933 return 1;
2935 else if (fmt[i] == 'E')
2937 int j;
2938 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2939 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2940 return 1;
2944 return 0;
2947 /* Replace any occurrence of FROM in X with TO. The function does
2948 not descend into CONST_DOUBLE when doing the replacement.
2950 Note that copying is not done so X must not be shared unless all copies
2951 are to be modified. */
2954 replace_rtx (rtx x, rtx from, rtx to)
2956 int i, j;
2957 const char *fmt;
2959 if (x == from)
2960 return to;
2962 /* Allow this function to make replacements in EXPR_LISTs. */
2963 if (x == 0)
2964 return 0;
2966 if (GET_CODE (x) == SUBREG)
2968 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);
2970 if (CONST_INT_P (new_rtx))
2972 x = simplify_subreg (GET_MODE (x), new_rtx,
2973 GET_MODE (SUBREG_REG (x)),
2974 SUBREG_BYTE (x));
2975 gcc_assert (x);
2977 else
2978 SUBREG_REG (x) = new_rtx;
2980 return x;
2982 else if (GET_CODE (x) == ZERO_EXTEND)
2984 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);
2986 if (CONST_INT_P (new_rtx))
2988 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2989 new_rtx, GET_MODE (XEXP (x, 0)));
2990 gcc_assert (x);
2992 else
2993 XEXP (x, 0) = new_rtx;
2995 return x;
2998 fmt = GET_RTX_FORMAT (GET_CODE (x));
2999 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3001 if (fmt[i] == 'e')
3002 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
3003 else if (fmt[i] == 'E')
3004 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3005 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
3008 return x;
3011 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL. Also track
3012 the change in LABEL_NUSES if UPDATE_LABEL_NUSES. */
3014 void
3015 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3017 /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long. */
3018 rtx x = *loc;
3019 if (JUMP_TABLE_DATA_P (x))
3021 x = PATTERN (x);
3022 rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3023 int len = GET_NUM_ELEM (vec);
3024 for (int i = 0; i < len; ++i)
3026 rtx ref = RTVEC_ELT (vec, i);
3027 if (XEXP (ref, 0) == old_label)
3029 XEXP (ref, 0) = new_label;
3030 if (update_label_nuses)
3032 ++LABEL_NUSES (new_label);
3033 --LABEL_NUSES (old_label);
3037 return;
3040 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3041 field. This is not handled by the iterator because it doesn't
3042 handle unprinted ('0') fields. */
3043 if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3044 JUMP_LABEL (x) = new_label;
3046 subrtx_ptr_iterator::array_type array;
3047 FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3049 rtx *loc = *iter;
3050 if (rtx x = *loc)
3052 if (GET_CODE (x) == SYMBOL_REF
3053 && CONSTANT_POOL_ADDRESS_P (x))
3055 rtx c = get_pool_constant (x);
3056 if (rtx_referenced_p (old_label, c))
3058 /* Create a copy of constant C; replace the label inside
3059 but do not update LABEL_NUSES because uses in constant pool
3060 are not counted. */
3061 rtx new_c = copy_rtx (c);
3062 replace_label (&new_c, old_label, new_label, false);
3064 /* Add the new constant NEW_C to constant pool and replace
3065 the old reference to constant by new reference. */
3066 rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3067 *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3071 if ((GET_CODE (x) == LABEL_REF
3072 || GET_CODE (x) == INSN_LIST)
3073 && XEXP (x, 0) == old_label)
3075 XEXP (x, 0) = new_label;
3076 if (update_label_nuses)
3078 ++LABEL_NUSES (new_label);
3079 --LABEL_NUSES (old_label);
3086 void
3087 replace_label_in_insn (rtx_insn *insn, rtx old_label, rtx new_label,
3088 bool update_label_nuses)
3090 rtx insn_as_rtx = insn;
3091 replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3092 gcc_checking_assert (insn_as_rtx == insn);
3095 /* Return true if X is referenced in BODY. */
3097 bool
3098 rtx_referenced_p (const_rtx x, const_rtx body)
3100 subrtx_iterator::array_type array;
3101 FOR_EACH_SUBRTX (iter, array, body, ALL)
3102 if (const_rtx y = *iter)
3104 /* Check if a label_ref Y refers to label X. */
3105 if (GET_CODE (y) == LABEL_REF
3106 && LABEL_P (x)
3107 && LABEL_REF_LABEL (y) == x)
3108 return true;
3110 if (rtx_equal_p (x, y))
3111 return true;
3113 /* If Y is a reference to a pool constant, traverse the constant. */
3114 if (GET_CODE (y) == SYMBOL_REF
3115 && CONSTANT_POOL_ADDRESS_P (y))
3116 iter.substitute (get_pool_constant (y));
3118 return false;
3121 /* If INSN is a tablejump, return true and store the label (the one
3122 preceding the jump table) in *LABELP and the jump table in *TABLEP.  LABELP and TABLEP may be NULL. */
3124 bool
3125 tablejump_p (const rtx_insn *insn, rtx *labelp, rtx_jump_table_data **tablep)
3127 rtx label;
3128 rtx_insn *table;
3130 if (!JUMP_P (insn))
3131 return false;
3133 label = JUMP_LABEL (insn);
3134 if (label != NULL_RTX && !ANY_RETURN_P (label)
3135 && (table = NEXT_INSN (as_a <rtx_insn *> (label))) != NULL_RTX
3136 && JUMP_TABLE_DATA_P (table))
3138 if (labelp)
3139 *labelp = label;
3140 if (tablep)
3141 *tablep = as_a <rtx_jump_table_data *> (table);
3142 return true;
3144 return false;
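/* Illustrative sketch (an addition, not in the original source): counting
   the labels of a jump table via tablejump_p, much as
   label_is_jump_target_p does further below.  */

static int
example_count_table_labels (const rtx_insn *insn)
{
  rtx_jump_table_data *table;
  if (tablejump_p (insn, NULL, &table))
    return GET_NUM_ELEM (table->get_labels ());
  return 0;
}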
3147 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
3148 constant that is not in the constant pool and not in the condition
3149 of an IF_THEN_ELSE. */
3151 static int
3152 computed_jump_p_1 (const_rtx x)
3154 const enum rtx_code code = GET_CODE (x);
3155 int i, j;
3156 const char *fmt;
3158 switch (code)
3160 case LABEL_REF:
3161 case PC:
3162 return 0;
3164 case CONST:
3165 CASE_CONST_ANY:
3166 case SYMBOL_REF:
3167 case REG:
3168 return 1;
3170 case MEM:
3171 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3172 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3174 case IF_THEN_ELSE:
3175 return (computed_jump_p_1 (XEXP (x, 1))
3176 || computed_jump_p_1 (XEXP (x, 2)));
3178 default:
3179 break;
3182 fmt = GET_RTX_FORMAT (code);
3183 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3185 if (fmt[i] == 'e'
3186 && computed_jump_p_1 (XEXP (x, i)))
3187 return 1;
3189 else if (fmt[i] == 'E')
3190 for (j = 0; j < XVECLEN (x, i); j++)
3191 if (computed_jump_p_1 (XVECEXP (x, i, j)))
3192 return 1;
3195 return 0;
3198 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3200 Tablejumps and casesi insns are not considered indirect jumps;
3201 we can recognize them by a (use (label_ref)). */
3204 computed_jump_p (const rtx_insn *insn)
3206 int i;
3207 if (JUMP_P (insn))
3209 rtx pat = PATTERN (insn);
3211 /* If we have a JUMP_LABEL set, we're not a computed jump. */
3212 if (JUMP_LABEL (insn) != NULL)
3213 return 0;
3215 if (GET_CODE (pat) == PARALLEL)
3217 int len = XVECLEN (pat, 0);
3218 int has_use_labelref = 0;
3220 for (i = len - 1; i >= 0; i--)
3221 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3222 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3223 == LABEL_REF))
3225 has_use_labelref = 1;
3226 break;
3229 if (! has_use_labelref)
3230 for (i = len - 1; i >= 0; i--)
3231 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3232 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3233 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3234 return 1;
3236 else if (GET_CODE (pat) == SET
3237 && SET_DEST (pat) == pc_rtx
3238 && computed_jump_p_1 (SET_SRC (pat)))
3239 return 1;
3241 return 0;
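/* Example (illustrative): (set (pc) (reg:SI 1)) with a null JUMP_LABEL is
   a computed jump, since computed_jump_p_1 returns 1 for a REG source;
   a direct (set (pc) (label_ref ...)) is not, both because LABEL_REF is
   rejected by computed_jump_p_1 and because such a jump carries a
   JUMP_LABEL.  */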
3246 /* MEM has a PRE/POST-INC/DEC/MODIFY address X. Extract the operands of
3247 the equivalent add insn and pass the result to FN, using DATA as the
3248 final argument. */
3250 static int
3251 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3253 rtx x = XEXP (mem, 0);
3254 switch (GET_CODE (x))
3256 case PRE_INC:
3257 case POST_INC:
3259 int size = GET_MODE_SIZE (GET_MODE (mem));
3260 rtx r1 = XEXP (x, 0);
3261 rtx c = gen_int_mode (size, GET_MODE (r1));
3262 return fn (mem, x, r1, r1, c, data);
3265 case PRE_DEC:
3266 case POST_DEC:
3268 int size = GET_MODE_SIZE (GET_MODE (mem));
3269 rtx r1 = XEXP (x, 0);
3270 rtx c = gen_int_mode (-size, GET_MODE (r1));
3271 return fn (mem, x, r1, r1, c, data);
3274 case PRE_MODIFY:
3275 case POST_MODIFY:
3277 rtx r1 = XEXP (x, 0);
3278 rtx add = XEXP (x, 1);
3279 return fn (mem, x, r1, add, NULL, data);
3282 default:
3283 gcc_unreachable ();
3287 /* Traverse *LOC looking for MEMs that have autoinc addresses.
3288 For each such autoinc operation found, call FN, passing it
3289 the innermost enclosing MEM, the operation itself, the RTX modified
3290 by the operation, two RTXs (the second may be NULL) that, once
3291 added, represent the value to be held by the modified RTX
3292 afterwards, and DATA. FN is to return 0 to continue the
3293 traversal or any other value to have it returned to the caller of
3294 for_each_inc_dec. */
3297 for_each_inc_dec (rtx x,
3298 for_each_inc_dec_fn fn,
3299 void *data)
3301 subrtx_var_iterator::array_type array;
3302 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3304 rtx mem = *iter;
3305 if (mem
3306 && MEM_P (mem)
3307 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3309 int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3310 if (res != 0)
3311 return res;
3312 iter.skip_subrtxes ();
3315 return 0;
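/* Illustrative sketch (an addition, not in the original source): a minimal
   for_each_inc_dec callback that counts autoinc operations.  DATA points
   to an int; returning 0 continues the traversal, e.g.
   for_each_inc_dec (PATTERN (insn), example_count_autoinc, &count).  */

static int
example_count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                       rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
                       rtx srcoff ATTRIBUTE_UNUSED, void *data)
{
  ++*(int *) data;
  return 0;
}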
3319 /* Searches X for any reference to REGNO, returning the rtx of the
3320 reference found if any. Otherwise, returns NULL_RTX. */
3323 regno_use_in (unsigned int regno, rtx x)
3325 const char *fmt;
3326 int i, j;
3327 rtx tem;
3329 if (REG_P (x) && REGNO (x) == regno)
3330 return x;
3332 fmt = GET_RTX_FORMAT (GET_CODE (x));
3333 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3335 if (fmt[i] == 'e')
3337 if ((tem = regno_use_in (regno, XEXP (x, i))))
3338 return tem;
3340 else if (fmt[i] == 'E')
3341 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3342 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3343 return tem;
3346 return NULL_RTX;
3349 /* Return a value indicating whether OP, an operand of a commutative
3350 operation, is preferred as the first or second operand. The more
3351 positive the value, the stronger the preference for being the first
3352 operand. */
3355 commutative_operand_precedence (rtx op)
3357 enum rtx_code code = GET_CODE (op);
3359 /* Constants always become the second operand. Prefer "nice" constants. */
3360 if (code == CONST_INT)
3361 return -8;
3362 if (code == CONST_WIDE_INT)
3363 return -8;
3364 if (code == CONST_DOUBLE)
3365 return -7;
3366 if (code == CONST_FIXED)
3367 return -7;
3368 op = avoid_constant_pool_reference (op);
3369 code = GET_CODE (op);
3371 switch (GET_RTX_CLASS (code))
3373 case RTX_CONST_OBJ:
3374 if (code == CONST_INT)
3375 return -6;
3376 if (code == CONST_WIDE_INT)
3377 return -6;
3378 if (code == CONST_DOUBLE)
3379 return -5;
3380 if (code == CONST_FIXED)
3381 return -5;
3382 return -4;
3384 case RTX_EXTRA:
3385 /* SUBREGs of objects should come second. */
3386 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3387 return -3;
3388 return 0;
3390 case RTX_OBJ:
3391 /* Complex expressions should come first, so decrease the priority
3392 of objects.  Prefer pointer objects over non-pointer objects. */
3393 if ((REG_P (op) && REG_POINTER (op))
3394 || (MEM_P (op) && MEM_POINTER (op)))
3395 return -1;
3396 return -2;
3398 case RTX_COMM_ARITH:
3399 /* Prefer operands that are themselves commutative to be first.
3400 This helps to make things linear. In particular,
3401 (and (and (reg) (reg)) (not (reg))) is canonical. */
3402 return 4;
3404 case RTX_BIN_ARITH:
3405 /* If only one operand is a binary expression, it will be the first
3406 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3407 is canonical, although it will usually be further simplified. */
3408 return 2;
3410 case RTX_UNARY:
3411 /* Then prefer NEG and NOT. */
3412 if (code == NEG || code == NOT)
3413 return 1;
3415 default:
3416 return 0;
3420 /* Return 1 iff it is necessary to swap the operands of a commutative
3421 operation in order to canonicalize the expression. */
3423 bool
3424 swap_commutative_operands_p (rtx x, rtx y)
3426 return (commutative_operand_precedence (x)
3427 < commutative_operand_precedence (y));
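/* Illustrative usage (an addition, not in the original source): callers
   typically canonicalize a commutative pair in place, e.g.

     if (swap_commutative_operands_p (op0, op1))
       std::swap (op0, op1);

   so that constants and other low-precedence operands end up second.  */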
3430 /* Return 1 if X is an autoincrement side effect and the register is
3431 not the stack pointer. */
3433 auto_inc_p (const_rtx x)
3435 switch (GET_CODE (x))
3437 case PRE_INC:
3438 case POST_INC:
3439 case PRE_DEC:
3440 case POST_DEC:
3441 case PRE_MODIFY:
3442 case POST_MODIFY:
3443 /* There are no REG_INC notes for SP. */
3444 if (XEXP (x, 0) != stack_pointer_rtx)
3445 return 1;
3446 default:
3447 break;
3449 return 0;
3452 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3454 loc_mentioned_in_p (rtx *loc, const_rtx in)
3456 enum rtx_code code;
3457 const char *fmt;
3458 int i, j;
3460 if (!in)
3461 return 0;
3463 code = GET_CODE (in);
3464 fmt = GET_RTX_FORMAT (code);
3465 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3467 if (fmt[i] == 'e')
3469 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3470 return 1;
3472 else if (fmt[i] == 'E')
3473 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3474 if (loc == &XVECEXP (in, i, j)
3475 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3476 return 1;
3478 return 0;
3481 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3482 and SUBREG_BYTE, return the bit offset where the subreg begins
3483 (counting from the least significant bit of the operand). */
3485 unsigned int
3486 subreg_lsb_1 (machine_mode outer_mode,
3487 machine_mode inner_mode,
3488 unsigned int subreg_byte)
3490 unsigned int bitpos;
3491 unsigned int byte;
3492 unsigned int word;
3494 /* A paradoxical subreg begins at bit position 0. */
3495 if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
3496 return 0;
3498 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3499 /* If the subreg crosses a word boundary ensure that
3500 it also begins and ends on a word boundary. */
3501 gcc_assert (!((subreg_byte % UNITS_PER_WORD
3502 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3503 && (subreg_byte % UNITS_PER_WORD
3504 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3506 if (WORDS_BIG_ENDIAN)
3507 word = (GET_MODE_SIZE (inner_mode)
3508 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3509 else
3510 word = subreg_byte / UNITS_PER_WORD;
3511 bitpos = word * BITS_PER_WORD;
3513 if (BYTES_BIG_ENDIAN)
3514 byte = (GET_MODE_SIZE (inner_mode)
3515 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3516 else
3517 byte = subreg_byte % UNITS_PER_WORD;
3518 bitpos += byte * BITS_PER_UNIT;
3520 return bitpos;
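/* Worked example (illustrative, assuming a little-endian target with
   UNITS_PER_WORD == 4): subreg_lsb_1 (SImode, DImode, 4) computes
   word = 4 / 4 = 1 and byte = 4 % 4 = 0, so the bit offset is 32;
   the subreg selects the high word of the DImode value.  */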
3523 /* Given a subreg X, return the bit offset where the subreg begins
3524 (counting from the least significant bit of the reg). */
3526 unsigned int
3527 subreg_lsb (const_rtx x)
3529 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3530 SUBREG_BYTE (x));
3533 /* Fill in information about a subreg of a hard register.
3534 xregno - A regno of an inner hard subreg_reg (or what will become one).
3535 xmode - The mode of xregno.
3536 offset - The byte offset.
3537 ymode - The mode of a top level SUBREG (or what may become one).
3538 info - Pointer to structure to fill in.
3540 Rather than considering one particular inner register (and thus one
3541 particular "outer" register) in isolation, this function really uses
3542 XREGNO as a model for a sequence of isomorphic hard registers. Thus the
3543 function does not check whether adding INFO->offset to XREGNO gives
3544 a valid hard register; even if INFO->offset + XREGNO is out of range,
3545 there might be another register of the same type that is in range.
3546 Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
3547 register, since that can depend on things like whether the final
3548 register number is even or odd. Callers that want to check whether
3549 this particular subreg can be replaced by a simple (reg ...) should
3550 use simplify_subreg_regno. */
3552 void
3553 subreg_get_info (unsigned int xregno, machine_mode xmode,
3554 unsigned int offset, machine_mode ymode,
3555 struct subreg_info *info)
3557 int nregs_xmode, nregs_ymode;
3558 int mode_multiple, nregs_multiple;
3559 int offset_adj, y_offset, y_offset_adj;
3560 int regsize_xmode, regsize_ymode;
3561 bool rknown;
3563 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3565 rknown = false;
3567 /* If there are holes in a non-scalar mode in registers, we expect
3568 that it is made up of its units concatenated together. */
3569 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3571 machine_mode xmode_unit;
3573 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3574 xmode_unit = GET_MODE_INNER (xmode);
3575 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3576 gcc_assert (nregs_xmode
3577 == (GET_MODE_NUNITS (xmode)
3578 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3579 gcc_assert (hard_regno_nregs[xregno][xmode]
3580 == (hard_regno_nregs[xregno][xmode_unit]
3581 * GET_MODE_NUNITS (xmode)));
3583 /* You can only ask for a SUBREG of a value with holes in the middle
3584 if you don't cross the holes. (Such a SUBREG should be done by
3585 picking a different register class, or doing it in memory if
3586 necessary.) An example of a value with holes is XCmode on 32-bit
3587 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3588 3 for each part, but in memory it's two 128-bit parts.
3589 Padding is assumed to be at the end (not necessarily the 'high part')
3590 of each unit. */
3591 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3592 < GET_MODE_NUNITS (xmode))
3593 && (offset / GET_MODE_SIZE (xmode_unit)
3594 != ((offset + GET_MODE_SIZE (ymode) - 1)
3595 / GET_MODE_SIZE (xmode_unit))))
3597 info->representable_p = false;
3598 rknown = true;
3601 else
3602 nregs_xmode = hard_regno_nregs[xregno][xmode];
3604 nregs_ymode = hard_regno_nregs[xregno][ymode];
3606 /* Paradoxical subregs are otherwise valid. */
3607 if (!rknown
3608 && offset == 0
3609 && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
3611 info->representable_p = true;
3612 /* If this is a big endian paradoxical subreg, which uses more
3613 actual hard registers than the original register, we must
3614 return a negative offset so that we find the proper highpart
3615 of the register. */
3616 if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3617 ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
3618 info->offset = nregs_xmode - nregs_ymode;
3619 else
3620 info->offset = 0;
3621 info->nregs = nregs_ymode;
3622 return;
3625 /* If registers store different numbers of bits in the different
3626 modes, we cannot generally form this subreg. */
3627 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3628 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3629 && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
3630 && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
3632 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3633 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3634 if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
3636 info->representable_p = false;
3637 info->nregs
3638 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3639 info->offset = offset / regsize_xmode;
3640 return;
3642 if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
3644 info->representable_p = false;
3645 info->nregs
3646 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3647 info->offset = offset / regsize_xmode;
3648 return;
3650 /* Quick exit for the simple and common case of extracting whole
3651 subregisters from a multiregister value. */
3652 /* ??? It would be better to integrate this into the code below,
3653 if we can generalize the concept enough and figure out how
3654 odd-sized modes can coexist with the other weird cases we support. */
3655 if (!rknown
3656 && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
3657 && regsize_xmode == regsize_ymode
3658 && (offset % regsize_ymode) == 0)
3660 info->representable_p = true;
3661 info->nregs = nregs_ymode;
3662 info->offset = offset / regsize_ymode;
3663 gcc_assert (info->offset + info->nregs <= nregs_xmode);
3664 return;
3668 /* Lowpart subregs are otherwise valid. */
3669 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3671 info->representable_p = true;
3672 rknown = true;
3674 if (offset == 0 || nregs_xmode == nregs_ymode)
3676 info->offset = 0;
3677 info->nregs = nregs_ymode;
3678 return;
3682 /* This should always pass, otherwise we don't know how to verify
3683 the constraint. These conditions may be relaxed but
3684 subreg_regno_offset would need to be redesigned. */
3685 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3686 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3688 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
3689 && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
3691 HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
3692 HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
3693 HOST_WIDE_INT off_low = offset & (ysize - 1);
3694 HOST_WIDE_INT off_high = offset & ~(ysize - 1);
3695 offset = (xsize - ysize - off_high) | off_low;
3697 /* The XMODE value can be seen as a vector of NREGS_XMODE
3698 values. The subreg must represent a lowpart of given field.
3699 Compute what field it is. */
3700 offset_adj = offset;
3701 offset_adj -= subreg_lowpart_offset (ymode,
3702 mode_for_size (GET_MODE_BITSIZE (xmode)
3703 / nregs_xmode,
3704 MODE_INT, 0));
3706 /* Size of ymode must not be greater than the size of xmode. */
3707 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3708 gcc_assert (mode_multiple != 0);
3710 y_offset = offset / GET_MODE_SIZE (ymode);
3711 y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
3712 nregs_multiple = nregs_xmode / nregs_ymode;
3714 gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
3715 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3717 if (!rknown)
3719 info->representable_p = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
3720 rknown = true;
3722 info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
3723 info->nregs = nregs_ymode;
3726 /* This function returns the regno offset of a subreg expression.
3727 xregno - A regno of an inner hard subreg_reg (or what will become one).
3728 xmode - The mode of xregno.
3729 offset - The byte offset.
3730 ymode - The mode of a top level SUBREG (or what may become one).
3731 RETURN - The regno offset which would be used. */
3732 unsigned int
3733 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
3734 unsigned int offset, machine_mode ymode)
3736 struct subreg_info info;
3737 subreg_get_info (xregno, xmode, offset, ymode, &info);
3738 return info.offset;
3741 /* This function returns true when the offset is representable via
3742 subreg_offset in the given regno.
3743 xregno - A regno of an inner hard subreg_reg (or what will become one).
3744 xmode - The mode of xregno.
3745 offset - The byte offset.
3746 ymode - The mode of a top level SUBREG (or what may become one).
3747 RETURN - Whether the offset is representable. */
3748 bool
3749 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
3750 unsigned int offset, machine_mode ymode)
3752 struct subreg_info info;
3753 subreg_get_info (xregno, xmode, offset, ymode, &info);
3754 return info.representable_p;
3757 /* Return the number of a YMODE register to which
3759 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3761 can be simplified. Return -1 if the subreg can't be simplified.
3763 XREGNO is a hard register number. */
3766 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
3767 unsigned int offset, machine_mode ymode)
3769 struct subreg_info info;
3770 unsigned int yregno;
3772 #ifdef CANNOT_CHANGE_MODE_CLASS
3773 /* Give the backend a chance to disallow the mode change. */
3774 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3775 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3776 && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
3777 /* We can use mode change in LRA for some transformations. */
3778 && ! lra_in_progress)
3779 return -1;
3780 #endif
3782 /* We shouldn't simplify stack-related registers. */
3783 if ((!reload_completed || frame_pointer_needed)
3784 && xregno == FRAME_POINTER_REGNUM)
3785 return -1;
3787 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3788 && xregno == ARG_POINTER_REGNUM)
3789 return -1;
3791 if (xregno == STACK_POINTER_REGNUM
3792 /* We should convert the hard stack register in LRA if it is
3793 possible. */
3794 && ! lra_in_progress)
3795 return -1;
3797 /* Try to get the register offset. */
3798 subreg_get_info (xregno, xmode, offset, ymode, &info);
3799 if (!info.representable_p)
3800 return -1;
3802 /* Make sure that the offsetted register value is in range. */
3803 yregno = xregno + info.offset;
3804 if (!HARD_REGISTER_NUM_P (yregno))
3805 return -1;
3807 /* See whether (reg:YMODE YREGNO) is valid.
3809 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3810 This is a kludge to work around how complex FP arguments are passed
3811 on IA-64 and should be fixed. See PR target/49226. */
3812 if (!HARD_REGNO_MODE_OK (yregno, ymode)
3813 && HARD_REGNO_MODE_OK (xregno, xmode))
3814 return -1;
3816 return (int) yregno;
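/* Illustrative sketch (an addition, not in the original source): folding a
   hard-register SUBREG into a plain REG when simplify_subreg_regno says
   the replacement is valid.  */

static rtx
example_fold_hard_subreg (rtx x)
{
  int regno = simplify_subreg_regno (REGNO (SUBREG_REG (x)),
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x), GET_MODE (x));
  return regno >= 0 ? gen_rtx_REG (GET_MODE (x), regno) : x;
}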
3819 /* Return the final regno that a subreg expression refers to. */
3820 unsigned int
3821 subreg_regno (const_rtx x)
3823 unsigned int ret;
3824 rtx subreg = SUBREG_REG (x);
3825 int regno = REGNO (subreg);
3827 ret = regno + subreg_regno_offset (regno,
3828 GET_MODE (subreg),
3829 SUBREG_BYTE (x),
3830 GET_MODE (x));
3831 return ret;
3835 /* Return the number of registers that a subreg expression refers
3836 to. */
3837 unsigned int
3838 subreg_nregs (const_rtx x)
3840 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3843 /* Return the number of registers that a subreg REG with REGNO
3844 expression refers to.  This is a copy of rtlanal.c:subreg_nregs,
3845 changed so that the regno can be passed in. */
3847 unsigned int
3848 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3850 struct subreg_info info;
3851 rtx subreg = SUBREG_REG (x);
3853 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3854 &info);
3855 return info.nregs;
3859 struct parms_set_data
3861 int nregs;
3862 HARD_REG_SET regs;
3865 /* Helper function for noticing stores to parameter registers. */
3866 static void
3867 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3869 struct parms_set_data *const d = (struct parms_set_data *) data;
3870 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3871 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3873 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3874 d->nregs--;
3878 /* Look backward for first parameter to be loaded.
3879 Note that loads of all parameters will not necessarily be
3880 found if CSE has eliminated some of them (e.g., an argument
3881 to the outer function is passed down as a parameter).
3882 Do not skip BOUNDARY. */
3883 rtx_insn *
3884 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
3886 struct parms_set_data parm;
3887 rtx p;
3888 rtx_insn *before, *first_set;
3890 /* Since different machines initialize their parameter registers
3891 in different orders, assume nothing. Collect the set of all
3892 parameter registers. */
3893 CLEAR_HARD_REG_SET (parm.regs);
3894 parm.nregs = 0;
3895 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3896 if (GET_CODE (XEXP (p, 0)) == USE
3897 && REG_P (XEXP (XEXP (p, 0), 0)))
3899 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3901 /* We only care about registers which can hold function
3902 arguments. */
3903 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3904 continue;
3906 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3907 parm.nregs++;
3909 before = call_insn;
3910 first_set = call_insn;
3912 /* Search backward for the first set of a register in this set. */
3913 while (parm.nregs && before != boundary)
3915 before = PREV_INSN (before);
3917 /* It is possible that some loads got CSEed from one call to
3918 another. Stop in that case. */
3919 if (CALL_P (before))
3920 break;
3922 /* Our caller must either ensure that we will find all sets
3923 (in case the code has not been optimized yet), or guard against
3924 intervening labels by setting BOUNDARY to the preceding
3925 CODE_LABEL. */
3926 if (LABEL_P (before))
3928 gcc_assert (before == boundary);
3929 break;
3932 if (INSN_P (before))
3934 int nregs_old = parm.nregs;
3935 note_stores (PATTERN (before), parms_set, &parm);
3936 /* If we found something that did not set a parameter reg,
3937 we're done. Do not keep going, as that might result
3938 in hoisting an insn before the setting of a pseudo
3939 that is used by the hoisted insn. */
3940 if (nregs_old != parm.nregs)
3941 first_set = before;
3942 else
3943 break;
3946 return first_set;
3949 /* Return true if we should avoid inserting code between INSN and the
3950 preceding call instruction. */
3952 bool
3953 keep_with_call_p (const rtx_insn *insn)
3955 rtx set;
3957 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3959 if (REG_P (SET_DEST (set))
3960 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3961 && fixed_regs[REGNO (SET_DEST (set))]
3962 && general_operand (SET_SRC (set), VOIDmode))
3963 return true;
3964 if (REG_P (SET_SRC (set))
3965 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
3966 && REG_P (SET_DEST (set))
3967 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3968 return true;
3969 /* There may be a stack pop just after the call and before the store
3970 of the return register. Search for the actual store when deciding
3971 if we can break or not. */
3972 if (SET_DEST (set) == stack_pointer_rtx)
3974 /* This CONST_CAST is okay because next_nonnote_insn just
3975 returns its argument and we assign it to a const_rtx
3976 variable. */
3977 const rtx_insn *i2
3978 = next_nonnote_insn (const_cast<rtx_insn *> (insn));
3979 if (i2 && keep_with_call_p (i2))
3980 return true;
3983 return false;
3986 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3987 to non-complex jumps. That is, direct unconditional, conditional,
3988 and tablejumps, but not computed jumps or returns. It also does
3989 not apply to the fallthru case of a conditional jump. */
3991 bool
3992 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
3994 rtx tmp = JUMP_LABEL (jump_insn);
3995 rtx_jump_table_data *table;
3997 if (label == tmp)
3998 return true;
4000 if (tablejump_p (jump_insn, NULL, &table))
4002 rtvec vec = table->get_labels ();
4003 int i, veclen = GET_NUM_ELEM (vec);
4005 for (i = 0; i < veclen; ++i)
4006 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4007 return true;
4010 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4011 return true;
4013 return false;
4017 /* Return an estimate of the cost of computing rtx X.
4018 One use is in cse, to decide which expression to keep in the hash table.
4019 Another is in rtl generation, to pick the cheapest way to multiply.
4020 Other uses like the latter are expected in the future.
4022 X appears as operand OPNO in an expression with code OUTER_CODE.
4023 SPEED specifies whether costs optimized for speed or size should
4024 be returned. */
4027 rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
4028 int opno, bool speed)
4030 int i, j;
4031 enum rtx_code code;
4032 const char *fmt;
4033 int total;
4034 int factor;
4036 if (x == 0)
4037 return 0;
4039 if (GET_MODE (x) != VOIDmode)
4040 mode = GET_MODE (x);
4042 /* A size N times larger than UNITS_PER_WORD likely needs N times as
4043 many insns, taking N times as long. */
4044 factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
4045 if (factor == 0)
4046 factor = 1;
4048 /* Compute the default costs of certain things.
4049 Note that targetm.rtx_costs can override the defaults. */
4051 code = GET_CODE (x);
4052 switch (code)
4054 case MULT:
4055 /* Multiplication has time-complexity O(N*N), where N is the
4056 number of units (translated from digits) when using
4057 schoolbook long multiplication. */
4058 total = factor * factor * COSTS_N_INSNS (5);
4059 break;
4060 case DIV:
4061 case UDIV:
4062 case MOD:
4063 case UMOD:
4064 /* Similarly, complexity for schoolbook long division. */
4065 total = factor * factor * COSTS_N_INSNS (7);
4066 break;
4067 case USE:
4068 /* Used in combine.c as a marker. */
4069 total = 0;
4070 break;
4071 case SET:
4072 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4073 the mode for the factor. */
4074 mode = GET_MODE (SET_DEST (x));
4075 factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
4076 if (factor == 0)
4077 factor = 1;
4078 /* Pass through. */
4079 default:
4080 total = factor * COSTS_N_INSNS (1);
4083 switch (code)
4085 case REG:
4086 return 0;
4088 case SUBREG:
4089 total = 0;
4090 /* If we can't tie these modes, make this expensive. The larger
4091 the mode, the more expensive it is. */
4092 if (! MODES_TIEABLE_P (mode, GET_MODE (SUBREG_REG (x))))
4093 return COSTS_N_INSNS (2 + factor);
4094 break;
4096 default:
4097 if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
4098 return total;
4099 break;
4102 /* Sum the costs of the sub-rtx's, plus cost of this operation,
4103 which is already in total. */
4105 fmt = GET_RTX_FORMAT (code);
4106 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4107 if (fmt[i] == 'e')
4108 total += rtx_cost (XEXP (x, i), mode, code, i, speed);
4109 else if (fmt[i] == 'E')
4110 for (j = 0; j < XVECLEN (x, i); j++)
4111 total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);
4113 return total;
4116 /* Fill in the structure C with information about both speed and size rtx
4117 costs for X, which is operand OPNO in an expression with code OUTER. */
4119 void
4120 get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
4121 struct full_rtx_costs *c)
4123 c->speed = rtx_cost (x, mode, outer, opno, true);
4124 c->size = rtx_cost (x, mode, outer, opno, false);
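/* Illustrative sketch (an addition, not in the original source): comparing
   two candidate expressions on both axes filled in by get_full_rtx_cost;
   the SET / opno-1 arguments follow the rtx_cost conventions above.  */

static bool
example_cheaper_p (rtx a, rtx b, machine_mode mode)
{
  struct full_rtx_costs ca, cb;
  get_full_rtx_cost (a, mode, SET, 1, &ca);
  get_full_rtx_cost (b, mode, SET, 1, &cb);
  return ca.speed < cb.speed || (ca.speed == cb.speed && ca.size < cb.size);
}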
4128 /* Return cost of address expression X.
4129 Expect that X is a properly formed address reference.
4131 The SPEED parameter specifies whether costs optimized for speed or size
4132 should be returned. */
4135 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4137 /* We may be asked for the cost of various unusual addresses, such as the
4138 operands of a push instruction.  It is not worthwhile complicating
4139 the target hook to handle such cases.  */
4141 if (!memory_address_addr_space_p (mode, x, as))
4142 return 1000;
4144 return targetm.address_cost (x, mode, as, speed);
4147 /* If the target doesn't override, compute the cost as with arithmetic. */
4150 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4152 return rtx_cost (x, Pmode, MEM, 0, speed);
4156 unsigned HOST_WIDE_INT
4157 nonzero_bits (const_rtx x, machine_mode mode)
4159 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
4162 unsigned int
4163 num_sign_bit_copies (const_rtx x, machine_mode mode)
4165 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
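/* Example (illustrative): for a CONST_INT the answer is exact, e.g.
   nonzero_bits (GEN_INT (6), SImode) == 6 (see the CONST_INT case
   below); for a register with no recorded information the conservative
   mask GET_MODE_MASK (SImode) is typically returned.  */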
4168 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4169 It avoids exponential behavior in nonzero_bits1 when X has
4170 identical subexpressions on the first or the second level. */
4172 static unsigned HOST_WIDE_INT
4173 cached_nonzero_bits (const_rtx x, machine_mode mode, const_rtx known_x,
4174 machine_mode known_mode,
4175 unsigned HOST_WIDE_INT known_ret)
4177 if (x == known_x && mode == known_mode)
4178 return known_ret;
4180 /* Try to find identical subexpressions. If found call
4181 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4182 precomputed value for the subexpression as KNOWN_RET. */
4184 if (ARITHMETIC_P (x))
4186 rtx x0 = XEXP (x, 0);
4187 rtx x1 = XEXP (x, 1);
4189 /* Check the first level. */
4190 if (x0 == x1)
4191 return nonzero_bits1 (x, mode, x0, mode,
4192 cached_nonzero_bits (x0, mode, known_x,
4193 known_mode, known_ret));
4195 /* Check the second level. */
4196 if (ARITHMETIC_P (x0)
4197 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4198 return nonzero_bits1 (x, mode, x1, mode,
4199 cached_nonzero_bits (x1, mode, known_x,
4200 known_mode, known_ret));
4202 if (ARITHMETIC_P (x1)
4203 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4204 return nonzero_bits1 (x, mode, x0, mode,
4205 cached_nonzero_bits (x0, mode, known_x,
4206 known_mode, known_ret));
4209 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4212 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4213 We don't let nonzero_bits recur into num_sign_bit_copies, because that
4214 is less useful. We can't allow both, because that results in exponential
4215 run time recursion. There is a nullstone testcase that triggered
4216 this. This macro avoids accidental uses of num_sign_bit_copies. */
4217 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4219 /* Given an expression, X, compute which bits in X can be nonzero.
4220 We don't care about bits outside of those defined in MODE.
4222 For most X this is simply GET_MODE_MASK (MODE), but if X is
4223 an arithmetic operation, we can do better. */
4225 static unsigned HOST_WIDE_INT
4226 nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
4227 machine_mode known_mode,
4228 unsigned HOST_WIDE_INT known_ret)
4230 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4231 unsigned HOST_WIDE_INT inner_nz;
4232 enum rtx_code code;
4233 machine_mode inner_mode;
4234 unsigned int mode_width = GET_MODE_PRECISION (mode);
4236 /* For floating-point and vector values, assume all bits are needed. */
4237 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
4238 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4239 return nonzero;
4241 /* If X is wider than MODE, use its mode instead. */
4242 if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
4244 mode = GET_MODE (x);
4245 nonzero = GET_MODE_MASK (mode);
4246 mode_width = GET_MODE_PRECISION (mode);
4249 if (mode_width > HOST_BITS_PER_WIDE_INT)
4250 /* Our only callers in this case look for single bit values. So
4251 just return the mode mask. Those tests will then be false. */
4252 return nonzero;
4254 /* If MODE is wider than X, but both are a single word for both the host
4255 and target machines, we can compute this from which bits of the
4256 object might be nonzero in its own mode, taking into account the fact
4257 that on many CISC machines, accessing an object in a wider mode
4258 causes the high-order bits to become undefined. So they are
4259 not known to be zero. */
4261 if (!WORD_REGISTER_OPERATIONS
4262 && GET_MODE (x) != VOIDmode
4263 && GET_MODE (x) != mode
4264 && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
4265 && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
4266 && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
4268 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
4269 known_x, known_mode, known_ret);
4270 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
4271 return nonzero;
4274 code = GET_CODE (x);
4275 switch (code)
4277 case REG:
4278 #if defined(POINTERS_EXTEND_UNSIGNED)
4279 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4280 all the bits above ptr_mode are known to be zero. */
4281 /* As we do not know which address space the pointer is referring to,
4282 we can do this only if the target does not support different pointer
4283 or address modes depending on the address space. */
4284 if (target_default_pointer_address_modes_p ()
4285 && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4286 && REG_POINTER (x)
4287 && !targetm.have_ptr_extend ())
4288 nonzero &= GET_MODE_MASK (ptr_mode);
4289 #endif
4291 /* Include declared information about alignment of pointers. */
4292 /* ??? We don't properly preserve REG_POINTER changes across
4293 pointer-to-integer casts, so we can't trust it except for
4294 things that we know must be pointers. See execute/960116-1.c. */
4295 if ((x == stack_pointer_rtx
4296 || x == frame_pointer_rtx
4297 || x == arg_pointer_rtx)
4298 && REGNO_POINTER_ALIGN (REGNO (x)))
4300 unsigned HOST_WIDE_INT alignment
4301 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4303 #ifdef PUSH_ROUNDING
4304 /* If PUSH_ROUNDING is defined, it is possible for the
4305 stack to be momentarily aligned only to that amount,
4306 so we pick the least alignment. */
4307 if (x == stack_pointer_rtx && PUSH_ARGS)
4308 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
4309 alignment);
4310 #endif
4312 nonzero &= ~(alignment - 1);
4316 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4317 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
4318 known_mode, known_ret,
4319 &nonzero_for_hook);
4321 if (new_rtx)
4322 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4323 known_mode, known_ret);
4325 return nonzero_for_hook;
4328 case CONST_INT:
4329 /* If X is negative in MODE, sign-extend the value. */
4330 if (SHORT_IMMEDIATES_SIGN_EXTEND && INTVAL (x) > 0
4331 && mode_width < BITS_PER_WORD
4332 && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
4333 != 0)
4334 return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4336 return UINTVAL (x);
4338 case MEM:
4339 #ifdef LOAD_EXTEND_OP
4340 /* In many, if not most, RISC machines, reading a byte from memory
4341 zeros the rest of the register. Noticing that fact saves a lot
4342 of extra zero-extends. */
4343 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
4344 nonzero &= GET_MODE_MASK (GET_MODE (x));
4345 #endif
4346 break;
4348 case EQ: case NE:
4349 case UNEQ: case LTGT:
4350 case GT: case GTU: case UNGT:
4351 case LT: case LTU: case UNLT:
4352 case GE: case GEU: case UNGE:
4353 case LE: case LEU: case UNLE:
4354 case UNORDERED: case ORDERED:
4355 /* If this produces an integer result, we know which bits are set.
4356 Code here used to clear bits outside the mode of X, but that is
4357 now done above. */
4358 /* Mind that MODE is the mode the caller wants to look at this
4359 operation in, and not the actual operation mode. We can wind
4360 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4361 that describes the results of a vector compare. */
4362 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
4363 && mode_width <= HOST_BITS_PER_WIDE_INT)
4364 nonzero = STORE_FLAG_VALUE;
4365 break;
4367 case NEG:
4368 #if 0
4369 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4370 and num_sign_bit_copies. */
4371 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4372 == GET_MODE_PRECISION (GET_MODE (x)))
4373 nonzero = 1;
4374 #endif
4376 if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
4377 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
4378 break;
4380 case ABS:
4381 #if 0
4382 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4383 and num_sign_bit_copies. */
4384 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
4385 == GET_MODE_PRECISION (GET_MODE (x)))
4386 nonzero = 1;
4387 #endif
4388 break;
4390 case TRUNCATE:
4391 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4392 known_x, known_mode, known_ret)
4393 & GET_MODE_MASK (mode));
4394 break;
4396 case ZERO_EXTEND:
4397 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4398 known_x, known_mode, known_ret);
4399 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4400 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4401 break;
4403 case SIGN_EXTEND:
4404 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4405 Otherwise, show that all the bits in the outer mode but not in the
4406 inner mode may be nonzero. */
4407 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4408 known_x, known_mode, known_ret);
4409 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4411 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4412 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4413 inner_nz |= (GET_MODE_MASK (mode)
4414 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4417 nonzero &= inner_nz;
4418 break;
4420 case AND:
4421 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4422 known_x, known_mode, known_ret)
4423 & cached_nonzero_bits (XEXP (x, 1), mode,
4424 known_x, known_mode, known_ret);
4425 break;
4427 case XOR: case IOR:
4428 case UMIN: case UMAX: case SMIN: case SMAX:
4430 unsigned HOST_WIDE_INT nonzero0
4431 = cached_nonzero_bits (XEXP (x, 0), mode,
4432 known_x, known_mode, known_ret);
4434 /* Don't call nonzero_bits a second time if it cannot change
4435 anything. */
4436 if ((nonzero & nonzero0) != nonzero)
4437 nonzero &= nonzero0
4438 | cached_nonzero_bits (XEXP (x, 1), mode,
4439 known_x, known_mode, known_ret);
4441 break;
4443 case PLUS: case MINUS:
4444 case MULT:
4445 case DIV: case UDIV:
4446 case MOD: case UMOD:
4447 /* We can apply the rules of arithmetic to compute the number of
4448 high- and low-order zero bits of these operations. We start by
4449 computing the width (position of the highest-order nonzero bit)
4450 and the number of low-order zero bits for each value. */
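/* For example, if nz0 == 0x0c (width 4, two low zero bits) and
   nz1 == 0x05 (width 3, no low zero bits), a PLUS needs at most
   MAX (4, 3) + 1 = 5 bits and keeps MIN (2, 0) = 0 known low zero
   bits, while a MULT needs at most 4 + 3 = 7 bits and keeps
   2 + 0 = 2 low zero bits.  */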
4452 unsigned HOST_WIDE_INT nz0
4453 = cached_nonzero_bits (XEXP (x, 0), mode,
4454 known_x, known_mode, known_ret);
4455 unsigned HOST_WIDE_INT nz1
4456 = cached_nonzero_bits (XEXP (x, 1), mode,
4457 known_x, known_mode, known_ret);
4458 int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
4459 int width0 = floor_log2 (nz0) + 1;
4460 int width1 = floor_log2 (nz1) + 1;
4461 int low0 = floor_log2 (nz0 & -nz0);
4462 int low1 = floor_log2 (nz1 & -nz1);
4463 unsigned HOST_WIDE_INT op0_maybe_minusp
4464 = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4465 unsigned HOST_WIDE_INT op1_maybe_minusp
4466 = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
4467 unsigned int result_width = mode_width;
4468 int result_low = 0;
4470 switch (code)
4472 case PLUS:
4473 result_width = MAX (width0, width1) + 1;
4474 result_low = MIN (low0, low1);
4475 break;
4476 case MINUS:
4477 result_low = MIN (low0, low1);
4478 break;
4479 case MULT:
4480 result_width = width0 + width1;
4481 result_low = low0 + low1;
4482 break;
4483 case DIV:
4484 if (width1 == 0)
4485 break;
4486 if (!op0_maybe_minusp && !op1_maybe_minusp)
4487 result_width = width0;
4488 break;
4489 case UDIV:
4490 if (width1 == 0)
4491 break;
4492 result_width = width0;
4493 break;
4494 case MOD:
4495 if (width1 == 0)
4496 break;
4497 if (!op0_maybe_minusp && !op1_maybe_minusp)
4498 result_width = MIN (width0, width1);
4499 result_low = MIN (low0, low1);
4500 break;
4501 case UMOD:
4502 if (width1 == 0)
4503 break;
4504 result_width = MIN (width0, width1);
4505 result_low = MIN (low0, low1);
4506 break;
4507 default:
4508 gcc_unreachable ();
4511 if (result_width < mode_width)
4512 nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;
4514 if (result_low > 0)
4515 nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
4517 break;
4519 case ZERO_EXTRACT:
4520 if (CONST_INT_P (XEXP (x, 1))
4521 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4522 nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
4523 break;
4525 case SUBREG:
4526 /* If this is a SUBREG formed for a promoted variable that has
4527 been zero-extended, we know that at least the high-order bits
4528 are zero, though others might be too. */
4530 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
4531 nonzero = GET_MODE_MASK (GET_MODE (x))
4532 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
4533 known_x, known_mode, known_ret);
4535 inner_mode = GET_MODE (SUBREG_REG (x));
4536 /* If the inner mode is a single word for both the host and target
4537 machines, we can compute this from which bits of the inner
4538 object might be nonzero. */
4539 if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
4540 && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
4542 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4543 known_x, known_mode, known_ret);
4545 #if WORD_REGISTER_OPERATIONS && defined (LOAD_EXTEND_OP)
4546 /* If this is a typical RISC machine, we only have to worry
4547 about the way loads are extended. */
4548 if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
4549 ? val_signbit_known_set_p (inner_mode, nonzero)
4550 : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
4551 || !MEM_P (SUBREG_REG (x)))
4552 #endif
4554 /* On many CISC machines, accessing an object in a wider mode
4555 causes the high-order bits to become undefined. So they are
4556 not known to be zero. */
4557 if (GET_MODE_PRECISION (GET_MODE (x))
4558 > GET_MODE_PRECISION (inner_mode))
4559 nonzero |= (GET_MODE_MASK (GET_MODE (x))
4560 & ~GET_MODE_MASK (inner_mode));
4563 break;
4565 case ASHIFTRT:
4566 case LSHIFTRT:
4567 case ASHIFT:
4568 case ROTATE:
4569 /* The nonzero bits are in two classes: any bits within MODE
4570 that aren't in GET_MODE (x) are always significant. The rest of the
4571 nonzero bits are those that are significant in the operand of
4572 the shift when shifted the appropriate number of bits. This
4573 shows that high-order bits are cleared by the right shift and
4574 low-order bits by left shifts. */
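/* For example, in (lshiftrt:SI X (const_int 8)) every possibly-nonzero
   bit of X moves down eight places, so the top eight bits of the
   result are known to be zero.  */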
4575 if (CONST_INT_P (XEXP (x, 1))
4576 && INTVAL (XEXP (x, 1)) >= 0
4577 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4578 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
4580 machine_mode inner_mode = GET_MODE (x);
4581 unsigned int width = GET_MODE_PRECISION (inner_mode);
4582 int count = INTVAL (XEXP (x, 1));
4583 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
4584 unsigned HOST_WIDE_INT op_nonzero
4585 = cached_nonzero_bits (XEXP (x, 0), mode,
4586 known_x, known_mode, known_ret);
4587 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4588 unsigned HOST_WIDE_INT outer = 0;
4590 if (mode_width > width)
4591 outer = (op_nonzero & nonzero & ~mode_mask);
4593 if (code == LSHIFTRT)
4594 inner >>= count;
4595 else if (code == ASHIFTRT)
4597 inner >>= count;
4599 /* If the sign bit may have been nonzero before the shift, we
4600 need to mark all the places it could have been copied to
4601 by the shift as possibly nonzero. */
4602 if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
4603 inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
4604 << (width - count);
4606 else if (code == ASHIFT)
4607 inner <<= count;
4608 else
4609 inner = ((inner << (count % width)
4610 | (inner >> (width - (count % width)))) & mode_mask);
4612 nonzero &= (outer | inner);
4614 break;
4616 case FFS:
4617 case POPCOUNT:
4618 /* This is at most the number of bits in the mode. */
4619 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4620 break;
4622 case CLZ:
4623 /* If CLZ has a known value at zero, then the nonzero bits are
4624 that value, plus the number of bits in the mode minus one. */
4625 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4626 nonzero
4627 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4628 else
4629 nonzero = -1;
4630 break;
4632 case CTZ:
4633 /* If CTZ has a known value at zero, then the nonzero bits are
4634 that value, plus the number of bits in the mode minus one. */
4635 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4636 nonzero
4637 |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4638 else
4639 nonzero = -1;
4640 break;
4642 case CLRSB:
4643 /* This is at most the number of bits in the mode minus 1. */
4644 nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4645 break;
4647 case PARITY:
4648 nonzero = 1;
4649 break;
4651 case IF_THEN_ELSE:
4653 unsigned HOST_WIDE_INT nonzero_true
4654 = cached_nonzero_bits (XEXP (x, 1), mode,
4655 known_x, known_mode, known_ret);
4657 /* Don't call nonzero_bits a second time if it cannot change
4658 anything. */
4659 if ((nonzero & nonzero_true) != nonzero)
4660 nonzero &= nonzero_true
4661 | cached_nonzero_bits (XEXP (x, 2), mode,
4662 known_x, known_mode, known_ret);
4664 break;
4666 default:
4667 break;
4670 return nonzero;
4673 /* See the macro definition above. */
4674 #undef cached_num_sign_bit_copies
4677 /* The function cached_num_sign_bit_copies is a wrapper around
4678 num_sign_bit_copies1. It avoids exponential behavior in
4679 num_sign_bit_copies1 when X has identical subexpressions on the
4680 first or the second level. */
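/* For example, in (plus:SI X X) the bits of X are computed once and the
   cached result is reused for the second operand instead of being
   recomputed.  */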
4682 static unsigned int
4683 cached_num_sign_bit_copies (const_rtx x, machine_mode mode, const_rtx known_x,
4684 machine_mode known_mode,
4685 unsigned int known_ret)
4687 if (x == known_x && mode == known_mode)
4688 return known_ret;
4690 /* Try to find identical subexpressions. If found call
4691 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4692 the precomputed value for the subexpression as KNOWN_RET. */
4694 if (ARITHMETIC_P (x))
4696 rtx x0 = XEXP (x, 0);
4697 rtx x1 = XEXP (x, 1);
4699 /* Check the first level. */
4700 if (x0 == x1)
4701 return
4702 num_sign_bit_copies1 (x, mode, x0, mode,
4703 cached_num_sign_bit_copies (x0, mode, known_x,
4704 known_mode,
4705 known_ret));
4707 /* Check the second level. */
4708 if (ARITHMETIC_P (x0)
4709 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4710 return
4711 num_sign_bit_copies1 (x, mode, x1, mode,
4712 cached_num_sign_bit_copies (x1, mode, known_x,
4713 known_mode,
4714 known_ret));
4716 if (ARITHMETIC_P (x1)
4717 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4718 return
4719 num_sign_bit_copies1 (x, mode, x0, mode,
4720 cached_num_sign_bit_copies (x0, mode, known_x,
4721 known_mode,
4722 known_ret));
4725 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4728 /* Return the number of bits at the high-order end of X that are known to
4729 be equal to the sign bit. X will be used in mode MODE; if MODE is
4730 VOIDmode, X will be used in its own mode. The returned value will always
4731 be between 1 and the number of bits in MODE. */
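/* For example, (const_int -4) looked at in SImode is 0xfffffffc, whose
   top 30 bits all equal the sign bit, so the CONST_INT case below
   returns 30.  */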
4733 static unsigned int
4734 num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x,
4735 machine_mode known_mode,
4736 unsigned int known_ret)
4738 enum rtx_code code = GET_CODE (x);
4739 unsigned int bitwidth = GET_MODE_PRECISION (mode);
4740 int num0, num1, result;
4741 unsigned HOST_WIDE_INT nonzero;
4743 /* If we weren't given a mode, use the mode of X. If the mode is still
4744 VOIDmode, we don't know anything. Likewise if one of the modes is
4745 floating-point. */
4747 if (mode == VOIDmode)
4748 mode = GET_MODE (x);
4750 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
4751 || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
4752 return 1;
4754 /* For a smaller object, just ignore the high bits. */
4755 if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
4757 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4758 known_x, known_mode, known_ret);
4759 return MAX (1,
4760 num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
4763 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
4765 /* If this machine does not do all register operations on the entire
4766 register and MODE is wider than the mode of X, we can say nothing
4767 at all about the high-order bits. */
4768 if (!WORD_REGISTER_OPERATIONS)
4769 return 1;
4771 /* Likewise on machines that do, if the mode of the object is smaller
4772 than a word and loads of that size don't sign extend, we can say
4773 nothing about the high order bits. */
4774 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
4775 #ifdef LOAD_EXTEND_OP
4776 && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
4777 #endif
4779 return 1;
4782 switch (code)
4784 case REG:
4786 #if defined(POINTERS_EXTEND_UNSIGNED)
4787 /* If pointers extend signed and this is a pointer in Pmode, say that
4788 all the bits above ptr_mode are known to be sign bit copies. */
4789 /* As we do not know which address space the pointer is referring to,
4790 we can do this only if the target does not support different pointer
4791 or address modes depending on the address space. */
4792 if (target_default_pointer_address_modes_p ()
4793 && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4794 && mode == Pmode && REG_POINTER (x)
4795 && !targetm.have_ptr_extend ())
4796 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
4797 #endif
4800 unsigned int copies_for_hook = 1, copies = 1;
4801 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4802 known_mode, known_ret,
4803 &copies_for_hook);
4805 if (new_rtx)
4806 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
4807 known_mode, known_ret);
4809 if (copies > 1 || copies_for_hook > 1)
4810 return MAX (copies, copies_for_hook);
4812 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4814 break;
4816 case MEM:
4817 #ifdef LOAD_EXTEND_OP
4818 /* Some RISC machines sign-extend all loads smaller than a word. */
4819 if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
4820 return MAX (1, ((int) bitwidth
4821 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
4822 #endif
4823 break;
4825 case CONST_INT:
4826 /* If the constant is negative, take its 1's complement and remask.
4827 Then see how many zero bits we have. */
4828 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
4829 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4830 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4831 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4833 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4835 case SUBREG:
4836 /* If this is a SUBREG for a promoted object that is sign-extended
4837 and we are looking at it in a wider mode, we know that at least the
4838 high-order bits are known to be sign bit copies. */
4840 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
4842 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4843 known_x, known_mode, known_ret);
4844 return MAX ((int) bitwidth
4845 - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
4846 num0);
4849 /* For a smaller object, just ignore the high bits. */
4850 if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
4852 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4853 known_x, known_mode, known_ret);
4854 return MAX (1, (num0
4855 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
4856 - bitwidth)));
4859 #ifdef LOAD_EXTEND_OP
4860 /* For paradoxical SUBREGs on machines where all register operations
4861 affect the entire register, just look inside. Note that we are
4862 passing MODE to the recursive call, so the number of sign bit copies
4863 will remain relative to that mode, not the inner mode. */
4865 /* This works only if loads sign extend. Otherwise, if we get a
4866 reload for the inner part, it may be loaded from the stack, and
4867 then we lose all sign bit copies that existed before the store
4868 to the stack. */
4870 if (WORD_REGISTER_OPERATIONS
4871 && paradoxical_subreg_p (x)
4872 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
4873 && MEM_P (SUBREG_REG (x)))
4874 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4875 known_x, known_mode, known_ret);
4876 #endif
4877 break;
4879 case SIGN_EXTRACT:
4880 if (CONST_INT_P (XEXP (x, 1)))
4881 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4882 break;
4884 case SIGN_EXTEND:
4885 return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4886 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4887 known_x, known_mode, known_ret));
4889 case TRUNCATE:
4890 /* For a smaller object, just ignore the high bits. */
4891 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4892 known_x, known_mode, known_ret);
4893 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
4894 - bitwidth)));
4896 case NOT:
4897 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4898 known_x, known_mode, known_ret);
4900 case ROTATE: case ROTATERT:
4901 /* If we are rotating left by a number of bits less than the number
4902 of sign bit copies, we can just subtract that amount from the
4903 number. */
4904 if (CONST_INT_P (XEXP (x, 1))
4905 && INTVAL (XEXP (x, 1)) >= 0
4906 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4908 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4909 known_x, known_mode, known_ret);
4910 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
4911 : (int) bitwidth - INTVAL (XEXP (x, 1))));
4913 break;
4915 case NEG:
4916 /* In general, this subtracts one sign bit copy. But if the value
4917 is known to be positive, the number of sign bit copies is the
4918 same as that of the input. Finally, if the input has just one bit
4919 that might be nonzero, all the bits are copies of the sign bit. */
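/* For example, negating a value known to be 0 or 1 gives 0 or -1, so
   every bit of the result is a copy of the sign bit and we return
   BITWIDTH.  */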
4920 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4921 known_x, known_mode, known_ret);
4922 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4923 return num0 > 1 ? num0 - 1 : 1;
4925 nonzero = nonzero_bits (XEXP (x, 0), mode);
4926 if (nonzero == 1)
4927 return bitwidth;
4929 if (num0 > 1
4930 && (((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
4931 num0--;
4933 return num0;
4935 case IOR: case AND: case XOR:
4936 case SMIN: case SMAX: case UMIN: case UMAX:
4937 /* Logical operations will preserve the number of sign-bit copies.
4938 MIN and MAX operations always return one of the operands. */
4939 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4940 known_x, known_mode, known_ret);
4941 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4942 known_x, known_mode, known_ret);
4944 /* If num1 is clearing some of the top bits then regardless of
4945 the other term, we are guaranteed to have at least that many
4946 high-order zero bits. */
4947 if (code == AND
4948 && num1 > 1
4949 && bitwidth <= HOST_BITS_PER_WIDE_INT
4950 && CONST_INT_P (XEXP (x, 1))
4951 && (UINTVAL (XEXP (x, 1))
4952 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0)
4953 return num1;
4955 /* Similarly for IOR when setting high-order bits. */
4956 if (code == IOR
4957 && num1 > 1
4958 && bitwidth <= HOST_BITS_PER_WIDE_INT
4959 && CONST_INT_P (XEXP (x, 1))
4960 && (UINTVAL (XEXP (x, 1))
4961 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4962 return num1;
4964 return MIN (num0, num1);
4966 case PLUS: case MINUS:
4967 /* For addition and subtraction, we can have a 1-bit carry. However,
4968 if we are subtracting 1 from a positive number, there will not
4969 be such a carry. Furthermore, if the positive number is known to
4970 be 0 or 1, we know the result is either -1 or 0. */
4972 if (code == PLUS && XEXP (x, 1) == constm1_rtx
4973 && bitwidth <= HOST_BITS_PER_WIDE_INT)
4975 nonzero = nonzero_bits (XEXP (x, 0), mode);
4976 if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
4977 return (nonzero == 1 || nonzero == 0 ? bitwidth
4978 : bitwidth - floor_log2 (nonzero) - 1);
4981 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4982 known_x, known_mode, known_ret);
4983 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4984 known_x, known_mode, known_ret);
4985 result = MAX (1, MIN (num0, num1) - 1);
4987 return result;
4989 case MULT:
4990 /* The number of bits of the product is the sum of the number of
4991 bits of both terms. However, unless one of the terms is known
4992 to be positive, we must allow for an additional bit since negating
4993 a negative number can remove one sign bit copy. */
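/* For example, in SImode -32768 has 17 sign-bit copies, yet
   (-32768) * (-32768) == 0x40000000 has only one:
   32 - 15 - 15 = 2 copies, minus 1 because both operands may be
   negative.  */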
4995 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4996 known_x, known_mode, known_ret);
4997 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4998 known_x, known_mode, known_ret);
5000 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
5001 if (result > 0
5002 && (bitwidth > HOST_BITS_PER_WIDE_INT
5003 || (((nonzero_bits (XEXP (x, 0), mode)
5004 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
5005 && ((nonzero_bits (XEXP (x, 1), mode)
5006 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)))
5007 != 0))))
5008 result--;
5010 return MAX (1, result);
5012 case UDIV:
5013 /* The result must be <= the first operand. If the first operand
5014 has the high bit set, we know nothing about the number of sign
5015 bit copies. */
5016 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5017 return 1;
5018 else if ((nonzero_bits (XEXP (x, 0), mode)
5019 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
5020 return 1;
5021 else
5022 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5023 known_x, known_mode, known_ret);
5025 case UMOD:
5026 /* The result must be <= the second operand. If the second operand
5027 has (or just might have) the high bit set, we know nothing about
5028 the number of sign bit copies. */
5029 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5030 return 1;
5031 else if ((nonzero_bits (XEXP (x, 1), mode)
5032 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
5033 return 1;
5034 else
5035 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
5036 known_x, known_mode, known_ret);
5038 case DIV:
5039 /* Similar to unsigned division, except that we have to worry about
5040 the case where the divisor is negative, in which case we have
5041 to add 1. */
5042 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5043 known_x, known_mode, known_ret);
5044 if (result > 1
5045 && (bitwidth > HOST_BITS_PER_WIDE_INT
5046 || (nonzero_bits (XEXP (x, 1), mode)
5047 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
5048 result--;
5050 return result;
5052 case MOD:
5053 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5054 known_x, known_mode, known_ret);
5055 if (result > 1
5056 && (bitwidth > HOST_BITS_PER_WIDE_INT
5057 || (nonzero_bits (XEXP (x, 1), mode)
5058 & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
5059 result--;
5061 return result;
5063 case ASHIFTRT:
5064 /* Shifts by a constant add to the number of bits equal to the
5065 sign bit. */
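/* For example, (ashiftrt:SI X (const_int 8)) has at least nine
   sign-bit copies even if only one is known for X.  */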
5066 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5067 known_x, known_mode, known_ret);
5068 if (CONST_INT_P (XEXP (x, 1))
5069 && INTVAL (XEXP (x, 1)) > 0
5070 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
5071 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
5073 return num0;
5075 case ASHIFT:
5076 /* Left shifts destroy copies. */
5077 if (!CONST_INT_P (XEXP (x, 1))
5078 || INTVAL (XEXP (x, 1)) < 0
5079 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
5080 || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
5081 return 1;
5083 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5084 known_x, known_mode, known_ret);
5085 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
5087 case IF_THEN_ELSE:
5088 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5089 known_x, known_mode, known_ret);
5090 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
5091 known_x, known_mode, known_ret);
5092 return MIN (num0, num1);
5094 case EQ: case NE: case GE: case GT: case LE: case LT:
5095 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
5096 case GEU: case GTU: case LEU: case LTU:
5097 case UNORDERED: case ORDERED:
5098 /* If STORE_FLAG_VALUE is negative, take its 1's complement and
5099 Then see how many zero bits we have. */
5100 nonzero = STORE_FLAG_VALUE;
5101 if (bitwidth <= HOST_BITS_PER_WIDE_INT
5102 && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
5103 nonzero = (~nonzero) & GET_MODE_MASK (mode);
5105 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5107 default:
5108 break;
5111 /* If we haven't been able to figure it out by one of the above rules,
5112 see if some of the high-order bits are known to be zero. If so,
5113 count those bits and return one less than that amount. If we can't
5114 safely compute the mask for this mode, just return 1. */
5116 bitwidth = GET_MODE_PRECISION (mode);
5117 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5118 return 1;
5120 nonzero = nonzero_bits (x, mode);
5121 return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
5122 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
5125 /* Calculate the rtx_cost of a single instruction. A return value of
5126 zero indicates an instruction pattern without a known cost. */
5128 int
5129 insn_rtx_cost (rtx pat, bool speed)
5131 int i, cost;
5132 rtx set;
5134 /* Extract the single set rtx from the instruction pattern.
5135 We can't use single_set since we only have the pattern. */
5136 if (GET_CODE (pat) == SET)
5137 set = pat;
5138 else if (GET_CODE (pat) == PARALLEL)
5140 set = NULL_RTX;
5141 for (i = 0; i < XVECLEN (pat, 0); i++)
5143 rtx x = XVECEXP (pat, 0, i);
5144 if (GET_CODE (x) == SET)
5146 if (set)
5147 return 0;
5148 set = x;
5151 if (!set)
5152 return 0;
5154 else
5155 return 0;
5157 cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
5158 return cost > 0 ? cost : COSTS_N_INSNS (1);
5161 /* Return an estimate of the cost of computing SEQ. */
5163 unsigned
5164 seq_cost (const rtx_insn *seq, bool speed)
5166 unsigned cost = 0;
5167 rtx set;
5169 for (; seq; seq = NEXT_INSN (seq))
5171 set = single_set (seq);
5172 if (set)
5173 cost += set_rtx_cost (set, speed);
5174 else
5175 cost++;
5178 return cost;
5181 /* Given an insn INSN and condition COND, return the condition in a
5182 canonical form to simplify testing by callers. Specifically:
5184 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
5185 (2) Both operands will be machine operands; (cc0) will have been replaced.
5186 (3) If an operand is a constant, it will be the second operand.
5187 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
5188 for GE, GEU, and LEU.
5190 If the condition cannot be understood, or is an inequality floating-point
5191 comparison which needs to be reversed, 0 will be returned.
5193 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
5195 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5196 insn used in locating the condition was found. If a replacement test
5197 of the condition is desired, it should be placed in front of that
5198 insn and we will be sure that the inputs are still valid.
5200 If WANT_REG is nonzero, we wish the condition to be relative to that
5201 register, if possible. Therefore, do not canonicalize the condition
5202 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
5203 to be a compare to a CC mode register.
5205 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
5206 and at INSN. */
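/* For example, rule (4) canonicalizes (le (reg:SI R) (const_int 4))
   into (lt (reg:SI R) (const_int 5)).  */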
5208 rtx
5209 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
5210 rtx_insn **earliest,
5211 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5213 enum rtx_code code;
5214 rtx_insn *prev = insn;
5215 const_rtx set;
5216 rtx tem;
5217 rtx op0, op1;
5218 int reverse_code = 0;
5219 machine_mode mode;
5220 basic_block bb = BLOCK_FOR_INSN (insn);
5222 code = GET_CODE (cond);
5223 mode = GET_MODE (cond);
5224 op0 = XEXP (cond, 0);
5225 op1 = XEXP (cond, 1);
5227 if (reverse)
5228 code = reversed_comparison_code (cond, insn);
5229 if (code == UNKNOWN)
5230 return 0;
5232 if (earliest)
5233 *earliest = insn;
5235 /* If we are comparing a register with zero, see if the register is set
5236 in the previous insn to a COMPARE or a comparison operation. Perform
5237 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
5238 in cse.c */
5240 while ((GET_RTX_CLASS (code) == RTX_COMPARE
5241 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5242 && op1 == CONST0_RTX (GET_MODE (op0))
5243 && op0 != want_reg)
5245 /* Set nonzero when we find something of interest. */
5246 rtx x = 0;
5248 /* If comparison with cc0, import actual comparison from compare
5249 insn. */
5250 if (op0 == cc0_rtx)
5252 if ((prev = prev_nonnote_insn (prev)) == 0
5253 || !NONJUMP_INSN_P (prev)
5254 || (set = single_set (prev)) == 0
5255 || SET_DEST (set) != cc0_rtx)
5256 return 0;
5258 op0 = SET_SRC (set);
5259 op1 = CONST0_RTX (GET_MODE (op0));
5260 if (earliest)
5261 *earliest = prev;
5264 /* If this is a COMPARE, pick up the two things being compared. */
5265 if (GET_CODE (op0) == COMPARE)
5267 op1 = XEXP (op0, 1);
5268 op0 = XEXP (op0, 0);
5269 continue;
5271 else if (!REG_P (op0))
5272 break;
5274 /* Go back to the previous insn. Stop if it is not an INSN. We also
5275 stop if it isn't a single set or if it has a REG_INC note because
5276 we don't want to bother dealing with it. */
5278 prev = prev_nonnote_nondebug_insn (prev);
5280 if (prev == 0
5281 || !NONJUMP_INSN_P (prev)
5282 || FIND_REG_INC_NOTE (prev, NULL_RTX)
5283 /* In cfglayout mode, there do not have to be labels at the
5284 beginning of a block, or jumps at the end, so the previous
5285 conditions would not stop us when we reach bb boundary. */
5286 || BLOCK_FOR_INSN (prev) != bb)
5287 break;
5289 set = set_of (op0, prev);
5291 if (set
5292 && (GET_CODE (set) != SET
5293 || !rtx_equal_p (SET_DEST (set), op0)))
5294 break;
5296 /* If this is setting OP0, get what it sets it to if it looks
5297 relevant. */
5298 if (set)
5300 machine_mode inner_mode = GET_MODE (SET_DEST (set));
5301 #ifdef FLOAT_STORE_FLAG_VALUE
5302 REAL_VALUE_TYPE fsfv;
5303 #endif
5305 /* ??? We may not combine comparisons done in a CCmode with
5306 comparisons not done in a CCmode. This is to aid targets
5307 like Alpha that have an IEEE compliant EQ instruction, and
5308 a non-IEEE compliant BEQ instruction. The use of CCmode is
5309 actually artificial, simply to prevent the combination, but
5310 should not affect other platforms.
5312 However, we must allow VOIDmode comparisons to match either
5313 CCmode or non-CCmode comparison, because some ports have
5314 modeless comparisons inside branch patterns.
5316 ??? This mode check should perhaps look more like the mode check
5317 in simplify_comparison in combine. */
5318 if (((GET_MODE_CLASS (mode) == MODE_CC)
5319 != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5320 && mode != VOIDmode
5321 && inner_mode != VOIDmode)
5322 break;
5323 if (GET_CODE (SET_SRC (set)) == COMPARE
5324 || (((code == NE
5325 || (code == LT
5326 && val_signbit_known_set_p (inner_mode,
5327 STORE_FLAG_VALUE))
5328 #ifdef FLOAT_STORE_FLAG_VALUE
5329 || (code == LT
5330 && SCALAR_FLOAT_MODE_P (inner_mode)
5331 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5332 REAL_VALUE_NEGATIVE (fsfv)))
5333 #endif
5335 && COMPARISON_P (SET_SRC (set))))
5336 x = SET_SRC (set);
5337 else if (((code == EQ
5338 || (code == GE
5339 && val_signbit_known_set_p (inner_mode,
5340 STORE_FLAG_VALUE))
5341 #ifdef FLOAT_STORE_FLAG_VALUE
5342 || (code == GE
5343 && SCALAR_FLOAT_MODE_P (inner_mode)
5344 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5345 REAL_VALUE_NEGATIVE (fsfv)))
5346 #endif
5348 && COMPARISON_P (SET_SRC (set)))
5350 reverse_code = 1;
5351 x = SET_SRC (set);
5353 else if ((code == EQ || code == NE)
5354 && GET_CODE (SET_SRC (set)) == XOR)
5355 /* Handle sequences like:
5357 (set op0 (xor X Y))
5358 ...(eq|ne op0 (const_int 0))...
5360 in which case:
5362 (eq op0 (const_int 0)) reduces to (eq X Y)
5363 (ne op0 (const_int 0)) reduces to (ne X Y)
5365 This is the form used by MIPS16, for example. */
5366 x = SET_SRC (set);
5367 else
5368 break;
5371 else if (reg_set_p (op0, prev))
5372 /* If this sets OP0, but not directly, we have to give up. */
5373 break;
5375 if (x)
5377 /* If the caller is expecting the condition to be valid at INSN,
5378 make sure X doesn't change before INSN. */
5379 if (valid_at_insn_p)
5380 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5381 break;
5382 if (COMPARISON_P (x))
5383 code = GET_CODE (x);
5384 if (reverse_code)
5386 code = reversed_comparison_code (x, prev);
5387 if (code == UNKNOWN)
5388 return 0;
5389 reverse_code = 0;
5392 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5393 if (earliest)
5394 *earliest = prev;
5398 /* If constant is first, put it last. */
5399 if (CONSTANT_P (op0))
5400 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5402 /* If OP0 is the result of a comparison, we weren't able to find what
5403 was really being compared, so fail. */
5404 if (!allow_cc_mode
5405 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5406 return 0;
5408 /* Canonicalize any ordered comparison with integers involving equality
5409 if we can do computations in the relevant mode and we do not
5410 overflow. */
5412 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
5413 && CONST_INT_P (op1)
5414 && GET_MODE (op0) != VOIDmode
5415 && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
5417 HOST_WIDE_INT const_val = INTVAL (op1);
5418 unsigned HOST_WIDE_INT uconst_val = const_val;
5419 unsigned HOST_WIDE_INT max_val
5420 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
5422 switch (code)
5424 case LE:
5425 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5426 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
5427 break;
5429 /* When cross-compiling, const_val might be sign-extended from
5430 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
5431 case GE:
5432 if ((const_val & max_val)
5433 != ((unsigned HOST_WIDE_INT) 1
5434 << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
5435 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
5436 break;
5438 case LEU:
5439 if (uconst_val < max_val)
5440 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
5441 break;
5443 case GEU:
5444 if (uconst_val != 0)
5445 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
5446 break;
5448 default:
5449 break;
5453 /* Never return CC0; return zero instead. */
5454 if (CC0_P (op0))
5455 return 0;
5457 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5460 /* Given a jump insn JUMP, return the condition that will cause it to branch
5461 to its JUMP_LABEL. If the condition cannot be understood, or is an
5462 inequality floating-point comparison which needs to be reversed, 0 will
5463 be returned.
5465 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5466 insn used in locating the condition was found. If a replacement test
5467 of the condition is desired, it should be placed in front of that
5468 insn and we will be sure that the inputs are still valid. If EARLIEST
5469 is null, the returned condition will be valid at INSN.
5471 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5472 compare CC mode register.
5474 VALID_AT_INSN_P is the same as for canonicalize_condition. */
5476 rtx
5477 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
5478 int valid_at_insn_p)
5480 rtx cond;
5481 int reverse;
5482 rtx set;
5484 /* If this is not a standard conditional jump, we can't parse it. */
5485 if (!JUMP_P (jump)
5486 || ! any_condjump_p (jump))
5487 return 0;
5488 set = pc_set (jump);
5490 cond = XEXP (SET_SRC (set), 0);
5492 /* If this branches to JUMP_LABEL when the condition is false, reverse
5493 the condition. */
5494 reverse
5495 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5496 && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
5498 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5499 allow_cc_mode, valid_at_insn_p);
5502 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5503 TARGET_MODE_REP_EXTENDED.
5505 Note that we assume that the property of
5506 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5507 narrower than mode B. I.e., if A is a mode narrower than B then in
5508 order to be able to operate on it in mode B, mode A needs to
5509 satisfy the requirements set by the representation of mode B. */
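/* For example, on a MIPS64-like target that keeps SImode values
   sign-extended in 64-bit registers, TARGET_MODE_REP_EXTENDED
   (SImode, DImode) is SIGN_EXTEND and the entry for
   [DImode][SImode] below becomes 32.  */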
5511 static void
5512 init_num_sign_bit_copies_in_rep (void)
5514 machine_mode mode, in_mode;
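/* Note that the outer loop steps IN_MODE via GET_MODE_WIDER_MODE (mode):
   once the inner loop finishes, MODE equals IN_MODE, so this advances
   IN_MODE to the next wider mode.  */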
5516 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
5517 in_mode = GET_MODE_WIDER_MODE (mode))
5518 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
5519 mode = GET_MODE_WIDER_MODE (mode))
5521 machine_mode i;
5523 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5524 extends to the next widest mode. */
5525 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5526 || GET_MODE_WIDER_MODE (mode) == in_mode);
5528 /* We are in in_mode. Count how many bits outside of mode
5529 have to be copies of the sign-bit. */
5530 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
5532 machine_mode wider = GET_MODE_WIDER_MODE (i);
5534 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5535 /* We can only check sign-bit copies starting from the
5536 top-bit. In order to be able to check the bits we
5537 have already seen we pretend that subsequent bits
5538 have to be sign-bit copies too. */
5539 || num_sign_bit_copies_in_rep [in_mode][mode])
5540 num_sign_bit_copies_in_rep [in_mode][mode]
5541 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5546 /* Suppose that truncation from the machine mode of X to MODE is not a
5547 no-op. See if there is anything special about X so that we can
5548 assume it already contains a truncated value of MODE. */
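/* Continuing the MIPS64-like example above, a DImode value with at
   least 33 sign-bit copies already satisfies the SImode
   representation, so we can return true for it here.  */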
5550 bool
5551 truncated_to_mode (machine_mode mode, const_rtx x)
5553 /* This register has already been used in MODE without explicit
5554 truncation. */
5555 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5556 return true;
5558 /* See if we already satisfy the requirements of MODE. If yes we
5559 can just switch to MODE. */
5560 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5561 && (num_sign_bit_copies (x, GET_MODE (x))
5562 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5563 return true;
5565 return false;
5568 /* Return true if RTX code CODE has a single sequence of zero or more
5569 "e" operands and no rtvec operands. Initialize its rtx_all_subrtx_bounds
5570 entry in that case. */
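/* For example, PLUS has format "ee" (start 0, count 2) and
   IF_THEN_ELSE has format "eee" (start 0, count 3), while codes with
   'E' vector operands, such as ASM_OPERANDS, are rejected.  */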
5572 static bool
5573 setup_reg_subrtx_bounds (unsigned int code)
5575 const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
5576 unsigned int i = 0;
5577 for (; format[i] != 'e'; ++i)
5579 if (!format[i])
5580 /* No subrtxes. Leave start and count as 0. */
5581 return true;
5582 if (format[i] == 'E' || format[i] == 'V')
5583 return false;
5586 /* Record the sequence of 'e's. */
5587 rtx_all_subrtx_bounds[code].start = i;
5588 do
5589 ++i;
5590 while (format[i] == 'e');
5591 rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
5592 /* rtl-iter.h relies on this. */
5593 gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
5595 for (; format[i]; ++i)
5596 if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
5597 return false;
5599 return true;
5602 /* Initialize rtx_all_subrtx_bounds. */
5603 void
5604 init_rtlanal (void)
5606 int i;
5607 for (i = 0; i < NUM_RTX_CODE; i++)
5609 if (!setup_reg_subrtx_bounds (i))
5610 rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
5611 if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
5612 rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
5615 init_num_sign_bit_copies_in_rep ();
5618 /* Check whether this is a constant pool constant. */
5619 bool
5620 constant_pool_constant_p (rtx x)
5622 x = avoid_constant_pool_reference (x);
5623 return CONST_DOUBLE_P (x);
5626 /* If M is a bitmask that selects a field of low-order bits within an item but
5627 not the entire word, return the length of the field. Return -1 otherwise.
5628 M is used in machine mode MODE. */
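/* For example, 0xff gives 8 while 0xf0 gives -1, since
   exact_log2 (m + 1) only succeeds when M has the form
   (1 << len) - 1.  */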
5630 int
5631 low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
5633 if (mode != VOIDmode)
5635 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
5636 return -1;
5637 m &= GET_MODE_MASK (mode);
5640 return exact_log2 (m + 1);
5643 /* Return the mode of MEM's address. */
5645 machine_mode
5646 get_address_mode (rtx mem)
5648 machine_mode mode;
5650 gcc_assert (MEM_P (mem));
5651 mode = GET_MODE (XEXP (mem, 0));
5652 if (mode != VOIDmode)
5653 return mode;
5654 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5657 /* Split up a CONST_DOUBLE or integer constant rtx
5658 into two rtx's for single words,
5659 storing in *FIRST the word that comes first in memory in the target
5660 and in *SECOND the other.
5662 TODO: This function needs to be rewritten to work on any size
5663 integer. */
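/* For example, on a 64-bit host targeting a 32-bit big-endian machine,
   the CONST_INT 0x100000002 splits into *FIRST == 1 (the high word,
   which comes first in memory) and *SECOND == 2.  */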
5665 void
5666 split_double (rtx value, rtx *first, rtx *second)
5668 if (CONST_INT_P (value))
5670 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5672 /* In this case the CONST_INT holds both target words.
5673 Extract the bits from it into two word-sized pieces.
5674 Sign extend each half to HOST_WIDE_INT. */
5675 unsigned HOST_WIDE_INT low, high;
5676 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5677 unsigned bits_per_word = BITS_PER_WORD;
5679 /* Set sign_bit to the most significant bit of a word. */
5680 sign_bit = 1;
5681 sign_bit <<= bits_per_word - 1;
5683 /* Set mask so that all bits of the word are set. We could
5684 have used 1 << BITS_PER_WORD instead of basing the
5685 calculation on sign_bit. However, on machines where
5686 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5687 compiler warning, even though the code would never be
5688 executed. */
5689 mask = sign_bit << 1;
5690 mask--;
5692 /* Set sign_extend as any remaining bits. */
5693 sign_extend = ~mask;
5695 /* Pick the lower word and sign-extend it. */
5696 low = INTVAL (value);
5697 low &= mask;
5698 if (low & sign_bit)
5699 low |= sign_extend;
5701 /* Pick the higher word, shifted to the least significant
5702 bits, and sign-extend it. */
5703 high = INTVAL (value);
5704 high >>= bits_per_word - 1;
5705 high >>= 1;
5706 high &= mask;
5707 if (high & sign_bit)
5708 high |= sign_extend;
5710 /* Store the words in the target machine order. */
5711 if (WORDS_BIG_ENDIAN)
5713 *first = GEN_INT (high);
5714 *second = GEN_INT (low);
5716 else
5718 *first = GEN_INT (low);
5719 *second = GEN_INT (high);
5722 else
5724 /* The rule for using CONST_INT for a wider mode
5725 is that we regard the value as signed.
5726 So sign-extend it. */
5727 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
5728 if (WORDS_BIG_ENDIAN)
5730 *first = high;
5731 *second = value;
5733 else
5735 *first = value;
5736 *second = high;
5740 else if (GET_CODE (value) == CONST_WIDE_INT)
5742 /* All of this is scary code and needs to be converted to
5743 properly work with any size integer. */
5744 gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
5745 if (WORDS_BIG_ENDIAN)
5747 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5748 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5750 else
5752 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
5753 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
5756 else if (!CONST_DOUBLE_P (value))
5758 if (WORDS_BIG_ENDIAN)
5760 *first = const0_rtx;
5761 *second = value;
5763 else
5765 *first = value;
5766 *second = const0_rtx;
5769 else if (GET_MODE (value) == VOIDmode
5770 /* This is the old way we did CONST_DOUBLE integers. */
5771 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
5773 /* In an integer, the words are defined as most and least significant.
5774 So order them by the target's convention. */
5775 if (WORDS_BIG_ENDIAN)
5777 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
5778 *second = GEN_INT (CONST_DOUBLE_LOW (value));
5780 else
5782 *first = GEN_INT (CONST_DOUBLE_LOW (value));
5783 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
5786 else
5788 long l[2];
5790 /* Note, this converts the REAL_VALUE_TYPE to the target's
5791 format, splits up the floating point double and outputs
5792 exactly 32 bits of it into each of l[0] and l[1] --
5793 not necessarily BITS_PER_WORD bits. */
5794 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);
5796 /* If 32 bits is an entire word for the target, but not for the host,
5797 then sign-extend on the host so that the number will look the same
5798 way on the host that it would on the target. See for instance
5799 simplify_unary_operation. The #if is needed to avoid compiler
5800 warnings. */
5802 #if HOST_BITS_PER_LONG > 32
5803 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
5805 if (l[0] & ((long) 1 << 31))
5806 l[0] |= ((unsigned long) (-1) << 32);
5807 if (l[1] & ((long) 1 << 31))
5808 l[1] |= ((unsigned long) (-1) << 32);
5810 #endif
5812 *first = GEN_INT (l[0]);
5813 *second = GEN_INT (l[1]);
5817 /* Return true if X is a sign_extract or zero_extract from the least
5818 significant bit. */
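/* For example, (sign_extract:SI X (const_int 8) (const_int 0))
   qualifies when BITS_BIG_ENDIAN is false; with BITS_BIG_ENDIAN the
   position would instead have to be the precision of X's mode
   minus 8.  */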
5820 static bool
5821 lsb_bitfield_op_p (rtx x)
5823 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
5825 machine_mode mode = GET_MODE (XEXP (x, 0));
5826 HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
5827 HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
5829 return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
5831 return false;
5834 /* Strip outer address "mutations" from LOC and return a pointer to the
5835 inner value. If OUTER_CODE is nonnull, store the code of the innermost
5836 stripped expression there.
5838 "Mutations" either convert between modes or apply some kind of
5839 extension, truncation or alignment. */
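/* For example, given (zero_extend:DI (plus:SI R (const_int 4))), the
   returned pointer refers to the PLUS and *OUTER_CODE is set to
   ZERO_EXTEND.  */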
5841 rtx *
5842 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
5844 for (;;)
5846 enum rtx_code code = GET_CODE (*loc);
5847 if (GET_RTX_CLASS (code) == RTX_UNARY)
5848 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
5849 used to convert between pointer sizes. */
5850 loc = &XEXP (*loc, 0);
5851 else if (lsb_bitfield_op_p (*loc))
5852 /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
5853 acts as a combined truncation and extension. */
5854 loc = &XEXP (*loc, 0);
5855 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
5856 /* (and ... (const_int -X)) is used to align to X bytes. */
5857 loc = &XEXP (*loc, 0);
5858 else if (code == SUBREG
5859 && !OBJECT_P (SUBREG_REG (*loc))
5860 && subreg_lowpart_p (*loc))
5861 /* A lowpart (subreg (operator ...) ...) is used for mode
5862 conversion too. */
5863 loc = &SUBREG_REG (*loc);
5864 else
5865 return loc;
5866 if (outer_code)
5867 *outer_code = code;
5871 /* Return true if CODE applies some kind of scale. The scaled value
5872 is the first operand and the scale is the second. */
5874 static bool
5875 binary_scale_code_p (enum rtx_code code)
5877 return (code == MULT
5878 || code == ASHIFT
5879 /* Needed by ARM targets. */
5880 || code == ASHIFTRT
5881 || code == LSHIFTRT
5882 || code == ROTATE
5883 || code == ROTATERT);
5886 /* If *INNER can be interpreted as a base, return a pointer to the inner term
5887 (see address_info). Return null otherwise. */
5889 static rtx *
5890 get_base_term (rtx *inner)
5892 if (GET_CODE (*inner) == LO_SUM)
5893 inner = strip_address_mutations (&XEXP (*inner, 0));
5894 if (REG_P (*inner)
5895 || MEM_P (*inner)
5896 || GET_CODE (*inner) == SUBREG
5897 || GET_CODE (*inner) == SCRATCH)
5898 return inner;
5899 return 0;
5902 /* If *INNER can be interpreted as an index, return a pointer to the inner term
5903 (see address_info). Return null otherwise. */
5905 static rtx *
5906 get_index_term (rtx *inner)
5908 /* At present, only constant scales are allowed. */
5909 if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
5910 inner = strip_address_mutations (&XEXP (*inner, 0));
5911 if (REG_P (*inner)
5912 || MEM_P (*inner)
5913 || GET_CODE (*inner) == SUBREG
5914 || GET_CODE (*inner) == SCRATCH)
5915 return inner;
5916 return 0;
5919 /* Set the segment part of address INFO to LOC, given that INNER is the
5920 unmutated value. */
5922 static void
5923 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
5925 gcc_assert (!info->segment);
5926 info->segment = loc;
5927 info->segment_term = inner;
5930 /* Set the base part of address INFO to LOC, given that INNER is the
5931 unmutated value. */
5933 static void
5934 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
5936 gcc_assert (!info->base);
5937 info->base = loc;
5938 info->base_term = inner;
5941 /* Set the index part of address INFO to LOC, given that INNER is the
5942 unmutated value. */
5944 static void
5945 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
5947 gcc_assert (!info->index);
5948 info->index = loc;
5949 info->index_term = inner;
5952 /* Set the displacement part of address INFO to LOC, given that INNER
5953 is the constant term. */
5955 static void
5956 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
5958 gcc_assert (!info->disp);
5959 info->disp = loc;
5960 info->disp_term = inner;
5963 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
5964 rest of INFO accordingly. */
5966 static void
5967 decompose_incdec_address (struct address_info *info)
5969 info->autoinc_p = true;
5971 rtx *base = &XEXP (*info->inner, 0);
5972 set_address_base (info, base, base);
5973 gcc_checking_assert (info->base == info->base_term);
5975 /* These addresses are only valid when the size of the addressed
5976 value is known. */
5977 gcc_checking_assert (info->mode != VOIDmode);
5980 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
5981 of INFO accordingly. */
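/* For example, in (pre_modify (reg R) (plus (reg R) (const_int 16)))
   the base is R and the constant step 16 becomes the displacement.  */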
5983 static void
5984 decompose_automod_address (struct address_info *info)
5986 info->autoinc_p = true;
5988 rtx *base = &XEXP (*info->inner, 0);
5989 set_address_base (info, base, base);
5990 gcc_checking_assert (info->base == info->base_term);
5992 rtx plus = XEXP (*info->inner, 1);
5993 gcc_assert (GET_CODE (plus) == PLUS);
5995 info->base_term2 = &XEXP (plus, 0);
5996 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
5998 rtx *step = &XEXP (plus, 1);
5999 rtx *inner_step = strip_address_mutations (step);
6000 if (CONSTANT_P (*inner_step))
6001 set_address_disp (info, step, inner_step);
6002 else
6003 set_address_index (info, step, inner_step);
6006 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
6007 values in [PTR, END). Return a pointer to the end of the used array. */
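/* For example, (plus (plus A B) C) stores pointers to A, B and C, in
   that order.  */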
6009 static rtx **
6010 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
6012 rtx x = *loc;
6013 if (GET_CODE (x) == PLUS)
6015 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
6016 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
6018 else
6020 gcc_assert (ptr != end);
6021 *ptr++ = loc;
6023 return ptr;
6026 /* Evaluate the likelihood of X being a base or index value, returning
6027 positive if it is likely to be a base, negative if it is likely to be
6028 an index, and 0 if we can't tell. Make the magnitude of the return
6029 value reflect the amount of confidence we have in the answer.
6031 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
6033 static int
6034 baseness (rtx x, machine_mode mode, addr_space_t as,
6035 enum rtx_code outer_code, enum rtx_code index_code)
6037 /* Believe *_POINTER unless the address shape requires otherwise. */
6038 if (REG_P (x) && REG_POINTER (x))
6039 return 2;
6040 if (MEM_P (x) && MEM_POINTER (x))
6041 return 2;
6043 if (REG_P (x) && HARD_REGISTER_P (x))
6045 /* X is a hard register. If it only fits one of the base
6046 or index classes, choose that interpretation. */
6047 int regno = REGNO (x);
6048 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
6049 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
6050 if (base_p != index_p)
6051 return base_p ? 1 : -1;
6053 return 0;
6056 /* INFO->INNER describes a normal, non-automodified address.
6057 Fill in the rest of INFO accordingly. */
6059 static void
6060 decompose_normal_address (struct address_info *info)
6062 /* Treat the address as the sum of up to four values. */
6063 rtx *ops[4];
6064 size_t n_ops = extract_plus_operands (info->inner, ops,
6065 ops + ARRAY_SIZE (ops)) - ops;
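/* For example, an x86-style address
   (plus (plus (mult R1 (const_int 4)) R0) (const_int 8))
   yields three operands here: the scaled index, the register R0 and
   the displacement.  */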
6067 /* If there is more than one component, any base component is in a PLUS. */
6068 if (n_ops > 1)
6069 info->base_outer_code = PLUS;
6071 /* Try to classify each sum operand now. Leave those that could be
6072 either a base or an index in OPS. */
6073 rtx *inner_ops[4];
6074 size_t out = 0;
6075 for (size_t in = 0; in < n_ops; ++in)
6077 rtx *loc = ops[in];
6078 rtx *inner = strip_address_mutations (loc);
6079 if (CONSTANT_P (*inner))
6080 set_address_disp (info, loc, inner);
6081 else if (GET_CODE (*inner) == UNSPEC)
6082 set_address_segment (info, loc, inner);
6083 else
6085 /* The only other possibilities are a base or an index. */
6086 rtx *base_term = get_base_term (inner);
6087 rtx *index_term = get_index_term (inner);
6088 gcc_assert (base_term || index_term);
6089 if (!base_term)
6090 set_address_index (info, loc, index_term);
6091 else if (!index_term)
6092 set_address_base (info, loc, base_term);
6093 else
6095 gcc_assert (base_term == index_term);
6096 ops[out] = loc;
6097 inner_ops[out] = base_term;
6098 ++out;
6103 /* Classify the remaining OPS members as bases and indexes. */
6104 if (out == 1)
6106 /* If we haven't seen a base or an index yet, assume that this is
6107 the base. If we were confident that another term was the base
6108 or index, treat the remaining operand as the other kind. */
6109 if (!info->base)
6110 set_address_base (info, ops[0], inner_ops[0]);
6111 else
6112 set_address_index (info, ops[0], inner_ops[0]);
6114 else if (out == 2)
6116 /* In the event of a tie, assume the base comes first. */
6117 if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
6118 GET_CODE (*ops[1]))
6119 >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
6120 GET_CODE (*ops[0])))
6122 set_address_base (info, ops[0], inner_ops[0]);
6123 set_address_index (info, ops[1], inner_ops[1]);
6125 else
6127 set_address_base (info, ops[1], inner_ops[1]);
6128 set_address_index (info, ops[0], inner_ops[0]);
6131 else
6132 gcc_assert (out == 0);
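/* Added illustration (not part of the original file): for an address such
   as (plus (plus (reg B) (mult (reg I) (const_int 4))) (const_int 8)), the
   loop above records (const_int 8) as the displacement and the MULT as the
   index (its shape only fits the index role), leaving (reg B) as the single
   ambiguous operand, which the OUT == 1 case then classifies as the base.  */
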
/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
		   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}

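/* Added note (not in the original file): outer mutations never reach the
   switch above; e.g. an aligning (and ... (const_int -4)) wrapper is
   removed by strip_address_mutations, so the wrapped PLUS inside it is
   what decompose_normal_address sees.  */
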
/* Describe address operand LOC in INFO.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}

/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
		     MEM_ADDR_SPACE (x), MEM);
}

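/* Added illustration (not part of the original file): a minimal caller
   sketch, assuming an insn whose pattern is (set (reg ...) (mem ...)).
   The helper name note_load_address is hypothetical.  */

static void
note_load_address (rtx_insn *insn)
{
  rtx set = single_set (insn);
  if (set && MEM_P (SET_SRC (set)))
    {
      struct address_info info;
      decompose_mem_address (&info, SET_SRC (set));
      /* info.base, info.index and info.disp now point into the MEM's
	 address, so changes made through them update the insn itself.  */
    }
}
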
/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
		     info->addr_outer_code);
}

/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}

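/* Added illustration (not part of the original file): for an index of
   (mult (reg I) (const_int 4)) the scale is 4, for (ashift (reg I)
   (const_int 2)) it is likewise 1 << 2 == 4, and for a bare (reg I)
   it is 1.  */
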
/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}

/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (const_rtx x)
{
  if (!targetm.have_tls)
    return false;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
      return true;
  return false;
}