/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "recog.h"
#include "addresses.h"
#include "rtl-iter.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, machine_mode,
                                                   const_rtx, machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, machine_mode,
                                             const_rtx, machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, machine_mode, const_rtx,
                                                machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, machine_mode, const_rtx,
                                          machine_mode, unsigned int);
rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];

/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  value_type *base,
                                                  size_t i, value_type x)
{
  if (base == array.stack)
    {
      if (i < LOCAL_ELEMS)
        {
          base[i] = x;
          return base;
        }
      gcc_checking_assert (i == LOCAL_ELEMS);
      /* A previous iteration might also have moved from the stack to the
         heap, in which case the heap array will already be big enough.  */
      if (vec_safe_length (array.heap) <= i)
        vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }
  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}
/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    value_type *base,
                                                    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
         we're likely to want most.  It also allows for the SEQUENCE
         code below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
        if (format[i] == 'e')
          {
            value_type subx = T::get_value (x->u.fld[i].rt_rtx);
            if (__builtin_expect (end < LOCAL_ELEMS, true))
              base[end++] = subx;
            else
              base = add_single_to_queue (array, base, end++, subx);
          }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
        {
          value_type subx = T::get_value (x->u.fld[i].rt_rtx);
          if (__builtin_expect (end < LOCAL_ELEMS, true))
            base[end++] = subx;
          else
            base = add_single_to_queue (array, base, end++, subx);
        }
      else if (format[i] == 'E')
        {
          unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
          rtx *vec = x->u.fld[i].rt_rtvec->elem;
          if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
            for (unsigned int j = 0; j < length; j++)
              base[end++] = T::get_value (vec[j]);
          else
            for (unsigned int j = 0; j < length; j++)
              base = add_single_to_queue (array, base, end++,
                                          T::get_value (vec[j]));
          if (code == SEQUENCE && end == length)
            /* If the subrtxes of the sequence fill the entire array then
               we know that no other parts of a containing insn are queued.
               The caller is therefore iterating over the sequence as a
               PATTERN (...), so we also want the patterns of the
               subinstructions.  */
            for (unsigned int j = 0; j < length; j++)
              {
                typename T::rtx_type x = T::get_rtx (base[j]);
                if (INSN_P (x))
                  base[j] = T::get_value (PATTERN (x));
              }
        }
  return end - orig_end;
}
template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;
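
/* These iterators are normally driven by the FOR_EACH_SUBRTX* macros from
   rtl-iter.h.  A minimal usage sketch (illustrative only, not part of this
   file):

     subrtx_iterator::array_type array;
     FOR_EACH_SUBRTX (iter, array, x, ALL)
       if (MEM_P (*iter))
         return true;

   find_all_hard_regs later in this file shows the same idiom with the
   NONCONST bound, which avoids walking into subrtxes known to be
   constant.  */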
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
/* Compute an approximation for the offset between the register
   FROM and TO for the current function, as it was at the start
   of the routine.  */

static HOST_WIDE_INT
get_initial_register_offset (int from, int to)
{
  static const struct elim_table_t
    {
      const int from;
      const int to;
    } table[] = ELIMINABLE_REGS;
  HOST_WIDE_INT offset1, offset2;
  unsigned int i, j;

  if (to == from)
    return 0;

  /* It is not safe to call INITIAL_ELIMINATION_OFFSET
     before the reload pass.  We need to give at least
     an estimation for the resulting frame size.  */
  if (! reload_completed)
    {
      offset1 = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset1 = - offset1;
#endif
      if (to == STACK_POINTER_REGNUM)
        return offset1;
      else if (from == STACK_POINTER_REGNUM)
        return - offset1;
      else
        return 0;
    }

  for (i = 0; i < ARRAY_SIZE (table); i++)
    if (table[i].from == from)
      {
        if (table[i].to == to)
          {
            INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                        offset1);
            return offset1;
          }
        for (j = 0; j < ARRAY_SIZE (table); j++)
          {
            if (table[j].to == to
                && table[j].from == table[i].to)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return offset1 + offset2;
              }
            if (table[j].from == to
                && table[j].to == table[i].to)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return offset1 - offset2;
              }
          }
      }
    else if (table[i].to == from)
      {
        if (table[i].from == to)
          {
            INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                        offset1);
            return - offset1;
          }
        for (j = 0; j < ARRAY_SIZE (table); j++)
          {
            if (table[j].to == to
                && table[j].from == table[i].from)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return - offset1 + offset2;
              }
            if (table[j].from == to
                && table[j].to == table[i].from)
              {
                INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                            offset1);
                INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                            offset2);
                return - offset1 - offset2;
              }
          }
      }

  /* If the requested register combination was not found,
     try a different more simple combination.  */
  if (from == ARG_POINTER_REGNUM)
    return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
  else if (to == ARG_POINTER_REGNUM)
    return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
  else if (from == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
  else if (to == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
  else
    return 0;
}
/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
                         ? tree_to_shwi (DECL_SIZE_UNIT (decl))
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        {
#ifdef RED_ZONE_SIZE
          HOST_WIDE_INT red_zone_size = RED_ZONE_SIZE;
#else
          HOST_WIDE_INT red_zone_size = 0;
#endif
          HOST_WIDE_INT stack_boundary = PREFERRED_STACK_BOUNDARY
                                         / BITS_PER_UNIT;
          HOST_WIDE_INT low_bound, high_bound;

          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return 1;

          if (x == frame_pointer_rtx)
            {
              if (FRAME_GROWS_DOWNWARD)
                {
                  high_bound = STARTING_FRAME_OFFSET;
                  low_bound = high_bound - get_frame_size ();
                }
              else
                {
                  low_bound = STARTING_FRAME_OFFSET;
                  high_bound = low_bound + get_frame_size ();
                }
            }
          else if (x == hard_frame_pointer_rtx)
            {
              HOST_WIDE_INT sp_offset
                = get_initial_register_offset (STACK_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);
              HOST_WIDE_INT ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound = sp_offset - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = sp_offset + red_zone_size + stack_boundary;
              low_bound = ap_offset
                          + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                          - crtl->args.size
#endif
                          - stack_boundary;
#endif
            }
          else if (x == stack_pointer_rtx)
            {
              HOST_WIDE_INT ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               STACK_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound = - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = red_zone_size + stack_boundary;
              low_bound = ap_offset
                          + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                          - crtl->args.size
#endif
                          - stack_boundary;
#endif
            }
          else
            {
              /* We assume that accesses are safe to at least the
                 next stack boundary.
                 Examples are varargs and __builtin_return_address.  */
#if ARGS_GROW_DOWNWARD
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + stack_boundary;
              low_bound = FIRST_PARM_OFFSET (current_function_decl)
                          - crtl->args.size - stack_boundary;
#else
              low_bound = FIRST_PARM_OFFSET (current_function_decl)
                          - stack_boundary;
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + crtl->args.size + stack_boundary;
#endif
            }

          if (offset >= low_bound && offset <= high_bound - size)
            return 0;
          return 1;
        }
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
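
/* A usage sketch (illustrative only, not part of this file): for a
   call_insn whose pattern is
     (set (reg:SI 0) (call (mem:QI (symbol_ref "foo")) (const_int 0)))
   the function above peels off the SET (and, for a PARALLEL, its first
   element) and returns the inner CALL rtx, so XEXP (call, 0) is the MEM
   holding the callee address.  */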
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == label_ref_label (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

    CASE_CONST_ANY:
      /* These are kept unique for a given value.  */
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
                    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
                   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Return true if REG is set or clobbered inside INSN.  */

bool
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
        if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
          return true;

      return false;
    }

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return true;

  return set_of (reg, insn) != NULL_RTX;
}
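
/* A combined-use sketch (illustrative only, not part of this file):
   a pass asking whether REG still holds the same value at TO as it did
   just after FROM might write

     if (!reg_set_p (reg, from)
         && !reg_set_between_p (reg, from, to))
       ... no insn from FROM up to (but excluding) TO sets or clobbers
           REG ...

   Both predicates are conservative: reg_set_p also reports clobbers
   implied by calls (for call-clobbered hard registers and MEMs) and by
   REG_INC notes.  */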
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
/* Add all hard registers in X to *PSET.  */
void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
        add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}

/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *)data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}

/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
        IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}

/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SET whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In common case
                 only single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach set first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL_RTX;
              break;

            default:
              return NULL_RTX;
            }
        }
    }
  return set;
}
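
/* Callers usually reach single_set_2 through the single_set inline from
   rtl.h, which handles a bare SET pattern without a function call and
   only falls back to this routine for more complex patterns.  A typical
   use (illustrative only, not part of this file):

     rtx set = single_set (insn);
     if (set && REG_P (SET_DEST (set)))
       ... INSN's only live effect is storing SET_SRC (set)
           into SET_DEST (set) ...  */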
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      int c0 = INTVAL (XVECEXP (par, 0, 0));
      HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
          return 0;
      return
        simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                               offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const rtx_insn *insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

bool
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return false;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
           || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
               && x_regno == ARG_POINTER_REGNUM)
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return true;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return true;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return false;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return true;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return true;
        }
    }
  return false;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx*) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }
  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
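
/* A minimal note_stores callback sketch (illustrative only, not part of
   this file), collecting the hard registers written by an insn:

     static void
     collect_stores (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
                     void *data)
     {
       if (REG_P (dest) && HARD_REGISTER_P (dest))
         add_to_hard_reg_set ((HARD_REG_SET *) data,
                              GET_MODE (dest), REGNO (dest));
     }

     ...
     note_stores (PATTERN (insn), collect_stores, &written);

   record_hard_reg_sets earlier in this file is essentially this
   callback.  */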
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}

/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
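
/* A usage sketch (illustrative only, not part of this file): testing
   whether INSN is annotated with a known constant value:

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note && CONSTANT_P (XEXP (note, 0)))
       ... XEXP (note, 0) is the value of INSN's single set ...

   Passing NULL_RTX as DATUM matches any note of the given kind;
   find_constant_src later in this file wraps a variant of this
   pattern.  */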
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}
2142 /* Check whether INSN is a single_set whose source is known to be
2143 equivalent to a constant. Return that constant if so, otherwise
2144 return null. */
2147 find_constant_src (const rtx_insn *insn)
2149 rtx note, set, x;
2151 set = single_set (insn);
2152 if (set)
2154 x = avoid_constant_pool_reference (SET_SRC (set));
2155 if (CONSTANT_P (x))
2156 return x;
2159 note = find_reg_equal_equiv_note (insn);
2160 if (note && CONSTANT_P (XEXP (note, 0)))
2161 return XEXP (note, 0);
2163 return NULL_RTX;
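/* A minimal usage sketch, assuming INSN is in hand: test whether a move
   is known to load a compile-time integer.

     rtx cst = find_constant_src (insn);
     if (cst != NULL_RTX && CONST_INT_P (cst))
       ... the single_set source of INSN is known to equal INTVAL (cst) ...  */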
2166 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
2167 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2170 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
2172 /* If it's not a CALL_INSN, it can't possibly have a
2173 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
2174 if (!CALL_P (insn))
2175 return 0;
2177 gcc_assert (datum);
2179 if (!REG_P (datum))
2181 rtx link;
2183 for (link = CALL_INSN_FUNCTION_USAGE (insn);
2184 link;
2185 link = XEXP (link, 1))
2186 if (GET_CODE (XEXP (link, 0)) == code
2187 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2188 return 1;
2190 else
2192 unsigned int regno = REGNO (datum);
2194 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2195 to pseudo registers, so don't bother checking. */
2197 if (regno < FIRST_PSEUDO_REGISTER)
2199 unsigned int end_regno = END_REGNO (datum);
2200 unsigned int i;
2202 for (i = regno; i < end_regno; i++)
2203 if (find_regno_fusage (insn, code, i))
2204 return 1;
2208 return 0;
2211 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2212 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2215 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2217 rtx link;
2219 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2220 to pseudo registers, so don't bother checking. */
2222 if (regno >= FIRST_PSEUDO_REGISTER
2223 || !CALL_P (insn))
2224 return 0;
2226 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2228 rtx op, reg;
2230 if (GET_CODE (op = XEXP (link, 0)) == code
2231 && REG_P (reg = XEXP (op, 0))
2232 && REGNO (reg) <= regno
2233 && END_REGNO (reg) > regno)
2234 return 1;
2237 return 0;
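/* A minimal usage sketch, assuming CALL_INSN is a CALL_P insn: test
   whether the call reads hard register number 0.

     if (find_regno_fusage (call_insn, USE, 0))
       ... register 0 appears in a USE in CALL_INSN_FUNCTION_USAGE ...  */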
2241 /* Return true if KIND is an integer REG_NOTE. */
2243 static bool
2244 int_reg_note_p (enum reg_note kind)
2246 return kind == REG_BR_PROB;
2249 /* Allocate a register note with kind KIND and datum DATUM. LIST is
2250 stored as the pointer to the next register note. */
2253 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2255 rtx note;
2257 gcc_checking_assert (!int_reg_note_p (kind));
2258 switch (kind)
2260 case REG_CC_SETTER:
2261 case REG_CC_USER:
2262 case REG_LABEL_TARGET:
2263 case REG_LABEL_OPERAND:
2264 case REG_TM:
2265 /* These types of register notes use an INSN_LIST rather than an
2266 EXPR_LIST, so that copying is done right and dumps look
2267 better. */
2268 note = alloc_INSN_LIST (datum, list);
2269 PUT_REG_NOTE_KIND (note, kind);
2270 break;
2272 default:
2273 note = alloc_EXPR_LIST (kind, datum, list);
2274 break;
2277 return note;
2280 /* Add register note with kind KIND and datum DATUM to INSN. */
2282 void
2283 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2285 REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
2288 /* Add an integer register note with kind KIND and datum DATUM to INSN. */
2290 void
2291 add_int_reg_note (rtx insn, enum reg_note kind, int datum)
2293 gcc_checking_assert (int_reg_note_p (kind));
2294 REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2295 datum, REG_NOTES (insn));
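/* A minimal usage sketch, assuming INSN, JUMP and PROB are in hand.
   rtx-valued kinds go through add_reg_note; REG_BR_PROB, the only
   integer kind handled here, must go through add_int_reg_note:

     add_reg_note (insn, REG_EQUAL, GEN_INT (42));
     add_int_reg_note (jump, REG_BR_PROB, prob);  */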
2298 /* Add a register note like NOTE to INSN. */
2300 void
2301 add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2303 if (GET_CODE (note) == INT_LIST)
2304 add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2305 else
2306 add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2309 /* Duplicate NOTE and return the copy. */
2311 duplicate_reg_note (rtx note)
2313 reg_note kind = REG_NOTE_KIND (note);
2315 if (GET_CODE (note) == INT_LIST)
2316 return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
2317 else if (GET_CODE (note) == EXPR_LIST)
2318 return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
2319 else
2320 return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
2323 /* Remove register note NOTE from the REG_NOTES of INSN. */
2325 void
2326 remove_note (rtx_insn *insn, const_rtx note)
2328 rtx link;
2330 if (note == NULL_RTX)
2331 return;
2333 if (REG_NOTES (insn) == note)
2334 REG_NOTES (insn) = XEXP (note, 1);
2335 else
2336 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2337 if (XEXP (link, 1) == note)
2339 XEXP (link, 1) = XEXP (note, 1);
2340 break;
2343 switch (REG_NOTE_KIND (note))
2345 case REG_EQUAL:
2346 case REG_EQUIV:
2347 df_notes_rescan (insn);
2348 break;
2349 default:
2350 break;
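/* A minimal usage sketch, assuming INSN is in hand: look a note up and
   hand it to remove_note, which is a no-op for a null NOTE and rescans
   the DF notes when a REG_EQUAL or REG_EQUIV note is removed.

     remove_note (insn, find_reg_note (insn, REG_EQUAL, NULL_RTX));  */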
2354 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes. */
2356 void
2357 remove_reg_equal_equiv_notes (rtx_insn *insn)
2359 rtx *loc;
2361 loc = &REG_NOTES (insn);
2362 while (*loc)
2364 enum reg_note kind = REG_NOTE_KIND (*loc);
2365 if (kind == REG_EQUAL || kind == REG_EQUIV)
2366 *loc = XEXP (*loc, 1);
2367 else
2368 loc = &XEXP (*loc, 1);
2372 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO. */
2374 void
2375 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2377 df_ref eq_use;
2379 if (!df)
2380 return;
2382 /* This loop is a little tricky. We cannot just go down the chain because
2383 it is being modified by some actions in the loop. So we just iterate
2384 over the head. We plan to drain the list anyway. */
2385 while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2387 rtx_insn *insn = DF_REF_INSN (eq_use);
2388 rtx note = find_reg_equal_equiv_note (insn);
2390 /* This assert is generally triggered when someone deletes a REG_EQUAL
2391 or REG_EQUIV note by hacking the list manually rather than calling
2392 remove_note. */
2393 gcc_assert (note);
2395 remove_note (insn, note);
2399 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2400 return true if it is found. A simple equality test is used to determine if
2401 NODE matches. */
2403 bool
2404 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2406 const_rtx x;
2408 for (x = listp; x; x = XEXP (x, 1))
2409 if (node == XEXP (x, 0))
2410 return true;
2412 return false;
2415 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2416 remove that entry from the list if it is found.
2418 A simple equality test is used to determine if NODE matches. */
2420 void
2421 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2423 rtx_expr_list *temp = *listp;
2424 rtx_expr_list *prev = NULL;
2426 while (temp)
2428 if (node == temp->element ())
2430 /* Splice the node out of the list. */
2431 if (prev)
2432 XEXP (prev, 1) = temp->next ();
2433 else
2434 *listp = temp->next ();
2436 return;
2439 prev = temp;
2440 temp = temp->next ();
2444 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2445 remove that entry from the list if it is found.
2447 A simple equality test is used to determine if NODE matches. */
2449 void
2450 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2452 rtx_insn_list *temp = *listp;
2453 rtx_insn_list *prev = NULL;
2455 while (temp)
2457 if (node == temp->insn ())
2459 /* Splice the node out of the list. */
2460 if (prev)
2461 XEXP (prev, 1) = temp->next ();
2462 else
2463 *listp = temp->next ();
2465 return;
2468 prev = temp;
2469 temp = temp->next ();
2473 /* Nonzero if X contains any volatile instructions. These are instructions
2474 which may cause unpredictable machine state, and thus no
2475 instructions or register uses should be moved or combined across them.
2476 This includes only volatile asms and UNSPEC_VOLATILE instructions. */
2479 volatile_insn_p (const_rtx x)
2481 const RTX_CODE code = GET_CODE (x);
2482 switch (code)
2484 case LABEL_REF:
2485 case SYMBOL_REF:
2486 case CONST:
2487 CASE_CONST_ANY:
2488 case CC0:
2489 case PC:
2490 case REG:
2491 case SCRATCH:
2492 case CLOBBER:
2493 case ADDR_VEC:
2494 case ADDR_DIFF_VEC:
2495 case CALL:
2496 case MEM:
2497 return 0;
2499 case UNSPEC_VOLATILE:
2500 return 1;
2502 case ASM_INPUT:
2503 case ASM_OPERANDS:
2504 if (MEM_VOLATILE_P (x))
2505 return 1;
2507 default:
2508 break;
2511 /* Recursively scan the operands of this expression. */
2514 const char *const fmt = GET_RTX_FORMAT (code);
2515 int i;
2517 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2519 if (fmt[i] == 'e')
2521 if (volatile_insn_p (XEXP (x, i)))
2522 return 1;
2524 else if (fmt[i] == 'E')
2526 int j;
2527 for (j = 0; j < XVECLEN (x, i); j++)
2528 if (volatile_insn_p (XVECEXP (x, i, j)))
2529 return 1;
2533 return 0;
2536 /* Nonzero if X contains any volatile memory references,
2537 UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions. */
2540 volatile_refs_p (const_rtx x)
2542 const RTX_CODE code = GET_CODE (x);
2543 switch (code)
2545 case LABEL_REF:
2546 case SYMBOL_REF:
2547 case CONST:
2548 CASE_CONST_ANY:
2549 case CC0:
2550 case PC:
2551 case REG:
2552 case SCRATCH:
2553 case CLOBBER:
2554 case ADDR_VEC:
2555 case ADDR_DIFF_VEC:
2556 return 0;
2558 case UNSPEC_VOLATILE:
2559 return 1;
2561 case MEM:
2562 case ASM_INPUT:
2563 case ASM_OPERANDS:
2564 if (MEM_VOLATILE_P (x))
2565 return 1;
2567 default:
2568 break;
2571 /* Recursively scan the operands of this expression. */
2574 const char *const fmt = GET_RTX_FORMAT (code);
2575 int i;
2577 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2579 if (fmt[i] == 'e')
2581 if (volatile_refs_p (XEXP (x, i)))
2582 return 1;
2584 else if (fmt[i] == 'E')
2586 int j;
2587 for (j = 0; j < XVECLEN (x, i); j++)
2588 if (volatile_refs_p (XVECEXP (x, i, j)))
2589 return 1;
2593 return 0;
2596 /* Similar to above, except that it also rejects register pre- and post-
2597 incrementing. */
2600 side_effects_p (const_rtx x)
2602 const RTX_CODE code = GET_CODE (x);
2603 switch (code)
2605 case LABEL_REF:
2606 case SYMBOL_REF:
2607 case CONST:
2608 CASE_CONST_ANY:
2609 case CC0:
2610 case PC:
2611 case REG:
2612 case SCRATCH:
2613 case ADDR_VEC:
2614 case ADDR_DIFF_VEC:
2615 case VAR_LOCATION:
2616 return 0;
2618 case CLOBBER:
2619 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2620 when some combination can't be done. If we see one, don't think
2621 that we can simplify the expression. */
2622 return (GET_MODE (x) != VOIDmode);
2624 case PRE_INC:
2625 case PRE_DEC:
2626 case POST_INC:
2627 case POST_DEC:
2628 case PRE_MODIFY:
2629 case POST_MODIFY:
2630 case CALL:
2631 case UNSPEC_VOLATILE:
2632 return 1;
2634 case MEM:
2635 case ASM_INPUT:
2636 case ASM_OPERANDS:
2637 if (MEM_VOLATILE_P (x))
2638 return 1;
2640 default:
2641 break;
2644 /* Recursively scan the operands of this expression. */
2647 const char *fmt = GET_RTX_FORMAT (code);
2648 int i;
2650 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2652 if (fmt[i] == 'e')
2654 if (side_effects_p (XEXP (x, i)))
2655 return 1;
2657 else if (fmt[i] == 'E')
2659 int j;
2660 for (j = 0; j < XVECLEN (x, i); j++)
2661 if (side_effects_p (XVECEXP (x, i, j)))
2662 return 1;
2666 return 0;
2669 /* Return nonzero if evaluating rtx X might cause a trap.
2670 FLAGS controls how to consider MEMs. A nonzero value means the context
2671 of the access may have changed from the original, such that the
2672 address may have become invalid. */
2675 may_trap_p_1 (const_rtx x, unsigned flags)
2677 int i;
2678 enum rtx_code code;
2679 const char *fmt;
2681 /* We make no distinction currently, but this function is part of
2682 the internal target-hooks ABI so we keep the parameter as
2683 "unsigned flags". */
2684 bool code_changed = flags != 0;
2686 if (x == 0)
2687 return 0;
2688 code = GET_CODE (x);
2689 switch (code)
2691 /* Handle these cases quickly. */
2692 CASE_CONST_ANY:
2693 case SYMBOL_REF:
2694 case LABEL_REF:
2695 case CONST:
2696 case PC:
2697 case CC0:
2698 case REG:
2699 case SCRATCH:
2700 return 0;
2702 case UNSPEC:
2703 return targetm.unspec_may_trap_p (x, flags);
2705 case UNSPEC_VOLATILE:
2706 case ASM_INPUT:
2707 case TRAP_IF:
2708 return 1;
2710 case ASM_OPERANDS:
2711 return MEM_VOLATILE_P (x);
2713 /* Memory ref can trap unless it's a static var or a stack slot. */
2714 case MEM:
2715 /* Recognize specific pattern of stack checking probes. */
2716 if (flag_stack_check
2717 && MEM_VOLATILE_P (x)
2718 && XEXP (x, 0) == stack_pointer_rtx)
2719 return 1;
2720 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2721 reference; moving it out of context such as when moving code
2722 when optimizing, might cause its address to become invalid. */
2723 code_changed
2724 || !MEM_NOTRAP_P (x))
2726 HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
2727 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2728 GET_MODE (x), code_changed);
2731 return 0;
2733 /* Division by a non-constant might trap. */
2734 case DIV:
2735 case MOD:
2736 case UDIV:
2737 case UMOD:
2738 if (HONOR_SNANS (x))
2739 return 1;
2740 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2741 return flag_trapping_math;
2742 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2743 return 1;
2744 break;
2746 case EXPR_LIST:
2747 /* An EXPR_LIST is used to represent a function call. This
2748 certainly may trap. */
2749 return 1;
2751 case GE:
2752 case GT:
2753 case LE:
2754 case LT:
2755 case LTGT:
2756 case COMPARE:
2757 /* Some floating point comparisons may trap. */
2758 if (!flag_trapping_math)
2759 break;
2760 /* ??? There is no machine independent way to check for tests that trap
2761 when COMPARE is used, though many targets do make this distinction.
2762 For instance, sparc uses CCFPE for compares which generate exceptions
2763 and CCFP for compares which do not generate exceptions. */
2764 if (HONOR_NANS (x))
2765 return 1;
2766 /* But often the compare has some CC mode, so check operand
2767 modes as well. */
2768 if (HONOR_NANS (XEXP (x, 0))
2769 || HONOR_NANS (XEXP (x, 1)))
2770 return 1;
2771 break;
2773 case EQ:
2774 case NE:
2775 if (HONOR_SNANS (x))
2776 return 1;
2777 /* Often comparison is CC mode, so check operand modes. */
2778 if (HONOR_SNANS (XEXP (x, 0))
2779 || HONOR_SNANS (XEXP (x, 1)))
2780 return 1;
2781 break;
2783 case FIX:
2784 /* Conversion of floating point might trap. */
2785 if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
2786 return 1;
2787 break;
2789 case NEG:
2790 case ABS:
2791 case SUBREG:
2792 /* These operations don't trap even with floating point. */
2793 break;
2795 default:
2796 /* Any floating arithmetic may trap. */
2797 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2798 return 1;
2801 fmt = GET_RTX_FORMAT (code);
2802 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2804 if (fmt[i] == 'e')
2806 if (may_trap_p_1 (XEXP (x, i), flags))
2807 return 1;
2809 else if (fmt[i] == 'E')
2811 int j;
2812 for (j = 0; j < XVECLEN (x, i); j++)
2813 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2814 return 1;
2817 return 0;
2820 /* Return nonzero if evaluating rtx X might cause a trap. */
2823 may_trap_p (const_rtx x)
2825 return may_trap_p_1 (x, 0);
2828 /* Same as above, but additionally return nonzero if evaluating rtx X might
2829 cause a fault. We define a fault for the purpose of this function as an
2830 erroneous execution condition that cannot be encountered during the normal
2831 execution of a valid program; the typical example is an unaligned memory
2832 access on a strict alignment machine. The compiler guarantees that it
2833 doesn't generate code that will fault from a valid program, but this
2834 guarantee doesn't mean anything for individual instructions. Consider
2835 the following example:
2837 struct S { int d; union { char *cp; int *ip; }; };
2839 int foo(struct S *s)
2841 if (s->d == 1)
2842 return *s->ip;
2843 else
2844 return *s->cp;
2847 on a strict alignment machine. In a valid program, foo will never be
2848 invoked on a structure for which d is equal to 1 and the underlying
2849 unique field of the union not aligned on a 4-byte boundary, but the
2850 expression *s->ip might cause a fault if considered individually.
2852 At the RTL level, potentially problematic expressions will almost always
2853 satisfy may_trap_p; for example, the above dereference can be emitted as
2854 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2855 However, suppose that foo is inlined in a caller that causes s->cp to
2856 point to a local character variable and guarantees that s->d is not set
2857 to 1; foo may have been effectively translated into pseudo-RTL as:
2859 if ((reg:SI) == 1)
2860 (set (reg:SI) (mem:SI (%fp - 7)))
2861 else
2862 (set (reg:QI) (mem:QI (%fp - 7)))
2864 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2865 memory reference to a stack slot, but it will certainly cause a fault
2866 on a strict alignment machine. */
2869 may_trap_or_fault_p (const_rtx x)
2871 return may_trap_p_1 (x, 1);
2874 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2875 i.e., an inequality. */
2878 inequality_comparisons_p (const_rtx x)
2880 const char *fmt;
2881 int len, i;
2882 const enum rtx_code code = GET_CODE (x);
2884 switch (code)
2886 case REG:
2887 case SCRATCH:
2888 case PC:
2889 case CC0:
2890 CASE_CONST_ANY:
2891 case CONST:
2892 case LABEL_REF:
2893 case SYMBOL_REF:
2894 return 0;
2896 case LT:
2897 case LTU:
2898 case GT:
2899 case GTU:
2900 case LE:
2901 case LEU:
2902 case GE:
2903 case GEU:
2904 return 1;
2906 default:
2907 break;
2910 len = GET_RTX_LENGTH (code);
2911 fmt = GET_RTX_FORMAT (code);
2913 for (i = 0; i < len; i++)
2915 if (fmt[i] == 'e')
2917 if (inequality_comparisons_p (XEXP (x, i)))
2918 return 1;
2920 else if (fmt[i] == 'E')
2922 int j;
2923 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2924 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2925 return 1;
2929 return 0;
2932 /* Replace any occurrence of FROM in X with TO. The function does
2933 not descend into CONST_DOUBLE while doing the replacement.
2935 Note that copying is not done so X must not be shared unless all copies
2936 are to be modified.
2938 ALL_REGS is true if we want to replace all REGs equal to FROM, not just
2939 those pointer-equal ones. */
2942 replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
2944 int i, j;
2945 const char *fmt;
2947 if (x == from)
2948 return to;
2950 /* Allow this function to make replacements in EXPR_LISTs. */
2951 if (x == 0)
2952 return 0;
2954 if (all_regs
2955 && REG_P (x)
2956 && REG_P (from)
2957 && REGNO (x) == REGNO (from))
2959 gcc_assert (GET_MODE (x) == GET_MODE (from));
2960 return to;
2962 else if (GET_CODE (x) == SUBREG)
2964 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);
2966 if (CONST_INT_P (new_rtx))
2968 x = simplify_subreg (GET_MODE (x), new_rtx,
2969 GET_MODE (SUBREG_REG (x)),
2970 SUBREG_BYTE (x));
2971 gcc_assert (x);
2973 else
2974 SUBREG_REG (x) = new_rtx;
2976 return x;
2978 else if (GET_CODE (x) == ZERO_EXTEND)
2980 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);
2982 if (CONST_INT_P (new_rtx))
2984 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2985 new_rtx, GET_MODE (XEXP (x, 0)));
2986 gcc_assert (x);
2988 else
2989 XEXP (x, 0) = new_rtx;
2991 return x;
2994 fmt = GET_RTX_FORMAT (GET_CODE (x));
2995 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2997 if (fmt[i] == 'e')
2998 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
2999 else if (fmt[i] == 'E')
3000 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3001 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
3002 from, to, all_regs);
3005 return x;
3008 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL. Also track
3009 the change in LABEL_NUSES if UPDATE_LABEL_NUSES. */
3011 void
3012 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3014 /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long. */
3015 rtx x = *loc;
3016 if (JUMP_TABLE_DATA_P (x))
3018 x = PATTERN (x);
3019 rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3020 int len = GET_NUM_ELEM (vec);
3021 for (int i = 0; i < len; ++i)
3023 rtx ref = RTVEC_ELT (vec, i);
3024 if (XEXP (ref, 0) == old_label)
3026 XEXP (ref, 0) = new_label;
3027 if (update_label_nuses)
3029 ++LABEL_NUSES (new_label);
3030 --LABEL_NUSES (old_label);
3034 return;
3037 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3038 field. This is not handled by the iterator because it doesn't
3039 handle unprinted ('0') fields. */
3040 if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3041 JUMP_LABEL (x) = new_label;
3043 subrtx_ptr_iterator::array_type array;
3044 FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3046 rtx *loc = *iter;
3047 if (rtx x = *loc)
3049 if (GET_CODE (x) == SYMBOL_REF
3050 && CONSTANT_POOL_ADDRESS_P (x))
3052 rtx c = get_pool_constant (x);
3053 if (rtx_referenced_p (old_label, c))
3055 /* Create a copy of constant C; replace the label inside
3056 but do not update LABEL_NUSES because uses in constant pool
3057 are not counted. */
3058 rtx new_c = copy_rtx (c);
3059 replace_label (&new_c, old_label, new_label, false);
3061 /* Add the new constant NEW_C to constant pool and replace
3062 the old reference to constant by new reference. */
3063 rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3064 *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3068 if ((GET_CODE (x) == LABEL_REF
3069 || GET_CODE (x) == INSN_LIST)
3070 && XEXP (x, 0) == old_label)
3072 XEXP (x, 0) = new_label;
3073 if (update_label_nuses)
3075 ++LABEL_NUSES (new_label);
3076 --LABEL_NUSES (old_label);
3083 void
3084 replace_label_in_insn (rtx_insn *insn, rtx old_label, rtx new_label,
3085 bool update_label_nuses)
3087 rtx insn_as_rtx = insn;
3088 replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3089 gcc_checking_assert (insn_as_rtx == insn);
3092 /* Return true if X is referenced in BODY. */
3094 bool
3095 rtx_referenced_p (const_rtx x, const_rtx body)
3097 subrtx_iterator::array_type array;
3098 FOR_EACH_SUBRTX (iter, array, body, ALL)
3099 if (const_rtx y = *iter)
3101 /* Check if a label_ref Y refers to label X. */
3102 if (GET_CODE (y) == LABEL_REF
3103 && LABEL_P (x)
3104 && label_ref_label (y) == x)
3105 return true;
3107 if (rtx_equal_p (x, y))
3108 return true;
3110 /* If Y is a reference to a pool constant, traverse the constant. */
3111 if (GET_CODE (y) == SYMBOL_REF
3112 && CONSTANT_POOL_ADDRESS_P (y))
3113 iter.substitute (get_pool_constant (y));
3115 return false;
3118 /* If INSN is a tablejump return true and store the label (before jump table) to
3119 *LABELP and the jump table to *TABLEP. LABELP and TABLEP may be NULL. */
3121 bool
3122 tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
3123 rtx_jump_table_data **tablep)
3125 if (!JUMP_P (insn))
3126 return false;
3128 rtx target = JUMP_LABEL (insn);
3129 if (target == NULL_RTX || ANY_RETURN_P (target))
3130 return false;
3132 rtx_insn *label = as_a<rtx_insn *> (target);
3133 rtx_insn *table = next_insn (label);
3134 if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
3135 return false;
3137 if (labelp)
3138 *labelp = label;
3139 if (tablep)
3140 *tablep = as_a <rtx_jump_table_data *> (table);
3141 return true;
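/* A minimal usage sketch, assuming INSN is a candidate jump: fetch the
   jump table and walk its label references.

     rtx_jump_table_data *table;
     if (tablejump_p (insn, NULL, &table))
       {
         rtvec vec = table->get_labels ();
         for (int i = 0; i < GET_NUM_ELEM (vec); i++)
           ... XEXP (RTVEC_ELT (vec, i), 0) is a CODE_LABEL ...
       }  */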
3144 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
3145 constant that is not in the constant pool and is not in the condition
3146 of an IF_THEN_ELSE. */
3148 static int
3149 computed_jump_p_1 (const_rtx x)
3151 const enum rtx_code code = GET_CODE (x);
3152 int i, j;
3153 const char *fmt;
3155 switch (code)
3157 case LABEL_REF:
3158 case PC:
3159 return 0;
3161 case CONST:
3162 CASE_CONST_ANY:
3163 case SYMBOL_REF:
3164 case REG:
3165 return 1;
3167 case MEM:
3168 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3169 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3171 case IF_THEN_ELSE:
3172 return (computed_jump_p_1 (XEXP (x, 1))
3173 || computed_jump_p_1 (XEXP (x, 2)));
3175 default:
3176 break;
3179 fmt = GET_RTX_FORMAT (code);
3180 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3182 if (fmt[i] == 'e'
3183 && computed_jump_p_1 (XEXP (x, i)))
3184 return 1;
3186 else if (fmt[i] == 'E')
3187 for (j = 0; j < XVECLEN (x, i); j++)
3188 if (computed_jump_p_1 (XVECEXP (x, i, j)))
3189 return 1;
3192 return 0;
3195 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3197 Tablejumps and casesi insns are not considered indirect jumps;
3198 we can recognize them by a (use (label_ref)). */
3201 computed_jump_p (const rtx_insn *insn)
3203 int i;
3204 if (JUMP_P (insn))
3206 rtx pat = PATTERN (insn);
3208 /* If we have a JUMP_LABEL set, we're not a computed jump. */
3209 if (JUMP_LABEL (insn) != NULL)
3210 return 0;
3212 if (GET_CODE (pat) == PARALLEL)
3214 int len = XVECLEN (pat, 0);
3215 int has_use_labelref = 0;
3217 for (i = len - 1; i >= 0; i--)
3218 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3219 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3220 == LABEL_REF))
3222 has_use_labelref = 1;
3223 break;
3226 if (! has_use_labelref)
3227 for (i = len - 1; i >= 0; i--)
3228 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3229 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3230 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3231 return 1;
3233 else if (GET_CODE (pat) == SET
3234 && SET_DEST (pat) == pc_rtx
3235 && computed_jump_p_1 (SET_SRC (pat)))
3236 return 1;
3238 return 0;
3243 /* MEM has a PRE/POST-INC/DEC/MODIFY address X. Extract the operands of
3244 the equivalent add insn and pass the result to FN, using DATA as the
3245 final argument. */
3247 static int
3248 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3250 rtx x = XEXP (mem, 0);
3251 switch (GET_CODE (x))
3253 case PRE_INC:
3254 case POST_INC:
3256 int size = GET_MODE_SIZE (GET_MODE (mem));
3257 rtx r1 = XEXP (x, 0);
3258 rtx c = gen_int_mode (size, GET_MODE (r1));
3259 return fn (mem, x, r1, r1, c, data);
3262 case PRE_DEC:
3263 case POST_DEC:
3265 int size = GET_MODE_SIZE (GET_MODE (mem));
3266 rtx r1 = XEXP (x, 0);
3267 rtx c = gen_int_mode (-size, GET_MODE (r1));
3268 return fn (mem, x, r1, r1, c, data);
3271 case PRE_MODIFY:
3272 case POST_MODIFY:
3274 rtx r1 = XEXP (x, 0);
3275 rtx add = XEXP (x, 1);
3276 return fn (mem, x, r1, add, NULL, data);
3279 default:
3280 gcc_unreachable ();
3284 /* Traverse *LOC looking for MEMs that have autoinc addresses.
3285 For each such autoinc operation found, call FN, passing it
3286 the innermost enclosing MEM, the operation itself, the RTX modified
3287 by the operation, two RTXs (the second may be NULL) that, once
3288 added, represent the value to be held by the modified RTX
3289 afterwards, and DATA. FN is to return 0 to continue the
3290 traversal or any other value to have it returned to the caller of
3291 for_each_inc_dec. */
3294 for_each_inc_dec (rtx x,
3295 for_each_inc_dec_fn fn,
3296 void *data)
3298 subrtx_var_iterator::array_type array;
3299 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3301 rtx mem = *iter;
3302 if (mem
3303 && MEM_P (mem)
3304 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3306 int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3307 if (res != 0)
3308 return res;
3309 iter.skip_subrtxes ();
3312 return 0;
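/* A minimal usage sketch: COUNT_INC_DEC is a hypothetical callback
   matching for_each_inc_dec_fn that merely counts autoinc operations;
   returning 0 continues the traversal, anything else stops it and is
   returned to the caller.

     static int
     count_inc_dec (rtx, rtx, rtx, rtx, rtx, void *arg)
     {
       ++*(int *) arg;
       return 0;
     }

     int n = 0;
     for_each_inc_dec (PATTERN (insn), count_inc_dec, &n);  */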
3316 /* Searches X for any reference to REGNO, returning the rtx of the
3317 reference found if any. Otherwise, returns NULL_RTX. */
3320 regno_use_in (unsigned int regno, rtx x)
3322 const char *fmt;
3323 int i, j;
3324 rtx tem;
3326 if (REG_P (x) && REGNO (x) == regno)
3327 return x;
3329 fmt = GET_RTX_FORMAT (GET_CODE (x));
3330 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3332 if (fmt[i] == 'e')
3334 if ((tem = regno_use_in (regno, XEXP (x, i))))
3335 return tem;
3337 else if (fmt[i] == 'E')
3338 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3339 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3340 return tem;
3343 return NULL_RTX;
3346 /* Return a value indicating whether OP, an operand of a commutative
3347 operation, is preferred as the first or second operand. The more
3348 positive the value, the stronger the preference for being the first
3349 operand. */
3352 commutative_operand_precedence (rtx op)
3354 enum rtx_code code = GET_CODE (op);
3356 /* Constants always become the second operand. Prefer "nice" constants. */
3357 if (code == CONST_INT)
3358 return -8;
3359 if (code == CONST_WIDE_INT)
3360 return -7;
3361 if (code == CONST_DOUBLE)
3362 return -7;
3363 if (code == CONST_FIXED)
3364 return -7;
3365 op = avoid_constant_pool_reference (op);
3366 code = GET_CODE (op);
3368 switch (GET_RTX_CLASS (code))
3370 case RTX_CONST_OBJ:
3371 if (code == CONST_INT)
3372 return -6;
3373 if (code == CONST_WIDE_INT)
3374 return -6;
3375 if (code == CONST_DOUBLE)
3376 return -5;
3377 if (code == CONST_FIXED)
3378 return -5;
3379 return -4;
3381 case RTX_EXTRA:
3382 /* SUBREGs of objects should come second. */
3383 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3384 return -3;
3385 return 0;
3387 case RTX_OBJ:
3388 /* Complex expressions should come first, so decrease the priority
3389 of objects. Prefer pointer objects over non-pointer objects. */
3390 if ((REG_P (op) && REG_POINTER (op))
3391 || (MEM_P (op) && MEM_POINTER (op)))
3392 return -1;
3393 return -2;
3395 case RTX_COMM_ARITH:
3396 /* Prefer operands that are themselves commutative to be first.
3397 This helps to make things linear. In particular,
3398 (and (and (reg) (reg)) (not (reg))) is canonical. */
3399 return 4;
3401 case RTX_BIN_ARITH:
3402 /* If only one operand is a binary expression, it will be the first
3403 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3404 is canonical, although it will usually be further simplified. */
3405 return 2;
3407 case RTX_UNARY:
3408 /* Then prefer NEG and NOT. */
3409 if (code == NEG || code == NOT)
3410 return 1;
3411 /* FALLTHRU */
3413 default:
3414 return 0;
3418 /* Return 1 iff it is necessary to swap the operands of a commutative
3419 operation in order to canonicalize the expression. */
3421 bool
3422 swap_commutative_operands_p (rtx x, rtx y)
3424 return (commutative_operand_precedence (x)
3425 < commutative_operand_precedence (y));
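/* A minimal usage sketch, assuming OP0, OP1 and MODE are in hand: put
   the operands of a commutative operation into canonical order before
   constructing it.

     if (swap_commutative_operands_p (op0, op1))
       std::swap (op0, op1);
     rtx sum = gen_rtx_PLUS (mode, op0, op1);  */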
3428 /* Return 1 if X is an autoincrement side effect and the register is
3429 not the stack pointer. */
3431 auto_inc_p (const_rtx x)
3433 switch (GET_CODE (x))
3435 case PRE_INC:
3436 case POST_INC:
3437 case PRE_DEC:
3438 case POST_DEC:
3439 case PRE_MODIFY:
3440 case POST_MODIFY:
3441 /* There are no REG_INC notes for SP. */
3442 if (XEXP (x, 0) != stack_pointer_rtx)
3443 return 1;
3444 default:
3445 break;
3447 return 0;
3450 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3452 loc_mentioned_in_p (rtx *loc, const_rtx in)
3454 enum rtx_code code;
3455 const char *fmt;
3456 int i, j;
3458 if (!in)
3459 return 0;
3461 code = GET_CODE (in);
3462 fmt = GET_RTX_FORMAT (code);
3463 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3465 if (fmt[i] == 'e')
3467 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3468 return 1;
3470 else if (fmt[i] == 'E')
3471 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3472 if (loc == &XVECEXP (in, i, j)
3473 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3474 return 1;
3476 return 0;
3479 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3480 and SUBREG_BYTE, return the bit offset where the subreg begins
3481 (counting from the least significant bit of the operand). */
3483 unsigned int
3484 subreg_lsb_1 (machine_mode outer_mode,
3485 machine_mode inner_mode,
3486 unsigned int subreg_byte)
3488 unsigned int bitpos;
3489 unsigned int byte;
3490 unsigned int word;
3492 /* A paradoxical subreg begins at bit position 0. */
3493 if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
3494 return 0;
3496 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3497 /* If the subreg crosses a word boundary ensure that
3498 it also begins and ends on a word boundary. */
3499 gcc_assert (!((subreg_byte % UNITS_PER_WORD
3500 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3501 && (subreg_byte % UNITS_PER_WORD
3502 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3504 if (WORDS_BIG_ENDIAN)
3505 word = (GET_MODE_SIZE (inner_mode)
3506 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3507 else
3508 word = subreg_byte / UNITS_PER_WORD;
3509 bitpos = word * BITS_PER_WORD;
3511 if (BYTES_BIG_ENDIAN)
3512 byte = (GET_MODE_SIZE (inner_mode)
3513 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3514 else
3515 byte = subreg_byte % UNITS_PER_WORD;
3516 bitpos += byte * BITS_PER_UNIT;
3518 return bitpos;
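/* A worked example, assuming a little-endian target (neither
   BYTES_BIG_ENDIAN nor WORDS_BIG_ENDIAN) with 4-byte words: the high
   half of a DImode value accessed as (subreg:SI (reg:DI ...) 4) starts
   at bit 32, since word = 4 / 4 = 1 and byte = 4 % 4 = 0:

     subreg_lsb_1 (SImode, DImode, 4) == 32  */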
3521 /* Given a subreg X, return the bit offset where the subreg begins
3522 (counting from the least significant bit of the reg). */
3524 unsigned int
3525 subreg_lsb (const_rtx x)
3527 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3528 SUBREG_BYTE (x));
3531 /* Fill in information about a subreg of a hard register.
3532 xregno - A regno of an inner hard subreg_reg (or what will become one).
3533 xmode - The mode of xregno.
3534 offset - The byte offset.
3535 ymode - The mode of a top level SUBREG (or what may become one).
3536 info - Pointer to structure to fill in.
3538 Rather than considering one particular inner register (and thus one
3539 particular "outer" register) in isolation, this function really uses
3540 XREGNO as a model for a sequence of isomorphic hard registers. Thus the
3541 function does not check whether adding INFO->offset to XREGNO gives
3542 a valid hard register; even if INFO->offset + XREGNO is out of range,
3543 there might be another register of the same type that is in range.
3544 Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
3545 register, since that can depend on things like whether the final
3546 register number is even or odd. Callers that want to check whether
3547 this particular subreg can be replaced by a simple (reg ...) should
3548 use simplify_subreg_regno. */
3550 void
3551 subreg_get_info (unsigned int xregno, machine_mode xmode,
3552 unsigned int offset, machine_mode ymode,
3553 struct subreg_info *info)
3555 int nregs_xmode, nregs_ymode;
3556 int mode_multiple, nregs_multiple;
3557 int offset_adj, y_offset, y_offset_adj;
3558 int regsize_xmode, regsize_ymode;
3559 bool rknown;
3561 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3563 rknown = false;
3565 /* If there are holes in a non-scalar mode in registers, we expect
3566 that it is made up of its units concatenated together. */
3567 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3569 machine_mode xmode_unit;
3571 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3572 xmode_unit = GET_MODE_INNER (xmode);
3573 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3574 gcc_assert (nregs_xmode
3575 == (GET_MODE_NUNITS (xmode)
3576 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3577 gcc_assert (hard_regno_nregs[xregno][xmode]
3578 == (hard_regno_nregs[xregno][xmode_unit]
3579 * GET_MODE_NUNITS (xmode)));
3581 /* You can only ask for a SUBREG of a value with holes in the middle
3582 if you don't cross the holes. (Such a SUBREG should be done by
3583 picking a different register class, or doing it in memory if
3584 necessary.) An example of a value with holes is XCmode on 32-bit
3585 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3586 3 for each part, but in memory it's two 128-bit parts.
3587 Padding is assumed to be at the end (not necessarily the 'high part')
3588 of each unit. */
3589 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3590 < GET_MODE_NUNITS (xmode))
3591 && (offset / GET_MODE_SIZE (xmode_unit)
3592 != ((offset + GET_MODE_SIZE (ymode) - 1)
3593 / GET_MODE_SIZE (xmode_unit))))
3595 info->representable_p = false;
3596 rknown = true;
3599 else
3600 nregs_xmode = hard_regno_nregs[xregno][xmode];
3602 nregs_ymode = hard_regno_nregs[xregno][ymode];
3604 /* Paradoxical subregs are otherwise valid. */
3605 if (!rknown
3606 && offset == 0
3607 && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
3609 info->representable_p = true;
3610 /* If this is a big endian paradoxical subreg, which uses more
3611 actual hard registers than the original register, we must
3612 return a negative offset so that we find the proper highpart
3613 of the register. */
3614 if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3615 ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
3616 info->offset = nregs_xmode - nregs_ymode;
3617 else
3618 info->offset = 0;
3619 info->nregs = nregs_ymode;
3620 return;
3623 /* If registers store different numbers of bits in the different
3624 modes, we cannot generally form this subreg. */
3625 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3626 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3627 && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
3628 && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
3630 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3631 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3632 if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
3634 info->representable_p = false;
3635 info->nregs
3636 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3637 info->offset = offset / regsize_xmode;
3638 return;
3640 if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
3642 info->representable_p = false;
3643 info->nregs
3644 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3645 info->offset = offset / regsize_xmode;
3646 return;
3648 /* It's not valid to extract a subreg of mode YMODE at OFFSET that
3649 would go outside of XMODE. */
3650 if (!rknown
3651 && GET_MODE_SIZE (ymode) + offset > GET_MODE_SIZE (xmode))
3653 info->representable_p = false;
3654 info->nregs = nregs_ymode;
3655 info->offset = offset / regsize_xmode;
3656 return;
3658 /* Quick exit for the simple and common case of extracting whole
3659 subregisters from a multiregister value. */
3660 /* ??? It would be better to integrate this into the code below,
3661 if we can generalize the concept enough and figure out how
3662 odd-sized modes can coexist with the other weird cases we support. */
3663 if (!rknown
3664 && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
3665 && regsize_xmode == regsize_ymode
3666 && (offset % regsize_ymode) == 0)
3668 info->representable_p = true;
3669 info->nregs = nregs_ymode;
3670 info->offset = offset / regsize_ymode;
3671 gcc_assert (info->offset + info->nregs <= nregs_xmode);
3672 return;
3676 /* Lowpart subregs are otherwise valid. */
3677 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3679 info->representable_p = true;
3680 rknown = true;
3682 if (offset == 0 || nregs_xmode == nregs_ymode)
3684 info->offset = 0;
3685 info->nregs = nregs_ymode;
3686 return;
3690 /* This should always pass, otherwise we don't know how to verify
3691 the constraint. These conditions may be relaxed but
3692 subreg_regno_offset would need to be redesigned. */
3693 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3694 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3696 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
3697 && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
3699 HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
3700 HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
3701 HOST_WIDE_INT off_low = offset & (ysize - 1);
3702 HOST_WIDE_INT off_high = offset & ~(ysize - 1);
3703 offset = (xsize - ysize - off_high) | off_low;
3705 /* The XMODE value can be seen as a vector of NREGS_XMODE
3706 values. The subreg must represent a lowpart of given field.
3707 Compute what field it is. */
3708 offset_adj = offset;
3709 offset_adj -= subreg_lowpart_offset (ymode,
3710 mode_for_size (GET_MODE_BITSIZE (xmode)
3711 / nregs_xmode,
3712 MODE_INT, 0));
3714 /* Size of ymode must not be greater than the size of xmode. */
3715 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3716 gcc_assert (mode_multiple != 0);
3718 y_offset = offset / GET_MODE_SIZE (ymode);
3719 y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
3720 nregs_multiple = nregs_xmode / nregs_ymode;
3722 gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
3723 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3725 if (!rknown)
3727 info->representable_p = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
3728 rknown = true;
3730 info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
3731 info->nregs = nregs_ymode;
3734 /* This function returns the regno offset of a subreg expression.
3735 xregno - A regno of an inner hard subreg_reg (or what will become one).
3736 xmode - The mode of xregno.
3737 offset - The byte offset.
3738 ymode - The mode of a top level SUBREG (or what may become one).
3739 RETURN - The regno offset which would be used. */
3740 unsigned int
3741 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
3742 unsigned int offset, machine_mode ymode)
3744 struct subreg_info info;
3745 subreg_get_info (xregno, xmode, offset, ymode, &info);
3746 return info.offset;
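/* A worked example, assuming 4-byte hard registers, so that DImode
   occupies a register pair and SImode a single register: on a
   little-endian target, (subreg:SI (reg:DI k) 4) refers to the second
   register of the pair.

     subreg_regno_offset (k, DImode, 4, SImode) == 1  */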
3749 /* This function returns true when the offset is representable via
3750 subreg_offset in the given regno.
3751 xregno - A regno of an inner hard subreg_reg (or what will become one).
3752 xmode - The mode of xregno.
3753 offset - The byte offset.
3754 ymode - The mode of a top level SUBREG (or what may become one).
3755 RETURN - Whether the offset is representable. */
3756 bool
3757 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
3758 unsigned int offset, machine_mode ymode)
3760 struct subreg_info info;
3761 subreg_get_info (xregno, xmode, offset, ymode, &info);
3762 return info.representable_p;
3765 /* Return the number of a YMODE register to which
3767 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3769 can be simplified. Return -1 if the subreg can't be simplified.
3771 XREGNO is a hard register number. */
3774 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
3775 unsigned int offset, machine_mode ymode)
3777 struct subreg_info info;
3778 unsigned int yregno;
3780 #ifdef CANNOT_CHANGE_MODE_CLASS
3781 /* Give the backend a chance to disallow the mode change. */
3782 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3783 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3784 && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
3785 /* We can use mode change in LRA for some transformations. */
3786 && ! lra_in_progress)
3787 return -1;
3788 #endif
3790 /* We shouldn't simplify stack-related registers. */
3791 if ((!reload_completed || frame_pointer_needed)
3792 && xregno == FRAME_POINTER_REGNUM)
3793 return -1;
3795 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3796 && xregno == ARG_POINTER_REGNUM)
3797 return -1;
3799 if (xregno == STACK_POINTER_REGNUM
3800 /* We should convert hard stack register in LRA if it is
3801 possible. */
3802 && ! lra_in_progress)
3803 return -1;
3805 /* Try to get the register offset. */
3806 subreg_get_info (xregno, xmode, offset, ymode, &info);
3807 if (!info.representable_p)
3808 return -1;
3810 /* Make sure that the offsetted register value is in range. */
3811 yregno = xregno + info.offset;
3812 if (!HARD_REGISTER_NUM_P (yregno))
3813 return -1;
3815 /* See whether (reg:YMODE YREGNO) is valid.
3817 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3818 This is a kludge to work around how complex FP arguments are passed
3819 on IA-64 and should be fixed. See PR target/49226. */
3820 if (!HARD_REGNO_MODE_OK (yregno, ymode)
3821 && HARD_REGNO_MODE_OK (xregno, xmode))
3822 return -1;
3824 return (int) yregno;
3827 /* Return the final regno that a subreg expression refers to. */
3828 unsigned int
3829 subreg_regno (const_rtx x)
3831 unsigned int ret;
3832 rtx subreg = SUBREG_REG (x);
3833 int regno = REGNO (subreg);
3835 ret = regno + subreg_regno_offset (regno,
3836 GET_MODE (subreg),
3837 SUBREG_BYTE (x),
3838 GET_MODE (x));
3839 return ret;
3843 /* Return the number of registers that a subreg expression refers
3844 to. */
3845 unsigned int
3846 subreg_nregs (const_rtx x)
3848 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3851 /* Return the number of registers that the subreg expression X, whose
3852 inner register has number REGNO, refers to. This is a copy of
3853 subreg_nregs above, changed so that the regno can be passed in. */
3855 unsigned int
3856 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
3858 struct subreg_info info;
3859 rtx subreg = SUBREG_REG (x);
3861 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3862 &info);
3863 return info.nregs;
3866 struct parms_set_data
3868 int nregs;
3869 HARD_REG_SET regs;
3872 /* Helper function for noticing stores to parameter registers. */
3873 static void
3874 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3876 struct parms_set_data *const d = (struct parms_set_data *) data;
3877 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3878 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3880 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3881 d->nregs--;
3885 /* Look backward for the first parameter to be loaded.
3886 Note that loads of all parameters will not necessarily be
3887 found if CSE has eliminated some of them (e.g., an argument
3888 to the outer function is passed down as a parameter).
3889 Do not skip BOUNDARY. */
3890 rtx_insn *
3891 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
3893 struct parms_set_data parm;
3894 rtx p;
3895 rtx_insn *before, *first_set;
3897 /* Since different machines initialize their parameter registers
3898 in different orders, assume nothing. Collect the set of all
3899 parameter registers. */
3900 CLEAR_HARD_REG_SET (parm.regs);
3901 parm.nregs = 0;
3902 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3903 if (GET_CODE (XEXP (p, 0)) == USE
3904 && REG_P (XEXP (XEXP (p, 0), 0))
3905 && !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
3907 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3909 /* We only care about registers which can hold function
3910 arguments. */
3911 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3912 continue;
3914 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3915 parm.nregs++;
3917 before = call_insn;
3918 first_set = call_insn;
3920 /* Search backward for the first set of a register in this set. */
3921 while (parm.nregs && before != boundary)
3923 before = PREV_INSN (before);
3925 /* It is possible that some loads got CSEed from one call to
3926 another. Stop in that case. */
3927 if (CALL_P (before))
3928 break;
3930 /* Our caller must either ensure that we will find all sets
3931 (in case the code has not been optimized yet), or guard
3932 against possible intervening labels by setting BOUNDARY to the
3933 preceding CODE_LABEL. */
3934 if (LABEL_P (before))
3936 gcc_assert (before == boundary);
3937 break;
3940 if (INSN_P (before))
3942 int nregs_old = parm.nregs;
3943 note_stores (PATTERN (before), parms_set, &parm);
3944 /* If we found something that did not set a parameter reg,
3945 we're done. Do not keep going, as that might result
3946 in hoisting an insn before the setting of a pseudo
3947 that is used by the hoisted insn. */
3948 if (nregs_old != parm.nregs)
3949 first_set = before;
3950 else
3951 break;
3954 return first_set;
3957 /* Return true if we should avoid inserting code between INSN and preceding
3958 call instruction. */
3960 bool
3961 keep_with_call_p (const rtx_insn *insn)
3963 rtx set;
3965 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3967 if (REG_P (SET_DEST (set))
3968 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3969 && fixed_regs[REGNO (SET_DEST (set))]
3970 && general_operand (SET_SRC (set), VOIDmode))
3971 return true;
3972 if (REG_P (SET_SRC (set))
3973 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
3974 && REG_P (SET_DEST (set))
3975 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3976 return true;
3977 /* There may be a stack pop just after the call and before the store
3978 of the return register. Search for the actual store when deciding
3979 if we can break or not. */
3980 if (SET_DEST (set) == stack_pointer_rtx)
3982 /* This CONST_CAST is okay because next_nonnote_insn just
3983 returns its argument and we assign it to a const_rtx
3984 variable. */
3985 const rtx_insn *i2
3986 = next_nonnote_insn (const_cast<rtx_insn *> (insn));
3987 if (i2 && keep_with_call_p (i2))
3988 return true;
3991 return false;
3994 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3995 to non-complex jumps. That is, direct unconditional, conditional,
3996 and tablejumps, but not computed jumps or returns. It also does
3997 not apply to the fallthru case of a conditional jump. */
3999 bool
4000 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
4002 rtx tmp = JUMP_LABEL (jump_insn);
4003 rtx_jump_table_data *table;
4005 if (label == tmp)
4006 return true;
4008 if (tablejump_p (jump_insn, NULL, &table))
4010 rtvec vec = table->get_labels ();
4011 int i, veclen = GET_NUM_ELEM (vec);
4013 for (i = 0; i < veclen; ++i)
4014 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4015 return true;
4018 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4019 return true;
4021 return false;
4025 /* Return an estimate of the cost of computing rtx X.
4026 One use is in cse, to decide which expression to keep in the hash table.
4027 Another is in rtl generation, to pick the cheapest way to multiply.
4028 Other uses like the latter are expected in the future.
4030 X appears as operand OPNO in an expression with code OUTER_CODE.
4031 SPEED specifies whether costs optimized for speed or size should
4032 be returned. */
4035 rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
4036 int opno, bool speed)
4038 int i, j;
4039 enum rtx_code code;
4040 const char *fmt;
4041 int total;
4042 int factor;
4044 if (x == 0)
4045 return 0;
4047 if (GET_MODE (x) != VOIDmode)
4048 mode = GET_MODE (x);
4050 /* A size N times larger than UNITS_PER_WORD likely needs N times as
4051 many insns, taking N times as long. */
4052 factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
4053 if (factor == 0)
4054 factor = 1;
4056 /* Compute the default costs of certain things.
4057 Note that targetm.rtx_costs can override the defaults. */
4059 code = GET_CODE (x);
4060 switch (code)
4062 case MULT:
4063 /* Multiplication has time-complexity O(N*N), where N is the
4064 number of units (translated from digits) when using
4065 schoolbook long multiplication. */
4066 total = factor * factor * COSTS_N_INSNS (5);
4067 break;
4068 case DIV:
4069 case UDIV:
4070 case MOD:
4071 case UMOD:
4072 /* Similarly, complexity for schoolbook long division. */
4073 total = factor * factor * COSTS_N_INSNS (7);
4074 break;
4075 case USE:
4076 /* Used in combine.c as a marker. */
4077 total = 0;
4078 break;
4079 case SET:
4080 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4081 the mode for the factor. */
4082 mode = GET_MODE (SET_DEST (x));
4083 factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
4084 if (factor == 0)
4085 factor = 1;
4086 /* FALLTHRU */
4087 default:
4088 total = factor * COSTS_N_INSNS (1);
4091 switch (code)
4093 case REG:
4094 return 0;
4096 case SUBREG:
4097 total = 0;
4098 /* If we can't tie these modes, make this expensive. The larger
4099 the mode, the more expensive it is. */
4100 if (! MODES_TIEABLE_P (mode, GET_MODE (SUBREG_REG (x))))
4101 return COSTS_N_INSNS (2 + factor);
4102 break;
4104 default:
4105 if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
4106 return total;
4107 break;
4110 /* Sum the costs of the sub-rtx's, plus cost of this operation,
4111 which is already in total. */
4113 fmt = GET_RTX_FORMAT (code);
4114 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4115 if (fmt[i] == 'e')
4116 total += rtx_cost (XEXP (x, i), mode, code, i, speed);
4117 else if (fmt[i] == 'E')
4118 for (j = 0; j < XVECLEN (x, i); j++)
4119 total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);
4121 return total;
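/* A minimal usage sketch, assuming X, Y and MODE are in hand: compare
   two candidate SET sources by speed cost, as cse-style passes do.
   OPNO is 1 because SET_SRC is the second operand of a SET.

     if (rtx_cost (x, mode, SET, 1, true) < rtx_cost (y, mode, SET, 1, true))
       ... prefer X ...  */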
4124 /* Fill in the structure C with information about both speed and size rtx
4125 costs for X, which is operand OPNO in an expression with code OUTER. */
4127 void
4128 get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
4129 struct full_rtx_costs *c)
4131 c->speed = rtx_cost (x, mode, outer, opno, true);
4132 c->size = rtx_cost (x, mode, outer, opno, false);
4136 /* Return cost of address expression X.
4137 Expect that X is properly formed address reference.
4139 The SPEED parameter specifies whether costs optimized for speed or for
4140 size should be returned. */
4143 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4145 /* We may be asked for the cost of various unusual addresses, such as the
4146 operands of a push instruction. It is not worthwhile to complicate the
4147 target hook for such cases. */
4149 if (!memory_address_addr_space_p (mode, x, as))
4150 return 1000;
4152 return targetm.address_cost (x, mode, as, speed);
4155 /* If the target doesn't override, compute the cost as with arithmetic. */
4158 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4160 return rtx_cost (x, Pmode, MEM, 0, speed);
4164 unsigned HOST_WIDE_INT
4165 nonzero_bits (const_rtx x, machine_mode mode)
4167 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
4170 unsigned int
4171 num_sign_bit_copies (const_rtx x, machine_mode mode)
4173 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
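/* A minimal usage sketch. For X = (and:SI (reg:SI r) (const_int 255)),
   nonzero_bits (x, SImode) can have at most the low 8 bits set, so a
   following zero-extension from QImode is redundant; likewise a
   num_sign_bit_copies (x, SImode) of at least 25 would show that X
   already looks sign-extended from QImode.

     if ((nonzero_bits (x, SImode) & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
       ... the upper 24 bits of X are known to be zero ...  */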
4176 /* Return true if nonzero_bits1 might recurse into both operands
4177 of X. */
4179 static inline bool
4180 nonzero_bits_binary_arith_p (const_rtx x)
4182 if (!ARITHMETIC_P (x))
4183 return false;
4184 switch (GET_CODE (x))
4186 case AND:
4187 case XOR:
4188 case IOR:
4189 case UMIN:
4190 case UMAX:
4191 case SMIN:
4192 case SMAX:
4193 case PLUS:
4194 case MINUS:
4195 case MULT:
4196 case DIV:
4197 case UDIV:
4198 case MOD:
4199 case UMOD:
4200 return true;
4201 default:
4202 return false;
4206 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4207 It avoids exponential behavior in nonzero_bits1 when X has
4208 identical subexpressions on the first or the second level. */
4210 static unsigned HOST_WIDE_INT
4211 cached_nonzero_bits (const_rtx x, machine_mode mode, const_rtx known_x,
4212 machine_mode known_mode,
4213 unsigned HOST_WIDE_INT known_ret)
4215 if (x == known_x && mode == known_mode)
4216 return known_ret;
4218 /* Try to find identical subexpressions. If found call
4219 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4220 precomputed value for the subexpression as KNOWN_RET. */
4222 if (nonzero_bits_binary_arith_p (x))
4224 rtx x0 = XEXP (x, 0);
4225 rtx x1 = XEXP (x, 1);
4227 /* Check the first level. */
4228 if (x0 == x1)
4229 return nonzero_bits1 (x, mode, x0, mode,
4230 cached_nonzero_bits (x0, mode, known_x,
4231 known_mode, known_ret));
4233 /* Check the second level. */
4234 if (nonzero_bits_binary_arith_p (x0)
4235 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4236 return nonzero_bits1 (x, mode, x1, mode,
4237 cached_nonzero_bits (x1, mode, known_x,
4238 known_mode, known_ret));
4240 if (nonzero_bits_binary_arith_p (x1)
4241 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4242 return nonzero_bits1 (x, mode, x0, mode,
4243 cached_nonzero_bits (x0, mode, known_x,
4244 known_mode, known_ret));
4247 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4250 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4251 We don't let nonzero_bits recur into num_sign_bit_copies, because that
4252 is less useful. We can't allow both, because that results in exponential
4253 run time recursion. There is a nullstone testcase that triggered
4254 this. This macro avoids accidental uses of num_sign_bit_copies. */
4255 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4257 /* Given an expression, X, compute which bits in X can be nonzero.
4258 We don't care about bits outside of those defined in MODE.
4260 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4261 an arithmetic operation, we can do better. */
static unsigned HOST_WIDE_INT
nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
               machine_mode known_mode,
               unsigned HOST_WIDE_INT known_ret)
{
  unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
  unsigned HOST_WIDE_INT inner_nz;
  enum rtx_code code;
  machine_mode inner_mode;
  unsigned int mode_width = GET_MODE_PRECISION (mode);

  /* For floating-point and vector values, assume all bits are needed.  */
  if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return nonzero;

  /* If X is wider than MODE, use its mode instead.  */
  if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
    {
      mode = GET_MODE (x);
      nonzero = GET_MODE_MASK (mode);
      mode_width = GET_MODE_PRECISION (mode);
    }

  if (mode_width > HOST_BITS_PER_WIDE_INT)
    /* Our only callers in this case look for single bit values.  So
       just return the mode mask.  Those tests will then be false.  */
    return nonzero;

  /* If MODE is wider than X, but both are a single word for both the host
     and target machines, we can compute this from which bits of the
     object might be nonzero in its own mode, taking into account the fact
     that on many CISC machines, accessing an object in a wider mode
     causes the high-order bits to become undefined.  So they are
     not known to be zero.  */

  if (!WORD_REGISTER_OPERATIONS
      && GET_MODE (x) != VOIDmode
      && GET_MODE (x) != mode
      && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
      && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
      && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
    {
      nonzero &= cached_nonzero_bits (x, GET_MODE (x),
                                      known_x, known_mode, known_ret);
      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
      return nonzero;
    }

  /* Please keep nonzero_bits_binary_arith_p above in sync with
     the code in the switch below.  */
  code = GET_CODE (x);
  switch (code)
    {
    case REG:
#if defined(POINTERS_EXTEND_UNSIGNED)
      /* If pointers extend unsigned and this is a pointer in Pmode, say that
         all the bits above ptr_mode are known to be zero.  */
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
          && REG_POINTER (x)
          && !targetm.have_ptr_extend ())
        nonzero &= GET_MODE_MASK (ptr_mode);
#endif

      /* Include declared information about alignment of pointers.  */
      /* ??? We don't properly preserve REG_POINTER changes across
         pointer-to-integer casts, so we can't trust it except for
         things that we know must be pointers.  See execute/960116-1.c.  */
      if ((x == stack_pointer_rtx
           || x == frame_pointer_rtx
           || x == arg_pointer_rtx)
          && REGNO_POINTER_ALIGN (REGNO (x)))
        {
          unsigned HOST_WIDE_INT alignment
            = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;

#ifdef PUSH_ROUNDING
          /* If PUSH_ROUNDING is defined, it is possible for the
             stack to be momentarily aligned only to that amount,
             so we pick the least alignment.  */
          if (x == stack_pointer_rtx && PUSH_ARGS)
            alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
                             alignment);
#endif

          nonzero &= ~(alignment - 1);
        }

      {
        unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
        rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
                                                  known_mode, known_ret,
                                                  &nonzero_for_hook);

        if (new_rtx)
          nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
                                                   known_mode, known_ret);

        return nonzero_for_hook;
      }

    case CONST_INT:
      /* If X is negative in MODE, sign-extend the value.  */
      if (SHORT_IMMEDIATES_SIGN_EXTEND && INTVAL (x) > 0
          && mode_width < BITS_PER_WORD
          && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1)))
             != 0)
        return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);

      return UINTVAL (x);

    case MEM:
      /* In many, if not most, RISC machines, reading a byte from memory
         zeros the rest of the register.  Noticing that fact saves a lot
         of extra zero-extends.  */
      if (load_extend_op (GET_MODE (x)) == ZERO_EXTEND)
        nonzero &= GET_MODE_MASK (GET_MODE (x));
      break;

    case EQ:  case NE:
    case UNEQ:  case LTGT:
    case GT:  case GTU:  case UNGT:
    case LT:  case LTU:  case UNLT:
    case GE:  case GEU:  case UNGE:
    case LE:  case LEU:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If this produces an integer result, we know which bits are set.
         Code here used to clear bits outside the mode of X, but that is
         now done above.  */
      /* Mind that MODE is the mode the caller wants to look at this
         operation in, and not the actual operation mode.  We can wind
         up with (subreg:DI (gt:V4HI x y)), and we don't have anything
         that describes the results of a vector compare.  */
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
          && mode_width <= HOST_BITS_PER_WIDE_INT)
        nonzero = STORE_FLAG_VALUE;
      break;

    case NEG:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
         and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
          == GET_MODE_PRECISION (GET_MODE (x)))
        nonzero = 1;
#endif

      if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
        nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
      break;

    case ABS:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
         and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
          == GET_MODE_PRECISION (GET_MODE (x)))
        nonzero = 1;
#endif
      break;

    case TRUNCATE:
      nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
                                       known_x, known_mode, known_ret)
                  & GET_MODE_MASK (mode));
      break;

    case ZERO_EXTEND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
      break;

    case SIGN_EXTEND:
      /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
         Otherwise, show all the bits in the outer mode but not the inner
         may be nonzero.  */
      inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        {
          inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
          if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
            inner_nz |= (GET_MODE_MASK (mode)
                         & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
        }

      nonzero &= inner_nz;
      break;

    case AND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret)
                 & cached_nonzero_bits (XEXP (x, 1), mode,
                                        known_x, known_mode, known_ret);
      break;

    case XOR:   case IOR:
    case UMIN:  case UMAX:  case SMIN:  case SMAX:
      {
        unsigned HOST_WIDE_INT nonzero0
          = cached_nonzero_bits (XEXP (x, 0), mode,
                                 known_x, known_mode, known_ret);

        /* Don't call nonzero_bits for the second time if it cannot change
           anything.  */
        if ((nonzero & nonzero0) != nonzero)
          nonzero &= nonzero0
                     | cached_nonzero_bits (XEXP (x, 1), mode,
                                            known_x, known_mode, known_ret);
      }
      break;

    case PLUS:  case MINUS:
    case MULT:
    case DIV:   case UDIV:
    case MOD:   case UMOD:
      /* We can apply the rules of arithmetic to compute the number of
         high- and low-order zero bits of these operations.  We start by
         computing the width (position of the highest-order nonzero bit)
         and the number of low-order zero bits for each value.  */
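      /* As an illustration: if nz0 == 0xff (width0 == 8, low0 == 0)
         and nz1 == 0xf0 (width1 == 8, low1 == 4), a PLUS fits in
         MAX (8, 8) + 1 == 9 bits with no known low-order zeros, while
         a MULT fits in 8 + 8 == 16 bits and its low 0 + 4 == 4 bits
         are known to be zero.  */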
      {
        unsigned HOST_WIDE_INT nz0
          = cached_nonzero_bits (XEXP (x, 0), mode,
                                 known_x, known_mode, known_ret);
        unsigned HOST_WIDE_INT nz1
          = cached_nonzero_bits (XEXP (x, 1), mode,
                                 known_x, known_mode, known_ret);
        int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
        int width0 = floor_log2 (nz0) + 1;
        int width1 = floor_log2 (nz1) + 1;
        int low0 = ctz_or_zero (nz0);
        int low1 = ctz_or_zero (nz1);
        unsigned HOST_WIDE_INT op0_maybe_minusp
          = nz0 & (HOST_WIDE_INT_1U << sign_index);
        unsigned HOST_WIDE_INT op1_maybe_minusp
          = nz1 & (HOST_WIDE_INT_1U << sign_index);
        unsigned int result_width = mode_width;
        int result_low = 0;

        switch (code)
          {
          case PLUS:
            result_width = MAX (width0, width1) + 1;
            result_low = MIN (low0, low1);
            break;
          case MINUS:
            result_low = MIN (low0, low1);
            break;
          case MULT:
            result_width = width0 + width1;
            result_low = low0 + low1;
            break;
          case DIV:
            if (width1 == 0)
              break;
            if (!op0_maybe_minusp && !op1_maybe_minusp)
              result_width = width0;
            break;
          case UDIV:
            if (width1 == 0)
              break;
            result_width = width0;
            break;
          case MOD:
            if (width1 == 0)
              break;
            if (!op0_maybe_minusp && !op1_maybe_minusp)
              result_width = MIN (width0, width1);
            result_low = MIN (low0, low1);
            break;
          case UMOD:
            if (width1 == 0)
              break;
            result_width = MIN (width0, width1);
            result_low = MIN (low0, low1);
            break;
          default:
            gcc_unreachable ();
          }

        if (result_width < mode_width)
          nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;

        if (result_low > 0)
          nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
      }
      break;

    case ZERO_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
        nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
      break;

    case SUBREG:
      /* If this is a SUBREG formed for a promoted variable that has
         been zero-extended, we know that at least the high-order bits
         are zero, though others might be too.  */
      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
        nonzero = GET_MODE_MASK (GET_MODE (x))
                  & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
                                         known_x, known_mode, known_ret);

      /* If the inner mode is a single word for both the host and target
         machines, we can compute this from which bits of the inner
         object might be nonzero.  */
      inner_mode = GET_MODE (SUBREG_REG (x));
      if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
          && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT)
        {
          nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
                                          known_x, known_mode, known_ret);

          /* On many CISC machines, accessing an object in a wider mode
             causes the high-order bits to become undefined.  So they are
             not known to be zero.  */
          rtx_code extend_op;
          if ((!WORD_REGISTER_OPERATIONS
               /* If this is a typical RISC machine, we only have to worry
                  about the way loads are extended.  */
               || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
                   ? val_signbit_known_set_p (inner_mode, nonzero)
                   : extend_op != ZERO_EXTEND)
               || (!MEM_P (SUBREG_REG (x)) && !REG_P (SUBREG_REG (x))))
              && GET_MODE_PRECISION (GET_MODE (x))
                 > GET_MODE_PRECISION (inner_mode))
            nonzero
              |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
        }
      break;

    case ASHIFTRT:
    case LSHIFTRT:
    case ASHIFT:
    case ROTATE:
      /* The nonzero bits are in two classes: any bits within MODE
         that aren't in GET_MODE (x) are always significant.  The rest of the
         nonzero bits are those that are significant in the operand of
         the shift when shifted the appropriate number of bits.  This
         shows that high-order bits are cleared by the right shift and
         low-order bits by left shifts.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
          && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
        {
          machine_mode inner_mode = GET_MODE (x);
          unsigned int width = GET_MODE_PRECISION (inner_mode);
          int count = INTVAL (XEXP (x, 1));
          unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
          unsigned HOST_WIDE_INT op_nonzero
            = cached_nonzero_bits (XEXP (x, 0), mode,
                                   known_x, known_mode, known_ret);
          unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
          unsigned HOST_WIDE_INT outer = 0;

          if (mode_width > width)
            outer = (op_nonzero & nonzero & ~mode_mask);

          if (code == LSHIFTRT)
            inner >>= count;
          else if (code == ASHIFTRT)
            {
              inner >>= count;

              /* If the sign bit may have been nonzero before the shift, we
                 need to mark all the places it could have been copied to
                 by the shift as possibly nonzero.  */
              if (inner & (HOST_WIDE_INT_1U << (width - 1 - count)))
                inner |= ((HOST_WIDE_INT_1U << count) - 1)
                         << (width - count);
            }
          else if (code == ASHIFT)
            inner <<= count;
          else
            inner = ((inner << (count % width)
                      | (inner >> (width - (count % width)))) & mode_mask);

          nonzero &= (outer | inner);
        }
      break;

    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
         that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
        nonzero
          |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      else
        nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
         that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
        nonzero
          |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      else
        nonzero = -1;
      break;

    case CLRSB:
      /* This is at most the number of bits in the mode minus 1.  */
      nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
        unsigned HOST_WIDE_INT nonzero_true
          = cached_nonzero_bits (XEXP (x, 1), mode,
                                 known_x, known_mode, known_ret);

        /* Don't call nonzero_bits for the second time if it cannot change
           anything.  */
        if ((nonzero & nonzero_true) != nonzero)
          nonzero &= nonzero_true
                     | cached_nonzero_bits (XEXP (x, 2), mode,
                                            known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}

/* See the macro definition above.  */
#undef cached_num_sign_bit_copies

/* Return true if num_sign_bit_copies1 might recurse into both operands
   of X.  */

static inline bool
num_sign_bit_copies_binary_arith_p (const_rtx x)
{
  if (!ARITHMETIC_P (x))
    return false;
  switch (GET_CODE (x))
    {
    case IOR:
    case AND:
    case XOR:
    case SMIN:
    case SMAX:
    case UMIN:
    case UMAX:
    case PLUS:
    case MINUS:
    case MULT:
      return true;
    default:
      return false;
    }
}

/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, machine_mode mode, const_rtx known_x,
                            machine_mode known_mode,
                            unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (num_sign_bit_copies_binary_arith_p (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
        return
          num_sign_bit_copies1 (x, mode, x0, mode,
                                cached_num_sign_bit_copies (x0, mode, known_x,
                                                            known_mode,
                                                            known_ret));

      /* Check the second level.  */
      if (num_sign_bit_copies_binary_arith_p (x0)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return
          num_sign_bit_copies1 (x, mode, x1, mode,
                                cached_num_sign_bit_copies (x1, mode, known_x,
                                                            known_mode,
                                                            known_ret));

      if (num_sign_bit_copies_binary_arith_p (x1)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return
          num_sign_bit_copies1 (x, mode, x0, mode,
                                cached_num_sign_bit_copies (x0, mode, known_x,
                                                            known_mode,
                                                            known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}

/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE; if MODE is
   VOIDmode, X will be used in its own mode.  The returned value will always
   be between 1 and the number of bits in MODE.  */
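/* For example, (sign_extend:DI (reg:SI 90)) looked at in DImode is
   known to have at least 64 - 32 + 1 == 33 sign bit copies, and
   (const_int -4) looked at in SImode has 30: the 32-bit pattern
   0xfffffffc starts with 30 one bits.  */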
static unsigned int
num_sign_bit_copies1 (const_rtx x, machine_mode mode, const_rtx known_x,
                      machine_mode known_mode,
                      unsigned int known_ret)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode inner_mode;
  int num0, num1, result;
  unsigned HOST_WIDE_INT nonzero;

  /* If we weren't given a mode, use the mode of X.  If the mode is still
     VOIDmode, we don't know anything.  Likewise if one of the modes is
     floating-point.  */

  if (mode == VOIDmode)
    mode = GET_MODE (x);

  if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return 1;

  /* For a smaller mode, just ignore the high bits.  */
  unsigned int bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
    {
      num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
                                         known_x, known_mode, known_ret);
      return MAX (1,
                  num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
    }

  if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
    {
      /* If this machine does not do all register operations on the entire
         register and MODE is wider than the mode of X, we can say nothing
         at all about the high-order bits.  */
      if (!WORD_REGISTER_OPERATIONS)
        return 1;

      /* Likewise on machines that do, if the mode of the object is smaller
         than a word and loads of that size don't sign extend, we can say
         nothing about the high order bits.  */
      if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
          && load_extend_op (GET_MODE (x)) != SIGN_EXTEND)
        return 1;
    }

  /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
     the code in the switch below.  */
  switch (code)
    {
    case REG:

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* If pointers extend signed and this is a pointer in Pmode, say that
         all the bits above ptr_mode are known to be sign bit copies.  */
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
          && mode == Pmode && REG_POINTER (x)
          && !targetm.have_ptr_extend ())
        return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
#endif

      {
        unsigned int copies_for_hook = 1, copies = 1;
        rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
                                                         known_mode, known_ret,
                                                         &copies_for_hook);

        if (new_rtx)
          copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
                                               known_mode, known_ret);

        if (copies > 1 || copies_for_hook > 1)
          return MAX (copies, copies_for_hook);

        /* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
      }
      break;

    case MEM:
      /* Some RISC machines sign-extend all loads of smaller than a word.  */
      if (load_extend_op (GET_MODE (x)) == SIGN_EXTEND)
        return MAX (1, ((int) bitwidth
                        - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
      break;

    case CONST_INT:
      /* If the constant is negative, take its 1's complement and remask.
         Then see how many zero bits we have.  */
      nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
          && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
        nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

    case SUBREG:
      /* If this is a SUBREG for a promoted object that is sign-extended
         and we are looking at it in a wider mode, we know that at least
         the high-order bits are sign bit copies.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
        {
          num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
                                             known_x, known_mode, known_ret);
          return MAX ((int) bitwidth
                      - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
                      num0);
        }

      /* For a smaller object, just ignore the high bits.  */
      inner_mode = GET_MODE (SUBREG_REG (x));
      if (bitwidth <= GET_MODE_PRECISION (inner_mode))
        {
          num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
                                             known_x, known_mode, known_ret);
          return
            MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode) - bitwidth));
        }

      /* For paradoxical SUBREGs on machines where all register operations
         affect the entire register, just look inside.  Note that we are
         passing MODE to the recursive call, so the number of sign bit copies
         will remain relative to that mode, not the inner mode.  */

      /* This works only if loads sign extend.  Otherwise, if we get a
         reload for the inner part, it may be loaded from the stack, and
         then we lose all sign bit copies that existed before the store
         to the stack.  */

      if (WORD_REGISTER_OPERATIONS
          && load_extend_op (inner_mode) == SIGN_EXTEND
          && paradoxical_subreg_p (x)
          && (MEM_P (SUBREG_REG (x)) || REG_P (SUBREG_REG (x))))
        return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
                                           known_x, known_mode, known_ret);
      break;

    case SIGN_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1)))
        return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
      break;

    case SIGN_EXTEND:
      return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
              + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
                                            known_x, known_mode, known_ret));

    case TRUNCATE:
      /* For a smaller object, just ignore the high bits.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
                                         known_x, known_mode, known_ret);
      return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
                                    - bitwidth)));

    case NOT:
      return cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);

    case ROTATE:  case ROTATERT:
      /* If we are rotating left by a number of bits less than the number
         of sign bit copies, we can just subtract that amount from the
         number.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < (int) bitwidth)
        {
          num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                             known_x, known_mode, known_ret);
          return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
                                 : (int) bitwidth - INTVAL (XEXP (x, 1))));
        }
      break;

    case NEG:
      /* In general, this subtracts one sign bit copy.  But if the value
         is known to be positive, the number of sign bit copies is the
         same as that of the input.  Finally, if the input has just one bit
         that might be nonzero, all the bits are copies of the sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
        return num0 > 1 ? num0 - 1 : 1;

      nonzero = nonzero_bits (XEXP (x, 0), mode);
      if (nonzero == 1)
        return bitwidth;

      if (num0 > 1
          && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
        num0--;

      return num0;

    case IOR:   case AND:   case XOR:
    case SMIN:  case SMAX:  case UMIN:  case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
         MIN and MAX operations always return one of the operands.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);

      /* If num1 is clearing some of the top bits then regardless of
         the other term, we are guaranteed to have at least that many
         high-order zero bits.  */
      if (code == AND
          && num1 > 1
          && bitwidth <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (XEXP (x, 1))
          && (UINTVAL (XEXP (x, 1))
              & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
        return num1;

      /* Similarly for IOR when setting high-order bits.  */
      if (code == IOR
          && num1 > 1
          && bitwidth <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (XEXP (x, 1))
          && (UINTVAL (XEXP (x, 1))
              & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
        return num1;

      return MIN (num0, num1);

    case PLUS:  case MINUS:
      /* For addition and subtraction, we can have a 1-bit carry.  However,
         if we are subtracting 1 from a positive number, there will not
         be such a carry.  Furthermore, if the positive number is known to
         be 0 or 1, we know the result is either -1 or 0.  */
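      /* E.g. if nonzero_bits says (reg:SI 90) is 0 or 1, then
         (plus:SI (reg:SI 90) (const_int -1)) is -1 or 0 and all 32
         bits are copies of the sign bit.  */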
      if (code == PLUS && XEXP (x, 1) == constm1_rtx
          && bitwidth <= HOST_BITS_PER_WIDE_INT)
        {
          nonzero = nonzero_bits (XEXP (x, 0), mode);
          if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
            return (nonzero == 1 || nonzero == 0 ? bitwidth
                    : bitwidth - floor_log2 (nonzero) - 1);
        }

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);
      result = MAX (1, MIN (num0, num1) - 1);

      return result;

    case MULT:
      /* The number of bits of the product is the sum of the number of
         bits of both terms.  However, unless one of the terms is known
         to be positive, we must allow for an additional bit since negating
         a negative number can remove one sign bit copy.  */

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);

      result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
      if (result > 0
          && (bitwidth > HOST_BITS_PER_WIDE_INT
              || (((nonzero_bits (XEXP (x, 0), mode)
                    & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
                  && ((nonzero_bits (XEXP (x, 1), mode)
                       & (HOST_WIDE_INT_1U << (bitwidth - 1)))
                      != 0))))
        result--;

      return MAX (1, result);

    case UDIV:
      /* The result must be <= the first operand.  If the first operand
         has the high bit set, we know nothing about the number of sign
         bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
        return 1;
      else if ((nonzero_bits (XEXP (x, 0), mode)
                & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
        return 1;
      else
        return cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                           known_x, known_mode, known_ret);

    case UMOD:
      /* The result must be <= the second operand.  If the second operand
         has (or just might have) the high bit set, we know nothing about
         the number of sign bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
        return 1;
      else if ((nonzero_bits (XEXP (x, 1), mode)
                & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
        return 1;
      else
        return cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                           known_x, known_mode, known_ret);

    case DIV:
      /* Similar to unsigned division, except that we have to worry about
         the case where the divisor is negative, in which case we have
         to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                           known_x, known_mode, known_ret);
      if (result > 1
          && (bitwidth > HOST_BITS_PER_WIDE_INT
              || (nonzero_bits (XEXP (x, 1), mode)
                  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
        result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                           known_x, known_mode, known_ret);
      if (result > 1
          && (bitwidth > HOST_BITS_PER_WIDE_INT
              || (nonzero_bits (XEXP (x, 1), mode)
                  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
        result--;

      return result;

    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
         sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0
          && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
        num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;

    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
          || INTVAL (XEXP (x, 1)) < 0
          || INTVAL (XEXP (x, 1)) >= (int) bitwidth
          || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
        return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));

    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
                                         known_x, known_mode, known_ret);
      return MIN (num0, num1);

    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
         Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
          && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
        nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */
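  /* For instance, a 32-bit value whose nonzero_bits mask is 0xf must
     have its 28 high-order bits clear, giving 32 - 3 - 1 == 28 known
     sign bit copies.  */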
  bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
         ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}

/* Calculate the rtx_cost of a single instruction.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
insn_rtx_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.
     We can't use single_set since we only have the pattern.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx x = XVECEXP (pat, 0, i);
          if (GET_CODE (x) == SET)
            {
              if (set)
                return 0;
              set = x;
            }
        }
      if (!set)
        return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}

/* Return an estimate of the cost of computing the sequence SEQ.  */

unsigned
seq_cost (const rtx_insn *seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
        cost += set_rtx_cost (set, speed);
      else
        cost++;
    }

  return cost;
}

/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonicalizing
   it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */
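/* For example, under rule (4), (le:SI (reg:SI 90) (const_int 4))
   becomes (lt:SI (reg:SI 90) (const_int 5)) and (geu:SI (reg:SI 90)
   (const_int 1)) becomes (gtu:SI (reg:SI 90) (const_int 0)).  */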
rtx
canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
                        rtx_insn **earliest,
                        rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx_insn *prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;

  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
          || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
         && op1 == CONST0_RTX (GET_MODE (op0))
         && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

      /* If comparison with cc0, import actual comparison from compare
         insn.  */
      if (op0 == cc0_rtx)
        {
          if ((prev = prev_nonnote_insn (prev)) == 0
              || !NONJUMP_INSN_P (prev)
              || (set = single_set (prev)) == 0
              || SET_DEST (set) != cc0_rtx)
            return 0;

          op0 = SET_SRC (set);
          op1 = CONST0_RTX (GET_MODE (op0));
          if (earliest)
            *earliest = prev;
        }

      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
        {
          op1 = XEXP (op0, 1);
          op0 = XEXP (op0, 0);
          continue;
        }
      else if (!REG_P (op0))
        break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
         stop if it isn't a single set or if it has a REG_INC note because
         we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
          || !NONJUMP_INSN_P (prev)
          || FIND_REG_INC_NOTE (prev, NULL_RTX)
          /* In cfglayout mode, there do not have to be labels at the
             beginning of a block, or jumps at the end, so the previous
             conditions would not stop us when we reach bb boundary.  */
          || BLOCK_FOR_INSN (prev) != bb)
        break;

      set = set_of (op0, prev);

      if (set
          && (GET_CODE (set) != SET
              || !rtx_equal_p (SET_DEST (set), op0)))
        break;

      /* If this is setting OP0, get what it sets it to if it looks
         relevant.  */
      if (set)
        {
          machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
          REAL_VALUE_TYPE fsfv;
#endif

          /* ??? We may not combine comparisons done in a CCmode with
             comparisons not done in a CCmode.  This is to aid targets
             like Alpha that have an IEEE compliant EQ instruction, and
             a non-IEEE compliant BEQ instruction.  The use of CCmode is
             actually artificial, simply to prevent the combination, but
             should not affect other platforms.

             However, we must allow VOIDmode comparisons to match either
             CCmode or non-CCmode comparison, because some ports have
             modeless comparisons inside branch patterns.

             ??? This mode check should perhaps look more like the mode check
             in simplify_comparison in combine.  */
          if (((GET_MODE_CLASS (mode) == MODE_CC)
               != (GET_MODE_CLASS (inner_mode) == MODE_CC))
              && mode != VOIDmode
              && inner_mode != VOIDmode)
            break;
          if (GET_CODE (SET_SRC (set)) == COMPARE
              || (((code == NE
                    || (code == LT
                        && val_signbit_known_set_p (inner_mode,
                                                    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
                    || (code == LT
                        && SCALAR_FLOAT_MODE_P (inner_mode)
                        && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
                            REAL_VALUE_NEGATIVE (fsfv)))
#endif
                    ))
                  && COMPARISON_P (SET_SRC (set))))
            x = SET_SRC (set);
          else if (((code == EQ
                     || (code == GE
                         && val_signbit_known_set_p (inner_mode,
                                                     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
                     || (code == GE
                         && SCALAR_FLOAT_MODE_P (inner_mode)
                         && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
                             REAL_VALUE_NEGATIVE (fsfv)))
#endif
                     ))
                   && COMPARISON_P (SET_SRC (set)))
            {
              reverse_code = 1;
              x = SET_SRC (set);
            }
          else if ((code == EQ || code == NE)
                   && GET_CODE (SET_SRC (set)) == XOR)
            /* Handle sequences like:

               (set op0 (xor X Y))
               ...(eq|ne op0 (const_int 0))...

               in which case:

               (eq op0 (const_int 0)) reduces to (eq X Y)
               (ne op0 (const_int 0)) reduces to (ne X Y)

               This is the form used by MIPS16, for example.  */
            x = SET_SRC (set);
          else
            break;
        }

      else if (reg_set_p (op0, prev))
        /* If this sets OP0, but not directly, we have to give up.  */
        break;

      if (x)
        {
          /* If the caller is expecting the condition to be valid at INSN,
             make sure X doesn't change before INSN.  */
          if (valid_at_insn_p)
            if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
              break;
          if (COMPARISON_P (x))
            code = GET_CODE (x);
          if (reverse_code)
            {
              code = reversed_comparison_code (x, prev);
              if (code == UNKNOWN)
                return 0;
              reverse_code = 0;
            }

          op0 = XEXP (x, 0), op1 = XEXP (x, 1);
          if (earliest)
            *earliest = prev;
        }
    }

  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
      && CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
        = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
        {
        case LE:
          if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
            code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
          break;

        /* When cross-compiling, const_val might be sign-extended from
           BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
        case GE:
          if ((const_val & max_val)
              != (HOST_WIDE_INT_1U
                  << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
            code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
          break;

        case LEU:
          if (uconst_val < max_val)
            code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
          break;

        case GEU:
          if (uconst_val != 0)
            code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
          break;

        default:
          break;
        }
    }

  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}

/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare to a CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */
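/* A standard conditional jump has a PC set of the form

       (set (pc) (if_then_else COND (label_ref L) (pc)))

   or the same thing with the (label_ref ...) in the second arm, in
   which case the branch is taken when COND is false and the condition
   has to be reversed.  */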
rtx
get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
               int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
                                 allow_cc_mode, valid_at_insn_p);
}

/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */
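/* For example, on a target whose TARGET_MODE_REP_EXTENDED (SImode,
   DImode) hook returns SIGN_EXTEND (MIPS64 is one such target),
   NUM_SIGN_BIT_COPIES_IN_REP[DImode][SImode] ends up as 64 - 32 == 32:
   an SImode value is only valid in a DImode register when its high
   32 bits are copies of the sign bit.  */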
static void
init_num_sign_bit_copies_in_rep (void)
{
  machine_mode mode, in_mode;

  for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
       in_mode = GET_MODE_WIDER_MODE (mode))
    for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
         mode = GET_MODE_WIDER_MODE (mode))
      {
        machine_mode i;

        /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
           extends to the next widest mode.  */
        gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
                    || GET_MODE_WIDER_MODE (mode) == in_mode);

        /* We are in in_mode.  Count how many bits outside of mode
           have to be copies of the sign-bit.  */
        for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
          {
            machine_mode wider = GET_MODE_WIDER_MODE (i);

            if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
                /* We can only check sign-bit copies starting from the
                   top-bit.  In order to be able to check the bits we
                   have already seen we pretend that subsequent bits
                   have to be sign-bit copies too.  */
                || num_sign_bit_copies_in_rep [in_mode][mode])
              num_sign_bit_copies_in_rep [in_mode][mode]
                += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
          }
      }
}

/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */
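/* Continuing the example above, a DImode X with at least 32 + 1 == 33
   sign bit copies already satisfies SImode's representation
   requirement, so truncating it to SImode needs no machine insn.  */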
bool
truncated_to_mode (machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
          >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}

/* Return true if RTX code CODE has a single sequence of zero or more
   "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
   entry in that case.  */

static bool
setup_reg_subrtx_bounds (unsigned int code)
{
  const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
  unsigned int i = 0;
  for (; format[i] != 'e'; ++i)
    {
      if (!format[i])
        /* No subrtxes.  Leave start and count as 0.  */
        return true;
      if (format[i] == 'E' || format[i] == 'V')
        return false;
    }

  /* Record the sequence of 'e's.  */
  rtx_all_subrtx_bounds[code].start = i;
  do
    ++i;
  while (format[i] == 'e');
  rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
  /* rtl-iter.h relies on this.  */
  gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);

  for (; format[i]; ++i)
    if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
      return false;

  return true;
}

/* Initialize rtx_all_subrtx_bounds.  */
void
init_rtlanal (void)
{
  int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      if (!setup_reg_subrtx_bounds (i))
        rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
      if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
        rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
    }

  init_num_sign_bit_copies_in_rep ();
}

/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}

/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */
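/* E.g. 0x3f yields 6 (a six-bit field starting at bit 0), while 0x70
   yields -1 because the set bits do not start at the low end.  */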
int
low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
        return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}

/* Return the mode of MEM's address.  */

machine_mode
get_address_mode (rtx mem)
{
  machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return mode;
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}

/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.

   TODO: This function needs to be rewritten to work on any size
   integer.  */

void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
        {
          /* In this case the CONST_INT holds both target words.
             Extract the bits from it into two word-sized pieces.
             Sign extend each half to HOST_WIDE_INT.  */
          unsigned HOST_WIDE_INT low, high;
          unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
          unsigned bits_per_word = BITS_PER_WORD;

          /* Set sign_bit to the most significant bit of a word.  */
          sign_bit = 1;
          sign_bit <<= bits_per_word - 1;

          /* Set mask so that all bits of the word are set.  We could
             have used 1 << BITS_PER_WORD instead of basing the
             calculation on sign_bit.  However, on machines where
             HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
             compiler warning, even though the code would never be
             executed.  */
          mask = sign_bit << 1;
          mask--;

          /* Set sign_extend as any remaining bits.  */
          sign_extend = ~mask;

          /* Pick the lower word and sign-extend it.  */
          low = INTVAL (value);
          low &= mask;
          if (low & sign_bit)
            low |= sign_extend;

          /* Pick the higher word, shifted to the least significant
             bits, and sign-extend it.  */
          high = INTVAL (value);
          high >>= bits_per_word - 1;
          high >>= 1;
          high &= mask;
          if (high & sign_bit)
            high |= sign_extend;

          /* Store the words in the target machine order.  */
          if (WORDS_BIG_ENDIAN)
            {
              *first = GEN_INT (high);
              *second = GEN_INT (low);
            }
          else
            {
              *first = GEN_INT (low);
              *second = GEN_INT (high);
            }
        }
      else
        {
          /* The rule for using CONST_INT for a wider mode
             is that we regard the value as signed.
             So sign-extend it.  */
          rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
          if (WORDS_BIG_ENDIAN)
            {
              *first = high;
              *second = value;
            }
          else
            {
              *first = value;
              *second = high;
            }
        }
    }
  else if (GET_CODE (value) == CONST_WIDE_INT)
    {
      /* All of this is scary code and needs to be converted to
         properly work with any size integer.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
      if (WORDS_BIG_ENDIAN)
        {
          *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
          *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
        }
      else
        {
          *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
          *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
        }
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
        {
          *first = const0_rtx;
          *second = value;
        }
      else
        {
          *first = value;
          *second = const0_rtx;
        }
    }
  else if (GET_MODE (value) == VOIDmode
           /* This is the old way we did CONST_DOUBLE integers.  */
           || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
         So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
        {
          *first = GEN_INT (CONST_DOUBLE_HIGH (value));
          *second = GEN_INT (CONST_DOUBLE_LOW (value));
        }
      else
        {
          *first = GEN_INT (CONST_DOUBLE_LOW (value));
          *second = GEN_INT (CONST_DOUBLE_HIGH (value));
        }
    }
  else
    {
      long l[2];

      /* Note, this converts the REAL_VALUE_TYPE to the target's
         format, splits up the floating point double and outputs
         exactly 32 bits of it into each of l[0] and l[1] --
         not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);

      /* If 32 bits is an entire word for the target, but not for the host,
         then sign-extend on the host so that the number will look the same
         way on the host that it would on the target.  See for instance
         simplify_unary_operation.  The #if is needed to avoid compiler
         warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
        {
          if (l[0] & ((long) 1 << 31))
            l[0] |= ((unsigned long) (-1) << 32);
          if (l[1] & ((long) 1 << 31))
            l[1] |= ((unsigned long) (-1) << 32);
        }
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}

/* Return true if X is a sign_extract or zero_extract from the least
   significant bit.  */

static bool
lsb_bitfield_op_p (rtx x)
{
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
    {
      machine_mode mode = GET_MODE (XEXP (x, 0));
      HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));

      return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
    }
  return false;
}

/* Strip outer address "mutations" from LOC and return a pointer to the
   inner value.  If OUTER_CODE is nonnull, store the code of the innermost
   stripped expression there.

   "Mutations" either convert between modes or apply some kind of
   extension, truncation or alignment.  */
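/* E.g. for (and:DI (zero_extend:DI (reg:SI 90)) (const_int -8)), the
   returned pointer refers to (reg:SI 90) and *OUTER_CODE is set to
   ZERO_EXTEND, the innermost mutation stripped.  */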
rtx *
strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
  for (;;)
    {
      enum rtx_code code = GET_CODE (*loc);
      if (GET_RTX_CLASS (code) == RTX_UNARY)
        /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
           used to convert between pointer sizes.  */
        loc = &XEXP (*loc, 0);
      else if (lsb_bitfield_op_p (*loc))
        /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
           acts as a combined truncation and extension.  */
        loc = &XEXP (*loc, 0);
      else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
        /* (and ... (const_int -X)) is used to align to X bytes.  */
        loc = &XEXP (*loc, 0);
      else if (code == SUBREG
               && !OBJECT_P (SUBREG_REG (*loc))
               && subreg_lowpart_p (*loc))
        /* (subreg (operator ...) ...) inside an address is used for
           mode conversion too.  */
        loc = &SUBREG_REG (*loc);
      else
        return loc;
      if (outer_code)
        *outer_code = code;
    }
}

/* Return true if CODE applies some kind of scale.  The scaled value is
   the first operand and the scale is the second.  */

static bool
binary_scale_code_p (enum rtx_code code)
{
  return (code == MULT
          || code == ASHIFT
          /* Needed by ARM targets.  */
          || code == ASHIFTRT
          || code == LSHIFTRT
          || code == ROTATE
          || code == ROTATERT);
}

/* If *INNER can be interpreted as a base, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_base_term (rtx *inner)
{
  if (GET_CODE (*inner) == LO_SUM)
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}

/* If *INNER can be interpreted as an index, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_index_term (rtx *inner)
{
  /* At present, only constant scales are allowed.  */
  if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}

/* Set the segment part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->segment);
  info->segment = loc;
  info->segment_term = inner;
}

/* Set the base part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_base (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->base);
  info->base = loc;
  info->base_term = inner;
}

/* Set the index part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_index (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->index);
  info->index = loc;
  info->index_term = inner;
}

/* Set the displacement part of address INFO to LOC, given that INNER
   is the constant term.  */

static void
set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->disp);
  info->disp = loc;
  info->disp_term = inner;
}

/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
   rest of INFO accordingly.  */

static void
decompose_incdec_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  /* These addresses are only valid when the size of the addressed
     value is known.  */
  gcc_checking_assert (info->mode != VOIDmode);
}

/* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
   of INFO accordingly.  */

static void
decompose_automod_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  rtx plus = XEXP (*info->inner, 1);
  gcc_assert (GET_CODE (plus) == PLUS);

  info->base_term2 = &XEXP (plus, 0);
  gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));

  rtx *step = &XEXP (plus, 1);
  rtx *inner_step = strip_address_mutations (step);
  if (CONSTANT_P (*inner_step))
    set_address_disp (info, step, inner_step);
  else
    set_address_index (info, step, inner_step);
}

/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}

/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */
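/* For instance, a REG_POINTER or MEM_POINTER operand scores 2 (a
   confident base), a hard register that is valid only as an index
   scores -1, and a pseudo with no pointer information scores 0.  */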
6090 static int
6091 baseness (rtx x, machine_mode mode, addr_space_t as,
6092 enum rtx_code outer_code, enum rtx_code index_code)
6094 /* Believe *_POINTER unless the address shape requires otherwise. */
6095 if (REG_P (x) && REG_POINTER (x))
6096 return 2;
6097 if (MEM_P (x) && MEM_POINTER (x))
6098 return 2;
6100 if (REG_P (x) && HARD_REGISTER_P (x))
6102 /* X is a hard register. If it only fits one of the base
6103 or index classes, choose that interpretation. */
6104 int regno = REGNO (x);
6105 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
6106 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
6107 if (base_p != index_p)
6108 return base_p ? 1 : -1;
6110 return 0;
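/* For example (illustrative only), a register with REG_POINTER set
   scores 2, a hard register that the target accepts as a base but not
   as an index scores 1, and an unmarked register that could serve
   either role scores 0.  */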
/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
                                        ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
        set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
        set_address_segment (info, loc, inner);
      else
        {
          /* The only other possibilities are a base or an index.  */
          rtx *base_term = get_base_term (inner);
          rtx *index_term = get_index_term (inner);
          gcc_assert (base_term || index_term);
          if (!base_term)
            set_address_index (info, loc, index_term);
          else if (!index_term)
            set_address_base (info, loc, base_term);
          else
            {
              gcc_assert (base_term == index_term);
              ops[out] = loc;
              inner_ops[out] = base_term;
              ++out;
            }
        }
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
         the base.  If we were confident that another term was the base
         or index, treat the remaining operand as the other kind.  */
      if (!info->base)
        set_address_base (info, ops[0], inner_ops[0]);
      else
        set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
                    GET_CODE (*ops[1]))
          >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
                       GET_CODE (*ops[0])))
        {
          set_address_base (info, ops[0], inner_ops[0]);
          set_address_index (info, ops[1], inner_ops[1]);
        }
      else
        {
          set_address_base (info, ops[1], inner_ops[1]);
          set_address_index (info, ops[0], inner_ops[0]);
        }
    }
  else
    gcc_assert (out == 0);
}
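/* For example (illustrative only), decomposing

     (plus (plus (reg b) (mult (reg i) (const_int 4))) (const_int 8))

   records the MULT as the index (with (reg i) as the index term) and
   (const_int 8) as the displacement; since only (reg b) then remains
   unclassified, it becomes the base.  */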
/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
                   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}
/* Describe address operand LOC in INFO.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}
/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
                     MEM_ADDR_SPACE (x), MEM);
}
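/* A minimal usage sketch (illustrative only; example_mem_index_scale is
   a hypothetical helper, not part of this file's interface): decompose
   a MEM's address and query the scale applied to its index, if any.
   get_index_scale is declared in rtl.h and defined below.  */

static HOST_WIDE_INT ATTRIBUTE_UNUSED
example_mem_index_scale (rtx mem)
{
  struct address_info info;
  decompose_mem_address (&info, mem);
  /* The scale is only meaningful when an index term was found.  */
  return info.index ? get_index_scale (&info) : 0;
}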
/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
                     info->addr_outer_code);
}
/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}
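/* For example (illustrative only):

     (mult (reg i) (const_int 4))    -> scale 4
     (ashift (reg i) (const_int 2))  -> scale 4 (i.e. 1 << 2)
     (reg i)                         -> scale 1

   while any more complicated index yields 0.  */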
/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}
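/* For example (illustrative only), for an address of the form
   (plus (reg b) (mult (reg i) (const_int 4))) the index code is MULT,
   and for (plus (reg b) (const_int 8)) it is CONST_INT.  SCRATCH acts
   as a neutral placeholder when there is neither an index nor a
   displacement.  */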
/* Return true if RTL X contains a SYMBOL_REF.  */

bool
contains_symbol_ref_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter))
      return true;

  return false;
}
/* Return true if RTL X contains a SYMBOL_REF or LABEL_REF.  */

bool
contains_symbolic_reference_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
      return true;

  return false;
}
/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (const_rtx x)
{
  if (!targetm.have_tls)
    return false;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
      return true;
  return false;
}
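/* A minimal usage sketch (illustrative only; example_can_force_const_mem
   is a hypothetical predicate, not part of this file's interface): a
   backend might use tls_referenced_p when deciding whether a constant
   can simply be forced into the constant pool, since TLS references
   need a model-specific access sequence instead.  */

static bool ATTRIBUTE_UNUSED
example_can_force_const_mem (const_rtx x)
{
  return !tls_referenced_p (x);
}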