1 /* Alias analysis for GNU C
2 Copyright (C) 1997-2015 Free Software Foundation, Inc.
3 Contributed by John Carr (jfc@mit.edu).
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "hash-set.h"
27 #include "machmode.h"
28 #include "vec.h"
29 #include "double-int.h"
30 #include "input.h"
31 #include "alias.h"
32 #include "symtab.h"
33 #include "wide-int.h"
34 #include "inchash.h"
35 #include "tree.h"
36 #include "fold-const.h"
37 #include "varasm.h"
38 #include "hashtab.h"
39 #include "hard-reg-set.h"
40 #include "function.h"
41 #include "flags.h"
42 #include "statistics.h"
43 #include "real.h"
44 #include "fixed-value.h"
45 #include "insn-config.h"
46 #include "expmed.h"
47 #include "dojump.h"
48 #include "explow.h"
49 #include "calls.h"
50 #include "emit-rtl.h"
51 #include "stmt.h"
52 #include "expr.h"
53 #include "tm_p.h"
54 #include "regs.h"
55 #include "diagnostic-core.h"
56 #include "cselib.h"
57 #include "hash-map.h"
58 #include "langhooks.h"
59 #include "timevar.h"
60 #include "dumpfile.h"
61 #include "target.h"
62 #include "dominance.h"
63 #include "cfg.h"
64 #include "cfganal.h"
65 #include "predict.h"
66 #include "basic-block.h"
67 #include "df.h"
68 #include "tree-ssa-alias.h"
69 #include "internal-fn.h"
70 #include "gimple-expr.h"
71 #include "is-a.h"
72 #include "gimple.h"
73 #include "gimple-ssa.h"
74 #include "rtl-iter.h"
76 /* The aliasing API provided here solves related but different problems:
78 Say there exists (in C):
80 struct X {
81 struct Y y1;
82 struct Z z2;
83 } x1, *px1, *px2;
85 struct Y y2, *py;
86 struct Z z2, *pz;
89 py = &x1.y1;
90 px2 = &x1;
92 Consider the four questions:
94 Can a store to x1 interfere with px2->y1?
95 Can a store to x1 interfere with px2->z2?
96 Can a store to x1 change the value pointed to by py?
97 Can a store to x1 change the value pointed to by pz?
99 The answer to these questions can be yes, yes, yes, and maybe.
101 The first two questions can be answered with a simple examination
102 of the type system. If structure X contains a field of type Y then
103 a store through a pointer to an X can overwrite any field that is
104 contained (recursively) in an X (unless we know that px1 != px2).
106 The last two questions can be solved in the same way as the first
107 two questions but this is too conservative. The observation is
108 that in some cases we can know which (if any) fields are addressed
109 and whether those addresses are used in bad ways. This analysis may be
110 language specific. In C, arbitrary operations may be applied to
111 pointers. However, there is some indication that this may be too
112 conservative for some C++ types.
114 The pass ipa-type-escape does this analysis for the types whose
115 instances do not escape across the compilation boundary.
117 Historically in GCC, these two problems were combined and a single
118 data structure was used to represent the solution to these
119 problems. We now have two similar but different data structures.
120 The data structure to solve the last two questions is similar to
121 the first, but does not contain the fields whose addresses are never
122 taken. For types that do escape the compilation unit, the data
123 structures will have identical information. */
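/* An illustrative sketch (not part of the original comment): with the
   declarations above, the fourth answer is "maybe" because nothing
   shown makes pz point into x1, so the answer depends on what the
   analysis can prove about pz:

       pz = &z2;        pz points to the distinct top-level object z2,
                        so a store to x1 cannot change *pz.
       pz = &px1->z2;   pz may point into x1 (when px1 == &x1), so a
                        store to x1 may change *pz.

   A purely type-based answer would have to say "yes" in both cases. */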
126 /* The alias sets assigned to MEMs assist the back-end in determining
127 which MEMs can alias which other MEMs. In general, two MEMs in
128 different alias sets cannot alias each other, with one important
129 exception. Consider something like:
131 struct S { int i; double d; };
133 a store to an `S' can alias something of either type `int' or type
134 `double'. (However, a store to an `int' cannot alias a `double'
135 and vice versa.) We indicate this via a tree structure that looks
136 like:
137 struct S
138 /   \
139 /     \
140 |/_     _\|
141 int    double
143 (The arrows are directed and point downwards.)
144 In this situation we say the alias set for `struct S' is the
145 `superset' and that those for `int' and `double' are `subsets'.
147 To see whether two alias sets can point to the same memory, we must
148 see if either alias set is a subset of the other. We need not trace
149 past immediate descendants, however, since we propagate all
150 grandchildren up one level.
152 Alias set zero is implicitly a superset of all other alias sets.
153 However, there is no actual entry for alias set zero. It is an
154 error to attempt to explicitly construct a subset of zero. */
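/* A minimal sketch of how these relations answer queries, using
   hypothetical set numbers for the `struct S' example above: say
   `struct S' has alias set 2 and its children `int' and `double'
   have sets 1 and 3.  Then

       alias_sets_conflict_p (2, 1)   is nonzero: 1 is a child of 2
       alias_sets_conflict_p (1, 3)   is zero: distinct sets, neither
                                      a subset of the other
       alias_sets_conflict_p (0, 1)   is nonzero: set zero conflicts
                                      with everything

   Only the relations matter; the numbers are invented for the
   illustration.  */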
156 struct alias_set_traits : default_hashmap_traits
158 template<typename T>
159 static bool
160 is_empty (T &e)
162 return e.m_key == INT_MIN;
165 template<typename T>
166 static bool
167 is_deleted (T &e)
169 return e.m_key == (INT_MIN + 1);
172 template<typename T> static void mark_empty (T &e) { e.m_key = INT_MIN; }
174 template<typename T>
175 static void
176 mark_deleted (T &e)
178 e.m_key = INT_MIN + 1;
182 struct GTY(()) alias_set_entry_d {
183 /* The alias set number, as stored in MEM_ALIAS_SET. */
184 alias_set_type alias_set;
186 /* Nonzero if the set has a child of alias set zero: this effectively
187 makes this alias set the same as alias set zero. */
188 int has_zero_child;
190 /* The children of the alias set. These are not just the immediate
191 children, but, in fact, all descendants. So, if we have:
193 struct T { struct S s; float f; }
195 continuing our example above, the children here will be all of
196 `int', `double', `float', and `struct S'. */
197 hash_map<int, int, alias_set_traits> *children;
199 typedef struct alias_set_entry_d *alias_set_entry;
201 static int rtx_equal_for_memref_p (const_rtx, const_rtx);
202 static int memrefs_conflict_p (int, rtx, int, rtx, HOST_WIDE_INT);
203 static void record_set (rtx, const_rtx, void *);
204 static int base_alias_check (rtx, rtx, rtx, rtx, machine_mode,
205 machine_mode);
206 static rtx find_base_value (rtx);
207 static int mems_in_disjoint_alias_sets_p (const_rtx, const_rtx);
208 static alias_set_entry get_alias_set_entry (alias_set_type);
209 static tree decl_for_component_ref (tree);
210 static int write_dependence_p (const_rtx,
211 const_rtx, machine_mode, rtx,
212 bool, bool, bool);
214 static void memory_modified_1 (rtx, const_rtx, void *);
216 /* Set up all info needed to perform alias analysis on memory references. */
218 /* Returns the size in bytes of the mode of X. */
219 #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
221 /* Cap the number of passes we make over the insns propagating alias
222 information through set chains.
223 ??? 10 is a completely arbitrary choice. This should be based on the
224 maximum loop depth in the CFG, but we do not have this information
225 available (even if current_loops _is_ available). */
226 #define MAX_ALIAS_LOOP_PASSES 10
228 /* reg_base_value[N] gives an address to which register N is related.
229 If all sets after the first add or subtract to the current value
230 or otherwise modify it so it does not point to a different top level
231 object, reg_base_value[N] is equal to the address part of the source
232 of the first set.
234 A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF. ADDRESS
235 expressions represent three types of base:
237 1. incoming arguments. There is just one ADDRESS to represent all
238 arguments, since we do not know at this level whether accesses
239 based on different arguments can alias. The ADDRESS has id 0.
241 2. stack_pointer_rtx, frame_pointer_rtx, hard_frame_pointer_rtx
242 (if distinct from frame_pointer_rtx) and arg_pointer_rtx.
243 Each of these rtxes has a separate ADDRESS associated with it,
244 each with a negative id.
246 GCC is (and is required to be) precise in which register it
247 chooses to access a particular region of the stack. We can therefore
248 assume that accesses based on one of these rtxes do not alias
249 accesses based on another of these rtxes.
251 3. bases that are derived from malloc()ed memory (REG_NOALIAS).
252 Each such piece of memory has a separate ADDRESS associated
253 with it, each with an id greater than 0.
255 Accesses based on one ADDRESS do not alias accesses based on other
256 ADDRESSes. Accesses based on ADDRESSes in groups (2) and (3) do not
257 alias globals either; the ADDRESSes have Pmode to indicate this.
258 The ADDRESS in group (1) _may_ alias globals; it has VOIDmode to
259 indicate this. */
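/* An illustrative example (assumed, not from the original comment) of
   the three groups at the source level:

       int g;
       void f (int *arg)
       {
         int local;                      group 2: stack-based
         int *p = malloc (sizeof *p);    group 3: REG_NOALIAS base
         ...                             ARG itself: group 1
       }

   Accesses based on ARG may alias `g' (the group-1 ADDRESS has
   VOIDmode), while accesses to `local' and `*p' may not alias
   globals (their ADDRESSes have Pmode).  */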
261 static GTY(()) vec<rtx, va_gc> *reg_base_value;
262 static rtx *new_reg_base_value;
264 /* The single VOIDmode ADDRESS that represents all argument bases.
265 It has id 0. */
266 static GTY(()) rtx arg_base_value;
268 /* Used to allocate unique ids to each REG_NOALIAS ADDRESS. */
269 static int unique_id;
271 /* We preserve a copy of the old array around to reduce the amount of
272 garbage produced. About 8% of the garbage produced was attributed to
273 this array. */
274 static GTY((deletable)) vec<rtx, va_gc> *old_reg_base_value;
276 /* Values of XINT (address, 0) of Pmode ADDRESS rtxes for special
277 registers. */
278 #define UNIQUE_BASE_VALUE_SP -1
279 #define UNIQUE_BASE_VALUE_ARGP -2
280 #define UNIQUE_BASE_VALUE_FP -3
281 #define UNIQUE_BASE_VALUE_HFP -4
283 #define static_reg_base_value \
284 (this_target_rtl->x_static_reg_base_value)
286 #define REG_BASE_VALUE(X) \
287 (REGNO (X) < vec_safe_length (reg_base_value) \
288 ? (*reg_base_value)[REGNO (X)] : 0)
290 /* Vector indexed by N giving the initial (unchanging) value known for
291 pseudo-register N. This vector is initialized in init_alias_analysis,
292 and does not change until end_alias_analysis is called. */
293 static GTY(()) vec<rtx, va_gc> *reg_known_value;
295 /* Vector recording for each reg_known_value whether it is due to a
296 REG_EQUIV note. Future passes (viz., reload) may replace the
297 pseudo with the equivalent expression and so we account for the
298 dependences that would be introduced if that happens.
300 The REG_EQUIV notes created in assign_parms may mention the arg
301 pointer, and there are explicit insns in the RTL that modify the
302 arg pointer. Thus we must ensure that such insns don't get
303 scheduled across each other because that would invalidate the
304 REG_EQUIV notes. One could argue that the REG_EQUIV notes are
305 wrong, but solving the problem in the scheduler will likely give
306 better code, so we do it here. */
307 static sbitmap reg_known_equiv_p;
309 /* True when scanning insns from the start of the rtl to the
310 NOTE_INSN_FUNCTION_BEG note. */
311 static bool copying_arguments;
314 /* The vector used to store the various alias set entries. */
315 static GTY (()) vec<alias_set_entry, va_gc> *alias_sets;
317 /* Build a decomposed reference object for querying the alias-oracle
318 from the MEM rtx and store it in *REF.
319 Returns false if MEM is not suitable for the alias-oracle. */
321 static bool
322 ao_ref_from_mem (ao_ref *ref, const_rtx mem)
324 tree expr = MEM_EXPR (mem);
325 tree base;
327 if (!expr)
328 return false;
330 ao_ref_init (ref, expr);
332 /* Get the base of the reference and see if we have to reject or
333 adjust it. */
334 base = ao_ref_base (ref);
335 if (base == NULL_TREE)
336 return false;
338 /* The tree oracle doesn't like bases that are neither decls
339 nor indirect references of SSA names. */
340 if (!(DECL_P (base)
341 || (TREE_CODE (base) == MEM_REF
342 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
343 || (TREE_CODE (base) == TARGET_MEM_REF
344 && TREE_CODE (TMR_BASE (base)) == SSA_NAME)))
345 return false;
347 /* If this is a reference based on a partitioned decl replace the
348 base with a MEM_REF of the pointer representative we
349 created during stack slot partitioning. */
350 if (TREE_CODE (base) == VAR_DECL
351 && ! is_global_var (base)
352 && cfun->gimple_df->decls_to_pointers != NULL)
354 tree *namep = cfun->gimple_df->decls_to_pointers->get (base);
355 if (namep)
356 ref->base = build_simple_mem_ref (*namep);
359 ref->ref_alias_set = MEM_ALIAS_SET (mem);
361 /* If MEM_OFFSET or MEM_SIZE are unknown what we got from MEM_EXPR
362 is conservative, so trust it. */
363 if (!MEM_OFFSET_KNOWN_P (mem)
364 || !MEM_SIZE_KNOWN_P (mem))
365 return true;
367 /* If the base decl is a parameter we can have negative MEM_OFFSET in
368 case of promoted subregs on bigendian targets. Trust the MEM_EXPR
369 here. */
370 if (MEM_OFFSET (mem) < 0
371 && (MEM_SIZE (mem) + MEM_OFFSET (mem)) * BITS_PER_UNIT == ref->size)
372 return true;
374 /* Otherwise continue and refine size and offset we got from analyzing
375 MEM_EXPR by using MEM_SIZE and MEM_OFFSET. */
377 ref->offset += MEM_OFFSET (mem) * BITS_PER_UNIT;
378 ref->size = MEM_SIZE (mem) * BITS_PER_UNIT;
380 /* The MEM may extend into adjacent fields, so adjust max_size if
381 necessary. */
382 if (ref->max_size != -1
383 && ref->size > ref->max_size)
384 ref->max_size = ref->size;
386 /* If MEM_OFFSET and MEM_SIZE get us outside of the base object of
387 the MEM_EXPR punt. This happens for STRICT_ALIGNMENT targets a lot. */
388 if (MEM_EXPR (mem) != get_spill_slot_decl (false)
389 && (ref->offset < 0
390 || (DECL_P (ref->base)
391 && (DECL_SIZE (ref->base) == NULL_TREE
392 || TREE_CODE (DECL_SIZE (ref->base)) != INTEGER_CST
393 || wi::ltu_p (wi::to_offset (DECL_SIZE (ref->base)),
394 ref->offset + ref->size)))))
395 return false;
397 return true;
400 /* Query the alias-oracle on whether the two memory rtx X and MEM may
401 alias. If TBAA_P is set also apply TBAA. Returns true if the
402 two rtxen may alias, false otherwise. */
404 static bool
405 rtx_refs_may_alias_p (const_rtx x, const_rtx mem, bool tbaa_p)
407 ao_ref ref1, ref2;
409 if (!ao_ref_from_mem (&ref1, x)
410 || !ao_ref_from_mem (&ref2, mem))
411 return true;
413 return refs_may_alias_p_1 (&ref1, &ref2,
414 tbaa_p
415 && MEM_ALIAS_SET (x) != 0
416 && MEM_ALIAS_SET (mem) != 0);
419 /* Returns a pointer to the alias set entry for ALIAS_SET, if there is
420 such an entry, or NULL otherwise. */
422 static inline alias_set_entry
423 get_alias_set_entry (alias_set_type alias_set)
425 return (*alias_sets)[alias_set];
428 /* Returns nonzero if the alias sets for MEM1 and MEM2 are such that
429 the two MEMs cannot alias each other. */
431 static inline int
432 mems_in_disjoint_alias_sets_p (const_rtx mem1, const_rtx mem2)
434 return (flag_strict_aliasing
435 && ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1),
436 MEM_ALIAS_SET (mem2)));
439 /* Return true if the first alias set is a subset of the second. */
441 bool
442 alias_set_subset_of (alias_set_type set1, alias_set_type set2)
444 alias_set_entry ase;
446 /* Everything is a subset of the "aliases everything" set. */
447 if (set2 == 0)
448 return true;
450 /* Otherwise, check if set1 is a subset of set2. */
451 ase = get_alias_set_entry (set2);
452 if (ase != 0
453 && (ase->has_zero_child
454 || ase->children->get (set1)))
455 return true;
456 return false;
459 /* Return 1 if the two specified alias sets may conflict. */
462 alias_sets_conflict_p (alias_set_type set1, alias_set_type set2)
464 alias_set_entry ase;
466 /* The easy case. */
467 if (alias_sets_must_conflict_p (set1, set2))
468 return 1;
470 /* See if the first alias set is a subset of the second. */
471 ase = get_alias_set_entry (set1);
472 if (ase != 0
473 && ase->children->get (set2))
474 return 1;
476 /* Now do the same, but with the alias sets reversed. */
477 ase = get_alias_set_entry (set2);
478 if (ase != 0
479 && ase->children->get (set1))
480 return 1;
482 /* The two alias sets are distinct and neither one is the
483 child of the other. Therefore, they cannot conflict. */
484 return 0;
487 /* Return 1 if the two specified alias sets will always conflict. */
490 alias_sets_must_conflict_p (alias_set_type set1, alias_set_type set2)
492 if (set1 == 0 || set2 == 0 || set1 == set2)
493 return 1;
495 return 0;
498 /* Return 1 if any MEM object of type T1 will always conflict (using the
499 dependency routines in this file) with any MEM object of type T2.
500 This is used when allocating temporary storage. If T1 and/or T2 are
501 NULL_TREE, it means we know nothing about the storage. */
504 objects_must_conflict_p (tree t1, tree t2)
506 alias_set_type set1, set2;
508 /* If neither has a type specified, we don't know if they'll conflict
509 because we may be using them to store objects of various types, for
510 example the argument and local variables areas of inlined functions. */
511 if (t1 == 0 && t2 == 0)
512 return 0;
514 /* If they are the same type, they must conflict. */
515 if (t1 == t2
516 /* Likewise if both are volatile. */
517 || (t1 != 0 && TYPE_VOLATILE (t1) && t2 != 0 && TYPE_VOLATILE (t2)))
518 return 1;
520 set1 = t1 ? get_alias_set (t1) : 0;
521 set2 = t2 ? get_alias_set (t2) : 0;
523 /* We can't use alias_sets_conflict_p because we must make sure
524 that every subtype of t1 will conflict with every subtype of
525 t2 for which a pair of subobjects of these respective subtypes
526 overlaps on the stack. */
527 return alias_sets_must_conflict_p (set1, set2);
530 /* Return the outermost parent of component present in the chain of
531 component references handled by get_inner_reference in T with the
532 following property:
533 - the component is non-addressable, or
534 - the parent has alias set zero,
535 or NULL_TREE if no such parent exists. In the former cases, the alias
536 set of this parent is the alias set that must be used for T itself. */
538 tree
539 component_uses_parent_alias_set_from (const_tree t)
541 const_tree found = NULL_TREE;
543 while (handled_component_p (t))
545 switch (TREE_CODE (t))
547 case COMPONENT_REF:
548 if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1)))
549 found = t;
550 break;
552 case ARRAY_REF:
553 case ARRAY_RANGE_REF:
554 if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0))))
555 found = t;
556 break;
558 case REALPART_EXPR:
559 case IMAGPART_EXPR:
560 break;
562 case BIT_FIELD_REF:
563 case VIEW_CONVERT_EXPR:
564 /* Bitfields and casts are never addressable. */
565 found = t;
566 break;
568 default:
569 gcc_unreachable ();
572 if (get_alias_set (TREE_TYPE (TREE_OPERAND (t, 0))) == 0)
573 found = t;
575 t = TREE_OPERAND (t, 0);
578 if (found)
579 return TREE_OPERAND (found, 0);
581 return NULL_TREE;
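/* An illustrative sketch (assumed): if T is a reference wrapped in a
   VIEW_CONVERT_EXPR, e.g. the gimple equivalent of

       VIEW_CONVERT_EXPR<float>(i)      where `int i;'

   the converted component is never addressable, so the walk above
   records it in FOUND and returns the parent object `i'; the access
   is then given i's alias set rather than float's.  */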
585 /* Return whether the pointer-type T effective for aliasing may
586 access everything and thus the reference has to be assigned
587 alias-set zero. */
589 static bool
590 ref_all_alias_ptr_type_p (const_tree t)
592 return (TREE_CODE (TREE_TYPE (t)) == VOID_TYPE
593 || TYPE_REF_CAN_ALIAS_ALL (t));
596 /* Return the alias set for the memory pointed to by T, which may be
597 either a type or an expression. Return -1 if there is nothing
598 special about dereferencing T. */
600 static alias_set_type
601 get_deref_alias_set_1 (tree t)
603 /* All we care about is the type. */
604 if (! TYPE_P (t))
605 t = TREE_TYPE (t);
607 /* If we have an INDIRECT_REF via a void pointer, we don't
608 know anything about what that might alias. Likewise if the
609 pointer is marked that way. */
610 if (ref_all_alias_ptr_type_p (t))
611 return 0;
613 return -1;
616 /* Return the alias set for the memory pointed to by T, which may be
617 either a type or an expression. */
619 alias_set_type
620 get_deref_alias_set (tree t)
622 /* If we're not doing any alias analysis, just assume everything
623 aliases everything else. */
624 if (!flag_strict_aliasing)
625 return 0;
627 alias_set_type set = get_deref_alias_set_1 (t);
629 /* Fall back to the alias-set of the pointed-to type. */
630 if (set == -1)
632 if (! TYPE_P (t))
633 t = TREE_TYPE (t);
634 set = get_alias_set (TREE_TYPE (t));
637 return set;
640 /* Return the pointer-type relevant for TBAA purposes from the
641 memory reference tree *T or NULL_TREE in which case *T is
642 adjusted to point to the outermost component reference that
643 can be used for assigning an alias set. */
645 static tree
646 reference_alias_ptr_type_1 (tree *t)
648 tree inner;
650 /* Get the base object of the reference. */
651 inner = *t;
652 while (handled_component_p (inner))
654 /* If there is a VIEW_CONVERT_EXPR in the chain we cannot use
655 the type of any component references that wrap it to
656 determine the alias-set. */
657 if (TREE_CODE (inner) == VIEW_CONVERT_EXPR)
658 *t = TREE_OPERAND (inner, 0);
659 inner = TREE_OPERAND (inner, 0);
662 /* Handle pointer dereferences here, they can override the
663 alias-set. */
664 if (INDIRECT_REF_P (inner)
665 && ref_all_alias_ptr_type_p (TREE_TYPE (TREE_OPERAND (inner, 0))))
666 return TREE_TYPE (TREE_OPERAND (inner, 0));
667 else if (TREE_CODE (inner) == TARGET_MEM_REF)
668 return TREE_TYPE (TMR_OFFSET (inner));
669 else if (TREE_CODE (inner) == MEM_REF
670 && ref_all_alias_ptr_type_p (TREE_TYPE (TREE_OPERAND (inner, 1))))
671 return TREE_TYPE (TREE_OPERAND (inner, 1));
673 /* If the innermost reference is a MEM_REF that has a
674 conversion embedded treat it like a VIEW_CONVERT_EXPR above,
675 using the memory access type for determining the alias-set. */
676 if (TREE_CODE (inner) == MEM_REF
677 && (TYPE_MAIN_VARIANT (TREE_TYPE (inner))
678 != TYPE_MAIN_VARIANT
679 (TREE_TYPE (TREE_TYPE (TREE_OPERAND (inner, 1))))))
680 return TREE_TYPE (TREE_OPERAND (inner, 1));
682 /* Otherwise, pick up the outermost object that we could have
683 a pointer to. */
684 tree tem = component_uses_parent_alias_set_from (*t);
685 if (tem)
686 *t = tem;
688 return NULL_TREE;
691 /* Return the pointer-type relevant for TBAA purposes from the
692 gimple memory reference tree T. This is the type to be used for
693 the offset operand of MEM_REF or TARGET_MEM_REF replacements of T
694 and guarantees that get_alias_set will return the same alias
695 set for T and the replacement. */
697 tree
698 reference_alias_ptr_type (tree t)
700 tree ptype = reference_alias_ptr_type_1 (&t);
701 /* If there is a given pointer type for aliasing purposes, return it. */
702 if (ptype != NULL_TREE)
703 return ptype;
705 /* Otherwise build one from the outermost component reference we
706 may use. */
707 if (TREE_CODE (t) == MEM_REF
708 || TREE_CODE (t) == TARGET_MEM_REF)
709 return TREE_TYPE (TREE_OPERAND (t, 1));
710 else
711 return build_pointer_type (TYPE_MAIN_VARIANT (TREE_TYPE (t)));
714 /* Return whether the pointer-types T1 and T2 used to determine
715 two alias sets of two references will yield the same answer
716 from get_deref_alias_set. */
718 bool
719 alias_ptr_types_compatible_p (tree t1, tree t2)
721 if (TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))
722 return true;
724 if (ref_all_alias_ptr_type_p (t1)
725 || ref_all_alias_ptr_type_p (t2))
726 return false;
728 return (TYPE_MAIN_VARIANT (TREE_TYPE (t1))
729 == TYPE_MAIN_VARIANT (TREE_TYPE (t2)));
732 /* Return the alias set for T, which may be either a type or an
733 expression. Call language-specific routine for help, if needed. */
735 alias_set_type
736 get_alias_set (tree t)
738 alias_set_type set;
740 /* If we're not doing any alias analysis, just assume everything
741 aliases everything else. Also return 0 if this or its type is
742 an error. */
743 if (! flag_strict_aliasing || t == error_mark_node
744 || (! TYPE_P (t)
745 && (TREE_TYPE (t) == 0 || TREE_TYPE (t) == error_mark_node)))
746 return 0;
748 /* We can be passed either an expression or a type. This and the
749 language-specific routine may make mutually-recursive calls to each other
750 to figure out what to do. At each juncture, we see if this is a tree
751 that the language may need to handle specially. First handle things that
752 aren't types. */
753 if (! TYPE_P (t))
755 /* Give the language a chance to do something with this tree
756 before we look at it. */
757 STRIP_NOPS (t);
758 set = lang_hooks.get_alias_set (t);
759 if (set != -1)
760 return set;
762 /* Get the alias pointer-type to use or the outermost object
763 that we could have a pointer to. */
764 tree ptype = reference_alias_ptr_type_1 (&t);
765 if (ptype != NULL)
766 return get_deref_alias_set (ptype);
768 /* If we've already determined the alias set for a decl, just return
769 it. This is necessary for C++ anonymous unions, whose component
770 variables don't look like union members (boo!). */
771 if (TREE_CODE (t) == VAR_DECL
772 && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t)))
773 return MEM_ALIAS_SET (DECL_RTL (t));
775 /* Now all we care about is the type. */
776 t = TREE_TYPE (t);
779 /* Variant qualifiers don't affect the alias set, so get the main
780 variant. */
781 t = TYPE_MAIN_VARIANT (t);
783 /* Always use the canonical type as well. If this is a type that
784 requires structural comparisons to identify compatible types
785 use alias set zero. */
786 if (TYPE_STRUCTURAL_EQUALITY_P (t))
788 /* Allow the language to specify another alias set for this
789 type. */
790 set = lang_hooks.get_alias_set (t);
791 if (set != -1)
792 return set;
793 return 0;
796 t = TYPE_CANONICAL (t);
798 /* The canonical type should not require structural equality checks. */
799 gcc_checking_assert (!TYPE_STRUCTURAL_EQUALITY_P (t));
801 /* If this is a type with a known alias set, return it. */
802 if (TYPE_ALIAS_SET_KNOWN_P (t))
803 return TYPE_ALIAS_SET (t);
805 /* We don't want to set TYPE_ALIAS_SET for incomplete types. */
806 if (!COMPLETE_TYPE_P (t))
808 /* For arrays with unknown size the conservative answer is the
809 alias set of the element type. */
810 if (TREE_CODE (t) == ARRAY_TYPE)
811 return get_alias_set (TREE_TYPE (t));
813 /* But return zero as a conservative answer for incomplete types. */
814 return 0;
817 /* See if the language has special handling for this type. */
818 set = lang_hooks.get_alias_set (t);
819 if (set != -1)
820 return set;
822 /* There are no objects of FUNCTION_TYPE, so there's no point in
823 using up an alias set for them. (There are, of course, pointers
824 and references to functions, but that's different.) */
825 else if (TREE_CODE (t) == FUNCTION_TYPE || TREE_CODE (t) == METHOD_TYPE)
826 set = 0;
828 /* Unless the language specifies otherwise, let vector types alias
829 their components. This avoids some nasty type punning issues in
830 normal usage. And indeed lets vectors be treated more like an
831 array slice. */
832 else if (TREE_CODE (t) == VECTOR_TYPE)
833 set = get_alias_set (TREE_TYPE (t));
835 /* Unless the language specifies otherwise, treat array types the
836 same as their components. This avoids the asymmetry we get
837 through recording the components. Consider accessing a
838 character(kind=1) through a reference to a character(kind=1)[1:1].
839 Or consider if we want to assign integer(kind=4)[0:D.1387] and
840 integer(kind=4)[4] the same alias set or not.
841 Just be pragmatic here and make sure the array and its element
842 type get the same alias set assigned. */
843 else if (TREE_CODE (t) == ARRAY_TYPE && !TYPE_NONALIASED_COMPONENT (t))
844 set = get_alias_set (TREE_TYPE (t));
846 /* From the former common C and C++ langhook implementation:
848 Unfortunately, there is no canonical form of a pointer type.
849 In particular, if we have `typedef int I', then `int *', and
850 `I *' are different types. So, we have to pick a canonical
851 representative. We do this below.
853 Technically, this approach is actually more conservative than
854 it needs to be. In particular, `const int *' and `int *'
855 should be in different alias sets, according to the C and C++
856 standard, since their types are not the same, and so,
857 technically, an `int **' and `const int **' cannot point at
858 the same thing.
860 But, the standard is wrong. In particular, this code is
861 legal C++:
863 int *ip;
864 int **ipp = &ip;
865 const int* const* cipp = ipp;
866 And, it doesn't make sense for that to be legal unless you
867 can dereference IPP and CIPP. So, we ignore cv-qualifiers on
868 the pointed-to types. This issue has been reported to the
869 C++ committee.
871 In addition to the above canonicalization issue, with LTO
872 we should also canonicalize `T (*)[]' to `T *' avoiding
873 alias issues with pointer-to element types and pointer-to
874 array types.
876 Likewise we need to deal with the situation of incomplete
877 pointed-to types and make `*(struct X **)&a' and
878 `*(struct X {} **)&a' alias. Otherwise we will have to
879 guarantee that all pointer-to incomplete type variants
880 will be replaced by pointer-to complete type variants if
881 they are available.
883 With LTO the convenient situation of using `void *' to
884 access and store any pointer type will also become
885 more apparent (and `void *' is just another pointer-to
886 incomplete type). Assigning alias-set zero to `void *'
887 and all pointer-to incomplete types is not an appealing
888 solution. Assigning an effective alias-set zero only
889 affecting pointers might be - by recording proper subset
890 relationships of all pointer alias-sets.
892 Pointer-to function types are another grey area which
893 needs caution. Globbing them all into one alias-set
894 or the above effective zero set would work.
896 For now just assign the same alias-set to all pointers.
897 That's simple and avoids all the above problems. */
898 else if (POINTER_TYPE_P (t)
899 && t != ptr_type_node)
900 set = get_alias_set (ptr_type_node);
902 /* Otherwise make a new alias set for this type. */
903 else
905 /* Each canonical type gets its own alias set, so canonical types
906 shouldn't form a tree. It doesn't really matter for types
907 we handle specially above, so only check it where it possibly
908 would result in a bogus alias set. */
909 gcc_checking_assert (TYPE_CANONICAL (t) == t);
911 set = new_alias_set ();
914 TYPE_ALIAS_SET (t) = set;
916 /* If this is an aggregate type or a complex type, we must record any
917 component aliasing information. */
918 if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE)
919 record_component_aliases (t);
921 return set;
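/* For example (illustrative): because of the pointer globbing above,

       get_alias_set (build_pointer_type (integer_type_node))
       get_alias_set (build_pointer_type (double_type_node))

   both return the alias set of ptr_type_node, so `int *' and
   `double *' lvalues are treated as potentially aliasing.  */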
924 /* Return a brand-new alias set. */
926 alias_set_type
927 new_alias_set (void)
929 if (flag_strict_aliasing)
931 if (alias_sets == 0)
932 vec_safe_push (alias_sets, (alias_set_entry) 0);
933 vec_safe_push (alias_sets, (alias_set_entry) 0);
934 return alias_sets->length () - 1;
936 else
937 return 0;
940 /* Indicate that things in SUBSET can alias things in SUPERSET, but that
941 not everything that aliases SUPERSET also aliases SUBSET. For example,
942 in C, a store to an `int' can alias a load of a structure containing an
943 `int', and vice versa. But it can't alias a load of a 'double' member
944 of the same structure. Here, the structure would be the SUPERSET and
945 `int' the SUBSET. This relationship is also described in the comment at
946 the beginning of this file.
948 This function should be called only once per SUPERSET/SUBSET pair.
950 It is illegal for SUPERSET to be zero; everything is implicitly a
951 subset of alias set zero. */
953 void
954 record_alias_subset (alias_set_type superset, alias_set_type subset)
956 alias_set_entry superset_entry;
957 alias_set_entry subset_entry;
959 /* It is possible in complex type situations for both sets to be the same,
960 in which case we can ignore this operation. */
961 if (superset == subset)
962 return;
964 gcc_assert (superset);
966 superset_entry = get_alias_set_entry (superset);
967 if (superset_entry == 0)
969 /* Create an entry for the SUPERSET, so that we have a place to
970 attach the SUBSET. */
971 superset_entry = ggc_cleared_alloc<alias_set_entry_d> ();
972 superset_entry->alias_set = superset;
973 superset_entry->children
974 = hash_map<int, int, alias_set_traits>::create_ggc (64);
975 superset_entry->has_zero_child = 0;
976 (*alias_sets)[superset] = superset_entry;
979 if (subset == 0)
980 superset_entry->has_zero_child = 1;
981 else
983 subset_entry = get_alias_set_entry (subset);
984 /* If there is an entry for the subset, enter all of its children
985 (if they are not already present) as children of the SUPERSET. */
986 if (subset_entry)
988 if (subset_entry->has_zero_child)
989 superset_entry->has_zero_child = 1;
991 hash_map<int, int, alias_set_traits>::iterator iter
992 = subset_entry->children->begin ();
993 for (; iter != subset_entry->children->end (); ++iter)
994 superset_entry->children->put ((*iter).first, (*iter).second);
997 /* Enter the SUBSET itself as a child of the SUPERSET. */
998 superset_entry->children->put (subset, 0);
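/* A usage sketch: continuing the `struct S { int i; double d; }'
   example from the head of this file, recording the component
   aliases for S effectively performs

       record_alias_subset (get_alias_set (s_type), get_alias_set (int_type));
       record_alias_subset (get_alias_set (s_type), get_alias_set (double_type));

   where s_type, int_type and double_type stand for the respective
   type trees (names invented for the illustration).  */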
1002 /* Record that component types of TYPE, if any, are part of that type for
1003 aliasing purposes. For record types, we only record component types
1004 for fields that are not marked non-addressable. For array types, we
1005 only record the component type if it is not marked non-aliased. */
1007 void
1008 record_component_aliases (tree type)
1010 alias_set_type superset = get_alias_set (type);
1011 tree field;
1013 if (superset == 0)
1014 return;
1016 switch (TREE_CODE (type))
1018 case RECORD_TYPE:
1019 case UNION_TYPE:
1020 case QUAL_UNION_TYPE:
1021 for (field = TYPE_FIELDS (type); field != 0; field = DECL_CHAIN (field))
1022 if (TREE_CODE (field) == FIELD_DECL && !DECL_NONADDRESSABLE_P (field))
1023 record_alias_subset (superset, get_alias_set (TREE_TYPE (field)));
1024 break;
1026 case COMPLEX_TYPE:
1027 record_alias_subset (superset, get_alias_set (TREE_TYPE (type)));
1028 break;
1030 /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
1031 element type. */
1033 default:
1034 break;
1038 /* Allocate an alias set for use in storing and reading from the varargs
1039 spill area. */
1041 static GTY(()) alias_set_type varargs_set = -1;
1043 alias_set_type
1044 get_varargs_alias_set (void)
1046 #if 1
1047 /* We now lower VA_ARG_EXPR, and there's currently no way to attach the
1048 varargs alias set to an INDIRECT_REF (FIXME!), so we can't
1049 consistently use the varargs alias set for loads from the varargs
1050 area. So don't use it anywhere. */
1051 return 0;
1052 #else
1053 if (varargs_set == -1)
1054 varargs_set = new_alias_set ();
1056 return varargs_set;
1057 #endif
1060 /* Likewise, but used for the fixed portions of the frame, e.g., register
1061 save areas. */
1063 static GTY(()) alias_set_type frame_set = -1;
1065 alias_set_type
1066 get_frame_alias_set (void)
1068 if (frame_set == -1)
1069 frame_set = new_alias_set ();
1071 return frame_set;
1074 /* Create a new, unique base with id ID. */
1076 static rtx
1077 unique_base_value (HOST_WIDE_INT id)
1079 return gen_rtx_ADDRESS (Pmode, id);
1082 /* Return true if accesses based on any other base value cannot alias
1083 those based on X. */
1085 static bool
1086 unique_base_value_p (rtx x)
1088 return GET_CODE (x) == ADDRESS && GET_MODE (x) == Pmode;
1091 /* Return true if X is known to be a base value. */
1093 static bool
1094 known_base_value_p (rtx x)
1096 switch (GET_CODE (x))
1098 case LABEL_REF:
1099 case SYMBOL_REF:
1100 return true;
1102 case ADDRESS:
1103 /* Arguments may or may not be bases; we don't know for sure. */
1104 return GET_MODE (x) != VOIDmode;
1106 default:
1107 return false;
1111 /* Inside SRC, the source of a SET, find a base address. */
1113 static rtx
1114 find_base_value (rtx src)
1116 unsigned int regno;
1118 #if defined (FIND_BASE_TERM)
1119 /* Try machine-dependent ways to find the base term. */
1120 src = FIND_BASE_TERM (src);
1121 #endif
1123 switch (GET_CODE (src))
1125 case SYMBOL_REF:
1126 case LABEL_REF:
1127 return src;
1129 case REG:
1130 regno = REGNO (src);
1131 /* At the start of a function, argument registers have known base
1132 values which may be lost later. Returning an ADDRESS
1133 expression here allows optimization based on argument values
1134 even when the argument registers are used for other purposes. */
1135 if (regno < FIRST_PSEUDO_REGISTER && copying_arguments)
1136 return new_reg_base_value[regno];
1138 /* If a pseudo has a known base value, return it. Do not do this
1139 for non-fixed hard regs since it can result in a circular
1140 dependency chain for registers which have values at function entry.
1142 The test above is not sufficient because the scheduler may move
1143 a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN. */
1144 if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno])
1145 && regno < vec_safe_length (reg_base_value))
1147 /* If we're inside init_alias_analysis, use new_reg_base_value
1148 to reduce the number of relaxation iterations. */
1149 if (new_reg_base_value && new_reg_base_value[regno]
1150 && DF_REG_DEF_COUNT (regno) == 1)
1151 return new_reg_base_value[regno];
1153 if ((*reg_base_value)[regno])
1154 return (*reg_base_value)[regno];
1157 return 0;
1159 case MEM:
1160 /* Check for an argument passed in memory. Only record in the
1161 copying-arguments block; it is too hard to track changes
1162 otherwise. */
1163 if (copying_arguments
1164 && (XEXP (src, 0) == arg_pointer_rtx
1165 || (GET_CODE (XEXP (src, 0)) == PLUS
1166 && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx)))
1167 return arg_base_value;
1168 return 0;
1170 case CONST:
1171 src = XEXP (src, 0);
1172 if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS)
1173 break;
1175 /* ... fall through ... */
1177 case PLUS:
1178 case MINUS:
1180 rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1);
1182 /* If either operand is a REG that is a known pointer, then it
1183 is the base. */
1184 if (REG_P (src_0) && REG_POINTER (src_0))
1185 return find_base_value (src_0);
1186 if (REG_P (src_1) && REG_POINTER (src_1))
1187 return find_base_value (src_1);
1189 /* If either operand is a REG, then see if we already have
1190 a known value for it. */
1191 if (REG_P (src_0))
1193 temp = find_base_value (src_0);
1194 if (temp != 0)
1195 src_0 = temp;
1198 if (REG_P (src_1))
1200 temp = find_base_value (src_1);
1201 if (temp != 0)
1202 src_1 = temp;
1205 /* If either base is a named object or a special address
1206 (like an argument or stack reference), then use it for the
1207 base term. */
1208 if (src_0 != 0 && known_base_value_p (src_0))
1209 return src_0;
1211 if (src_1 != 0 && known_base_value_p (src_1))
1212 return src_1;
1214 /* Guess which operand is the base address:
1215 If either operand is a symbol, then it is the base. If
1216 either operand is a CONST_INT, then the other is the base. */
1217 if (CONST_INT_P (src_1) || CONSTANT_P (src_0))
1218 return find_base_value (src_0);
1219 else if (CONST_INT_P (src_0) || CONSTANT_P (src_1))
1220 return find_base_value (src_1);
1222 return 0;
1225 case LO_SUM:
1226 /* The standard form is (lo_sum reg sym) so look only at the
1227 second operand. */
1228 return find_base_value (XEXP (src, 1));
1230 case AND:
1231 /* If the second operand is constant set the base
1232 address to the first operand. */
1233 if (CONST_INT_P (XEXP (src, 1)) && INTVAL (XEXP (src, 1)) != 0)
1234 return find_base_value (XEXP (src, 0));
1235 return 0;
1237 case TRUNCATE:
1238 /* As we do not know which address space the pointer is referring to, we can
1239 handle this only if the target does not support different pointer or
1240 address modes depending on the address space. */
1241 if (!target_default_pointer_address_modes_p ())
1242 break;
1243 if (GET_MODE_SIZE (GET_MODE (src)) < GET_MODE_SIZE (Pmode))
1244 break;
1245 /* Fall through. */
1246 case HIGH:
1247 case PRE_INC:
1248 case PRE_DEC:
1249 case POST_INC:
1250 case POST_DEC:
1251 case PRE_MODIFY:
1252 case POST_MODIFY:
1253 return find_base_value (XEXP (src, 0));
1255 case ZERO_EXTEND:
1256 case SIGN_EXTEND: /* used for NT/Alpha pointers */
1257 /* As we do not know which address space the pointer is referring to, we can
1258 handle this only if the target does not support different pointer or
1259 address modes depending on the address space. */
1260 if (!target_default_pointer_address_modes_p ())
1261 break;
1264 rtx temp = find_base_value (XEXP (src, 0));
1266 if (temp != 0 && CONSTANT_P (temp))
1267 temp = convert_memory_address (Pmode, temp);
1269 return temp;
1272 default:
1273 break;
1276 return 0;
1279 /* Called from init_alias_analysis indirectly through note_stores,
1280 or directly if DEST is a register with a REG_NOALIAS note attached.
1281 SET is null in the latter case. */
1283 /* While scanning insns to find base values, reg_seen[N] is nonzero if
1284 register N has been set in this function. */
1285 static sbitmap reg_seen;
1287 static void
1288 record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
1290 unsigned regno;
1291 rtx src;
1292 int n;
1294 if (!REG_P (dest))
1295 return;
1297 regno = REGNO (dest);
1299 gcc_checking_assert (regno < reg_base_value->length ());
1301 n = REG_NREGS (dest);
1302 if (n != 1)
1304 while (--n >= 0)
1306 bitmap_set_bit (reg_seen, regno + n);
1307 new_reg_base_value[regno + n] = 0;
1309 return;
1312 if (set)
1314 /* A CLOBBER wipes out any old value but does not prevent a previously
1315 unset register from acquiring a base address (i.e. reg_seen is not
1316 set). */
1317 if (GET_CODE (set) == CLOBBER)
1319 new_reg_base_value[regno] = 0;
1320 return;
1322 src = SET_SRC (set);
1324 else
1326 /* There's a REG_NOALIAS note against DEST. */
1327 if (bitmap_bit_p (reg_seen, regno))
1329 new_reg_base_value[regno] = 0;
1330 return;
1332 bitmap_set_bit (reg_seen, regno);
1333 new_reg_base_value[regno] = unique_base_value (unique_id++);
1334 return;
1337 /* If this is not the first set of REGNO, see whether the new value
1338 is related to the old one. There are two cases of interest:
1340 (1) The register might be assigned an entirely new value
1341 that has the same base term as the original set.
1343 (2) The set might be a simple self-modification that
1344 cannot change REGNO's base value.
1346 If neither case holds, reject the original base value as invalid.
1347 Note that the following situation is not detected:
1349 extern int x, y; int *p = &x; p += (&y-&x);
1351 ANSI C does not allow computing the difference of addresses
1352 of distinct top level objects. */
1353 if (new_reg_base_value[regno] != 0
1354 && find_base_value (src) != new_reg_base_value[regno])
1355 switch (GET_CODE (src))
1357 case LO_SUM:
1358 case MINUS:
1359 if (XEXP (src, 0) != dest && XEXP (src, 1) != dest)
1360 new_reg_base_value[regno] = 0;
1361 break;
1362 case PLUS:
1363 /* If the value we add in the PLUS is also a valid base value,
1364 this might be the actual base value, and the original value
1365 an index. */
1367 rtx other = NULL_RTX;
1369 if (XEXP (src, 0) == dest)
1370 other = XEXP (src, 1);
1371 else if (XEXP (src, 1) == dest)
1372 other = XEXP (src, 0);
1374 if (! other || find_base_value (other))
1375 new_reg_base_value[regno] = 0;
1376 break;
1378 case AND:
1379 if (XEXP (src, 0) != dest || !CONST_INT_P (XEXP (src, 1)))
1380 new_reg_base_value[regno] = 0;
1381 break;
1382 default:
1383 new_reg_base_value[regno] = 0;
1384 break;
1386 /* If this is the first set of a register, record the value. */
1387 else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
1388 && ! bitmap_bit_p (reg_seen, regno) && new_reg_base_value[regno] == 0)
1389 new_reg_base_value[regno] = find_base_value (src);
1391 bitmap_set_bit (reg_seen, regno);
1394 /* Return REG_BASE_VALUE for REGNO. The selective scheduler uses this to
1395 avoid using hard registers with non-null REG_BASE_VALUE for renaming. */
1397 get_reg_base_value (unsigned int regno)
1399 return (*reg_base_value)[regno];
1402 /* If a value is known for REGNO, return it. */
1405 get_reg_known_value (unsigned int regno)
1407 if (regno >= FIRST_PSEUDO_REGISTER)
1409 regno -= FIRST_PSEUDO_REGISTER;
1410 if (regno < vec_safe_length (reg_known_value))
1411 return (*reg_known_value)[regno];
1413 return NULL;
1416 /* Set it. */
1418 static void
1419 set_reg_known_value (unsigned int regno, rtx val)
1421 if (regno >= FIRST_PSEUDO_REGISTER)
1423 regno -= FIRST_PSEUDO_REGISTER;
1424 if (regno < vec_safe_length (reg_known_value))
1425 (*reg_known_value)[regno] = val;
1429 /* Similarly for reg_known_equiv_p. */
1431 bool
1432 get_reg_known_equiv_p (unsigned int regno)
1434 if (regno >= FIRST_PSEUDO_REGISTER)
1436 regno -= FIRST_PSEUDO_REGISTER;
1437 if (regno < vec_safe_length (reg_known_value))
1438 return bitmap_bit_p (reg_known_equiv_p, regno);
1440 return false;
1443 static void
1444 set_reg_known_equiv_p (unsigned int regno, bool val)
1446 if (regno >= FIRST_PSEUDO_REGISTER)
1448 regno -= FIRST_PSEUDO_REGISTER;
1449 if (regno < vec_safe_length (reg_known_value))
1451 if (val)
1452 bitmap_set_bit (reg_known_equiv_p, regno);
1453 else
1454 bitmap_clear_bit (reg_known_equiv_p, regno);
1460 /* Returns a canonical version of X, from the point of view of alias
1461 analysis. (For example, if X is a MEM whose address is a register,
1462 and the register has a known value (say a SYMBOL_REF), then a MEM
1463 whose address is the SYMBOL_REF is returned.) */
1466 canon_rtx (rtx x)
1468 /* Recursively look for equivalences. */
1469 if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1471 rtx t = get_reg_known_value (REGNO (x));
1472 if (t == x)
1473 return x;
1474 if (t)
1475 return canon_rtx (t);
1478 if (GET_CODE (x) == PLUS)
1480 rtx x0 = canon_rtx (XEXP (x, 0));
1481 rtx x1 = canon_rtx (XEXP (x, 1));
1483 if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
1485 if (CONST_INT_P (x0))
1486 return plus_constant (GET_MODE (x), x1, INTVAL (x0));
1487 else if (CONST_INT_P (x1))
1488 return plus_constant (GET_MODE (x), x0, INTVAL (x1));
1489 return gen_rtx_PLUS (GET_MODE (x), x0, x1);
1493 /* This gives us much better alias analysis when called from
1494 the loop optimizer. Note we want to leave the original
1495 MEM alone, but need to return the canonicalized MEM with
1496 all the flags with their original values. */
1497 else if (MEM_P (x))
1498 x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0)));
1500 return x;
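/* For example (illustrative): if pseudo 100 has the known value
   (symbol_ref "x"), then

       canon_rtx ((mem:SI (reg:SI 100)))

   returns a MEM whose address is the SYMBOL_REF, while

       canon_rtx ((plus:SI (reg:SI 100) (const_int 4)))

   folds through plus_constant to the constant address x+4.  */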
1503 /* Return 1 if X and Y are identical-looking rtx's.
1504 Expect that X and Y have already been canonicalized.
1506 We use the data in reg_known_value above to see if two registers with
1507 different numbers are, in fact, equivalent. */
1509 static int
1510 rtx_equal_for_memref_p (const_rtx x, const_rtx y)
1512 int i;
1513 int j;
1514 enum rtx_code code;
1515 const char *fmt;
1517 if (x == 0 && y == 0)
1518 return 1;
1519 if (x == 0 || y == 0)
1520 return 0;
1522 if (x == y)
1523 return 1;
1525 code = GET_CODE (x);
1526 /* Rtx's of different codes cannot be equal. */
1527 if (code != GET_CODE (y))
1528 return 0;
1530 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1531 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1533 if (GET_MODE (x) != GET_MODE (y))
1534 return 0;
1536 /* Some RTL can be compared without a recursive examination. */
1537 switch (code)
1539 case REG:
1540 return REGNO (x) == REGNO (y);
1542 case LABEL_REF:
1543 return LABEL_REF_LABEL (x) == LABEL_REF_LABEL (y);
1545 case SYMBOL_REF:
1546 return XSTR (x, 0) == XSTR (y, 0);
1548 case ENTRY_VALUE:
1549 /* This is magic, don't go through canonicalization et al. */
1550 return rtx_equal_p (ENTRY_VALUE_EXP (x), ENTRY_VALUE_EXP (y));
1552 case VALUE:
1553 CASE_CONST_UNIQUE:
1554 /* Pointer equality guarantees equality for these nodes. */
1555 return 0;
1557 default:
1558 break;
1561 /* canon_rtx knows how to handle plus. No need to canonicalize. */
1562 if (code == PLUS)
1563 return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
1564 && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
1565 || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
1566 && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
1567 /* For commutative operations, the RTXs match if the operands match in
1568 either order. Also handle the simple binary and unary cases without a loop. */
1569 if (COMMUTATIVE_P (x))
1571 rtx xop0 = canon_rtx (XEXP (x, 0));
1572 rtx yop0 = canon_rtx (XEXP (y, 0));
1573 rtx yop1 = canon_rtx (XEXP (y, 1));
1575 return ((rtx_equal_for_memref_p (xop0, yop0)
1576 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop1))
1577 || (rtx_equal_for_memref_p (xop0, yop1)
1578 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop0)));
1580 else if (NON_COMMUTATIVE_P (x))
1582 return (rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
1583 canon_rtx (XEXP (y, 0)))
1584 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)),
1585 canon_rtx (XEXP (y, 1))));
1587 else if (UNARY_P (x))
1588 return rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
1589 canon_rtx (XEXP (y, 0)));
1591 /* Compare the elements. If any pair of corresponding elements
1592 fails to match, return 0 for the whole thing.
1594 Limit cases to types which actually appear in addresses. */
1596 fmt = GET_RTX_FORMAT (code);
1597 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1599 switch (fmt[i])
1601 case 'i':
1602 if (XINT (x, i) != XINT (y, i))
1603 return 0;
1604 break;
1606 case 'E':
1607 /* Two vectors must have the same length. */
1608 if (XVECLEN (x, i) != XVECLEN (y, i))
1609 return 0;
1611 /* And the corresponding elements must match. */
1612 for (j = 0; j < XVECLEN (x, i); j++)
1613 if (rtx_equal_for_memref_p (canon_rtx (XVECEXP (x, i, j)),
1614 canon_rtx (XVECEXP (y, i, j))) == 0)
1615 return 0;
1616 break;
1618 case 'e':
1619 if (rtx_equal_for_memref_p (canon_rtx (XEXP (x, i)),
1620 canon_rtx (XEXP (y, i))) == 0)
1621 return 0;
1622 break;
1624 /* This can happen for asm operands. */
1625 case 's':
1626 if (strcmp (XSTR (x, i), XSTR (y, i)))
1627 return 0;
1628 break;
1630 /* This can happen for an asm which clobbers memory. */
1631 case '0':
1632 break;
1634 /* It is believed that rtx's at this level will never
1635 contain anything but integers and other rtx's,
1636 except within LABEL_REFs and SYMBOL_REFs. */
1637 default:
1638 gcc_unreachable ();
1641 return 1;
1644 static rtx
1645 find_base_term (rtx x)
1647 cselib_val *val;
1648 struct elt_loc_list *l, *f;
1649 rtx ret;
1651 #if defined (FIND_BASE_TERM)
1652 /* Try machine-dependent ways to find the base term. */
1653 x = FIND_BASE_TERM (x);
1654 #endif
1656 switch (GET_CODE (x))
1658 case REG:
1659 return REG_BASE_VALUE (x);
1661 case TRUNCATE:
1662 /* As we do not know which address space the pointer is referring to, we can
1663 handle this only if the target does not support different pointer or
1664 address modes depending on the address space. */
1665 if (!target_default_pointer_address_modes_p ())
1666 return 0;
1667 if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (Pmode))
1668 return 0;
1669 /* Fall through. */
1670 case HIGH:
1671 case PRE_INC:
1672 case PRE_DEC:
1673 case POST_INC:
1674 case POST_DEC:
1675 case PRE_MODIFY:
1676 case POST_MODIFY:
1677 return find_base_term (XEXP (x, 0));
1679 case ZERO_EXTEND:
1680 case SIGN_EXTEND: /* Used for Alpha/NT pointers */
1681 /* As we do not know which address space the pointer is referring to, we can
1682 handle this only if the target does not support different pointer or
1683 address modes depending on the address space. */
1684 if (!target_default_pointer_address_modes_p ())
1685 return 0;
1688 rtx temp = find_base_term (XEXP (x, 0));
1690 if (temp != 0 && CONSTANT_P (temp))
1691 temp = convert_memory_address (Pmode, temp);
1693 return temp;
1696 case VALUE:
1697 val = CSELIB_VAL_PTR (x);
1698 ret = NULL_RTX;
1700 if (!val)
1701 return ret;
1703 if (cselib_sp_based_value_p (val))
1704 return static_reg_base_value[STACK_POINTER_REGNUM];
1706 f = val->locs;
1707 /* Temporarily reset val->locs to avoid infinite recursion. */
1708 val->locs = NULL;
1710 for (l = f; l; l = l->next)
1711 if (GET_CODE (l->loc) == VALUE
1712 && CSELIB_VAL_PTR (l->loc)->locs
1713 && !CSELIB_VAL_PTR (l->loc)->locs->next
1714 && CSELIB_VAL_PTR (l->loc)->locs->loc == x)
1715 continue;
1716 else if ((ret = find_base_term (l->loc)) != 0)
1717 break;
1719 val->locs = f;
1720 return ret;
1722 case LO_SUM:
1723 /* The standard form is (lo_sum reg sym) so look only at the
1724 second operand. */
1725 return find_base_term (XEXP (x, 1));
1727 case CONST:
1728 x = XEXP (x, 0);
1729 if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS)
1730 return 0;
1731 /* Fall through. */
1732 case PLUS:
1733 case MINUS:
1735 rtx tmp1 = XEXP (x, 0);
1736 rtx tmp2 = XEXP (x, 1);
1738 /* This is a little bit tricky since we have to determine which of
1739 the two operands represents the real base address. Otherwise this
1740 routine may return the index register instead of the base register.
1742 That may cause us to believe no aliasing was possible, when in
1743 fact aliasing is possible.
1745 We use a few simple tests to guess the base register. Additional
1746 tests can certainly be added. For example, if one of the operands
1747 is a shift or multiply, then it must be the index register and the
1748 other operand is the base register. */
1750 if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2))
1751 return find_base_term (tmp2);
1753 /* If either operand is known to be a pointer, then prefer it
1754 to determine the base term. */
1755 if (REG_P (tmp1) && REG_POINTER (tmp1))
1757 else if (REG_P (tmp2) && REG_POINTER (tmp2))
1758 std::swap (tmp1, tmp2);
1759 /* If the second argument is a constant which has a base term, prefer
1760 it over the variable tmp1. See PR64025. */
1761 else if (CONSTANT_P (tmp2) && !CONST_INT_P (tmp2))
1762 std::swap (tmp1, tmp2);
1764 /* Go ahead and find the base term for both operands. If either base
1765 term is from a pointer or is a named object or a special address
1766 (like an argument or stack reference), then use it for the
1767 base term. */
1768 rtx base = find_base_term (tmp1);
1769 if (base != NULL_RTX
1770 && ((REG_P (tmp1) && REG_POINTER (tmp1))
1771 || known_base_value_p (base)))
1772 return base;
1773 base = find_base_term (tmp2);
1774 if (base != NULL_RTX
1775 && ((REG_P (tmp2) && REG_POINTER (tmp2))
1776 || known_base_value_p (base)))
1777 return base;
1779 /* We could not determine which of the two operands was the
1780 base register and which was the index. So we can determine
1781 nothing from the base alias check. */
1782 return 0;
1785 case AND:
1786 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) != 0)
1787 return find_base_term (XEXP (x, 0));
1788 return 0;
1790 case SYMBOL_REF:
1791 case LABEL_REF:
1792 return x;
1794 default:
1795 return 0;
1799 /* Return true if accesses to address X may alias accesses based
1800 on the stack pointer. */
1802 bool
1803 may_be_sp_based_p (rtx x)
1805 rtx base = find_base_term (x);
1806 return !base || base == static_reg_base_value[STACK_POINTER_REGNUM];
1809 /* Return 0 if the addresses X and Y are known to point to different
1810 objects, 1 if they might be pointers to the same object. */
1812 static int
1813 base_alias_check (rtx x, rtx x_base, rtx y, rtx y_base,
1814 machine_mode x_mode, machine_mode y_mode)
1816 /* If the address itself has no known base see if a known equivalent
1817 value has one. If either address still has no known base, nothing
1818 is known about aliasing. */
1819 if (x_base == 0)
1821 rtx x_c;
1823 if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x)
1824 return 1;
1826 x_base = find_base_term (x_c);
1827 if (x_base == 0)
1828 return 1;
1831 if (y_base == 0)
1833 rtx y_c;
1834 if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y)
1835 return 1;
1837 y_base = find_base_term (y_c);
1838 if (y_base == 0)
1839 return 1;
1842 /* If the base addresses are equal nothing is known about aliasing. */
1843 if (rtx_equal_p (x_base, y_base))
1844 return 1;
1846 /* The base addresses are different expressions. If they are not accessed
1847 via AND, there is no conflict. We can bring knowledge of object
1848 alignment into play here. For example, on alpha, "char a, b;" can
1849 alias one another, though "char a; long b;" cannot. AND addresses may
1850 implicitly alias surrounding objects; i.e. unaligned access in DImode
1851 via an AND address can alias all surrounding object types except those
1852 with alignment 8 or higher. */
1853 if (GET_CODE (x) == AND && GET_CODE (y) == AND)
1854 return 1;
1855 if (GET_CODE (x) == AND
1856 && (!CONST_INT_P (XEXP (x, 1))
1857 || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
1858 return 1;
1859 if (GET_CODE (y) == AND
1860 && (!CONST_INT_P (XEXP (y, 1))
1861 || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
1862 return 1;
1864 /* Differing symbols not accessed via AND never alias. */
1865 if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
1866 return 0;
1868 if (unique_base_value_p (x_base) || unique_base_value_p (y_base))
1869 return 0;
1871 return 1;
1874 /* Return TRUE if EXPR refers to a VALUE whose uid is greater than
1875 that of V. */
1877 static bool
1878 refs_newer_value_p (const_rtx expr, rtx v)
1880 int minuid = CSELIB_VAL_PTR (v)->uid;
1881 subrtx_iterator::array_type array;
1882 FOR_EACH_SUBRTX (iter, array, expr, NONCONST)
1883 if (GET_CODE (*iter) == VALUE && CSELIB_VAL_PTR (*iter)->uid > minuid)
1884 return true;
1885 return false;
1888 /* Convert the address X into something we can use. This is done by returning
1889 it unchanged unless it is a value; in the latter case we call cselib to get
1890 a more useful rtx. */
1892 rtx
1893 get_addr (rtx x)
1895 cselib_val *v;
1896 struct elt_loc_list *l;
1898 if (GET_CODE (x) != VALUE)
1899 return x;
1900 v = CSELIB_VAL_PTR (x);
1901 if (v)
1903 bool have_equivs = cselib_have_permanent_equivalences ();
1904 if (have_equivs)
1905 v = canonical_cselib_val (v);
1906 for (l = v->locs; l; l = l->next)
1907 if (CONSTANT_P (l->loc))
1908 return l->loc;
1909 for (l = v->locs; l; l = l->next)
1910 if (!REG_P (l->loc) && !MEM_P (l->loc)
1911 /* Avoid infinite recursion when potentially dealing with
1912 var-tracking artificial equivalences, by skipping the
1913 equivalences themselves, and not choosing expressions
1914 that refer to newer VALUEs. */
1915 && (!have_equivs
1916 || (GET_CODE (l->loc) != VALUE
1917 && !refs_newer_value_p (l->loc, x))))
1918 return l->loc;
1919 if (have_equivs)
1921 for (l = v->locs; l; l = l->next)
1922 if (REG_P (l->loc)
1923 || (GET_CODE (l->loc) != VALUE
1924 && !refs_newer_value_p (l->loc, x)))
1925 return l->loc;
1926 /* Return the canonical value. */
1927 return v->val_rtx;
1929 if (v->locs)
1930 return v->locs->loc;
1932 return x;
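/* Illustrative example (not from the original sources): if X is a
   cselib VALUE whose location list holds (const_int 0), (reg:DI r4)
   and a MEM, the first loop above returns the constant location, the
   preferred form; only if no constant or other suitable expression
   exists does the canonical VALUE itself get returned.  */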
1935 /* Return the address of the (N_REFS + 1)th memory reference to ADDR
1936 where SIZE is the size in bytes of the memory reference. If ADDR
1937 is not modified by the memory reference then ADDR is returned. */
1939 static rtx
1940 addr_side_effect_eval (rtx addr, int size, int n_refs)
1942 int offset = 0;
1944 switch (GET_CODE (addr))
1946 case PRE_INC:
1947 offset = (n_refs + 1) * size;
1948 break;
1949 case PRE_DEC:
1950 offset = -(n_refs + 1) * size;
1951 break;
1952 case POST_INC:
1953 offset = n_refs * size;
1954 break;
1955 case POST_DEC:
1956 offset = -n_refs * size;
1957 break;
1959 default:
1960 return addr;
1963 if (offset)
1964 addr = gen_rtx_PLUS (GET_MODE (addr), XEXP (addr, 0),
1965 gen_int_mode (offset, GET_MODE (addr)));
1966 else
1967 addr = XEXP (addr, 0);
1968 addr = canon_rtx (addr);
1970 return addr;
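/* Worked example (illustrative): with ADDR == (post_inc:DI (reg:DI r1)),
   SIZE == 8 and N_REFS == 1, the offset is n_refs * size == 8, so the
   result is the canonical form of (plus:DI (reg:DI r1) (const_int 8)),
   the address seen by the second reference.  */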
1973 /* Return TRUE if an object X sized at XSIZE bytes and another object
1974 Y sized at YSIZE bytes, starting C bytes after X, may overlap. If
1975 any of the sizes is zero, assume an overlap, otherwise use the
1976 absolute value of the sizes as the actual sizes. */
1978 static inline bool
1979 offset_overlap_p (HOST_WIDE_INT c, int xsize, int ysize)
1981 return (xsize == 0 || ysize == 0
1982 || (c >= 0
1983 ? (abs (xsize) > c)
1984 : (abs (ysize) > -c)));
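/* A short worked example (illustrative): with XSIZE == 4, YSIZE == 4
   and C == 2, Y starts 2 bytes into X and abs (xsize) > c holds, so
   the accesses overlap; with C == 4 they are merely adjacent and the
   function returns false.  */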
1987 /* Return one if X and Y (memory addresses) reference the
1988 same location in memory or if the references overlap.
1989 Return zero if they do not overlap; otherwise return
1990 minus one, in which case they might still reference the same location.
1992 C is an offset accumulator. When
1993 C is nonzero, we are testing aliases between X and Y + C.
1994 XSIZE is the size in bytes of the X reference,
1995 similarly YSIZE is the size in bytes for Y.
1996 Expect that canon_rtx has been already called for X and Y.
1998 If XSIZE or YSIZE is zero, we do not know the amount of memory being
1999 referenced (the reference was BLKmode), so make the most pessimistic
2000 assumptions.
2002 If XSIZE or YSIZE is negative, we may access memory outside the object
2003 being referenced as a side effect. This can happen when using AND to
2004 align memory references, as is done on the Alpha.
2006 It would be nice to notice that varying addresses cannot conflict with fp if no
2007 local variables had their addresses taken, but that's too hard now.
2009 ??? Contrary to the tree alias oracle this does not return
2010 one for X + non-constant and Y + non-constant when X and Y are equal.
2011 If that is fixed the TBAA hack for union type-punning can be removed. */
2013 static int
2014 memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
2016 if (GET_CODE (x) == VALUE)
2018 if (REG_P (y))
2020 struct elt_loc_list *l = NULL;
2021 if (CSELIB_VAL_PTR (x))
2022 for (l = canonical_cselib_val (CSELIB_VAL_PTR (x))->locs;
2023 l; l = l->next)
2024 if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, y))
2025 break;
2026 if (l)
2027 x = y;
2028 else
2029 x = get_addr (x);
2031 /* Don't call get_addr if y is the same VALUE. */
2032 else if (x != y)
2033 x = get_addr (x);
2035 if (GET_CODE (y) == VALUE)
2037 if (REG_P (x))
2039 struct elt_loc_list *l = NULL;
2040 if (CSELIB_VAL_PTR (y))
2041 for (l = canonical_cselib_val (CSELIB_VAL_PTR (y))->locs;
2042 l; l = l->next)
2043 if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, x))
2044 break;
2045 if (l)
2046 y = x;
2047 else
2048 y = get_addr (y);
2050 /* Don't call get_addr if x is the same VALUE. */
2051 else if (y != x)
2052 y = get_addr (y);
2054 if (GET_CODE (x) == HIGH)
2055 x = XEXP (x, 0);
2056 else if (GET_CODE (x) == LO_SUM)
2057 x = XEXP (x, 1);
2058 else
2059 x = addr_side_effect_eval (x, abs (xsize), 0);
2060 if (GET_CODE (y) == HIGH)
2061 y = XEXP (y, 0);
2062 else if (GET_CODE (y) == LO_SUM)
2063 y = XEXP (y, 1);
2064 else
2065 y = addr_side_effect_eval (y, abs (ysize), 0);
2067 if (rtx_equal_for_memref_p (x, y))
2069 return offset_overlap_p (c, xsize, ysize);
2072 /* This code used to check for conflicts involving stack references and
2073 globals but the base address alias code now handles these cases. */
2075 if (GET_CODE (x) == PLUS)
2077 /* The fact that X is canonicalized means that this
2078 PLUS rtx is canonicalized. */
2079 rtx x0 = XEXP (x, 0);
2080 rtx x1 = XEXP (x, 1);
2082 if (GET_CODE (y) == PLUS)
2084 /* The fact that Y is canonicalized means that this
2085 PLUS rtx is canonicalized. */
2086 rtx y0 = XEXP (y, 0);
2087 rtx y1 = XEXP (y, 1);
2089 if (rtx_equal_for_memref_p (x1, y1))
2090 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
2091 if (rtx_equal_for_memref_p (x0, y0))
2092 return memrefs_conflict_p (xsize, x1, ysize, y1, c);
2093 if (CONST_INT_P (x1))
2095 if (CONST_INT_P (y1))
2096 return memrefs_conflict_p (xsize, x0, ysize, y0,
2097 c - INTVAL (x1) + INTVAL (y1));
2098 else
2099 return memrefs_conflict_p (xsize, x0, ysize, y,
2100 c - INTVAL (x1));
2102 else if (CONST_INT_P (y1))
2103 return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
2105 return -1;
2107 else if (CONST_INT_P (x1))
2108 return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
2110 else if (GET_CODE (y) == PLUS)
2112 /* The fact that Y is canonicalized means that this
2113 PLUS rtx is canonicalized. */
2114 rtx y0 = XEXP (y, 0);
2115 rtx y1 = XEXP (y, 1);
2117 if (CONST_INT_P (y1))
2118 return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
2119 else
2120 return -1;
2123 if (GET_CODE (x) == GET_CODE (y))
2124 switch (GET_CODE (x))
2126 case MULT:
2128 /* Handle cases where we expect the second operands to be the
2129 same, and check only whether the first operand would conflict
2130 or not. */
2131 rtx x0, y0;
2132 rtx x1 = canon_rtx (XEXP (x, 1));
2133 rtx y1 = canon_rtx (XEXP (y, 1));
2134 if (! rtx_equal_for_memref_p (x1, y1))
2135 return -1;
2136 x0 = canon_rtx (XEXP (x, 0));
2137 y0 = canon_rtx (XEXP (y, 0));
2138 if (rtx_equal_for_memref_p (x0, y0))
2139 return offset_overlap_p (c, xsize, ysize);
2141 /* Can't properly adjust our sizes. */
2142 if (!CONST_INT_P (x1))
2143 return -1;
2144 xsize /= INTVAL (x1);
2145 ysize /= INTVAL (x1);
2146 c /= INTVAL (x1);
2147 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
2150 default:
2151 break;
2154 /* Deal with alignment ANDs by adjusting offset and size so as to
2155 cover the maximum range, without taking any previously known
2156 alignment into account. Make a size negative after such an
2157 adjustment, so that, if we end up with e.g. two SYMBOL_REFs, we
2158 assume a potential overlap, because they may end up in contiguous
2159 memory locations and the stricter-alignment access may span over
2160 part of both. */
2161 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1)))
2163 HOST_WIDE_INT sc = INTVAL (XEXP (x, 1));
2164 unsigned HOST_WIDE_INT uc = sc;
2165 if (sc < 0 && -uc == (uc & -uc))
2167 if (xsize > 0)
2168 xsize = -xsize;
2169 if (xsize)
2170 xsize += sc + 1;
2171 c -= sc + 1;
2172 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2173 ysize, y, c);
2176 if (GET_CODE (y) == AND && CONST_INT_P (XEXP (y, 1)))
2178 HOST_WIDE_INT sc = INTVAL (XEXP (y, 1));
2179 unsigned HOST_WIDE_INT uc = sc;
2180 if (sc < 0 && -uc == (uc & -uc))
2182 if (ysize > 0)
2183 ysize = -ysize;
2184 if (ysize)
2185 ysize += sc + 1;
2186 c += sc + 1;
2187 return memrefs_conflict_p (xsize, x,
2188 ysize, canon_rtx (XEXP (y, 0)), c);
2192 if (CONSTANT_P (x))
2194 if (CONST_INT_P (x) && CONST_INT_P (y))
2196 c += (INTVAL (y) - INTVAL (x));
2197 return offset_overlap_p (c, xsize, ysize);
2200 if (GET_CODE (x) == CONST)
2202 if (GET_CODE (y) == CONST)
2203 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2204 ysize, canon_rtx (XEXP (y, 0)), c);
2205 else
2206 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2207 ysize, y, c);
2209 if (GET_CODE (y) == CONST)
2210 return memrefs_conflict_p (xsize, x, ysize,
2211 canon_rtx (XEXP (y, 0)), c);
2213 /* Assume a potential overlap for symbolic addresses that went
2214 through alignment adjustments (i.e., that have negative
2215 sizes), because we can't know how far they are from each
2216 other. */
2217 if (CONSTANT_P (y))
2218 return (xsize < 0 || ysize < 0 || offset_overlap_p (c, xsize, ysize));
2220 return -1;
2223 return -1;
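/* Worked example for the routine above (illustrative): comparing a
   4-byte reference at (symbol_ref "a") with a 4-byte reference at
   (plus (symbol_ref "a") (const_int 8)) folds the constant into C,
   reaching offset_overlap_p (8, 4, 4), which is false, so 0 (no
   conflict) is returned; had the offset been 2 instead of 8, the
   result would have been 1.  */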
2226 /* Functions to compute memory dependencies.
2228 Since we process the insns in execution order, we can build tables
2229 to keep track of what registers are fixed (and not aliased), what registers
2230 are varying in known ways, and what registers are varying in unknown
2231 ways.
2233 If both memory references are volatile, then there must always be a
2234 dependence between the two references, since their order cannot be
2235 changed. A volatile and non-volatile reference can be interchanged
2236 though.
2238 We also must allow AND addresses, because they may generate accesses
2239 outside the object being referenced. This is used to generate aligned
2240 addresses from unaligned addresses, for instance, the alpha
2241 storeqi_unaligned pattern. */
2243 /* Read dependence: X is read after read in MEM takes place. There can
2244 only be a dependence here if both reads are volatile, or if either is
2245 an explicit barrier. */
2247 static bool
2248 read_dependence (const_rtx mem, const_rtx x)
2250 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2251 return true;
2252 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2253 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2254 return true;
2255 return false;
2258 /* Look at the bottom of the COMPONENT_REF list for a DECL, and return it. */
2260 static tree
2261 decl_for_component_ref (tree x)
2265 x = TREE_OPERAND (x, 0);
2267 while (x && TREE_CODE (x) == COMPONENT_REF);
2269 return x && DECL_P (x) ? x : NULL_TREE;
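/* Example (illustrative): for the C expression s.a.b, X is the
   COMPONENT_REF for ".b"; the loop strips component references until
   it reaches the VAR_DECL for "s", which is returned.  An ARRAY_REF
   at the bottom would yield NULL_TREE instead.  */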
2272 /* Walk up the COMPONENT_REF list in X and adjust *OFFSET to compensate
2273 for the offset of the field reference. *KNOWN_P says whether the
2274 offset is known. */
2276 static void
2277 adjust_offset_for_component_ref (tree x, bool *known_p,
2278 HOST_WIDE_INT *offset)
2280 if (!*known_p)
2281 return;
2284 tree xoffset = component_ref_field_offset (x);
2285 tree field = TREE_OPERAND (x, 1);
2286 if (TREE_CODE (xoffset) != INTEGER_CST)
2288 *known_p = false;
2289 return;
2292 offset_int woffset
2293 = (wi::to_offset (xoffset)
2294 + wi::lrshift (wi::to_offset (DECL_FIELD_BIT_OFFSET (field)),
2295 LOG2_BITS_PER_UNIT));
2296 if (!wi::fits_uhwi_p (woffset))
2298 *known_p = false;
2299 return;
2301 *offset += woffset.to_uhwi ();
2303 x = TREE_OPERAND (x, 0);
2305 while (x && TREE_CODE (x) == COMPONENT_REF);
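/* Worked example (illustrative, assuming a hypothetical struct
   layout): for a COMPONENT_REF reading a field placed 12 bytes into
   its record, the byte offset (component_ref_field_offset plus the
   byte part of DECL_FIELD_BIT_OFFSET) is 12, so *OFFSET grows by 12
   and *KNOWN_P stays true; a field at a non-constant offset clears
   *KNOWN_P instead.  */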
2308 /* Return nonzero if we can determine the exprs corresponding to memrefs
2309 X and Y and they do not overlap.
2310 If LOOP_INVARIANT is set, skip offset-based disambiguation. */
2312 static int
2313 nonoverlapping_memrefs_p (const_rtx x, const_rtx y, bool loop_invariant)
2315 tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y);
2316 rtx rtlx, rtly;
2317 rtx basex, basey;
2318 bool moffsetx_known_p, moffsety_known_p;
2319 HOST_WIDE_INT moffsetx = 0, moffsety = 0;
2320 HOST_WIDE_INT offsetx = 0, offsety = 0, sizex, sizey, tem;
2322 /* Unless both have exprs, we can't tell anything. */
2323 if (exprx == 0 || expry == 0)
2324 return 0;
2326 /* For spill-slot accesses make sure we have valid offsets. */
2327 if ((exprx == get_spill_slot_decl (false)
2328 && ! MEM_OFFSET_KNOWN_P (x))
2329 || (expry == get_spill_slot_decl (false)
2330 && ! MEM_OFFSET_KNOWN_P (y)))
2331 return 0;
2333 /* If the field reference test failed, look at the DECLs involved. */
2334 moffsetx_known_p = MEM_OFFSET_KNOWN_P (x);
2335 if (moffsetx_known_p)
2336 moffsetx = MEM_OFFSET (x);
2337 if (TREE_CODE (exprx) == COMPONENT_REF)
2339 tree t = decl_for_component_ref (exprx);
2340 if (! t)
2341 return 0;
2342 adjust_offset_for_component_ref (exprx, &moffsetx_known_p, &moffsetx);
2343 exprx = t;
2346 moffsety_known_p = MEM_OFFSET_KNOWN_P (y);
2347 if (moffsety_known_p)
2348 moffsety = MEM_OFFSET (y);
2349 if (TREE_CODE (expry) == COMPONENT_REF)
2351 tree t = decl_for_component_ref (expry);
2352 if (! t)
2353 return 0;
2354 adjust_offset_for_component_ref (expry, &moffsety_known_p, &moffsety);
2355 expry = t;
2358 if (! DECL_P (exprx) || ! DECL_P (expry))
2359 return 0;
2361 /* With invalid code we can end up storing into the constant pool.
2362 Bail out to avoid ICEing when creating RTL for this.
2363 See gfortran.dg/lto/20091028-2_0.f90. */
2364 if (TREE_CODE (exprx) == CONST_DECL
2365 || TREE_CODE (expry) == CONST_DECL)
2366 return 1;
2368 rtlx = DECL_RTL (exprx);
2369 rtly = DECL_RTL (expry);
2371 /* If either RTL is not a MEM, it must be a REG or CONCAT, meaning they
2372 can't overlap unless they are the same because we never reuse that part
2373 of the stack frame used for locals for spilled pseudos. */
2374 if ((!MEM_P (rtlx) || !MEM_P (rtly))
2375 && ! rtx_equal_p (rtlx, rtly))
2376 return 1;
2378 /* If we have MEMs referring to different address spaces (which can
2379 potentially overlap), we cannot easily tell from the addresses
2380 whether the references overlap. */
2381 if (MEM_P (rtlx) && MEM_P (rtly)
2382 && MEM_ADDR_SPACE (rtlx) != MEM_ADDR_SPACE (rtly))
2383 return 0;
2385 /* Get the base and offsets of both decls. If either is a register, we
2386 know both are and are the same, so use that as the base. The only way
2387 we can avoid overlap is if we can deduce that they are nonoverlapping
2388 pieces of that decl, which is very rare. */
2389 basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx;
2390 if (GET_CODE (basex) == PLUS && CONST_INT_P (XEXP (basex, 1)))
2391 offsetx = INTVAL (XEXP (basex, 1)), basex = XEXP (basex, 0);
2393 basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly;
2394 if (GET_CODE (basey) == PLUS && CONST_INT_P (XEXP (basey, 1)))
2395 offsety = INTVAL (XEXP (basey, 1)), basey = XEXP (basey, 0);
2397 /* If the bases are different, we know they do not overlap if both
2398 are constants or if one is a constant and the other a pointer into the
2399 stack frame. Otherwise a different base means we can't tell if they
2400 overlap or not. */
2401 if (! rtx_equal_p (basex, basey))
2402 return ((CONSTANT_P (basex) && CONSTANT_P (basey))
2403 || (CONSTANT_P (basex) && REG_P (basey)
2404 && REGNO_PTR_FRAME_P (REGNO (basey)))
2405 || (CONSTANT_P (basey) && REG_P (basex)
2406 && REGNO_PTR_FRAME_P (REGNO (basex))));
2408 /* Offset-based disambiguation is not appropriate for loop-invariant accesses. */
2409 if (loop_invariant)
2410 return 0;
2412 sizex = (!MEM_P (rtlx) ? (int) GET_MODE_SIZE (GET_MODE (rtlx))
2413 : MEM_SIZE_KNOWN_P (rtlx) ? MEM_SIZE (rtlx)
2414 : -1);
2415 sizey = (!MEM_P (rtly) ? (int) GET_MODE_SIZE (GET_MODE (rtly))
2416 : MEM_SIZE_KNOWN_P (rtly) ? MEM_SIZE (rtly)
2417 : -1);
2419 /* If we have an offset for either memref, it can update the values computed
2420 above. */
2421 if (moffsetx_known_p)
2422 offsetx += moffsetx, sizex -= moffsetx;
2423 if (moffsety_known_p)
2424 offsety += moffsety, sizey -= moffsety;
2426 /* If a memref has both a size and an offset, we can use the smaller size.
2427 We can't do this if the offset isn't known because we must view this
2428 memref as being anywhere inside the DECL's MEM. */
2429 if (MEM_SIZE_KNOWN_P (x) && moffsetx_known_p)
2430 sizex = MEM_SIZE (x);
2431 if (MEM_SIZE_KNOWN_P (y) && moffsety_known_p)
2432 sizey = MEM_SIZE (y);
2434 /* Put the values of the memref with the lower offset in X's values. */
2435 if (offsetx > offsety)
2437 tem = offsetx, offsetx = offsety, offsety = tem;
2438 tem = sizex, sizex = sizey, sizey = tem;
2441 /* If we don't know the size of the lower-offset value, we can't tell
2442 if they conflict. Otherwise, we do the test. */
2443 return sizex >= 0 && offsety >= offsetx + sizex;
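/* Worked example of the final test (illustrative): two references
   into the same DECL with OFFSETX == 0, SIZEX == 4 and OFFSETY == 8
   satisfy offsety >= offsetx + sizex, so they are disjoint and
   nonzero is returned; if the size of the lower reference is unknown
   (sizex < 0), the conservative answer is 0.  */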
2446 /* Helper for true_dependence and canon_true_dependence.
2447 Checks for true dependence: X is read after store in MEM takes place.
2449 If MEM_CANONICALIZED is FALSE, then X_ADDR and MEM_ADDR should be
2450 NULL_RTX, and the canonical addresses of MEM and X are both computed
2451 here. If MEM_CANONICALIZED, then MEM must be already canonicalized.
2453 If X_ADDR is non-NULL, it is used in preference to XEXP (x, 0).
2455 Returns 1 if there is a true dependence, 0 otherwise. */
2457 static int
2458 true_dependence_1 (const_rtx mem, machine_mode mem_mode, rtx mem_addr,
2459 const_rtx x, rtx x_addr, bool mem_canonicalized)
2461 rtx true_mem_addr;
2462 rtx base;
2463 int ret;
2465 gcc_checking_assert (mem_canonicalized ? (mem_addr != NULL_RTX)
2466 : (mem_addr == NULL_RTX && x_addr == NULL_RTX));
2468 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2469 return 1;
2471 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2472 This is used in epilogue deallocation functions, and in cselib. */
2473 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2474 return 1;
2475 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2476 return 1;
2477 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2478 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2479 return 1;
2481 if (! x_addr)
2482 x_addr = XEXP (x, 0);
2483 x_addr = get_addr (x_addr);
2485 if (! mem_addr)
2487 mem_addr = XEXP (mem, 0);
2488 if (mem_mode == VOIDmode)
2489 mem_mode = GET_MODE (mem);
2491 true_mem_addr = get_addr (mem_addr);
2493 /* Read-only memory is by definition never modified, and therefore can't
2494 conflict with anything. However, don't assume anything when AND
2495 addresses are involved and leave it to the code below to determine
2496 dependence. We don't expect to find read-only set on MEM, but
2497 stupid user tricks can produce them, so don't die. */
2498 if (MEM_READONLY_P (x)
2499 && GET_CODE (x_addr) != AND
2500 && GET_CODE (true_mem_addr) != AND)
2501 return 0;
2503 /* If we have MEMs referring to different address spaces (which can
2504 potentially overlap), we cannot easily tell from the addresses
2505 whether the references overlap. */
2506 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2507 return 1;
2509 base = find_base_term (x_addr);
2510 if (base && (GET_CODE (base) == LABEL_REF
2511 || (GET_CODE (base) == SYMBOL_REF
2512 && CONSTANT_POOL_ADDRESS_P (base))))
2513 return 0;
2515 rtx mem_base = find_base_term (true_mem_addr);
2516 if (! base_alias_check (x_addr, base, true_mem_addr, mem_base,
2517 GET_MODE (x), mem_mode))
2518 return 0;
2520 x_addr = canon_rtx (x_addr);
2521 if (!mem_canonicalized)
2522 mem_addr = canon_rtx (true_mem_addr);
2524 if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
2525 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
2526 return ret;
2528 if (mems_in_disjoint_alias_sets_p (x, mem))
2529 return 0;
2531 if (nonoverlapping_memrefs_p (mem, x, false))
2532 return 0;
2534 return rtx_refs_may_alias_p (x, mem, true);
2537 /* True dependence: X is read after store in MEM takes place. */
2539 int
2540 true_dependence (const_rtx mem, machine_mode mem_mode, const_rtx x)
2542 return true_dependence_1 (mem, mem_mode, NULL_RTX,
2543 x, NULL_RTX, /*mem_canonicalized=*/false);
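/* A minimal usage sketch (illustrative; can_hoist_load_p is a
   hypothetical helper, not part of this file): code motion across a
   store is safe only when no true dependence exists.  */
#if 0
static bool
can_hoist_load_p (const_rtx store_mem, const_rtx load_mem)
{
  /* A nonzero result means the load may observe the stored value.  */
  return !true_dependence (store_mem, GET_MODE (store_mem), load_mem);
}
#endif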
2546 /* Canonical true dependence: X is read after store in MEM takes place.
2547 Variant of true_dependence which assumes MEM has already been
2548 canonicalized (hence we no longer do that here).
2549 The mem_addr argument has been added, since true_dependence_1 computed
2550 this value prior to canonicalizing. */
2552 int
2553 canon_true_dependence (const_rtx mem, machine_mode mem_mode, rtx mem_addr,
2554 const_rtx x, rtx x_addr)
2556 return true_dependence_1 (mem, mem_mode, mem_addr,
2557 x, x_addr, /*mem_canonicalized=*/true);
2560 /* Returns nonzero if a write to X might alias a previous read from
2561 (or, if WRITEP is true, a write to) MEM.
2562 If X_CANONICALIZED is true, then X_ADDR is the canonicalized address of X,
2563 and X_MODE the mode for that access.
2564 If MEM_CANONICALIZED is true, MEM is canonicalized. */
2566 static int
2567 write_dependence_p (const_rtx mem,
2568 const_rtx x, machine_mode x_mode, rtx x_addr,
2569 bool mem_canonicalized, bool x_canonicalized, bool writep)
2571 rtx mem_addr;
2572 rtx true_mem_addr, true_x_addr;
2573 rtx base;
2574 int ret;
2576 gcc_checking_assert (x_canonicalized
2577 ? (x_addr != NULL_RTX && x_mode != VOIDmode)
2578 : (x_addr == NULL_RTX && x_mode == VOIDmode));
2580 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2581 return 1;
2583 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2584 This is used in epilogue deallocation functions. */
2585 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2586 return 1;
2587 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2588 return 1;
2589 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2590 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2591 return 1;
2593 if (!x_addr)
2594 x_addr = XEXP (x, 0);
2595 true_x_addr = get_addr (x_addr);
2597 mem_addr = XEXP (mem, 0);
2598 true_mem_addr = get_addr (mem_addr);
2600 /* A read from read-only memory can't conflict with read-write memory.
2601 Don't assume anything when AND addresses are involved and leave it to
2602 the code below to determine dependence. */
2603 if (!writep
2604 && MEM_READONLY_P (mem)
2605 && GET_CODE (true_x_addr) != AND
2606 && GET_CODE (true_mem_addr) != AND)
2607 return 0;
2609 /* If we have MEMs referring to different address spaces (which can
2610 potentially overlap), we cannot easily tell from the addresses
2611 whether the references overlap. */
2612 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2613 return 1;
2615 base = find_base_term (true_mem_addr);
2616 if (! writep
2617 && base
2618 && (GET_CODE (base) == LABEL_REF
2619 || (GET_CODE (base) == SYMBOL_REF
2620 && CONSTANT_POOL_ADDRESS_P (base))))
2621 return 0;
2623 rtx x_base = find_base_term (true_x_addr);
2624 if (! base_alias_check (true_x_addr, x_base, true_mem_addr, base,
2625 GET_MODE (x), GET_MODE (mem)))
2626 return 0;
2628 if (!x_canonicalized)
2630 x_addr = canon_rtx (true_x_addr);
2631 x_mode = GET_MODE (x);
2633 if (!mem_canonicalized)
2634 mem_addr = canon_rtx (true_mem_addr);
2636 if ((ret = memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
2637 GET_MODE_SIZE (x_mode), x_addr, 0)) != -1)
2638 return ret;
2640 if (nonoverlapping_memrefs_p (x, mem, false))
2641 return 0;
2643 return rtx_refs_may_alias_p (x, mem, false);
2646 /* Anti dependence: X is written after read in MEM takes place. */
2648 int
2649 anti_dependence (const_rtx mem, const_rtx x)
2651 return write_dependence_p (mem, x, VOIDmode, NULL_RTX,
2652 /*mem_canonicalized=*/false,
2653 /*x_canonicalized*/false, /*writep=*/false);
2656 /* Likewise, but we already have a canonicalized MEM, and X_ADDR for X.
2657 Also, consider X in X_MODE (which might be from an enclosing
2658 STRICT_LOW_PART / ZERO_EXTRACT).
2659 If MEM_CANONICALIZED is true, MEM is canonicalized. */
2661 int
2662 canon_anti_dependence (const_rtx mem, bool mem_canonicalized,
2663 const_rtx x, machine_mode x_mode, rtx x_addr)
2665 return write_dependence_p (mem, x, x_mode, x_addr,
2666 mem_canonicalized, /*x_canonicalized=*/true,
2667 /*writep=*/false);
2670 /* Output dependence: X is written after store in MEM takes place. */
2672 int
2673 output_dependence (const_rtx mem, const_rtx x)
2675 return write_dependence_p (mem, x, VOIDmode, NULL_RTX,
2676 /*mem_canonicalized=*/false,
2677 /*x_canonicalized*/false, /*writep=*/true);
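/* Usage sketch for the dependence checks above (illustrative;
   store_can_move_up_p is a hypothetical helper): moving a store
   earlier is blocked by prior reads of the location (anti dependence)
   and by prior stores to it (output dependence).  */
#if 0
static bool
store_can_move_up_p (const_rtx prior_mem, const_rtx store)
{
  return !anti_dependence (prior_mem, store)
         && !output_dependence (prior_mem, store);
}
#endif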
2682 /* Check whether X may be aliased with MEM. Don't do offset-based
2683 memory disambiguation & TBAA. */
2684 int
2685 may_alias_p (const_rtx mem, const_rtx x)
2687 rtx x_addr, mem_addr;
2689 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2690 return 1;
2692 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2693 This is used in epilogue deallocation functions. */
2694 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2695 return 1;
2696 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2697 return 1;
2698 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2699 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2700 return 1;
2702 x_addr = XEXP (x, 0);
2703 x_addr = get_addr (x_addr);
2705 mem_addr = XEXP (mem, 0);
2706 mem_addr = get_addr (mem_addr);
2708 /* Read-only memory is by definition never modified, and therefore can't
2709 conflict with anything. However, don't assume anything when AND
2710 addresses are involved and leave it to the code below to determine
2711 dependence. We don't expect to find read-only set on MEM, but
2712 stupid user tricks can produce them, so don't die. */
2713 if (MEM_READONLY_P (x)
2714 && GET_CODE (x_addr) != AND
2715 && GET_CODE (mem_addr) != AND)
2716 return 0;
2718 /* If we have MEMs referring to different address spaces (which can
2719 potentially overlap), we cannot easily tell from the addresses
2720 whether the references overlap. */
2721 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2722 return 1;
2724 rtx x_base = find_base_term (x_addr);
2725 rtx mem_base = find_base_term (mem_addr);
2726 if (! base_alias_check (x_addr, x_base, mem_addr, mem_base,
2727 GET_MODE (x), GET_MODE (mem_addr)))
2728 return 0;
2730 if (nonoverlapping_memrefs_p (mem, x, true))
2731 return 0;
2733 /* TBAA is not valid for loop-invariant accesses. */
2734 return rtx_refs_may_alias_p (x, mem, false);
2737 void
2738 init_alias_target (void)
2740 int i;
2742 if (!arg_base_value)
2743 arg_base_value = gen_rtx_ADDRESS (VOIDmode, 0);
2745 memset (static_reg_base_value, 0, sizeof static_reg_base_value);
2747 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2748 /* Check whether this register can hold an incoming pointer
2749 argument. FUNCTION_ARG_REGNO_P tests outgoing register
2750 numbers, so translate if necessary due to register windows. */
2751 if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i))
2752 && HARD_REGNO_MODE_OK (i, Pmode))
2753 static_reg_base_value[i] = arg_base_value;
2755 static_reg_base_value[STACK_POINTER_REGNUM]
2756 = unique_base_value (UNIQUE_BASE_VALUE_SP);
2757 static_reg_base_value[ARG_POINTER_REGNUM]
2758 = unique_base_value (UNIQUE_BASE_VALUE_ARGP);
2759 static_reg_base_value[FRAME_POINTER_REGNUM]
2760 = unique_base_value (UNIQUE_BASE_VALUE_FP);
2761 if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
2762 static_reg_base_value[HARD_FRAME_POINTER_REGNUM]
2763 = unique_base_value (UNIQUE_BASE_VALUE_HFP);
2766 /* Set MEMORY_MODIFIED when X modifies DATA (which is assumed
2767 to be a memory reference). */
2768 static bool memory_modified;
2769 static void
2770 memory_modified_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
2772 if (MEM_P (x))
2774 if (anti_dependence (x, (const_rtx)data) || output_dependence (x, (const_rtx)data))
2775 memory_modified = true;
2780 /* Return true when INSN may modify the memory contents of MEM
2781 (i.e. the address can be modified). */
2782 bool
2783 memory_modified_in_insn_p (const_rtx mem, const_rtx insn)
2785 if (!INSN_P (insn))
2786 return false;
2787 memory_modified = false;
2788 note_stores (PATTERN (insn), memory_modified_1, CONST_CAST_RTX(mem));
2789 return memory_modified;
2792 /* Return TRUE if the destination of a set is an rtx identical to
2793 ITEM. */
2794 static inline bool
2795 set_dest_equal_p (const_rtx set, const_rtx item)
2797 rtx dest = SET_DEST (set);
2798 return rtx_equal_p (dest, item);
2801 /* Like memory_modified_in_insn_p, but return TRUE if INSN will
2802 *DEFINITELY* modify the memory contents of MEM. */
2803 bool
2804 memory_must_be_modified_in_insn_p (const_rtx mem, const_rtx insn)
2806 if (!INSN_P (insn))
2807 return false;
2808 insn = PATTERN (insn);
2809 if (GET_CODE (insn) == SET)
2810 return set_dest_equal_p (insn, mem);
2811 else if (GET_CODE (insn) == PARALLEL)
2813 int i;
2814 for (i = 0; i < XVECLEN (insn, 0); i++)
2816 rtx sub = XVECEXP (insn, 0, i);
2817 if (GET_CODE (sub) == SET
2818 && set_dest_equal_p (sub, mem))
2819 return true;
2822 return false;
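/* Example (illustrative): for an INSN whose pattern is
   (set (mem:SI (reg:DI r1)) (reg:SI r2)) and MEM equal to
   (mem:SI (reg:DI r1)), the SET destination is rtx-identical to MEM,
   so true is returned; a PARALLEL is scanned one SET at a time in
   the same way.  */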
2825 /* Initialize the aliasing machinery. Initialize the REG_KNOWN_VALUE
2826 array. */
2828 void
2829 init_alias_analysis (void)
2831 unsigned int maxreg = max_reg_num ();
2832 int changed, pass;
2833 int i;
2834 unsigned int ui;
2835 rtx_insn *insn;
2836 rtx val;
2837 int rpo_cnt;
2838 int *rpo;
2840 timevar_push (TV_ALIAS_ANALYSIS);
2842 vec_safe_grow_cleared (reg_known_value, maxreg - FIRST_PSEUDO_REGISTER);
2843 reg_known_equiv_p = sbitmap_alloc (maxreg - FIRST_PSEUDO_REGISTER);
2844 bitmap_clear (reg_known_equiv_p);
2846 /* If we have memory allocated from the previous run, use it. */
2847 if (old_reg_base_value)
2848 reg_base_value = old_reg_base_value;
2850 if (reg_base_value)
2851 reg_base_value->truncate (0);
2853 vec_safe_grow_cleared (reg_base_value, maxreg);
2855 new_reg_base_value = XNEWVEC (rtx, maxreg);
2856 reg_seen = sbitmap_alloc (maxreg);
2858 /* The basic idea is that each pass through this loop will use the
2859 "constant" information from the previous pass to propagate alias
2860 information through another level of assignments.
2862 The propagation is done on the CFG in reverse post-order, to propagate
2863 things forward as far as possible in each iteration.
2865 This could get expensive if the assignment chains are long. Maybe
2866 we should throttle the number of iterations, possibly based on
2867 the optimization level or flag_expensive_optimizations.
2869 We could propagate more information in the first pass by making use
2870 of DF_REG_DEF_COUNT to determine immediately that the alias information
2871 for a pseudo is "constant".
2873 A program with an uninitialized variable can cause an infinite loop
2874 here. Instead of doing a full dataflow analysis to detect such problems
2875 we just cap the number of iterations for the loop.
2877 The state of the arrays for the set chain in question does not matter
2878 since the program has undefined behavior. */
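  /* Sketch of one propagation chain (illustrative): given
       r10 = &a;  r11 = r10 + 4;  r12 = r11;
     the first pass records a base value for r10; later passes
     propagate it through the copy and PLUS handling in record_set to
     r11 and r12, and the loop below stops once no reg_base_value
     entry changes.  */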
2880 rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
2881 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
2883 pass = 0;
2886 /* Assume nothing will change this iteration of the loop. */
2887 changed = 0;
2889 /* We want to assign the same IDs each iteration of this loop, so
2890 start counting from one each iteration of the loop. */
2891 unique_id = 1;
2893 /* We're at the start of the function each iteration through the
2894 loop, so we're copying arguments. */
2895 copying_arguments = true;
2897 /* Wipe the potential alias information clean for this pass. */
2898 memset (new_reg_base_value, 0, maxreg * sizeof (rtx));
2900 /* Wipe the reg_seen array clean. */
2901 bitmap_clear (reg_seen);
2903 /* Initialize the alias information for this pass. */
2904 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2905 if (static_reg_base_value[i])
2907 new_reg_base_value[i] = static_reg_base_value[i];
2908 bitmap_set_bit (reg_seen, i);
2911 /* Walk the insns adding values to the new_reg_base_value array. */
2912 for (i = 0; i < rpo_cnt; i++)
2914 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
2915 FOR_BB_INSNS (bb, insn)
2917 if (NONDEBUG_INSN_P (insn))
2919 rtx note, set;
2921 #if defined (HAVE_prologue)
2922 static const bool prologue = true;
2923 #else
2924 static const bool prologue = false;
2925 #endif
2927 /* The prologue/epilogue insns are not threaded onto the
2928 insn chain until after reload has completed. Thus,
2929 there is no sense wasting time checking if INSN is in
2930 the prologue/epilogue until after reload has completed. */
2931 if ((prologue || HAVE_epilogue) && reload_completed
2932 && prologue_epilogue_contains (insn))
2933 continue;
2935 /* If this insn has a noalias note, process it. Otherwise,
2936 scan for sets. A simple set will have no side effects
2937 which could change the base value of any other register. */
2939 if (GET_CODE (PATTERN (insn)) == SET
2940 && REG_NOTES (insn) != 0
2941 && find_reg_note (insn, REG_NOALIAS, NULL_RTX))
2942 record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL);
2943 else
2944 note_stores (PATTERN (insn), record_set, NULL);
2946 set = single_set (insn);
2948 if (set != 0
2949 && REG_P (SET_DEST (set))
2950 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
2952 unsigned int regno = REGNO (SET_DEST (set));
2953 rtx src = SET_SRC (set);
2954 rtx t;
2956 note = find_reg_equal_equiv_note (insn);
2957 if (note && REG_NOTE_KIND (note) == REG_EQUAL
2958 && DF_REG_DEF_COUNT (regno) != 1)
2959 note = NULL_RTX;
2961 if (note != NULL_RTX
2962 && GET_CODE (XEXP (note, 0)) != EXPR_LIST
2963 && ! rtx_varies_p (XEXP (note, 0), 1)
2964 && ! reg_overlap_mentioned_p (SET_DEST (set),
2965 XEXP (note, 0)))
2967 set_reg_known_value (regno, XEXP (note, 0));
2968 set_reg_known_equiv_p (regno,
2969 REG_NOTE_KIND (note) == REG_EQUIV);
2971 else if (DF_REG_DEF_COUNT (regno) == 1
2972 && GET_CODE (src) == PLUS
2973 && REG_P (XEXP (src, 0))
2974 && (t = get_reg_known_value (REGNO (XEXP (src, 0))))
2975 && CONST_INT_P (XEXP (src, 1)))
2977 t = plus_constant (GET_MODE (src), t,
2978 INTVAL (XEXP (src, 1)));
2979 set_reg_known_value (regno, t);
2980 set_reg_known_equiv_p (regno, false);
2982 else if (DF_REG_DEF_COUNT (regno) == 1
2983 && ! rtx_varies_p (src, 1))
2985 set_reg_known_value (regno, src);
2986 set_reg_known_equiv_p (regno, false);
2990 else if (NOTE_P (insn)
2991 && NOTE_KIND (insn) == NOTE_INSN_FUNCTION_BEG)
2992 copying_arguments = false;
2996 /* Now propagate values from new_reg_base_value to reg_base_value. */
2997 gcc_assert (maxreg == (unsigned int) max_reg_num ());
2999 for (ui = 0; ui < maxreg; ui++)
3001 if (new_reg_base_value[ui]
3002 && new_reg_base_value[ui] != (*reg_base_value)[ui]
3003 && ! rtx_equal_p (new_reg_base_value[ui], (*reg_base_value)[ui]))
3005 (*reg_base_value)[ui] = new_reg_base_value[ui];
3006 changed = 1;
3010 while (changed && ++pass < MAX_ALIAS_LOOP_PASSES);
3011 XDELETEVEC (rpo);
3013 /* Fill in the remaining entries. */
3014 FOR_EACH_VEC_ELT (*reg_known_value, i, val)
3016 int regno = i + FIRST_PSEUDO_REGISTER;
3017 if (! val)
3018 set_reg_known_value (regno, regno_reg_rtx[regno]);
3021 /* Clean up. */
3022 free (new_reg_base_value);
3023 new_reg_base_value = 0;
3024 sbitmap_free (reg_seen);
3025 reg_seen = 0;
3026 timevar_pop (TV_ALIAS_ANALYSIS);
3029 /* Equate REG_BASE_VALUE (reg1) to REG_BASE_VALUE (reg2).
3030 Special API for var-tracking pass purposes. */
3032 void
3033 vt_equate_reg_base_value (const_rtx reg1, const_rtx reg2)
3035 (*reg_base_value)[REGNO (reg1)] = REG_BASE_VALUE (reg2);
3038 void
3039 end_alias_analysis (void)
3041 old_reg_base_value = reg_base_value;
3042 vec_free (reg_known_value);
3043 sbitmap_free (reg_known_equiv_p);
3046 #include "gt-alias.h"