Add GCC support to ENQCMD.
[official-gcc.git] / gcc / alias.c
blobd3cc07c721663ad2edd9bd5cc0a633b337e8a827
1 /* Alias analysis for GNU C
2 Copyright (C) 1997-2019 Free Software Foundation, Inc.
3 Contributed by John Carr (jfc@mit.edu).
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "df.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "gimple-ssa.h"
33 #include "emit-rtl.h"
34 #include "alias.h"
35 #include "fold-const.h"
36 #include "varasm.h"
37 #include "cselib.h"
38 #include "langhooks.h"
39 #include "cfganal.h"
40 #include "rtl-iter.h"
41 #include "cgraph.h"
43 /* The aliasing API provided here solves related but different problems:
45 Say there exists (in c)
47 struct X {
48 struct Y y1;
49 struct Z z2;
50 } x1, *px1, *px2;
52 struct Y y2, *py;
53 struct Z z2, *pz;
56 py = &x1.y1;
57 px2 = &x1;
59 Consider the four questions:
61 Can a store to x1 interfere with px2->y1?
62 Can a store to x1 interfere with px2->z2?
63 Can a store to x1 change the value pointed to by with py?
64 Can a store to x1 change the value pointed to by with pz?
66 The answer to these questions can be yes, yes, yes, and maybe.
68 The first two questions can be answered with a simple examination
69 of the type system. If structure X contains a field of type Y then
70 a store through a pointer to an X can overwrite any field that is
71 contained (recursively) in an X (unless we know that px1 != px2).
73 The last two questions can be solved in the same way as the first
74 two questions but this is too conservative. The observation is
75 that in some cases we can know which (if any) fields are addressed
76 and if those addresses are used in bad ways. This analysis may be
77 language specific. In C, arbitrary operations may be applied to
78 pointers. However, there is some indication that this may be too
79 conservative for some C++ types.
81 The pass ipa-type-escape does this analysis for the types whose
82 instances do not escape across the compilation boundary.
84 Historically in GCC, these two problems were combined and a single
85 data structure that was used to represent the solution to these
86 problems. We now have two similar but different data structures,
87 The data structure to solve the last two questions is similar to
88 the first, but does not contain the fields whose address are never
89 taken. For types that do escape the compilation unit, the data
90 structures will have identical information.
93 /* The alias sets assigned to MEMs assist the back-end in determining
94 which MEMs can alias which other MEMs. In general, two MEMs in
95 different alias sets cannot alias each other, with one important
96 exception. Consider something like:
98 struct S { int i; double d; };
100 a store to an `S' can alias something of either type `int' or type
101 `double'. (However, a store to an `int' cannot alias a `double'
102 and vice versa.) We indicate this via a tree structure that looks
103 like:
104 struct S
107 |/_ _\|
108 int double
110 (The arrows are directed and point downwards.)
111 In this situation we say the alias set for `struct S' is the
112 `superset' and that those for `int' and `double' are `subsets'.
114 To see whether two alias sets can point to the same memory, we must
115 see if either alias set is a subset of the other. We need not trace
116 past immediate descendants, however, since we propagate all
117 grandchildren up one level.
119 Alias set zero is implicitly a superset of all other alias sets.
120 However, this is no actual entry for alias set zero. It is an
121 error to attempt to explicitly construct a subset of zero. */
123 struct alias_set_hash : int_hash <int, INT_MIN, INT_MIN + 1> {};
125 struct GTY(()) alias_set_entry {
126 /* The alias set number, as stored in MEM_ALIAS_SET. */
127 alias_set_type alias_set;
129 /* Nonzero if would have a child of zero: this effectively makes this
130 alias set the same as alias set zero. */
131 bool has_zero_child;
132 /* Nonzero if alias set corresponds to pointer type itself (i.e. not to
133 aggregate contaiing pointer.
134 This is used for a special case where we need an universal pointer type
135 compatible with all other pointer types. */
136 bool is_pointer;
137 /* Nonzero if is_pointer or if one of childs have has_pointer set. */
138 bool has_pointer;
140 /* The children of the alias set. These are not just the immediate
141 children, but, in fact, all descendants. So, if we have:
143 struct T { struct S s; float f; }
145 continuing our example above, the children here will be all of
146 `int', `double', `float', and `struct S'. */
147 hash_map<alias_set_hash, int> *children;
150 static int rtx_equal_for_memref_p (const_rtx, const_rtx);
151 static void record_set (rtx, const_rtx, void *);
152 static int base_alias_check (rtx, rtx, rtx, rtx, machine_mode,
153 machine_mode);
154 static rtx find_base_value (rtx);
155 static int mems_in_disjoint_alias_sets_p (const_rtx, const_rtx);
156 static alias_set_entry *get_alias_set_entry (alias_set_type);
157 static tree decl_for_component_ref (tree);
158 static int write_dependence_p (const_rtx,
159 const_rtx, machine_mode, rtx,
160 bool, bool, bool);
161 static int compare_base_symbol_refs (const_rtx, const_rtx);
163 static void memory_modified_1 (rtx, const_rtx, void *);
165 /* Query statistics for the different low-level disambiguators.
166 A high-level query may trigger multiple of them. */
168 static struct {
169 unsigned long long num_alias_zero;
170 unsigned long long num_same_alias_set;
171 unsigned long long num_same_objects;
172 unsigned long long num_volatile;
173 unsigned long long num_dag;
174 unsigned long long num_universal;
175 unsigned long long num_disambiguated;
176 } alias_stats;
179 /* Set up all info needed to perform alias analysis on memory references. */
181 /* Returns the size in bytes of the mode of X. */
182 #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
184 /* Cap the number of passes we make over the insns propagating alias
185 information through set chains.
186 ??? 10 is a completely arbitrary choice. This should be based on the
187 maximum loop depth in the CFG, but we do not have this information
188 available (even if current_loops _is_ available). */
189 #define MAX_ALIAS_LOOP_PASSES 10
191 /* reg_base_value[N] gives an address to which register N is related.
192 If all sets after the first add or subtract to the current value
193 or otherwise modify it so it does not point to a different top level
194 object, reg_base_value[N] is equal to the address part of the source
195 of the first set.
197 A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF. ADDRESS
198 expressions represent three types of base:
200 1. incoming arguments. There is just one ADDRESS to represent all
201 arguments, since we do not know at this level whether accesses
202 based on different arguments can alias. The ADDRESS has id 0.
204 2. stack_pointer_rtx, frame_pointer_rtx, hard_frame_pointer_rtx
205 (if distinct from frame_pointer_rtx) and arg_pointer_rtx.
206 Each of these rtxes has a separate ADDRESS associated with it,
207 each with a negative id.
209 GCC is (and is required to be) precise in which register it
210 chooses to access a particular region of stack. We can therefore
211 assume that accesses based on one of these rtxes do not alias
212 accesses based on another of these rtxes.
214 3. bases that are derived from malloc()ed memory (REG_NOALIAS).
215 Each such piece of memory has a separate ADDRESS associated
216 with it, each with an id greater than 0.
218 Accesses based on one ADDRESS do not alias accesses based on other
219 ADDRESSes. Accesses based on ADDRESSes in groups (2) and (3) do not
220 alias globals either; the ADDRESSes have Pmode to indicate this.
221 The ADDRESS in group (1) _may_ alias globals; it has VOIDmode to
222 indicate this. */
224 static GTY(()) vec<rtx, va_gc> *reg_base_value;
225 static rtx *new_reg_base_value;
227 /* The single VOIDmode ADDRESS that represents all argument bases.
228 It has id 0. */
229 static GTY(()) rtx arg_base_value;
231 /* Used to allocate unique ids to each REG_NOALIAS ADDRESS. */
232 static int unique_id;
234 /* We preserve the copy of old array around to avoid amount of garbage
235 produced. About 8% of garbage produced were attributed to this
236 array. */
237 static GTY((deletable)) vec<rtx, va_gc> *old_reg_base_value;
239 /* Values of XINT (address, 0) of Pmode ADDRESS rtxes for special
240 registers. */
241 #define UNIQUE_BASE_VALUE_SP -1
242 #define UNIQUE_BASE_VALUE_ARGP -2
243 #define UNIQUE_BASE_VALUE_FP -3
244 #define UNIQUE_BASE_VALUE_HFP -4
246 #define static_reg_base_value \
247 (this_target_rtl->x_static_reg_base_value)
249 #define REG_BASE_VALUE(X) \
250 (REGNO (X) < vec_safe_length (reg_base_value) \
251 ? (*reg_base_value)[REGNO (X)] : 0)
253 /* Vector indexed by N giving the initial (unchanging) value known for
254 pseudo-register N. This vector is initialized in init_alias_analysis,
255 and does not change until end_alias_analysis is called. */
256 static GTY(()) vec<rtx, va_gc> *reg_known_value;
258 /* Vector recording for each reg_known_value whether it is due to a
259 REG_EQUIV note. Future passes (viz., reload) may replace the
260 pseudo with the equivalent expression and so we account for the
261 dependences that would be introduced if that happens.
263 The REG_EQUIV notes created in assign_parms may mention the arg
264 pointer, and there are explicit insns in the RTL that modify the
265 arg pointer. Thus we must ensure that such insns don't get
266 scheduled across each other because that would invalidate the
267 REG_EQUIV notes. One could argue that the REG_EQUIV notes are
268 wrong, but solving the problem in the scheduler will likely give
269 better code, so we do it here. */
270 static sbitmap reg_known_equiv_p;
272 /* True when scanning insns from the start of the rtl to the
273 NOTE_INSN_FUNCTION_BEG note. */
274 static bool copying_arguments;
277 /* The splay-tree used to store the various alias set entries. */
278 static GTY (()) vec<alias_set_entry *, va_gc> *alias_sets;
280 /* Build a decomposed reference object for querying the alias-oracle
281 from the MEM rtx and store it in *REF.
282 Returns false if MEM is not suitable for the alias-oracle. */
284 static bool
285 ao_ref_from_mem (ao_ref *ref, const_rtx mem)
287 tree expr = MEM_EXPR (mem);
288 tree base;
290 if (!expr)
291 return false;
293 ao_ref_init (ref, expr);
295 /* Get the base of the reference and see if we have to reject or
296 adjust it. */
297 base = ao_ref_base (ref);
298 if (base == NULL_TREE)
299 return false;
301 /* The tree oracle doesn't like bases that are neither decls
302 nor indirect references of SSA names. */
303 if (!(DECL_P (base)
304 || (TREE_CODE (base) == MEM_REF
305 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
306 || (TREE_CODE (base) == TARGET_MEM_REF
307 && TREE_CODE (TMR_BASE (base)) == SSA_NAME)))
308 return false;
310 ref->ref_alias_set = MEM_ALIAS_SET (mem);
312 /* If MEM_OFFSET or MEM_SIZE are unknown what we got from MEM_EXPR
313 is conservative, so trust it. */
314 if (!MEM_OFFSET_KNOWN_P (mem)
315 || !MEM_SIZE_KNOWN_P (mem))
316 return true;
318 /* If MEM_OFFSET/MEM_SIZE get us outside of ref->offset/ref->max_size
319 drop ref->ref. */
320 if (maybe_lt (MEM_OFFSET (mem), 0)
321 || (ref->max_size_known_p ()
322 && maybe_gt ((MEM_OFFSET (mem) + MEM_SIZE (mem)) * BITS_PER_UNIT,
323 ref->max_size)))
324 ref->ref = NULL_TREE;
326 /* Refine size and offset we got from analyzing MEM_EXPR by using
327 MEM_SIZE and MEM_OFFSET. */
329 ref->offset += MEM_OFFSET (mem) * BITS_PER_UNIT;
330 ref->size = MEM_SIZE (mem) * BITS_PER_UNIT;
332 /* The MEM may extend into adjacent fields, so adjust max_size if
333 necessary. */
334 if (ref->max_size_known_p ())
335 ref->max_size = upper_bound (ref->max_size, ref->size);
337 /* If MEM_OFFSET and MEM_SIZE might get us outside of the base object of
338 the MEM_EXPR punt. This happens for STRICT_ALIGNMENT targets a lot. */
339 if (MEM_EXPR (mem) != get_spill_slot_decl (false)
340 && (maybe_lt (ref->offset, 0)
341 || (DECL_P (ref->base)
342 && (DECL_SIZE (ref->base) == NULL_TREE
343 || !poly_int_tree_p (DECL_SIZE (ref->base))
344 || maybe_lt (wi::to_poly_offset (DECL_SIZE (ref->base)),
345 ref->offset + ref->size)))))
346 return false;
348 return true;
351 /* Query the alias-oracle on whether the two memory rtx X and MEM may
352 alias. If TBAA_P is set also apply TBAA. Returns true if the
353 two rtxen may alias, false otherwise. */
355 static bool
356 rtx_refs_may_alias_p (const_rtx x, const_rtx mem, bool tbaa_p)
358 ao_ref ref1, ref2;
360 if (!ao_ref_from_mem (&ref1, x)
361 || !ao_ref_from_mem (&ref2, mem))
362 return true;
364 return refs_may_alias_p_1 (&ref1, &ref2,
365 tbaa_p
366 && MEM_ALIAS_SET (x) != 0
367 && MEM_ALIAS_SET (mem) != 0);
370 /* Returns a pointer to the alias set entry for ALIAS_SET, if there is
371 such an entry, or NULL otherwise. */
373 static inline alias_set_entry *
374 get_alias_set_entry (alias_set_type alias_set)
376 return (*alias_sets)[alias_set];
379 /* Returns nonzero if the alias sets for MEM1 and MEM2 are such that
380 the two MEMs cannot alias each other. */
382 static inline int
383 mems_in_disjoint_alias_sets_p (const_rtx mem1, const_rtx mem2)
385 return (flag_strict_aliasing
386 && ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1),
387 MEM_ALIAS_SET (mem2)));
390 /* Return true if the first alias set is a subset of the second. */
392 bool
393 alias_set_subset_of (alias_set_type set1, alias_set_type set2)
395 alias_set_entry *ase2;
397 /* Disable TBAA oracle with !flag_strict_aliasing. */
398 if (!flag_strict_aliasing)
399 return true;
401 /* Everything is a subset of the "aliases everything" set. */
402 if (set2 == 0)
403 return true;
405 /* Check if set1 is a subset of set2. */
406 ase2 = get_alias_set_entry (set2);
407 if (ase2 != 0
408 && (ase2->has_zero_child
409 || (ase2->children && ase2->children->get (set1))))
410 return true;
412 /* As a special case we consider alias set of "void *" to be both subset
413 and superset of every alias set of a pointer. This extra symmetry does
414 not matter for alias_sets_conflict_p but it makes aliasing_component_refs_p
415 to return true on the following testcase:
417 void *ptr;
418 char **ptr2=(char **)&ptr;
419 *ptr2 = ...
421 Additionally if a set contains universal pointer, we consider every pointer
422 to be a subset of it, but we do not represent this explicitely - doing so
423 would require us to update transitive closure each time we introduce new
424 pointer type. This makes aliasing_component_refs_p to return true
425 on the following testcase:
427 struct a {void *ptr;}
428 char **ptr = (char **)&a.ptr;
429 ptr = ...
431 This makes void * truly universal pointer type. See pointer handling in
432 get_alias_set for more details. */
433 if (ase2 && ase2->has_pointer)
435 alias_set_entry *ase1 = get_alias_set_entry (set1);
437 if (ase1 && ase1->is_pointer)
439 alias_set_type voidptr_set = TYPE_ALIAS_SET (ptr_type_node);
440 /* If one is ptr_type_node and other is pointer, then we consider
441 them subset of each other. */
442 if (set1 == voidptr_set || set2 == voidptr_set)
443 return true;
444 /* If SET2 contains universal pointer's alias set, then we consdier
445 every (non-universal) pointer. */
446 if (ase2->children && set1 != voidptr_set
447 && ase2->children->get (voidptr_set))
448 return true;
451 return false;
454 /* Return 1 if the two specified alias sets may conflict. */
457 alias_sets_conflict_p (alias_set_type set1, alias_set_type set2)
459 alias_set_entry *ase1;
460 alias_set_entry *ase2;
462 /* The easy case. */
463 if (alias_sets_must_conflict_p (set1, set2))
464 return 1;
466 /* See if the first alias set is a subset of the second. */
467 ase1 = get_alias_set_entry (set1);
468 if (ase1 != 0
469 && ase1->children && ase1->children->get (set2))
471 ++alias_stats.num_dag;
472 return 1;
475 /* Now do the same, but with the alias sets reversed. */
476 ase2 = get_alias_set_entry (set2);
477 if (ase2 != 0
478 && ase2->children && ase2->children->get (set1))
480 ++alias_stats.num_dag;
481 return 1;
484 /* We want void * to be compatible with any other pointer without
485 really dropping it to alias set 0. Doing so would make it
486 compatible with all non-pointer types too.
488 This is not strictly necessary by the C/C++ language
489 standards, but avoids common type punning mistakes. In
490 addition to that, we need the existence of such universal
491 pointer to implement Fortran's C_PTR type (which is defined as
492 type compatible with all C pointers). */
493 if (ase1 && ase2 && ase1->has_pointer && ase2->has_pointer)
495 alias_set_type voidptr_set = TYPE_ALIAS_SET (ptr_type_node);
497 /* If one of the sets corresponds to universal pointer,
498 we consider it to conflict with anything that is
499 or contains pointer. */
500 if (set1 == voidptr_set || set2 == voidptr_set)
502 ++alias_stats.num_universal;
503 return true;
505 /* If one of sets is (non-universal) pointer and the other
506 contains universal pointer, we also get conflict. */
507 if (ase1->is_pointer && set2 != voidptr_set
508 && ase2->children && ase2->children->get (voidptr_set))
510 ++alias_stats.num_universal;
511 return true;
513 if (ase2->is_pointer && set1 != voidptr_set
514 && ase1->children && ase1->children->get (voidptr_set))
516 ++alias_stats.num_universal;
517 return true;
521 ++alias_stats.num_disambiguated;
523 /* The two alias sets are distinct and neither one is the
524 child of the other. Therefore, they cannot conflict. */
525 return 0;
528 /* Return 1 if the two specified alias sets will always conflict. */
531 alias_sets_must_conflict_p (alias_set_type set1, alias_set_type set2)
533 /* Disable TBAA oracle with !flag_strict_aliasing. */
534 if (!flag_strict_aliasing)
535 return 1;
536 if (set1 == 0 || set2 == 0)
538 ++alias_stats.num_alias_zero;
539 return 1;
541 if (set1 == set2)
543 ++alias_stats.num_same_alias_set;
544 return 1;
547 return 0;
550 /* Return 1 if any MEM object of type T1 will always conflict (using the
551 dependency routines in this file) with any MEM object of type T2.
552 This is used when allocating temporary storage. If T1 and/or T2 are
553 NULL_TREE, it means we know nothing about the storage. */
556 objects_must_conflict_p (tree t1, tree t2)
558 alias_set_type set1, set2;
560 /* If neither has a type specified, we don't know if they'll conflict
561 because we may be using them to store objects of various types, for
562 example the argument and local variables areas of inlined functions. */
563 if (t1 == 0 && t2 == 0)
564 return 0;
566 /* If they are the same type, they must conflict. */
567 if (t1 == t2)
569 ++alias_stats.num_same_objects;
570 return 1;
572 /* Likewise if both are volatile. */
573 if (t1 != 0 && TYPE_VOLATILE (t1) && t2 != 0 && TYPE_VOLATILE (t2))
575 ++alias_stats.num_volatile;
576 return 1;
579 set1 = t1 ? get_alias_set (t1) : 0;
580 set2 = t2 ? get_alias_set (t2) : 0;
582 /* We can't use alias_sets_conflict_p because we must make sure
583 that every subtype of t1 will conflict with every subtype of
584 t2 for which a pair of subobjects of these respective subtypes
585 overlaps on the stack. */
586 return alias_sets_must_conflict_p (set1, set2);
589 /* Return the outermost parent of component present in the chain of
590 component references handled by get_inner_reference in T with the
591 following property:
592 - the component is non-addressable
593 or NULL_TREE if no such parent exists. In the former cases, the alias
594 set of this parent is the alias set that must be used for T itself. */
596 tree
597 component_uses_parent_alias_set_from (const_tree t)
599 const_tree found = NULL_TREE;
601 while (handled_component_p (t))
603 switch (TREE_CODE (t))
605 case COMPONENT_REF:
606 if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1)))
607 found = t;
608 /* Permit type-punning when accessing a union, provided the access
609 is directly through the union. For example, this code does not
610 permit taking the address of a union member and then storing
611 through it. Even the type-punning allowed here is a GCC
612 extension, albeit a common and useful one; the C standard says
613 that such accesses have implementation-defined behavior. */
614 else if (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == UNION_TYPE)
615 found = t;
616 break;
618 case ARRAY_REF:
619 case ARRAY_RANGE_REF:
620 if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0))))
621 found = t;
622 break;
624 case REALPART_EXPR:
625 case IMAGPART_EXPR:
626 break;
628 case BIT_FIELD_REF:
629 case VIEW_CONVERT_EXPR:
630 /* Bitfields and casts are never addressable. */
631 found = t;
632 break;
634 default:
635 gcc_unreachable ();
638 t = TREE_OPERAND (t, 0);
641 if (found)
642 return TREE_OPERAND (found, 0);
644 return NULL_TREE;
648 /* Return whether the pointer-type T effective for aliasing may
649 access everything and thus the reference has to be assigned
650 alias-set zero. */
652 static bool
653 ref_all_alias_ptr_type_p (const_tree t)
655 return (TREE_CODE (TREE_TYPE (t)) == VOID_TYPE
656 || TYPE_REF_CAN_ALIAS_ALL (t));
659 /* Return the alias set for the memory pointed to by T, which may be
660 either a type or an expression. Return -1 if there is nothing
661 special about dereferencing T. */
663 static alias_set_type
664 get_deref_alias_set_1 (tree t)
666 /* All we care about is the type. */
667 if (! TYPE_P (t))
668 t = TREE_TYPE (t);
670 /* If we have an INDIRECT_REF via a void pointer, we don't
671 know anything about what that might alias. Likewise if the
672 pointer is marked that way. */
673 if (ref_all_alias_ptr_type_p (t))
674 return 0;
676 return -1;
679 /* Return the alias set for the memory pointed to by T, which may be
680 either a type or an expression. */
682 alias_set_type
683 get_deref_alias_set (tree t)
685 /* If we're not doing any alias analysis, just assume everything
686 aliases everything else. */
687 if (!flag_strict_aliasing)
688 return 0;
690 alias_set_type set = get_deref_alias_set_1 (t);
692 /* Fall back to the alias-set of the pointed-to type. */
693 if (set == -1)
695 if (! TYPE_P (t))
696 t = TREE_TYPE (t);
697 set = get_alias_set (TREE_TYPE (t));
700 return set;
703 /* Return the pointer-type relevant for TBAA purposes from the
704 memory reference tree *T or NULL_TREE in which case *T is
705 adjusted to point to the outermost component reference that
706 can be used for assigning an alias set. */
708 static tree
709 reference_alias_ptr_type_1 (tree *t)
711 tree inner;
713 /* Get the base object of the reference. */
714 inner = *t;
715 while (handled_component_p (inner))
717 /* If there is a VIEW_CONVERT_EXPR in the chain we cannot use
718 the type of any component references that wrap it to
719 determine the alias-set. */
720 if (TREE_CODE (inner) == VIEW_CONVERT_EXPR)
721 *t = TREE_OPERAND (inner, 0);
722 inner = TREE_OPERAND (inner, 0);
725 /* Handle pointer dereferences here, they can override the
726 alias-set. */
727 if (INDIRECT_REF_P (inner)
728 && ref_all_alias_ptr_type_p (TREE_TYPE (TREE_OPERAND (inner, 0))))
729 return TREE_TYPE (TREE_OPERAND (inner, 0));
730 else if (TREE_CODE (inner) == TARGET_MEM_REF)
731 return TREE_TYPE (TMR_OFFSET (inner));
732 else if (TREE_CODE (inner) == MEM_REF
733 && ref_all_alias_ptr_type_p (TREE_TYPE (TREE_OPERAND (inner, 1))))
734 return TREE_TYPE (TREE_OPERAND (inner, 1));
736 /* If the innermost reference is a MEM_REF that has a
737 conversion embedded treat it like a VIEW_CONVERT_EXPR above,
738 using the memory access type for determining the alias-set. */
739 if (TREE_CODE (inner) == MEM_REF
740 && (TYPE_MAIN_VARIANT (TREE_TYPE (inner))
741 != TYPE_MAIN_VARIANT
742 (TREE_TYPE (TREE_TYPE (TREE_OPERAND (inner, 1))))))
743 return TREE_TYPE (TREE_OPERAND (inner, 1));
745 /* Otherwise, pick up the outermost object that we could have
746 a pointer to. */
747 tree tem = component_uses_parent_alias_set_from (*t);
748 if (tem)
749 *t = tem;
751 return NULL_TREE;
754 /* Return the pointer-type relevant for TBAA purposes from the
755 gimple memory reference tree T. This is the type to be used for
756 the offset operand of MEM_REF or TARGET_MEM_REF replacements of T
757 and guarantees that get_alias_set will return the same alias
758 set for T and the replacement. */
760 tree
761 reference_alias_ptr_type (tree t)
763 /* If the frontend assigns this alias-set zero, preserve that. */
764 if (lang_hooks.get_alias_set (t) == 0)
765 return ptr_type_node;
767 tree ptype = reference_alias_ptr_type_1 (&t);
768 /* If there is a given pointer type for aliasing purposes, return it. */
769 if (ptype != NULL_TREE)
770 return ptype;
772 /* Otherwise build one from the outermost component reference we
773 may use. */
774 if (TREE_CODE (t) == MEM_REF
775 || TREE_CODE (t) == TARGET_MEM_REF)
776 return TREE_TYPE (TREE_OPERAND (t, 1));
777 else
778 return build_pointer_type (TYPE_MAIN_VARIANT (TREE_TYPE (t)));
781 /* Return whether the pointer-types T1 and T2 used to determine
782 two alias sets of two references will yield the same answer
783 from get_deref_alias_set. */
785 bool
786 alias_ptr_types_compatible_p (tree t1, tree t2)
788 if (TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))
789 return true;
791 if (ref_all_alias_ptr_type_p (t1)
792 || ref_all_alias_ptr_type_p (t2))
793 return false;
795 return (TYPE_MAIN_VARIANT (TREE_TYPE (t1))
796 == TYPE_MAIN_VARIANT (TREE_TYPE (t2)));
799 /* Create emptry alias set entry. */
801 alias_set_entry *
802 init_alias_set_entry (alias_set_type set)
804 alias_set_entry *ase = ggc_alloc<alias_set_entry> ();
805 ase->alias_set = set;
806 ase->children = NULL;
807 ase->has_zero_child = false;
808 ase->is_pointer = false;
809 ase->has_pointer = false;
810 gcc_checking_assert (!get_alias_set_entry (set));
811 (*alias_sets)[set] = ase;
812 return ase;
815 /* Return the alias set for T, which may be either a type or an
816 expression. Call language-specific routine for help, if needed. */
818 alias_set_type
819 get_alias_set (tree t)
821 alias_set_type set;
823 /* We cannot give up with -fno-strict-aliasing because we need to build
824 proper type representation for possible functions which are build with
825 -fstrict-aliasing. */
827 /* return 0 if this or its type is an error. */
828 if (t == error_mark_node
829 || (! TYPE_P (t)
830 && (TREE_TYPE (t) == 0 || TREE_TYPE (t) == error_mark_node)))
831 return 0;
833 /* We can be passed either an expression or a type. This and the
834 language-specific routine may make mutually-recursive calls to each other
835 to figure out what to do. At each juncture, we see if this is a tree
836 that the language may need to handle specially. First handle things that
837 aren't types. */
838 if (! TYPE_P (t))
840 /* Give the language a chance to do something with this tree
841 before we look at it. */
842 STRIP_NOPS (t);
843 set = lang_hooks.get_alias_set (t);
844 if (set != -1)
845 return set;
847 /* Get the alias pointer-type to use or the outermost object
848 that we could have a pointer to. */
849 tree ptype = reference_alias_ptr_type_1 (&t);
850 if (ptype != NULL)
851 return get_deref_alias_set (ptype);
853 /* If we've already determined the alias set for a decl, just return
854 it. This is necessary for C++ anonymous unions, whose component
855 variables don't look like union members (boo!). */
856 if (VAR_P (t)
857 && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t)))
858 return MEM_ALIAS_SET (DECL_RTL (t));
860 /* Now all we care about is the type. */
861 t = TREE_TYPE (t);
864 /* Variant qualifiers don't affect the alias set, so get the main
865 variant. */
866 t = TYPE_MAIN_VARIANT (t);
868 if (AGGREGATE_TYPE_P (t)
869 && TYPE_TYPELESS_STORAGE (t))
870 return 0;
872 /* Always use the canonical type as well. If this is a type that
873 requires structural comparisons to identify compatible types
874 use alias set zero. */
875 if (TYPE_STRUCTURAL_EQUALITY_P (t))
877 /* Allow the language to specify another alias set for this
878 type. */
879 set = lang_hooks.get_alias_set (t);
880 if (set != -1)
881 return set;
882 /* Handle structure type equality for pointer types, arrays and vectors.
883 This is easy to do, because the code bellow ignore canonical types on
884 these anyway. This is important for LTO, where TYPE_CANONICAL for
885 pointers cannot be meaningfuly computed by the frotnend. */
886 if (canonical_type_used_p (t))
888 /* In LTO we set canonical types for all types where it makes
889 sense to do so. Double check we did not miss some type. */
890 gcc_checking_assert (!in_lto_p || !type_with_alias_set_p (t));
891 return 0;
894 else
896 t = TYPE_CANONICAL (t);
897 gcc_checking_assert (!TYPE_STRUCTURAL_EQUALITY_P (t));
900 /* If this is a type with a known alias set, return it. */
901 gcc_checking_assert (t == TYPE_MAIN_VARIANT (t));
902 if (TYPE_ALIAS_SET_KNOWN_P (t))
903 return TYPE_ALIAS_SET (t);
905 /* We don't want to set TYPE_ALIAS_SET for incomplete types. */
906 if (!COMPLETE_TYPE_P (t))
908 /* For arrays with unknown size the conservative answer is the
909 alias set of the element type. */
910 if (TREE_CODE (t) == ARRAY_TYPE)
911 return get_alias_set (TREE_TYPE (t));
913 /* But return zero as a conservative answer for incomplete types. */
914 return 0;
917 /* See if the language has special handling for this type. */
918 set = lang_hooks.get_alias_set (t);
919 if (set != -1)
920 return set;
922 /* There are no objects of FUNCTION_TYPE, so there's no point in
923 using up an alias set for them. (There are, of course, pointers
924 and references to functions, but that's different.) */
925 else if (TREE_CODE (t) == FUNCTION_TYPE || TREE_CODE (t) == METHOD_TYPE)
926 set = 0;
928 /* Unless the language specifies otherwise, let vector types alias
929 their components. This avoids some nasty type punning issues in
930 normal usage. And indeed lets vectors be treated more like an
931 array slice. */
932 else if (TREE_CODE (t) == VECTOR_TYPE)
933 set = get_alias_set (TREE_TYPE (t));
935 /* Unless the language specifies otherwise, treat array types the
936 same as their components. This avoids the asymmetry we get
937 through recording the components. Consider accessing a
938 character(kind=1) through a reference to a character(kind=1)[1:1].
939 Or consider if we want to assign integer(kind=4)[0:D.1387] and
940 integer(kind=4)[4] the same alias set or not.
941 Just be pragmatic here and make sure the array and its element
942 type get the same alias set assigned. */
943 else if (TREE_CODE (t) == ARRAY_TYPE
944 && (!TYPE_NONALIASED_COMPONENT (t)
945 || TYPE_STRUCTURAL_EQUALITY_P (t)))
946 set = get_alias_set (TREE_TYPE (t));
948 /* From the former common C and C++ langhook implementation:
950 Unfortunately, there is no canonical form of a pointer type.
951 In particular, if we have `typedef int I', then `int *', and
952 `I *' are different types. So, we have to pick a canonical
953 representative. We do this below.
955 Technically, this approach is actually more conservative that
956 it needs to be. In particular, `const int *' and `int *'
957 should be in different alias sets, according to the C and C++
958 standard, since their types are not the same, and so,
959 technically, an `int **' and `const int **' cannot point at
960 the same thing.
962 But, the standard is wrong. In particular, this code is
963 legal C++:
965 int *ip;
966 int **ipp = &ip;
967 const int* const* cipp = ipp;
968 And, it doesn't make sense for that to be legal unless you
969 can dereference IPP and CIPP. So, we ignore cv-qualifiers on
970 the pointed-to types. This issue has been reported to the
971 C++ committee.
973 For this reason go to canonical type of the unqalified pointer type.
974 Until GCC 6 this code set all pointers sets to have alias set of
975 ptr_type_node but that is a bad idea, because it prevents disabiguations
976 in between pointers. For Firefox this accounts about 20% of all
977 disambiguations in the program. */
978 else if (POINTER_TYPE_P (t) && t != ptr_type_node)
980 tree p;
981 auto_vec <bool, 8> reference;
983 /* Unnest all pointers and references.
984 We also want to make pointer to array/vector equivalent to pointer to
985 its element (see the reasoning above). Skip all those types, too. */
986 for (p = t; POINTER_TYPE_P (p)
987 || (TREE_CODE (p) == ARRAY_TYPE
988 && (!TYPE_NONALIASED_COMPONENT (p)
989 || !COMPLETE_TYPE_P (p)
990 || TYPE_STRUCTURAL_EQUALITY_P (p)))
991 || TREE_CODE (p) == VECTOR_TYPE;
992 p = TREE_TYPE (p))
994 /* Ada supports recusive pointers. Instead of doing recrusion check
995 just give up once the preallocated space of 8 elements is up.
996 In this case just punt to void * alias set. */
997 if (reference.length () == 8)
999 p = ptr_type_node;
1000 break;
1002 if (TREE_CODE (p) == REFERENCE_TYPE)
1003 /* In LTO we want languages that use references to be compatible
1004 with languages that use pointers. */
1005 reference.safe_push (true && !in_lto_p);
1006 if (TREE_CODE (p) == POINTER_TYPE)
1007 reference.safe_push (false);
1009 p = TYPE_MAIN_VARIANT (p);
1011 /* Make void * compatible with char * and also void **.
1012 Programs are commonly violating TBAA by this.
1014 We also make void * to conflict with every pointer
1015 (see record_component_aliases) and thus it is safe it to use it for
1016 pointers to types with TYPE_STRUCTURAL_EQUALITY_P. */
1017 if (TREE_CODE (p) == VOID_TYPE || TYPE_STRUCTURAL_EQUALITY_P (p))
1018 set = get_alias_set (ptr_type_node);
1019 else
1021 /* Rebuild pointer type starting from canonical types using
1022 unqualified pointers and references only. This way all such
1023 pointers will have the same alias set and will conflict with
1024 each other.
1026 Most of time we already have pointers or references of a given type.
1027 If not we build new one just to be sure that if someone later
1028 (probably only middle-end can, as we should assign all alias
1029 classes only after finishing translation unit) builds the pointer
1030 type, the canonical type will match. */
1031 p = TYPE_CANONICAL (p);
1032 while (!reference.is_empty ())
1034 if (reference.pop ())
1035 p = build_reference_type (p);
1036 else
1037 p = build_pointer_type (p);
1038 gcc_checking_assert (p == TYPE_MAIN_VARIANT (p));
1039 /* build_pointer_type should always return the canonical type.
1040 For LTO TYPE_CANOINCAL may be NULL, because we do not compute
1041 them. Be sure that frontends do not glob canonical types of
1042 pointers in unexpected way and that p == TYPE_CANONICAL (p)
1043 in all other cases. */
1044 gcc_checking_assert (!TYPE_CANONICAL (p)
1045 || p == TYPE_CANONICAL (p));
1048 /* Assign the alias set to both p and t.
1049 We cannot call get_alias_set (p) here as that would trigger
1050 infinite recursion when p == t. In other cases it would just
1051 trigger unnecesary legwork of rebuilding the pointer again. */
1052 gcc_checking_assert (p == TYPE_MAIN_VARIANT (p));
1053 if (TYPE_ALIAS_SET_KNOWN_P (p))
1054 set = TYPE_ALIAS_SET (p);
1055 else
1057 set = new_alias_set ();
1058 TYPE_ALIAS_SET (p) = set;
1062 /* Alias set of ptr_type_node is special and serve as universal pointer which
1063 is TBAA compatible with every other pointer type. Be sure we have the
1064 alias set built even for LTO which otherwise keeps all TYPE_CANONICAL
1065 of pointer types NULL. */
1066 else if (t == ptr_type_node)
1067 set = new_alias_set ();
1069 /* Otherwise make a new alias set for this type. */
1070 else
1072 /* Each canonical type gets its own alias set, so canonical types
1073 shouldn't form a tree. It doesn't really matter for types
1074 we handle specially above, so only check it where it possibly
1075 would result in a bogus alias set. */
1076 gcc_checking_assert (TYPE_CANONICAL (t) == t);
1078 set = new_alias_set ();
1081 TYPE_ALIAS_SET (t) = set;
1083 /* If this is an aggregate type or a complex type, we must record any
1084 component aliasing information. */
1085 if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE)
1086 record_component_aliases (t);
1088 /* We treat pointer types specially in alias_set_subset_of. */
1089 if (POINTER_TYPE_P (t) && set)
1091 alias_set_entry *ase = get_alias_set_entry (set);
1092 if (!ase)
1093 ase = init_alias_set_entry (set);
1094 ase->is_pointer = true;
1095 ase->has_pointer = true;
1098 return set;
1101 /* Return a brand-new alias set. */
1103 alias_set_type
1104 new_alias_set (void)
1106 if (alias_sets == 0)
1107 vec_safe_push (alias_sets, (alias_set_entry *) NULL);
1108 vec_safe_push (alias_sets, (alias_set_entry *) NULL);
1109 return alias_sets->length () - 1;
1112 /* Indicate that things in SUBSET can alias things in SUPERSET, but that
1113 not everything that aliases SUPERSET also aliases SUBSET. For example,
1114 in C, a store to an `int' can alias a load of a structure containing an
1115 `int', and vice versa. But it can't alias a load of a 'double' member
1116 of the same structure. Here, the structure would be the SUPERSET and
1117 `int' the SUBSET. This relationship is also described in the comment at
1118 the beginning of this file.
1120 This function should be called only once per SUPERSET/SUBSET pair.
1122 It is illegal for SUPERSET to be zero; everything is implicitly a
1123 subset of alias set zero. */
1125 void
1126 record_alias_subset (alias_set_type superset, alias_set_type subset)
1128 alias_set_entry *superset_entry;
1129 alias_set_entry *subset_entry;
1131 /* It is possible in complex type situations for both sets to be the same,
1132 in which case we can ignore this operation. */
1133 if (superset == subset)
1134 return;
1136 gcc_assert (superset);
1138 superset_entry = get_alias_set_entry (superset);
1139 if (superset_entry == 0)
1141 /* Create an entry for the SUPERSET, so that we have a place to
1142 attach the SUBSET. */
1143 superset_entry = init_alias_set_entry (superset);
1146 if (subset == 0)
1147 superset_entry->has_zero_child = 1;
1148 else
1150 subset_entry = get_alias_set_entry (subset);
1151 if (!superset_entry->children)
1152 superset_entry->children
1153 = hash_map<alias_set_hash, int>::create_ggc (64);
1154 /* If there is an entry for the subset, enter all of its children
1155 (if they are not already present) as children of the SUPERSET. */
1156 if (subset_entry)
1158 if (subset_entry->has_zero_child)
1159 superset_entry->has_zero_child = true;
1160 if (subset_entry->has_pointer)
1161 superset_entry->has_pointer = true;
1163 if (subset_entry->children)
1165 hash_map<alias_set_hash, int>::iterator iter
1166 = subset_entry->children->begin ();
1167 for (; iter != subset_entry->children->end (); ++iter)
1168 superset_entry->children->put ((*iter).first, (*iter).second);
1172 /* Enter the SUBSET itself as a child of the SUPERSET. */
1173 superset_entry->children->put (subset, 0);
1177 /* Record that component types of TYPE, if any, are part of that type for
1178 aliasing purposes. For record types, we only record component types
1179 for fields that are not marked non-addressable. For array types, we
1180 only record the component type if it is not marked non-aliased. */
1182 void
1183 record_component_aliases (tree type)
1185 alias_set_type superset = get_alias_set (type);
1186 tree field;
1188 if (superset == 0)
1189 return;
1191 switch (TREE_CODE (type))
1193 case RECORD_TYPE:
1194 case UNION_TYPE:
1195 case QUAL_UNION_TYPE:
1196 for (field = TYPE_FIELDS (type); field != 0; field = DECL_CHAIN (field))
1197 if (TREE_CODE (field) == FIELD_DECL && !DECL_NONADDRESSABLE_P (field))
1199 /* LTO type merging does not make any difference between
1200 component pointer types. We may have
1202 struct foo {int *a;};
1204 as TYPE_CANONICAL of
1206 struct bar {float *a;};
1208 Because accesses to int * and float * do not alias, we would get
1209 false negative when accessing the same memory location by
1210 float ** and bar *. We thus record the canonical type as:
1212 struct {void *a;};
1214 void * is special cased and works as a universal pointer type.
1215 Accesses to it conflicts with accesses to any other pointer
1216 type. */
1217 tree t = TREE_TYPE (field);
1218 if (in_lto_p)
1220 /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
1221 element type and that type has to be normalized to void *,
1222 too, in the case it is a pointer. */
1223 while (!canonical_type_used_p (t) && !POINTER_TYPE_P (t))
1225 gcc_checking_assert (TYPE_STRUCTURAL_EQUALITY_P (t));
1226 t = TREE_TYPE (t);
1228 if (POINTER_TYPE_P (t))
1229 t = ptr_type_node;
1230 else if (flag_checking)
1231 gcc_checking_assert (get_alias_set (t)
1232 == get_alias_set (TREE_TYPE (field)));
1235 record_alias_subset (superset, get_alias_set (t));
1237 break;
1239 case COMPLEX_TYPE:
1240 record_alias_subset (superset, get_alias_set (TREE_TYPE (type)));
1241 break;
1243 /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
1244 element type. */
1246 default:
1247 break;
1251 /* Allocate an alias set for use in storing and reading from the varargs
1252 spill area. */
1254 static GTY(()) alias_set_type varargs_set = -1;
1256 alias_set_type
1257 get_varargs_alias_set (void)
1259 #if 1
1260 /* We now lower VA_ARG_EXPR, and there's currently no way to attach the
1261 varargs alias set to an INDIRECT_REF (FIXME!), so we can't
1262 consistently use the varargs alias set for loads from the varargs
1263 area. So don't use it anywhere. */
1264 return 0;
1265 #else
1266 if (varargs_set == -1)
1267 varargs_set = new_alias_set ();
1269 return varargs_set;
1270 #endif
1273 /* Likewise, but used for the fixed portions of the frame, e.g., register
1274 save areas. */
1276 static GTY(()) alias_set_type frame_set = -1;
1278 alias_set_type
1279 get_frame_alias_set (void)
1281 if (frame_set == -1)
1282 frame_set = new_alias_set ();
1284 return frame_set;
1287 /* Create a new, unique base with id ID. */
1289 static rtx
1290 unique_base_value (HOST_WIDE_INT id)
1292 return gen_rtx_ADDRESS (Pmode, id);
1295 /* Return true if accesses based on any other base value cannot alias
1296 those based on X. */
1298 static bool
1299 unique_base_value_p (rtx x)
1301 return GET_CODE (x) == ADDRESS && GET_MODE (x) == Pmode;
1304 /* Return true if X is known to be a base value. */
1306 static bool
1307 known_base_value_p (rtx x)
1309 switch (GET_CODE (x))
1311 case LABEL_REF:
1312 case SYMBOL_REF:
1313 return true;
1315 case ADDRESS:
1316 /* Arguments may or may not be bases; we don't know for sure. */
1317 return GET_MODE (x) != VOIDmode;
1319 default:
1320 return false;
1324 /* Inside SRC, the source of a SET, find a base address. */
1326 static rtx
1327 find_base_value (rtx src)
1329 unsigned int regno;
1330 scalar_int_mode int_mode;
1332 #if defined (FIND_BASE_TERM)
1333 /* Try machine-dependent ways to find the base term. */
1334 src = FIND_BASE_TERM (src);
1335 #endif
1337 switch (GET_CODE (src))
1339 case SYMBOL_REF:
1340 case LABEL_REF:
1341 return src;
1343 case REG:
1344 regno = REGNO (src);
1345 /* At the start of a function, argument registers have known base
1346 values which may be lost later. Returning an ADDRESS
1347 expression here allows optimization based on argument values
1348 even when the argument registers are used for other purposes. */
1349 if (regno < FIRST_PSEUDO_REGISTER && copying_arguments)
1350 return new_reg_base_value[regno];
1352 /* If a pseudo has a known base value, return it. Do not do this
1353 for non-fixed hard regs since it can result in a circular
1354 dependency chain for registers which have values at function entry.
1356 The test above is not sufficient because the scheduler may move
1357 a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN. */
1358 if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno])
1359 && regno < vec_safe_length (reg_base_value))
1361 /* If we're inside init_alias_analysis, use new_reg_base_value
1362 to reduce the number of relaxation iterations. */
1363 if (new_reg_base_value && new_reg_base_value[regno]
1364 && DF_REG_DEF_COUNT (regno) == 1)
1365 return new_reg_base_value[regno];
1367 if ((*reg_base_value)[regno])
1368 return (*reg_base_value)[regno];
1371 return 0;
1373 case MEM:
1374 /* Check for an argument passed in memory. Only record in the
1375 copying-arguments block; it is too hard to track changes
1376 otherwise. */
1377 if (copying_arguments
1378 && (XEXP (src, 0) == arg_pointer_rtx
1379 || (GET_CODE (XEXP (src, 0)) == PLUS
1380 && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx)))
1381 return arg_base_value;
1382 return 0;
1384 case CONST:
1385 src = XEXP (src, 0);
1386 if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS)
1387 break;
1389 /* fall through */
1391 case PLUS:
1392 case MINUS:
1394 rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1);
1396 /* If either operand is a REG that is a known pointer, then it
1397 is the base. */
1398 if (REG_P (src_0) && REG_POINTER (src_0))
1399 return find_base_value (src_0);
1400 if (REG_P (src_1) && REG_POINTER (src_1))
1401 return find_base_value (src_1);
1403 /* If either operand is a REG, then see if we already have
1404 a known value for it. */
1405 if (REG_P (src_0))
1407 temp = find_base_value (src_0);
1408 if (temp != 0)
1409 src_0 = temp;
1412 if (REG_P (src_1))
1414 temp = find_base_value (src_1);
1415 if (temp!= 0)
1416 src_1 = temp;
1419 /* If either base is named object or a special address
1420 (like an argument or stack reference), then use it for the
1421 base term. */
1422 if (src_0 != 0 && known_base_value_p (src_0))
1423 return src_0;
1425 if (src_1 != 0 && known_base_value_p (src_1))
1426 return src_1;
1428 /* Guess which operand is the base address:
1429 If either operand is a symbol, then it is the base. If
1430 either operand is a CONST_INT, then the other is the base. */
1431 if (CONST_INT_P (src_1) || CONSTANT_P (src_0))
1432 return find_base_value (src_0);
1433 else if (CONST_INT_P (src_0) || CONSTANT_P (src_1))
1434 return find_base_value (src_1);
1436 return 0;
1439 case LO_SUM:
1440 /* The standard form is (lo_sum reg sym) so look only at the
1441 second operand. */
1442 return find_base_value (XEXP (src, 1));
1444 case AND:
1445 /* If the second operand is constant set the base
1446 address to the first operand. */
1447 if (CONST_INT_P (XEXP (src, 1)) && INTVAL (XEXP (src, 1)) != 0)
1448 return find_base_value (XEXP (src, 0));
1449 return 0;
1451 case TRUNCATE:
1452 /* As we do not know which address space the pointer is referring to, we can
1453 handle this only if the target does not support different pointer or
1454 address modes depending on the address space. */
1455 if (!target_default_pointer_address_modes_p ())
1456 break;
1457 if (!is_a <scalar_int_mode> (GET_MODE (src), &int_mode)
1458 || GET_MODE_PRECISION (int_mode) < GET_MODE_PRECISION (Pmode))
1459 break;
1460 /* Fall through. */
1461 case HIGH:
1462 case PRE_INC:
1463 case PRE_DEC:
1464 case POST_INC:
1465 case POST_DEC:
1466 case PRE_MODIFY:
1467 case POST_MODIFY:
1468 return find_base_value (XEXP (src, 0));
1470 case ZERO_EXTEND:
1471 case SIGN_EXTEND: /* used for NT/Alpha pointers */
1472 /* As we do not know which address space the pointer is referring to, we can
1473 handle this only if the target does not support different pointer or
1474 address modes depending on the address space. */
1475 if (!target_default_pointer_address_modes_p ())
1476 break;
1479 rtx temp = find_base_value (XEXP (src, 0));
1481 if (temp != 0 && CONSTANT_P (temp))
1482 temp = convert_memory_address (Pmode, temp);
1484 return temp;
1487 default:
1488 break;
1491 return 0;
1494 /* Called from init_alias_analysis indirectly through note_stores,
1495 or directly if DEST is a register with a REG_NOALIAS note attached.
1496 SET is null in the latter case. */
1498 /* While scanning insns to find base values, reg_seen[N] is nonzero if
1499 register N has been set in this function. */
1500 static sbitmap reg_seen;
1502 static void
1503 record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
1505 unsigned regno;
1506 rtx src;
1507 int n;
1509 if (!REG_P (dest))
1510 return;
1512 regno = REGNO (dest);
1514 gcc_checking_assert (regno < reg_base_value->length ());
1516 n = REG_NREGS (dest);
1517 if (n != 1)
1519 while (--n >= 0)
1521 bitmap_set_bit (reg_seen, regno + n);
1522 new_reg_base_value[regno + n] = 0;
1524 return;
1527 if (set)
1529 /* A CLOBBER wipes out any old value but does not prevent a previously
1530 unset register from acquiring a base address (i.e. reg_seen is not
1531 set). */
1532 if (GET_CODE (set) == CLOBBER)
1534 new_reg_base_value[regno] = 0;
1535 return;
1537 /* A CLOBBER_HIGH only wipes out the old value if the mode of the old
1538 value is greater than that of the clobber. */
1539 else if (GET_CODE (set) == CLOBBER_HIGH)
1541 if (new_reg_base_value[regno] != 0
1542 && reg_is_clobbered_by_clobber_high (
1543 regno, GET_MODE (new_reg_base_value[regno]), XEXP (set, 0)))
1544 new_reg_base_value[regno] = 0;
1545 return;
1548 src = SET_SRC (set);
1550 else
1552 /* There's a REG_NOALIAS note against DEST. */
1553 if (bitmap_bit_p (reg_seen, regno))
1555 new_reg_base_value[regno] = 0;
1556 return;
1558 bitmap_set_bit (reg_seen, regno);
1559 new_reg_base_value[regno] = unique_base_value (unique_id++);
1560 return;
1563 /* If this is not the first set of REGNO, see whether the new value
1564 is related to the old one. There are two cases of interest:
1566 (1) The register might be assigned an entirely new value
1567 that has the same base term as the original set.
1569 (2) The set might be a simple self-modification that
1570 cannot change REGNO's base value.
1572 If neither case holds, reject the original base value as invalid.
1573 Note that the following situation is not detected:
1575 extern int x, y; int *p = &x; p += (&y-&x);
1577 ANSI C does not allow computing the difference of addresses
1578 of distinct top level objects. */
1579 if (new_reg_base_value[regno] != 0
1580 && find_base_value (src) != new_reg_base_value[regno])
1581 switch (GET_CODE (src))
1583 case LO_SUM:
1584 case MINUS:
1585 if (XEXP (src, 0) != dest && XEXP (src, 1) != dest)
1586 new_reg_base_value[regno] = 0;
1587 break;
1588 case PLUS:
1589 /* If the value we add in the PLUS is also a valid base value,
1590 this might be the actual base value, and the original value
1591 an index. */
1593 rtx other = NULL_RTX;
1595 if (XEXP (src, 0) == dest)
1596 other = XEXP (src, 1);
1597 else if (XEXP (src, 1) == dest)
1598 other = XEXP (src, 0);
1600 if (! other || find_base_value (other))
1601 new_reg_base_value[regno] = 0;
1602 break;
1604 case AND:
1605 if (XEXP (src, 0) != dest || !CONST_INT_P (XEXP (src, 1)))
1606 new_reg_base_value[regno] = 0;
1607 break;
1608 default:
1609 new_reg_base_value[regno] = 0;
1610 break;
1612 /* If this is the first set of a register, record the value. */
1613 else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
1614 && ! bitmap_bit_p (reg_seen, regno) && new_reg_base_value[regno] == 0)
1615 new_reg_base_value[regno] = find_base_value (src);
1617 bitmap_set_bit (reg_seen, regno);
1620 /* Return REG_BASE_VALUE for REGNO. Selective scheduler uses this to avoid
1621 using hard registers with non-null REG_BASE_VALUE for renaming. */
1623 get_reg_base_value (unsigned int regno)
1625 return (*reg_base_value)[regno];
1628 /* If a value is known for REGNO, return it. */
1631 get_reg_known_value (unsigned int regno)
1633 if (regno >= FIRST_PSEUDO_REGISTER)
1635 regno -= FIRST_PSEUDO_REGISTER;
1636 if (regno < vec_safe_length (reg_known_value))
1637 return (*reg_known_value)[regno];
1639 return NULL;
1642 /* Set it. */
1644 static void
1645 set_reg_known_value (unsigned int regno, rtx val)
1647 if (regno >= FIRST_PSEUDO_REGISTER)
1649 regno -= FIRST_PSEUDO_REGISTER;
1650 if (regno < vec_safe_length (reg_known_value))
1651 (*reg_known_value)[regno] = val;
1655 /* Similarly for reg_known_equiv_p. */
1657 bool
1658 get_reg_known_equiv_p (unsigned int regno)
1660 if (regno >= FIRST_PSEUDO_REGISTER)
1662 regno -= FIRST_PSEUDO_REGISTER;
1663 if (regno < vec_safe_length (reg_known_value))
1664 return bitmap_bit_p (reg_known_equiv_p, regno);
1666 return false;
1669 static void
1670 set_reg_known_equiv_p (unsigned int regno, bool val)
1672 if (regno >= FIRST_PSEUDO_REGISTER)
1674 regno -= FIRST_PSEUDO_REGISTER;
1675 if (regno < vec_safe_length (reg_known_value))
1677 if (val)
1678 bitmap_set_bit (reg_known_equiv_p, regno);
1679 else
1680 bitmap_clear_bit (reg_known_equiv_p, regno);
1686 /* Returns a canonical version of X, from the point of view alias
1687 analysis. (For example, if X is a MEM whose address is a register,
1688 and the register has a known value (say a SYMBOL_REF), then a MEM
1689 whose address is the SYMBOL_REF is returned.) */
1692 canon_rtx (rtx x)
1694 /* Recursively look for equivalences. */
1695 if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1697 rtx t = get_reg_known_value (REGNO (x));
1698 if (t == x)
1699 return x;
1700 if (t)
1701 return canon_rtx (t);
1704 if (GET_CODE (x) == PLUS)
1706 rtx x0 = canon_rtx (XEXP (x, 0));
1707 rtx x1 = canon_rtx (XEXP (x, 1));
1709 if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
1710 return simplify_gen_binary (PLUS, GET_MODE (x), x0, x1);
1713 /* This gives us much better alias analysis when called from
1714 the loop optimizer. Note we want to leave the original
1715 MEM alone, but need to return the canonicalized MEM with
1716 all the flags with their original values. */
1717 else if (MEM_P (x))
1718 x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0)));
1720 return x;
1723 /* Return 1 if X and Y are identical-looking rtx's.
1724 Expect that X and Y has been already canonicalized.
1726 We use the data in reg_known_value above to see if two registers with
1727 different numbers are, in fact, equivalent. */
1729 static int
1730 rtx_equal_for_memref_p (const_rtx x, const_rtx y)
1732 int i;
1733 int j;
1734 enum rtx_code code;
1735 const char *fmt;
1737 if (x == 0 && y == 0)
1738 return 1;
1739 if (x == 0 || y == 0)
1740 return 0;
1742 if (x == y)
1743 return 1;
1745 code = GET_CODE (x);
1746 /* Rtx's of different codes cannot be equal. */
1747 if (code != GET_CODE (y))
1748 return 0;
1750 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1751 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1753 if (GET_MODE (x) != GET_MODE (y))
1754 return 0;
1756 /* Some RTL can be compared without a recursive examination. */
1757 switch (code)
1759 case REG:
1760 return REGNO (x) == REGNO (y);
1762 case LABEL_REF:
1763 return label_ref_label (x) == label_ref_label (y);
1765 case SYMBOL_REF:
1766 return compare_base_symbol_refs (x, y) == 1;
1768 case ENTRY_VALUE:
1769 /* This is magic, don't go through canonicalization et al. */
1770 return rtx_equal_p (ENTRY_VALUE_EXP (x), ENTRY_VALUE_EXP (y));
1772 case VALUE:
1773 CASE_CONST_UNIQUE:
1774 /* Pointer equality guarantees equality for these nodes. */
1775 return 0;
1777 default:
1778 break;
1781 /* canon_rtx knows how to handle plus. No need to canonicalize. */
1782 if (code == PLUS)
1783 return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
1784 && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
1785 || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
1786 && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
1787 /* For commutative operations, the RTX match if the operand match in any
1788 order. Also handle the simple binary and unary cases without a loop. */
1789 if (COMMUTATIVE_P (x))
1791 rtx xop0 = canon_rtx (XEXP (x, 0));
1792 rtx yop0 = canon_rtx (XEXP (y, 0));
1793 rtx yop1 = canon_rtx (XEXP (y, 1));
1795 return ((rtx_equal_for_memref_p (xop0, yop0)
1796 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop1))
1797 || (rtx_equal_for_memref_p (xop0, yop1)
1798 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop0)));
1800 else if (NON_COMMUTATIVE_P (x))
1802 return (rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
1803 canon_rtx (XEXP (y, 0)))
1804 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)),
1805 canon_rtx (XEXP (y, 1))));
1807 else if (UNARY_P (x))
1808 return rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
1809 canon_rtx (XEXP (y, 0)));
1811 /* Compare the elements. If any pair of corresponding elements
1812 fail to match, return 0 for the whole things.
1814 Limit cases to types which actually appear in addresses. */
1816 fmt = GET_RTX_FORMAT (code);
1817 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1819 switch (fmt[i])
1821 case 'i':
1822 if (XINT (x, i) != XINT (y, i))
1823 return 0;
1824 break;
1826 case 'p':
1827 if (maybe_ne (SUBREG_BYTE (x), SUBREG_BYTE (y)))
1828 return 0;
1829 break;
1831 case 'E':
1832 /* Two vectors must have the same length. */
1833 if (XVECLEN (x, i) != XVECLEN (y, i))
1834 return 0;
1836 /* And the corresponding elements must match. */
1837 for (j = 0; j < XVECLEN (x, i); j++)
1838 if (rtx_equal_for_memref_p (canon_rtx (XVECEXP (x, i, j)),
1839 canon_rtx (XVECEXP (y, i, j))) == 0)
1840 return 0;
1841 break;
1843 case 'e':
1844 if (rtx_equal_for_memref_p (canon_rtx (XEXP (x, i)),
1845 canon_rtx (XEXP (y, i))) == 0)
1846 return 0;
1847 break;
1849 /* This can happen for asm operands. */
1850 case 's':
1851 if (strcmp (XSTR (x, i), XSTR (y, i)))
1852 return 0;
1853 break;
1855 /* This can happen for an asm which clobbers memory. */
1856 case '0':
1857 break;
1859 /* It is believed that rtx's at this level will never
1860 contain anything but integers and other rtx's,
1861 except for within LABEL_REFs and SYMBOL_REFs. */
1862 default:
1863 gcc_unreachable ();
1866 return 1;
1869 static rtx
1870 find_base_term (rtx x, vec<std::pair<cselib_val *,
1871 struct elt_loc_list *> > &visited_vals)
1873 cselib_val *val;
1874 struct elt_loc_list *l, *f;
1875 rtx ret;
1876 scalar_int_mode int_mode;
1878 #if defined (FIND_BASE_TERM)
1879 /* Try machine-dependent ways to find the base term. */
1880 x = FIND_BASE_TERM (x);
1881 #endif
1883 switch (GET_CODE (x))
1885 case REG:
1886 return REG_BASE_VALUE (x);
1888 case TRUNCATE:
1889 /* As we do not know which address space the pointer is referring to, we can
1890 handle this only if the target does not support different pointer or
1891 address modes depending on the address space. */
1892 if (!target_default_pointer_address_modes_p ())
1893 return 0;
1894 if (!is_a <scalar_int_mode> (GET_MODE (x), &int_mode)
1895 || GET_MODE_PRECISION (int_mode) < GET_MODE_PRECISION (Pmode))
1896 return 0;
1897 /* Fall through. */
1898 case HIGH:
1899 case PRE_INC:
1900 case PRE_DEC:
1901 case POST_INC:
1902 case POST_DEC:
1903 case PRE_MODIFY:
1904 case POST_MODIFY:
1905 return find_base_term (XEXP (x, 0), visited_vals);
1907 case ZERO_EXTEND:
1908 case SIGN_EXTEND: /* Used for Alpha/NT pointers */
1909 /* As we do not know which address space the pointer is referring to, we can
1910 handle this only if the target does not support different pointer or
1911 address modes depending on the address space. */
1912 if (!target_default_pointer_address_modes_p ())
1913 return 0;
1916 rtx temp = find_base_term (XEXP (x, 0), visited_vals);
1918 if (temp != 0 && CONSTANT_P (temp))
1919 temp = convert_memory_address (Pmode, temp);
1921 return temp;
1924 case VALUE:
1925 val = CSELIB_VAL_PTR (x);
1926 ret = NULL_RTX;
1928 if (!val)
1929 return ret;
1931 if (cselib_sp_based_value_p (val))
1932 return static_reg_base_value[STACK_POINTER_REGNUM];
1934 f = val->locs;
1935 /* Reset val->locs to avoid infinite recursion. */
1936 if (f)
1937 visited_vals.safe_push (std::make_pair (val, f));
1938 val->locs = NULL;
1940 for (l = f; l; l = l->next)
1941 if (GET_CODE (l->loc) == VALUE
1942 && CSELIB_VAL_PTR (l->loc)->locs
1943 && !CSELIB_VAL_PTR (l->loc)->locs->next
1944 && CSELIB_VAL_PTR (l->loc)->locs->loc == x)
1945 continue;
1946 else if ((ret = find_base_term (l->loc, visited_vals)) != 0)
1947 break;
1949 return ret;
1951 case LO_SUM:
1952 /* The standard form is (lo_sum reg sym) so look only at the
1953 second operand. */
1954 return find_base_term (XEXP (x, 1), visited_vals);
1956 case CONST:
1957 x = XEXP (x, 0);
1958 if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS)
1959 return 0;
1960 /* Fall through. */
1961 case PLUS:
1962 case MINUS:
1964 rtx tmp1 = XEXP (x, 0);
1965 rtx tmp2 = XEXP (x, 1);
1967 /* This is a little bit tricky since we have to determine which of
1968 the two operands represents the real base address. Otherwise this
1969 routine may return the index register instead of the base register.
1971 That may cause us to believe no aliasing was possible, when in
1972 fact aliasing is possible.
1974 We use a few simple tests to guess the base register. Additional
1975 tests can certainly be added. For example, if one of the operands
1976 is a shift or multiply, then it must be the index register and the
1977 other operand is the base register. */
1979 if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2))
1980 return find_base_term (tmp2, visited_vals);
1982 /* If either operand is known to be a pointer, then prefer it
1983 to determine the base term. */
1984 if (REG_P (tmp1) && REG_POINTER (tmp1))
1986 else if (REG_P (tmp2) && REG_POINTER (tmp2))
1987 std::swap (tmp1, tmp2);
1988 /* If second argument is constant which has base term, prefer it
1989 over variable tmp1. See PR64025. */
1990 else if (CONSTANT_P (tmp2) && !CONST_INT_P (tmp2))
1991 std::swap (tmp1, tmp2);
1993 /* Go ahead and find the base term for both operands. If either base
1994 term is from a pointer or is a named object or a special address
1995 (like an argument or stack reference), then use it for the
1996 base term. */
1997 rtx base = find_base_term (tmp1, visited_vals);
1998 if (base != NULL_RTX
1999 && ((REG_P (tmp1) && REG_POINTER (tmp1))
2000 || known_base_value_p (base)))
2001 return base;
2002 base = find_base_term (tmp2, visited_vals);
2003 if (base != NULL_RTX
2004 && ((REG_P (tmp2) && REG_POINTER (tmp2))
2005 || known_base_value_p (base)))
2006 return base;
2008 /* We could not determine which of the two operands was the
2009 base register and which was the index. So we can determine
2010 nothing from the base alias check. */
2011 return 0;
2014 case AND:
2015 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) != 0)
2016 return find_base_term (XEXP (x, 0), visited_vals);
2017 return 0;
2019 case SYMBOL_REF:
2020 case LABEL_REF:
2021 return x;
2023 default:
2024 return 0;
2028 /* Wrapper around the worker above which removes locs from visited VALUEs
2029 to avoid visiting them multiple times. We unwind that changes here. */
2031 static rtx
2032 find_base_term (rtx x)
2034 auto_vec<std::pair<cselib_val *, struct elt_loc_list *>, 32> visited_vals;
2035 rtx res = find_base_term (x, visited_vals);
2036 for (unsigned i = 0; i < visited_vals.length (); ++i)
2037 visited_vals[i].first->locs = visited_vals[i].second;
2038 return res;
2041 /* Return true if accesses to address X may alias accesses based
2042 on the stack pointer. */
2044 bool
2045 may_be_sp_based_p (rtx x)
2047 rtx base = find_base_term (x);
2048 return !base || base == static_reg_base_value[STACK_POINTER_REGNUM];
2051 /* BASE1 and BASE2 are decls. Return 1 if they refer to same object, 0
2052 if they refer to different objects and -1 if we cannot decide. */
2055 compare_base_decls (tree base1, tree base2)
2057 int ret;
2058 gcc_checking_assert (DECL_P (base1) && DECL_P (base2));
2059 if (base1 == base2)
2060 return 1;
2062 /* If we have two register decls with register specification we
2063 cannot decide unless their assembler names are the same. */
2064 if (DECL_REGISTER (base1)
2065 && DECL_REGISTER (base2)
2066 && HAS_DECL_ASSEMBLER_NAME_P (base1)
2067 && HAS_DECL_ASSEMBLER_NAME_P (base2)
2068 && DECL_ASSEMBLER_NAME_SET_P (base1)
2069 && DECL_ASSEMBLER_NAME_SET_P (base2))
2071 if (DECL_ASSEMBLER_NAME_RAW (base1) == DECL_ASSEMBLER_NAME_RAW (base2))
2072 return 1;
2073 return -1;
2076 /* Declarations of non-automatic variables may have aliases. All other
2077 decls are unique. */
2078 if (!decl_in_symtab_p (base1)
2079 || !decl_in_symtab_p (base2))
2080 return 0;
2082 /* Don't cause symbols to be inserted by the act of checking. */
2083 symtab_node *node1 = symtab_node::get (base1);
2084 if (!node1)
2085 return 0;
2086 symtab_node *node2 = symtab_node::get (base2);
2087 if (!node2)
2088 return 0;
2090 ret = node1->equal_address_to (node2, true);
2091 return ret;
2094 /* Same as compare_base_decls but for SYMBOL_REF. */
2096 static int
2097 compare_base_symbol_refs (const_rtx x_base, const_rtx y_base)
2099 tree x_decl = SYMBOL_REF_DECL (x_base);
2100 tree y_decl = SYMBOL_REF_DECL (y_base);
2101 bool binds_def = true;
2103 if (XSTR (x_base, 0) == XSTR (y_base, 0))
2104 return 1;
2105 if (x_decl && y_decl)
2106 return compare_base_decls (x_decl, y_decl);
2107 if (x_decl || y_decl)
2109 if (!x_decl)
2111 std::swap (x_decl, y_decl);
2112 std::swap (x_base, y_base);
2114 /* We handle specially only section anchors and assume that other
2115 labels may overlap with user variables in an arbitrary way. */
2116 if (!SYMBOL_REF_HAS_BLOCK_INFO_P (y_base))
2117 return -1;
2118 /* Anchors contains static VAR_DECLs and CONST_DECLs. We are safe
2119 to ignore CONST_DECLs because they are readonly. */
2120 if (!VAR_P (x_decl)
2121 || (!TREE_STATIC (x_decl) && !TREE_PUBLIC (x_decl)))
2122 return 0;
2124 symtab_node *x_node = symtab_node::get_create (x_decl)
2125 ->ultimate_alias_target ();
2126 /* External variable cannot be in section anchor. */
2127 if (!x_node->definition)
2128 return 0;
2129 x_base = XEXP (DECL_RTL (x_node->decl), 0);
2130 /* If not in anchor, we can disambiguate. */
2131 if (!SYMBOL_REF_HAS_BLOCK_INFO_P (x_base))
2132 return 0;
2134 /* We have an alias of anchored variable. If it can be interposed;
2135 we must assume it may or may not alias its anchor. */
2136 binds_def = decl_binds_to_current_def_p (x_decl);
2138 /* If we have variable in section anchor, we can compare by offset. */
2139 if (SYMBOL_REF_HAS_BLOCK_INFO_P (x_base)
2140 && SYMBOL_REF_HAS_BLOCK_INFO_P (y_base))
2142 if (SYMBOL_REF_BLOCK (x_base) != SYMBOL_REF_BLOCK (y_base))
2143 return 0;
2144 if (SYMBOL_REF_BLOCK_OFFSET (x_base) == SYMBOL_REF_BLOCK_OFFSET (y_base))
2145 return binds_def ? 1 : -1;
2146 if (SYMBOL_REF_ANCHOR_P (x_base) != SYMBOL_REF_ANCHOR_P (y_base))
2147 return -1;
2148 return 0;
2150 /* In general we assume that memory locations pointed to by different labels
2151 may overlap in undefined ways. */
2152 return -1;
2155 /* Return 0 if the addresses X and Y are known to point to different
2156 objects, 1 if they might be pointers to the same object. */
2158 static int
2159 base_alias_check (rtx x, rtx x_base, rtx y, rtx y_base,
2160 machine_mode x_mode, machine_mode y_mode)
2162 /* If the address itself has no known base see if a known equivalent
2163 value has one. If either address still has no known base, nothing
2164 is known about aliasing. */
2165 if (x_base == 0)
2167 rtx x_c;
2169 if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x)
2170 return 1;
2172 x_base = find_base_term (x_c);
2173 if (x_base == 0)
2174 return 1;
2177 if (y_base == 0)
2179 rtx y_c;
2180 if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y)
2181 return 1;
2183 y_base = find_base_term (y_c);
2184 if (y_base == 0)
2185 return 1;
2188 /* If the base addresses are equal nothing is known about aliasing. */
2189 if (rtx_equal_p (x_base, y_base))
2190 return 1;
2192 /* The base addresses are different expressions. If they are not accessed
2193 via AND, there is no conflict. We can bring knowledge of object
2194 alignment into play here. For example, on alpha, "char a, b;" can
2195 alias one another, though "char a; long b;" cannot. AND addresses may
2196 implicitly alias surrounding objects; i.e. unaligned access in DImode
2197 via AND address can alias all surrounding object types except those
2198 with aligment 8 or higher. */
2199 if (GET_CODE (x) == AND && GET_CODE (y) == AND)
2200 return 1;
2201 if (GET_CODE (x) == AND
2202 && (!CONST_INT_P (XEXP (x, 1))
2203 || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
2204 return 1;
2205 if (GET_CODE (y) == AND
2206 && (!CONST_INT_P (XEXP (y, 1))
2207 || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
2208 return 1;
2210 /* Differing symbols not accessed via AND never alias. */
2211 if (GET_CODE (x_base) == SYMBOL_REF && GET_CODE (y_base) == SYMBOL_REF)
2212 return compare_base_symbol_refs (x_base, y_base) != 0;
2214 if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
2215 return 0;
2217 if (unique_base_value_p (x_base) || unique_base_value_p (y_base))
2218 return 0;
2220 return 1;
2223 /* Return TRUE if EXPR refers to a VALUE whose uid is greater than
2224 (or equal to) that of V. */
2226 static bool
2227 refs_newer_value_p (const_rtx expr, rtx v)
2229 int minuid = CSELIB_VAL_PTR (v)->uid;
2230 subrtx_iterator::array_type array;
2231 FOR_EACH_SUBRTX (iter, array, expr, NONCONST)
2232 if (GET_CODE (*iter) == VALUE && CSELIB_VAL_PTR (*iter)->uid >= minuid)
2233 return true;
2234 return false;
2237 /* Convert the address X into something we can use. This is done by returning
2238 it unchanged unless it is a VALUE or VALUE +/- constant; for VALUE
2239 we call cselib to get a more useful rtx. */
2242 get_addr (rtx x)
2244 cselib_val *v;
2245 struct elt_loc_list *l;
2247 if (GET_CODE (x) != VALUE)
2249 if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
2250 && GET_CODE (XEXP (x, 0)) == VALUE
2251 && CONST_SCALAR_INT_P (XEXP (x, 1)))
2253 rtx op0 = get_addr (XEXP (x, 0));
2254 if (op0 != XEXP (x, 0))
2256 poly_int64 c;
2257 if (GET_CODE (x) == PLUS
2258 && poly_int_rtx_p (XEXP (x, 1), &c))
2259 return plus_constant (GET_MODE (x), op0, c);
2260 return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
2261 op0, XEXP (x, 1));
2264 return x;
2266 v = CSELIB_VAL_PTR (x);
2267 if (v)
2269 bool have_equivs = cselib_have_permanent_equivalences ();
2270 if (have_equivs)
2271 v = canonical_cselib_val (v);
2272 for (l = v->locs; l; l = l->next)
2273 if (CONSTANT_P (l->loc))
2274 return l->loc;
2275 for (l = v->locs; l; l = l->next)
2276 if (!REG_P (l->loc) && !MEM_P (l->loc)
2277 /* Avoid infinite recursion when potentially dealing with
2278 var-tracking artificial equivalences, by skipping the
2279 equivalences themselves, and not choosing expressions
2280 that refer to newer VALUEs. */
2281 && (!have_equivs
2282 || (GET_CODE (l->loc) != VALUE
2283 && !refs_newer_value_p (l->loc, x))))
2284 return l->loc;
2285 if (have_equivs)
2287 for (l = v->locs; l; l = l->next)
2288 if (REG_P (l->loc)
2289 || (GET_CODE (l->loc) != VALUE
2290 && !refs_newer_value_p (l->loc, x)))
2291 return l->loc;
2292 /* Return the canonical value. */
2293 return v->val_rtx;
2295 if (v->locs)
2296 return v->locs->loc;
2298 return x;
2301 /* Return the address of the (N_REFS + 1)th memory reference to ADDR
2302 where SIZE is the size in bytes of the memory reference. If ADDR
2303 is not modified by the memory reference then ADDR is returned. */
2305 static rtx
2306 addr_side_effect_eval (rtx addr, poly_int64 size, int n_refs)
2308 poly_int64 offset = 0;
2310 switch (GET_CODE (addr))
2312 case PRE_INC:
2313 offset = (n_refs + 1) * size;
2314 break;
2315 case PRE_DEC:
2316 offset = -(n_refs + 1) * size;
2317 break;
2318 case POST_INC:
2319 offset = n_refs * size;
2320 break;
2321 case POST_DEC:
2322 offset = -n_refs * size;
2323 break;
2325 default:
2326 return addr;
2329 addr = plus_constant (GET_MODE (addr), XEXP (addr, 0), offset);
2330 addr = canon_rtx (addr);
2332 return addr;
2335 /* Return TRUE if an object X sized at XSIZE bytes and another object
2336 Y sized at YSIZE bytes, starting C bytes after X, may overlap. If
2337 any of the sizes is zero, assume an overlap, otherwise use the
2338 absolute value of the sizes as the actual sizes. */
2340 static inline bool
2341 offset_overlap_p (poly_int64 c, poly_int64 xsize, poly_int64 ysize)
2343 if (known_eq (xsize, 0) || known_eq (ysize, 0))
2344 return true;
2346 if (maybe_ge (c, 0))
2347 return maybe_gt (maybe_lt (xsize, 0) ? -xsize : xsize, c);
2348 else
2349 return maybe_gt (maybe_lt (ysize, 0) ? -ysize : ysize, -c);
2352 /* Return one if X and Y (memory addresses) reference the
2353 same location in memory or if the references overlap.
2354 Return zero if they do not overlap, else return
2355 minus one in which case they still might reference the same location.
2357 C is an offset accumulator. When
2358 C is nonzero, we are testing aliases between X and Y + C.
2359 XSIZE is the size in bytes of the X reference,
2360 similarly YSIZE is the size in bytes for Y.
2361 Expect that canon_rtx has been already called for X and Y.
2363 If XSIZE or YSIZE is zero, we do not know the amount of memory being
2364 referenced (the reference was BLKmode), so make the most pessimistic
2365 assumptions.
2367 If XSIZE or YSIZE is negative, we may access memory outside the object
2368 being referenced as a side effect. This can happen when using AND to
2369 align memory references, as is done on the Alpha.
2371 Nice to notice that varying addresses cannot conflict with fp if no
2372 local variables had their addresses taken, but that's too hard now.
2374 ??? Contrary to the tree alias oracle this does not return
2375 one for X + non-constant and Y + non-constant when X and Y are equal.
2376 If that is fixed the TBAA hack for union type-punning can be removed. */
2378 static int
2379 memrefs_conflict_p (poly_int64 xsize, rtx x, poly_int64 ysize, rtx y,
2380 poly_int64 c)
2382 if (GET_CODE (x) == VALUE)
2384 if (REG_P (y))
2386 struct elt_loc_list *l = NULL;
2387 if (CSELIB_VAL_PTR (x))
2388 for (l = canonical_cselib_val (CSELIB_VAL_PTR (x))->locs;
2389 l; l = l->next)
2390 if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, y))
2391 break;
2392 if (l)
2393 x = y;
2394 else
2395 x = get_addr (x);
2397 /* Don't call get_addr if y is the same VALUE. */
2398 else if (x != y)
2399 x = get_addr (x);
2401 if (GET_CODE (y) == VALUE)
2403 if (REG_P (x))
2405 struct elt_loc_list *l = NULL;
2406 if (CSELIB_VAL_PTR (y))
2407 for (l = canonical_cselib_val (CSELIB_VAL_PTR (y))->locs;
2408 l; l = l->next)
2409 if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, x))
2410 break;
2411 if (l)
2412 y = x;
2413 else
2414 y = get_addr (y);
2416 /* Don't call get_addr if x is the same VALUE. */
2417 else if (y != x)
2418 y = get_addr (y);
2420 if (GET_CODE (x) == HIGH)
2421 x = XEXP (x, 0);
2422 else if (GET_CODE (x) == LO_SUM)
2423 x = XEXP (x, 1);
2424 else
2425 x = addr_side_effect_eval (x, maybe_lt (xsize, 0) ? -xsize : xsize, 0);
2426 if (GET_CODE (y) == HIGH)
2427 y = XEXP (y, 0);
2428 else if (GET_CODE (y) == LO_SUM)
2429 y = XEXP (y, 1);
2430 else
2431 y = addr_side_effect_eval (y, maybe_lt (ysize, 0) ? -ysize : ysize, 0);
2433 if (GET_CODE (x) == SYMBOL_REF && GET_CODE (y) == SYMBOL_REF)
2435 int cmp = compare_base_symbol_refs (x,y);
2437 /* If both decls are the same, decide by offsets. */
2438 if (cmp == 1)
2439 return offset_overlap_p (c, xsize, ysize);
2440 /* Assume a potential overlap for symbolic addresses that went
2441 through alignment adjustments (i.e., that have negative
2442 sizes), because we can't know how far they are from each
2443 other. */
2444 if (maybe_lt (xsize, 0) || maybe_lt (ysize, 0))
2445 return -1;
2446 /* If decls are different or we know by offsets that there is no overlap,
2447 we win. */
2448 if (!cmp || !offset_overlap_p (c, xsize, ysize))
2449 return 0;
2450 /* Decls may or may not be different and offsets overlap....*/
2451 return -1;
2453 else if (rtx_equal_for_memref_p (x, y))
2455 return offset_overlap_p (c, xsize, ysize);
2458 /* This code used to check for conflicts involving stack references and
2459 globals but the base address alias code now handles these cases. */
2461 if (GET_CODE (x) == PLUS)
2463 /* The fact that X is canonicalized means that this
2464 PLUS rtx is canonicalized. */
2465 rtx x0 = XEXP (x, 0);
2466 rtx x1 = XEXP (x, 1);
2468 /* However, VALUEs might end up in different positions even in
2469 canonical PLUSes. Comparing their addresses is enough. */
2470 if (x0 == y)
2471 return memrefs_conflict_p (xsize, x1, ysize, const0_rtx, c);
2472 else if (x1 == y)
2473 return memrefs_conflict_p (xsize, x0, ysize, const0_rtx, c);
2475 poly_int64 cx1, cy1;
2476 if (GET_CODE (y) == PLUS)
2478 /* The fact that Y is canonicalized means that this
2479 PLUS rtx is canonicalized. */
2480 rtx y0 = XEXP (y, 0);
2481 rtx y1 = XEXP (y, 1);
2483 if (x0 == y1)
2484 return memrefs_conflict_p (xsize, x1, ysize, y0, c);
2485 if (x1 == y0)
2486 return memrefs_conflict_p (xsize, x0, ysize, y1, c);
2488 if (rtx_equal_for_memref_p (x1, y1))
2489 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
2490 if (rtx_equal_for_memref_p (x0, y0))
2491 return memrefs_conflict_p (xsize, x1, ysize, y1, c);
2492 if (poly_int_rtx_p (x1, &cx1))
2494 if (poly_int_rtx_p (y1, &cy1))
2495 return memrefs_conflict_p (xsize, x0, ysize, y0,
2496 c - cx1 + cy1);
2497 else
2498 return memrefs_conflict_p (xsize, x0, ysize, y, c - cx1);
2500 else if (poly_int_rtx_p (y1, &cy1))
2501 return memrefs_conflict_p (xsize, x, ysize, y0, c + cy1);
2503 return -1;
2505 else if (poly_int_rtx_p (x1, &cx1))
2506 return memrefs_conflict_p (xsize, x0, ysize, y, c - cx1);
2508 else if (GET_CODE (y) == PLUS)
2510 /* The fact that Y is canonicalized means that this
2511 PLUS rtx is canonicalized. */
2512 rtx y0 = XEXP (y, 0);
2513 rtx y1 = XEXP (y, 1);
2515 if (x == y0)
2516 return memrefs_conflict_p (xsize, const0_rtx, ysize, y1, c);
2517 if (x == y1)
2518 return memrefs_conflict_p (xsize, const0_rtx, ysize, y0, c);
2520 poly_int64 cy1;
2521 if (poly_int_rtx_p (y1, &cy1))
2522 return memrefs_conflict_p (xsize, x, ysize, y0, c + cy1);
2523 else
2524 return -1;
2527 if (GET_CODE (x) == GET_CODE (y))
2528 switch (GET_CODE (x))
2530 case MULT:
2532 /* Handle cases where we expect the second operands to be the
2533 same, and check only whether the first operand would conflict
2534 or not. */
2535 rtx x0, y0;
2536 rtx x1 = canon_rtx (XEXP (x, 1));
2537 rtx y1 = canon_rtx (XEXP (y, 1));
2538 if (! rtx_equal_for_memref_p (x1, y1))
2539 return -1;
2540 x0 = canon_rtx (XEXP (x, 0));
2541 y0 = canon_rtx (XEXP (y, 0));
2542 if (rtx_equal_for_memref_p (x0, y0))
2543 return offset_overlap_p (c, xsize, ysize);
2545 /* Can't properly adjust our sizes. */
2546 poly_int64 c1;
2547 if (!poly_int_rtx_p (x1, &c1)
2548 || !can_div_trunc_p (xsize, c1, &xsize)
2549 || !can_div_trunc_p (ysize, c1, &ysize)
2550 || !can_div_trunc_p (c, c1, &c))
2551 return -1;
2552 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
2555 default:
2556 break;
2559 /* Deal with alignment ANDs by adjusting offset and size so as to
2560 cover the maximum range, without taking any previously known
2561 alignment into account. Make a size negative after such an
2562 adjustments, so that, if we end up with e.g. two SYMBOL_REFs, we
2563 assume a potential overlap, because they may end up in contiguous
2564 memory locations and the stricter-alignment access may span over
2565 part of both. */
2566 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1)))
2568 HOST_WIDE_INT sc = INTVAL (XEXP (x, 1));
2569 unsigned HOST_WIDE_INT uc = sc;
2570 if (sc < 0 && pow2_or_zerop (-uc))
2572 if (maybe_gt (xsize, 0))
2573 xsize = -xsize;
2574 if (maybe_ne (xsize, 0))
2575 xsize += sc + 1;
2576 c -= sc + 1;
2577 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2578 ysize, y, c);
2581 if (GET_CODE (y) == AND && CONST_INT_P (XEXP (y, 1)))
2583 HOST_WIDE_INT sc = INTVAL (XEXP (y, 1));
2584 unsigned HOST_WIDE_INT uc = sc;
2585 if (sc < 0 && pow2_or_zerop (-uc))
2587 if (maybe_gt (ysize, 0))
2588 ysize = -ysize;
2589 if (maybe_ne (ysize, 0))
2590 ysize += sc + 1;
2591 c += sc + 1;
2592 return memrefs_conflict_p (xsize, x,
2593 ysize, canon_rtx (XEXP (y, 0)), c);
2597 if (CONSTANT_P (x))
2599 poly_int64 cx, cy;
2600 if (poly_int_rtx_p (x, &cx) && poly_int_rtx_p (y, &cy))
2602 c += cy - cx;
2603 return offset_overlap_p (c, xsize, ysize);
2606 if (GET_CODE (x) == CONST)
2608 if (GET_CODE (y) == CONST)
2609 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2610 ysize, canon_rtx (XEXP (y, 0)), c);
2611 else
2612 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2613 ysize, y, c);
2615 if (GET_CODE (y) == CONST)
2616 return memrefs_conflict_p (xsize, x, ysize,
2617 canon_rtx (XEXP (y, 0)), c);
2619 /* Assume a potential overlap for symbolic addresses that went
2620 through alignment adjustments (i.e., that have negative
2621 sizes), because we can't know how far they are from each
2622 other. */
2623 if (CONSTANT_P (y))
2624 return (maybe_lt (xsize, 0)
2625 || maybe_lt (ysize, 0)
2626 || offset_overlap_p (c, xsize, ysize));
2628 return -1;
2631 return -1;
2634 /* Functions to compute memory dependencies.
2636 Since we process the insns in execution order, we can build tables
2637 to keep track of what registers are fixed (and not aliased), what registers
2638 are varying in known ways, and what registers are varying in unknown
2639 ways.
2641 If both memory references are volatile, then there must always be a
2642 dependence between the two references, since their order cannot be
2643 changed. A volatile and non-volatile reference can be interchanged
2644 though.
2646 We also must allow AND addresses, because they may generate accesses
2647 outside the object being referenced. This is used to generate aligned
2648 addresses from unaligned addresses, for instance, the alpha
2649 storeqi_unaligned pattern. */
2651 /* Read dependence: X is read after read in MEM takes place. There can
2652 only be a dependence here if both reads are volatile, or if either is
2653 an explicit barrier. */
2656 read_dependence (const_rtx mem, const_rtx x)
2658 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2659 return true;
2660 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2661 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2662 return true;
2663 return false;
2666 /* Look at the bottom of the COMPONENT_REF list for a DECL, and return it. */
2668 static tree
2669 decl_for_component_ref (tree x)
2673 x = TREE_OPERAND (x, 0);
2675 while (x && TREE_CODE (x) == COMPONENT_REF);
2677 return x && DECL_P (x) ? x : NULL_TREE;
2680 /* Walk up the COMPONENT_REF list in X and adjust *OFFSET to compensate
2681 for the offset of the field reference. *KNOWN_P says whether the
2682 offset is known. */
2684 static void
2685 adjust_offset_for_component_ref (tree x, bool *known_p,
2686 poly_int64 *offset)
2688 if (!*known_p)
2689 return;
2692 tree xoffset = component_ref_field_offset (x);
2693 tree field = TREE_OPERAND (x, 1);
2694 if (!poly_int_tree_p (xoffset))
2696 *known_p = false;
2697 return;
2700 poly_offset_int woffset
2701 = (wi::to_poly_offset (xoffset)
2702 + (wi::to_offset (DECL_FIELD_BIT_OFFSET (field))
2703 >> LOG2_BITS_PER_UNIT)
2704 + *offset);
2705 if (!woffset.to_shwi (offset))
2707 *known_p = false;
2708 return;
2711 x = TREE_OPERAND (x, 0);
2713 while (x && TREE_CODE (x) == COMPONENT_REF);
2716 /* Return nonzero if we can determine the exprs corresponding to memrefs
2717 X and Y and they do not overlap.
2718 If LOOP_VARIANT is set, skip offset-based disambiguation */
2721 nonoverlapping_memrefs_p (const_rtx x, const_rtx y, bool loop_invariant)
2723 tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y);
2724 rtx rtlx, rtly;
2725 rtx basex, basey;
2726 bool moffsetx_known_p, moffsety_known_p;
2727 poly_int64 moffsetx = 0, moffsety = 0;
2728 poly_int64 offsetx = 0, offsety = 0, sizex, sizey;
2730 /* Unless both have exprs, we can't tell anything. */
2731 if (exprx == 0 || expry == 0)
2732 return 0;
2734 /* For spill-slot accesses make sure we have valid offsets. */
2735 if ((exprx == get_spill_slot_decl (false)
2736 && ! MEM_OFFSET_KNOWN_P (x))
2737 || (expry == get_spill_slot_decl (false)
2738 && ! MEM_OFFSET_KNOWN_P (y)))
2739 return 0;
2741 /* If the field reference test failed, look at the DECLs involved. */
2742 moffsetx_known_p = MEM_OFFSET_KNOWN_P (x);
2743 if (moffsetx_known_p)
2744 moffsetx = MEM_OFFSET (x);
2745 if (TREE_CODE (exprx) == COMPONENT_REF)
2747 tree t = decl_for_component_ref (exprx);
2748 if (! t)
2749 return 0;
2750 adjust_offset_for_component_ref (exprx, &moffsetx_known_p, &moffsetx);
2751 exprx = t;
2754 moffsety_known_p = MEM_OFFSET_KNOWN_P (y);
2755 if (moffsety_known_p)
2756 moffsety = MEM_OFFSET (y);
2757 if (TREE_CODE (expry) == COMPONENT_REF)
2759 tree t = decl_for_component_ref (expry);
2760 if (! t)
2761 return 0;
2762 adjust_offset_for_component_ref (expry, &moffsety_known_p, &moffsety);
2763 expry = t;
2766 if (! DECL_P (exprx) || ! DECL_P (expry))
2767 return 0;
2769 /* If we refer to different gimple registers, or one gimple register
2770 and one non-gimple-register, we know they can't overlap. First,
2771 gimple registers don't have their addresses taken. Now, there
2772 could be more than one stack slot for (different versions of) the
2773 same gimple register, but we can presumably tell they don't
2774 overlap based on offsets from stack base addresses elsewhere.
2775 It's important that we don't proceed to DECL_RTL, because gimple
2776 registers may not pass DECL_RTL_SET_P, and make_decl_rtl won't be
2777 able to do anything about them since no SSA information will have
2778 remained to guide it. */
2779 if (is_gimple_reg (exprx) || is_gimple_reg (expry))
2780 return exprx != expry
2781 || (moffsetx_known_p && moffsety_known_p
2782 && MEM_SIZE_KNOWN_P (x) && MEM_SIZE_KNOWN_P (y)
2783 && !offset_overlap_p (moffsety - moffsetx,
2784 MEM_SIZE (x), MEM_SIZE (y)));
2786 /* With invalid code we can end up storing into the constant pool.
2787 Bail out to avoid ICEing when creating RTL for this.
2788 See gfortran.dg/lto/20091028-2_0.f90. */
2789 if (TREE_CODE (exprx) == CONST_DECL
2790 || TREE_CODE (expry) == CONST_DECL)
2791 return 1;
2793 /* If one decl is known to be a function or label in a function and
2794 the other is some kind of data, they can't overlap. */
2795 if ((TREE_CODE (exprx) == FUNCTION_DECL
2796 || TREE_CODE (exprx) == LABEL_DECL)
2797 != (TREE_CODE (expry) == FUNCTION_DECL
2798 || TREE_CODE (expry) == LABEL_DECL))
2799 return 1;
2801 /* If either of the decls doesn't have DECL_RTL set (e.g. marked as
2802 living in multiple places), we can't tell anything. Exception
2803 are FUNCTION_DECLs for which we can create DECL_RTL on demand. */
2804 if ((!DECL_RTL_SET_P (exprx) && TREE_CODE (exprx) != FUNCTION_DECL)
2805 || (!DECL_RTL_SET_P (expry) && TREE_CODE (expry) != FUNCTION_DECL))
2806 return 0;
2808 rtlx = DECL_RTL (exprx);
2809 rtly = DECL_RTL (expry);
2811 /* If either RTL is not a MEM, it must be a REG or CONCAT, meaning they
2812 can't overlap unless they are the same because we never reuse that part
2813 of the stack frame used for locals for spilled pseudos. */
2814 if ((!MEM_P (rtlx) || !MEM_P (rtly))
2815 && ! rtx_equal_p (rtlx, rtly))
2816 return 1;
2818 /* If we have MEMs referring to different address spaces (which can
2819 potentially overlap), we cannot easily tell from the addresses
2820 whether the references overlap. */
2821 if (MEM_P (rtlx) && MEM_P (rtly)
2822 && MEM_ADDR_SPACE (rtlx) != MEM_ADDR_SPACE (rtly))
2823 return 0;
2825 /* Get the base and offsets of both decls. If either is a register, we
2826 know both are and are the same, so use that as the base. The only
2827 we can avoid overlap is if we can deduce that they are nonoverlapping
2828 pieces of that decl, which is very rare. */
2829 basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx;
2830 basex = strip_offset_and_add (basex, &offsetx);
2832 basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly;
2833 basey = strip_offset_and_add (basey, &offsety);
2835 /* If the bases are different, we know they do not overlap if both
2836 are constants or if one is a constant and the other a pointer into the
2837 stack frame. Otherwise a different base means we can't tell if they
2838 overlap or not. */
2839 if (compare_base_decls (exprx, expry) == 0)
2840 return ((CONSTANT_P (basex) && CONSTANT_P (basey))
2841 || (CONSTANT_P (basex) && REG_P (basey)
2842 && REGNO_PTR_FRAME_P (REGNO (basey)))
2843 || (CONSTANT_P (basey) && REG_P (basex)
2844 && REGNO_PTR_FRAME_P (REGNO (basex))));
2846 /* Offset based disambiguation not appropriate for loop invariant */
2847 if (loop_invariant)
2848 return 0;
2850 /* Offset based disambiguation is OK even if we do not know that the
2851 declarations are necessarily different
2852 (i.e. compare_base_decls (exprx, expry) == -1) */
2854 sizex = (!MEM_P (rtlx) ? poly_int64 (GET_MODE_SIZE (GET_MODE (rtlx)))
2855 : MEM_SIZE_KNOWN_P (rtlx) ? MEM_SIZE (rtlx)
2856 : -1);
2857 sizey = (!MEM_P (rtly) ? poly_int64 (GET_MODE_SIZE (GET_MODE (rtly)))
2858 : MEM_SIZE_KNOWN_P (rtly) ? MEM_SIZE (rtly)
2859 : -1);
2861 /* If we have an offset for either memref, it can update the values computed
2862 above. */
2863 if (moffsetx_known_p)
2864 offsetx += moffsetx, sizex -= moffsetx;
2865 if (moffsety_known_p)
2866 offsety += moffsety, sizey -= moffsety;
2868 /* If a memref has both a size and an offset, we can use the smaller size.
2869 We can't do this if the offset isn't known because we must view this
2870 memref as being anywhere inside the DECL's MEM. */
2871 if (MEM_SIZE_KNOWN_P (x) && moffsetx_known_p)
2872 sizex = MEM_SIZE (x);
2873 if (MEM_SIZE_KNOWN_P (y) && moffsety_known_p)
2874 sizey = MEM_SIZE (y);
2876 return !ranges_maybe_overlap_p (offsetx, sizex, offsety, sizey);
2879 /* Helper for true_dependence and canon_true_dependence.
2880 Checks for true dependence: X is read after store in MEM takes place.
2882 If MEM_CANONICALIZED is FALSE, then X_ADDR and MEM_ADDR should be
2883 NULL_RTX, and the canonical addresses of MEM and X are both computed
2884 here. If MEM_CANONICALIZED, then MEM must be already canonicalized.
2886 If X_ADDR is non-NULL, it is used in preference of XEXP (x, 0).
2888 Returns 1 if there is a true dependence, 0 otherwise. */
2890 static int
2891 true_dependence_1 (const_rtx mem, machine_mode mem_mode, rtx mem_addr,
2892 const_rtx x, rtx x_addr, bool mem_canonicalized)
2894 rtx true_mem_addr;
2895 rtx base;
2896 int ret;
2898 gcc_checking_assert (mem_canonicalized ? (mem_addr != NULL_RTX)
2899 : (mem_addr == NULL_RTX && x_addr == NULL_RTX));
2901 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2902 return 1;
2904 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2905 This is used in epilogue deallocation functions, and in cselib. */
2906 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2907 return 1;
2908 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2909 return 1;
2910 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2911 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2912 return 1;
2914 if (! x_addr)
2915 x_addr = XEXP (x, 0);
2916 x_addr = get_addr (x_addr);
2918 if (! mem_addr)
2920 mem_addr = XEXP (mem, 0);
2921 if (mem_mode == VOIDmode)
2922 mem_mode = GET_MODE (mem);
2924 true_mem_addr = get_addr (mem_addr);
2926 /* Read-only memory is by definition never modified, and therefore can't
2927 conflict with anything. However, don't assume anything when AND
2928 addresses are involved and leave to the code below to determine
2929 dependence. We don't expect to find read-only set on MEM, but
2930 stupid user tricks can produce them, so don't die. */
2931 if (MEM_READONLY_P (x)
2932 && GET_CODE (x_addr) != AND
2933 && GET_CODE (true_mem_addr) != AND)
2934 return 0;
2936 /* If we have MEMs referring to different address spaces (which can
2937 potentially overlap), we cannot easily tell from the addresses
2938 whether the references overlap. */
2939 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2940 return 1;
2942 base = find_base_term (x_addr);
2943 if (base && (GET_CODE (base) == LABEL_REF
2944 || (GET_CODE (base) == SYMBOL_REF
2945 && CONSTANT_POOL_ADDRESS_P (base))))
2946 return 0;
2948 rtx mem_base = find_base_term (true_mem_addr);
2949 if (! base_alias_check (x_addr, base, true_mem_addr, mem_base,
2950 GET_MODE (x), mem_mode))
2951 return 0;
2953 x_addr = canon_rtx (x_addr);
2954 if (!mem_canonicalized)
2955 mem_addr = canon_rtx (true_mem_addr);
2957 if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
2958 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
2959 return ret;
2961 if (mems_in_disjoint_alias_sets_p (x, mem))
2962 return 0;
2964 if (nonoverlapping_memrefs_p (mem, x, false))
2965 return 0;
2967 return rtx_refs_may_alias_p (x, mem, true);
2970 /* True dependence: X is read after store in MEM takes place. */
2973 true_dependence (const_rtx mem, machine_mode mem_mode, const_rtx x)
2975 return true_dependence_1 (mem, mem_mode, NULL_RTX,
2976 x, NULL_RTX, /*mem_canonicalized=*/false);
2979 /* Canonical true dependence: X is read after store in MEM takes place.
2980 Variant of true_dependence which assumes MEM has already been
2981 canonicalized (hence we no longer do that here).
2982 The mem_addr argument has been added, since true_dependence_1 computed
2983 this value prior to canonicalizing. */
2986 canon_true_dependence (const_rtx mem, machine_mode mem_mode, rtx mem_addr,
2987 const_rtx x, rtx x_addr)
2989 return true_dependence_1 (mem, mem_mode, mem_addr,
2990 x, x_addr, /*mem_canonicalized=*/true);
2993 /* Returns nonzero if a write to X might alias a previous read from
2994 (or, if WRITEP is true, a write to) MEM.
2995 If X_CANONCALIZED is true, then X_ADDR is the canonicalized address of X,
2996 and X_MODE the mode for that access.
2997 If MEM_CANONICALIZED is true, MEM is canonicalized. */
2999 static int
3000 write_dependence_p (const_rtx mem,
3001 const_rtx x, machine_mode x_mode, rtx x_addr,
3002 bool mem_canonicalized, bool x_canonicalized, bool writep)
3004 rtx mem_addr;
3005 rtx true_mem_addr, true_x_addr;
3006 rtx base;
3007 int ret;
3009 gcc_checking_assert (x_canonicalized
3010 ? (x_addr != NULL_RTX
3011 && (x_mode != VOIDmode || GET_MODE (x) == VOIDmode))
3012 : (x_addr == NULL_RTX && x_mode == VOIDmode));
3014 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
3015 return 1;
3017 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
3018 This is used in epilogue deallocation functions. */
3019 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
3020 return 1;
3021 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
3022 return 1;
3023 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
3024 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
3025 return 1;
3027 if (!x_addr)
3028 x_addr = XEXP (x, 0);
3029 true_x_addr = get_addr (x_addr);
3031 mem_addr = XEXP (mem, 0);
3032 true_mem_addr = get_addr (mem_addr);
3034 /* A read from read-only memory can't conflict with read-write memory.
3035 Don't assume anything when AND addresses are involved and leave to
3036 the code below to determine dependence. */
3037 if (!writep
3038 && MEM_READONLY_P (mem)
3039 && GET_CODE (true_x_addr) != AND
3040 && GET_CODE (true_mem_addr) != AND)
3041 return 0;
3043 /* If we have MEMs referring to different address spaces (which can
3044 potentially overlap), we cannot easily tell from the addresses
3045 whether the references overlap. */
3046 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
3047 return 1;
3049 base = find_base_term (true_mem_addr);
3050 if (! writep
3051 && base
3052 && (GET_CODE (base) == LABEL_REF
3053 || (GET_CODE (base) == SYMBOL_REF
3054 && CONSTANT_POOL_ADDRESS_P (base))))
3055 return 0;
3057 rtx x_base = find_base_term (true_x_addr);
3058 if (! base_alias_check (true_x_addr, x_base, true_mem_addr, base,
3059 GET_MODE (x), GET_MODE (mem)))
3060 return 0;
3062 if (!x_canonicalized)
3064 x_addr = canon_rtx (true_x_addr);
3065 x_mode = GET_MODE (x);
3067 if (!mem_canonicalized)
3068 mem_addr = canon_rtx (true_mem_addr);
3070 if ((ret = memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
3071 GET_MODE_SIZE (x_mode), x_addr, 0)) != -1)
3072 return ret;
3074 if (nonoverlapping_memrefs_p (x, mem, false))
3075 return 0;
3077 return rtx_refs_may_alias_p (x, mem, false);
3080 /* Anti dependence: X is written after read in MEM takes place. */
3083 anti_dependence (const_rtx mem, const_rtx x)
3085 return write_dependence_p (mem, x, VOIDmode, NULL_RTX,
3086 /*mem_canonicalized=*/false,
3087 /*x_canonicalized*/false, /*writep=*/false);
3090 /* Likewise, but we already have a canonicalized MEM, and X_ADDR for X.
3091 Also, consider X in X_MODE (which might be from an enclosing
3092 STRICT_LOW_PART / ZERO_EXTRACT).
3093 If MEM_CANONICALIZED is true, MEM is canonicalized. */
3096 canon_anti_dependence (const_rtx mem, bool mem_canonicalized,
3097 const_rtx x, machine_mode x_mode, rtx x_addr)
3099 return write_dependence_p (mem, x, x_mode, x_addr,
3100 mem_canonicalized, /*x_canonicalized=*/true,
3101 /*writep=*/false);
3104 /* Output dependence: X is written after store in MEM takes place. */
3107 output_dependence (const_rtx mem, const_rtx x)
3109 return write_dependence_p (mem, x, VOIDmode, NULL_RTX,
3110 /*mem_canonicalized=*/false,
3111 /*x_canonicalized*/false, /*writep=*/true);
3114 /* Likewise, but we already have a canonicalized MEM, and X_ADDR for X.
3115 Also, consider X in X_MODE (which might be from an enclosing
3116 STRICT_LOW_PART / ZERO_EXTRACT).
3117 If MEM_CANONICALIZED is true, MEM is canonicalized. */
3120 canon_output_dependence (const_rtx mem, bool mem_canonicalized,
3121 const_rtx x, machine_mode x_mode, rtx x_addr)
3123 return write_dependence_p (mem, x, x_mode, x_addr,
3124 mem_canonicalized, /*x_canonicalized=*/true,
3125 /*writep=*/true);
3130 /* Check whether X may be aliased with MEM. Don't do offset-based
3131 memory disambiguation & TBAA. */
3133 may_alias_p (const_rtx mem, const_rtx x)
3135 rtx x_addr, mem_addr;
3137 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
3138 return 1;
3140 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
3141 This is used in epilogue deallocation functions. */
3142 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
3143 return 1;
3144 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
3145 return 1;
3146 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
3147 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
3148 return 1;
3150 x_addr = XEXP (x, 0);
3151 x_addr = get_addr (x_addr);
3153 mem_addr = XEXP (mem, 0);
3154 mem_addr = get_addr (mem_addr);
3156 /* Read-only memory is by definition never modified, and therefore can't
3157 conflict with anything. However, don't assume anything when AND
3158 addresses are involved and leave to the code below to determine
3159 dependence. We don't expect to find read-only set on MEM, but
3160 stupid user tricks can produce them, so don't die. */
3161 if (MEM_READONLY_P (x)
3162 && GET_CODE (x_addr) != AND
3163 && GET_CODE (mem_addr) != AND)
3164 return 0;
3166 /* If we have MEMs referring to different address spaces (which can
3167 potentially overlap), we cannot easily tell from the addresses
3168 whether the references overlap. */
3169 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
3170 return 1;
3172 rtx x_base = find_base_term (x_addr);
3173 rtx mem_base = find_base_term (mem_addr);
3174 if (! base_alias_check (x_addr, x_base, mem_addr, mem_base,
3175 GET_MODE (x), GET_MODE (mem_addr)))
3176 return 0;
3178 if (nonoverlapping_memrefs_p (mem, x, true))
3179 return 0;
3181 /* TBAA not valid for loop_invarint */
3182 return rtx_refs_may_alias_p (x, mem, false);
3185 void
3186 init_alias_target (void)
3188 int i;
3190 if (!arg_base_value)
3191 arg_base_value = gen_rtx_ADDRESS (VOIDmode, 0);
3193 memset (static_reg_base_value, 0, sizeof static_reg_base_value);
3195 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3196 /* Check whether this register can hold an incoming pointer
3197 argument. FUNCTION_ARG_REGNO_P tests outgoing register
3198 numbers, so translate if necessary due to register windows. */
3199 if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i))
3200 && targetm.hard_regno_mode_ok (i, Pmode))
3201 static_reg_base_value[i] = arg_base_value;
3203 /* RTL code is required to be consistent about whether it uses the
3204 stack pointer, the frame pointer or the argument pointer to
3205 access a given area of the frame. We can therefore use the
3206 base address to distinguish between the different areas. */
3207 static_reg_base_value[STACK_POINTER_REGNUM]
3208 = unique_base_value (UNIQUE_BASE_VALUE_SP);
3209 static_reg_base_value[ARG_POINTER_REGNUM]
3210 = unique_base_value (UNIQUE_BASE_VALUE_ARGP);
3211 static_reg_base_value[FRAME_POINTER_REGNUM]
3212 = unique_base_value (UNIQUE_BASE_VALUE_FP);
3214 /* The above rules extend post-reload, with eliminations applying
3215 consistently to each of the three pointers. Cope with cases in
3216 which the frame pointer is eliminated to the hard frame pointer
3217 rather than the stack pointer. */
3218 if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
3219 static_reg_base_value[HARD_FRAME_POINTER_REGNUM]
3220 = unique_base_value (UNIQUE_BASE_VALUE_HFP);
3223 /* Set MEMORY_MODIFIED when X modifies DATA (that is assumed
3224 to be memory reference. */
3225 static bool memory_modified;
3226 static void
3227 memory_modified_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3229 if (MEM_P (x))
3231 if (anti_dependence (x, (const_rtx)data) || output_dependence (x, (const_rtx)data))
3232 memory_modified = true;
3237 /* Return true when INSN possibly modify memory contents of MEM
3238 (i.e. address can be modified). */
3239 bool
3240 memory_modified_in_insn_p (const_rtx mem, const_rtx insn)
3242 if (!INSN_P (insn))
3243 return false;
3244 /* Conservatively assume all non-readonly MEMs might be modified in
3245 calls. */
3246 if (CALL_P (insn))
3247 return true;
3248 memory_modified = false;
3249 note_stores (PATTERN (insn), memory_modified_1, CONST_CAST_RTX(mem));
3250 return memory_modified;
3253 /* Initialize the aliasing machinery. Initialize the REG_KNOWN_VALUE
3254 array. */
3256 void
3257 init_alias_analysis (void)
3259 unsigned int maxreg = max_reg_num ();
3260 int changed, pass;
3261 int i;
3262 unsigned int ui;
3263 rtx_insn *insn;
3264 rtx val;
3265 int rpo_cnt;
3266 int *rpo;
3268 timevar_push (TV_ALIAS_ANALYSIS);
3270 vec_safe_grow_cleared (reg_known_value, maxreg - FIRST_PSEUDO_REGISTER);
3271 reg_known_equiv_p = sbitmap_alloc (maxreg - FIRST_PSEUDO_REGISTER);
3272 bitmap_clear (reg_known_equiv_p);
3274 /* If we have memory allocated from the previous run, use it. */
3275 if (old_reg_base_value)
3276 reg_base_value = old_reg_base_value;
3278 if (reg_base_value)
3279 reg_base_value->truncate (0);
3281 vec_safe_grow_cleared (reg_base_value, maxreg);
3283 new_reg_base_value = XNEWVEC (rtx, maxreg);
3284 reg_seen = sbitmap_alloc (maxreg);
3286 /* The basic idea is that each pass through this loop will use the
3287 "constant" information from the previous pass to propagate alias
3288 information through another level of assignments.
3290 The propagation is done on the CFG in reverse post-order, to propagate
3291 things forward as far as possible in each iteration.
3293 This could get expensive if the assignment chains are long. Maybe
3294 we should throttle the number of iterations, possibly based on
3295 the optimization level or flag_expensive_optimizations.
3297 We could propagate more information in the first pass by making use
3298 of DF_REG_DEF_COUNT to determine immediately that the alias information
3299 for a pseudo is "constant".
3301 A program with an uninitialized variable can cause an infinite loop
3302 here. Instead of doing a full dataflow analysis to detect such problems
3303 we just cap the number of iterations for the loop.
3305 The state of the arrays for the set chain in question does not matter
3306 since the program has undefined behavior. */
3308 rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
3309 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3311 /* The prologue/epilogue insns are not threaded onto the
3312 insn chain until after reload has completed. Thus,
3313 there is no sense wasting time checking if INSN is in
3314 the prologue/epilogue until after reload has completed. */
3315 bool could_be_prologue_epilogue = ((targetm.have_prologue ()
3316 || targetm.have_epilogue ())
3317 && reload_completed);
3319 pass = 0;
3322 /* Assume nothing will change this iteration of the loop. */
3323 changed = 0;
3325 /* We want to assign the same IDs each iteration of this loop, so
3326 start counting from one each iteration of the loop. */
3327 unique_id = 1;
3329 /* We're at the start of the function each iteration through the
3330 loop, so we're copying arguments. */
3331 copying_arguments = true;
3333 /* Wipe the potential alias information clean for this pass. */
3334 memset (new_reg_base_value, 0, maxreg * sizeof (rtx));
3336 /* Wipe the reg_seen array clean. */
3337 bitmap_clear (reg_seen);
3339 /* Initialize the alias information for this pass. */
3340 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3341 if (static_reg_base_value[i]
3342 /* Don't treat the hard frame pointer as special if we
3343 eliminated the frame pointer to the stack pointer instead. */
3344 && !(i == HARD_FRAME_POINTER_REGNUM
3345 && reload_completed
3346 && !frame_pointer_needed
3347 && targetm.can_eliminate (FRAME_POINTER_REGNUM,
3348 STACK_POINTER_REGNUM)))
3350 new_reg_base_value[i] = static_reg_base_value[i];
3351 bitmap_set_bit (reg_seen, i);
3354 /* Walk the insns adding values to the new_reg_base_value array. */
3355 for (i = 0; i < rpo_cnt; i++)
3357 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
3358 FOR_BB_INSNS (bb, insn)
3360 if (NONDEBUG_INSN_P (insn))
3362 rtx note, set;
3364 if (could_be_prologue_epilogue
3365 && prologue_epilogue_contains (insn))
3366 continue;
3368 /* If this insn has a noalias note, process it, Otherwise,
3369 scan for sets. A simple set will have no side effects
3370 which could change the base value of any other register. */
3372 if (GET_CODE (PATTERN (insn)) == SET
3373 && REG_NOTES (insn) != 0
3374 && find_reg_note (insn, REG_NOALIAS, NULL_RTX))
3375 record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL);
3376 else
3377 note_stores (PATTERN (insn), record_set, NULL);
3379 set = single_set (insn);
3381 if (set != 0
3382 && REG_P (SET_DEST (set))
3383 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3385 unsigned int regno = REGNO (SET_DEST (set));
3386 rtx src = SET_SRC (set);
3387 rtx t;
3389 note = find_reg_equal_equiv_note (insn);
3390 if (note && REG_NOTE_KIND (note) == REG_EQUAL
3391 && DF_REG_DEF_COUNT (regno) != 1)
3392 note = NULL_RTX;
3394 poly_int64 offset;
3395 if (note != NULL_RTX
3396 && GET_CODE (XEXP (note, 0)) != EXPR_LIST
3397 && ! rtx_varies_p (XEXP (note, 0), 1)
3398 && ! reg_overlap_mentioned_p (SET_DEST (set),
3399 XEXP (note, 0)))
3401 set_reg_known_value (regno, XEXP (note, 0));
3402 set_reg_known_equiv_p (regno,
3403 REG_NOTE_KIND (note) == REG_EQUIV);
3405 else if (DF_REG_DEF_COUNT (regno) == 1
3406 && GET_CODE (src) == PLUS
3407 && REG_P (XEXP (src, 0))
3408 && (t = get_reg_known_value (REGNO (XEXP (src, 0))))
3409 && poly_int_rtx_p (XEXP (src, 1), &offset))
3411 t = plus_constant (GET_MODE (src), t, offset);
3412 set_reg_known_value (regno, t);
3413 set_reg_known_equiv_p (regno, false);
3415 else if (DF_REG_DEF_COUNT (regno) == 1
3416 && ! rtx_varies_p (src, 1))
3418 set_reg_known_value (regno, src);
3419 set_reg_known_equiv_p (regno, false);
3423 else if (NOTE_P (insn)
3424 && NOTE_KIND (insn) == NOTE_INSN_FUNCTION_BEG)
3425 copying_arguments = false;
3429 /* Now propagate values from new_reg_base_value to reg_base_value. */
3430 gcc_assert (maxreg == (unsigned int) max_reg_num ());
3432 for (ui = 0; ui < maxreg; ui++)
3434 if (new_reg_base_value[ui]
3435 && new_reg_base_value[ui] != (*reg_base_value)[ui]
3436 && ! rtx_equal_p (new_reg_base_value[ui], (*reg_base_value)[ui]))
3438 (*reg_base_value)[ui] = new_reg_base_value[ui];
3439 changed = 1;
3443 while (changed && ++pass < MAX_ALIAS_LOOP_PASSES);
3444 XDELETEVEC (rpo);
3446 /* Fill in the remaining entries. */
3447 FOR_EACH_VEC_ELT (*reg_known_value, i, val)
3449 int regno = i + FIRST_PSEUDO_REGISTER;
3450 if (! val)
3451 set_reg_known_value (regno, regno_reg_rtx[regno]);
3454 /* Clean up. */
3455 free (new_reg_base_value);
3456 new_reg_base_value = 0;
3457 sbitmap_free (reg_seen);
3458 reg_seen = 0;
3459 timevar_pop (TV_ALIAS_ANALYSIS);
3462 /* Equate REG_BASE_VALUE (reg1) to REG_BASE_VALUE (reg2).
3463 Special API for var-tracking pass purposes. */
3465 void
3466 vt_equate_reg_base_value (const_rtx reg1, const_rtx reg2)
3468 (*reg_base_value)[REGNO (reg1)] = REG_BASE_VALUE (reg2);
3471 void
3472 end_alias_analysis (void)
3474 old_reg_base_value = reg_base_value;
3475 vec_free (reg_known_value);
3476 sbitmap_free (reg_known_equiv_p);
3479 void
3480 dump_alias_stats_in_alias_c (FILE *s)
3482 fprintf (s, " TBAA oracle: %llu disambiguations %llu queries\n"
3483 " %llu are in alias set 0\n"
3484 " %llu queries asked about the same object\n"
3485 " %llu queries asked about the same alias set\n"
3486 " %llu access volatile\n"
3487 " %llu are dependent in the DAG\n"
3488 " %llu are aritificially in conflict with void *\n",
3489 alias_stats.num_disambiguated,
3490 alias_stats.num_alias_zero + alias_stats.num_same_alias_set
3491 + alias_stats.num_same_objects + alias_stats.num_volatile
3492 + alias_stats.num_dag + alias_stats.num_disambiguated
3493 + alias_stats.num_universal,
3494 alias_stats.num_alias_zero, alias_stats.num_same_alias_set,
3495 alias_stats.num_same_objects, alias_stats.num_volatile,
3496 alias_stats.num_dag, alias_stats.num_universal);
3498 #include "gt-alias.h"