1 /* Alias analysis for GNU C
2 Copyright (C) 1997-2013 Free Software Foundation, Inc.
3 Contributed by John Carr (jfc@mit.edu).
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tm_p.h"
28 #include "function.h"
29 #include "alias.h"
30 #include "emit-rtl.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "basic-block.h"
34 #include "flags.h"
35 #include "diagnostic-core.h"
36 #include "cselib.h"
37 #include "splay-tree.h"
38 #include "ggc.h"
39 #include "langhooks.h"
40 #include "timevar.h"
41 #include "dumpfile.h"
42 #include "target.h"
43 #include "cgraph.h"
44 #include "df.h"
45 #include "tree-ssa-alias.h"
46 #include "pointer-set.h"
47 #include "tree-flow.h"
49 /* The aliasing API provided here solves related but different problems:
51 Say there exists (in C)
53 struct X {
54 struct Y y1;
55 struct Z z2;
56 } x1, *px1, *px2;
58 struct Y y2, *py;
59 struct Z z2, *pz;
62 py = &x1.y1;
63 px2 = &x1;
65 Consider the four questions:
67 Can a store to x1 interfere with px2->y1?
68 Can a store to x1 interfere with px2->z2?
69 Can a store to x1 change the value pointed to by py?
70 Can a store to x1 change the value pointed to by pz?
72 The answer to these questions can be yes, yes, yes, and maybe.
74 The first two questions can be answered with a simple examination
75 of the type system. If structure X contains a field of type Y then
76 a store through a pointer to an X can overwrite any field that is
77 contained (recursively) in an X (unless we know that px1 != px2).
79 The last two questions can be solved in the same way as the first
80 two questions but this is too conservative. The observation is
81 that in some cases we can know which (if any) fields are addressed
82 and if those addresses are used in bad ways. This analysis may be
83 language specific. In C, arbitrary operations may be applied to
84 pointers. However, there is some indication that this may be too
85 conservative for some C++ types.
87 The pass ipa-type-escape does this analysis for the types whose
88 instances do not escape across the compilation boundary.
90 Historically in GCC, these two problems were combined and a single
91 data structure was used to represent the solution to these
92 problems. We now have two similar but different data structures.
93 The data structure to solve the last two questions is similar to
94 the first, but does not contain the fields whose addresses are never
95 taken. For types that do escape the compilation unit, the data
96 structures will have identical information. */
99 /* The alias sets assigned to MEMs assist the back-end in determining
100 which MEMs can alias which other MEMs. In general, two MEMs in
101 different alias sets cannot alias each other, with one important
102 exception. Consider something like:
104 struct S { int i; double d; };
106 a store to an `S' can alias something of either type `int' or type
107 `double'. (However, a store to an `int' cannot alias a `double'
108 and vice versa.) We indicate this via a tree structure that looks
109 like:
110          struct S
111            /   \
112           /     \
113         |/_     _\|
114         int    double
116 (The arrows are directed and point downwards.)
117 In this situation we say the alias set for `struct S' is the
118 `superset' and that those for `int' and `double' are `subsets'.
120 To see whether two alias sets can point to the same memory, we must
121 see if either alias set is a subset of the other. We need not trace
122 past immediate descendants, however, since we propagate all
123 grandchildren up one level.
125 Alias set zero is implicitly a superset of all other alias sets.
126 However, there is no actual entry for alias set zero. It is an
127 error to attempt to explicitly construct a subset of zero. */
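/* As a small illustrative sketch of the above (S_type and int_type
   stand for the corresponding ..._TYPE trees; they are not names used
   elsewhere in this file): once record_component_aliases has run for
   `struct S', its alias set is a superset of those of `int' and
   `double', so

       alias_sets_conflict_p (get_alias_set (S_type),
                              get_alias_set (int_type))

   returns nonzero, while the alias sets of `int' and `double' still
   do not conflict with each other.  */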
129 struct GTY(()) alias_set_entry_d {
130 /* The alias set number, as stored in MEM_ALIAS_SET. */
131 alias_set_type alias_set;
133 /* Nonzero if this alias set would have a child of zero: this effectively
134 makes this alias set the same as alias set zero. */
135 int has_zero_child;
137 /* The children of the alias set. These are not just the immediate
138 children, but, in fact, all descendants. So, if we have:
140 struct T { struct S s; float f; }
142 continuing our example above, the children here will be all of
143 `int', `double', `float', and `struct S'. */
144 splay_tree GTY((param1_is (int), param2_is (int))) children;
145 };
146 typedef struct alias_set_entry_d *alias_set_entry;
148 static int rtx_equal_for_memref_p (const_rtx, const_rtx);
149 static int memrefs_conflict_p (int, rtx, int, rtx, HOST_WIDE_INT);
150 static void record_set (rtx, const_rtx, void *);
151 static int base_alias_check (rtx, rtx, rtx, rtx, enum machine_mode,
152 enum machine_mode);
153 static rtx find_base_value (rtx);
154 static int mems_in_disjoint_alias_sets_p (const_rtx, const_rtx);
155 static int insert_subset_children (splay_tree_node, void*);
156 static alias_set_entry get_alias_set_entry (alias_set_type);
157 static bool nonoverlapping_component_refs_p (const_rtx, const_rtx);
158 static tree decl_for_component_ref (tree);
159 static int write_dependence_p (const_rtx, enum machine_mode, rtx, const_rtx,
160 bool, bool);
162 static void memory_modified_1 (rtx, const_rtx, void *);
164 /* Set up all info needed to perform alias analysis on memory references. */
166 /* Returns the size in bytes of the mode of X. */
167 #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
169 /* Cap the number of passes we make over the insns propagating alias
170 information through set chains.
171 ??? 10 is a completely arbitrary choice. This should be based on the
172 maximum loop depth in the CFG, but we do not have this information
173 available (even if current_loops _is_ available). */
174 #define MAX_ALIAS_LOOP_PASSES 10
176 /* reg_base_value[N] gives an address to which register N is related.
177 If all sets after the first add or subtract to the current value
178 or otherwise modify it so it does not point to a different top level
179 object, reg_base_value[N] is equal to the address part of the source
180 of the first set.
182 A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF. ADDRESS
183 expressions represent three types of base:
185 1. incoming arguments. There is just one ADDRESS to represent all
186 arguments, since we do not know at this level whether accesses
187 based on different arguments can alias. The ADDRESS has id 0.
189 2. stack_pointer_rtx, frame_pointer_rtx, hard_frame_pointer_rtx
190 (if distinct from frame_pointer_rtx) and arg_pointer_rtx.
191 Each of these rtxes has a separate ADDRESS associated with it,
192 each with a negative id.
194 GCC is (and is required to be) precise in which register it
195 chooses to access a particular region of stack. We can therefore
196 assume that accesses based on one of these rtxes do not alias
197 accesses based on another of these rtxes.
199 3. bases that are derived from malloc()ed memory (REG_NOALIAS).
200 Each such piece of memory has a separate ADDRESS associated
201 with it, each with an id greater than 0.
203 Accesses based on one ADDRESS do not alias accesses based on other
204 ADDRESSes. Accesses based on ADDRESSes in groups (2) and (3) do not
205 alias globals either; the ADDRESSes have Pmode to indicate this.
206 The ADDRESS in group (1) _may_ alias globals; it has VOIDmode to
207 indicate this. */
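/* To illustrate the grouping above: a pseudo whose base value is the
   single group (1) argument ADDRESS may still alias a global access
   such as (mem (symbol_ref "x")), because incoming pointer arguments
   can point at globals.  A pseudo whose base value is a group (3)
   REG_NOALIAS ADDRESS (e.g. the result of malloc) aliases neither
   globals nor any access whose base is a different ADDRESS.  */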
209 static GTY(()) vec<rtx, va_gc> *reg_base_value;
210 static rtx *new_reg_base_value;
212 /* The single VOIDmode ADDRESS that represents all argument bases.
213 It has id 0. */
214 static GTY(()) rtx arg_base_value;
216 /* Used to allocate unique ids to each REG_NOALIAS ADDRESS. */
217 static int unique_id;
219 /* We preserve a copy of the old array around to reduce the amount of
220 garbage produced. About 8% of the garbage produced was attributed to
221 this array. */
222 static GTY((deletable)) vec<rtx, va_gc> *old_reg_base_value;
224 /* Values of XINT (address, 0) of Pmode ADDRESS rtxes for special
225 registers. */
226 #define UNIQUE_BASE_VALUE_SP -1
227 #define UNIQUE_BASE_VALUE_ARGP -2
228 #define UNIQUE_BASE_VALUE_FP -3
229 #define UNIQUE_BASE_VALUE_HFP -4
231 #define static_reg_base_value \
232 (this_target_rtl->x_static_reg_base_value)
234 #define REG_BASE_VALUE(X) \
235 (REGNO (X) < vec_safe_length (reg_base_value) \
236 ? (*reg_base_value)[REGNO (X)] : 0)
238 /* Vector indexed by N giving the initial (unchanging) value known for
239 pseudo-register N. This vector is initialized in init_alias_analysis,
240 and does not change until end_alias_analysis is called. */
241 static GTY(()) vec<rtx, va_gc> *reg_known_value;
243 /* Vector recording for each reg_known_value whether it is due to a
244 REG_EQUIV note. Future passes (viz., reload) may replace the
245 pseudo with the equivalent expression and so we account for the
246 dependences that would be introduced if that happens.
248 The REG_EQUIV notes created in assign_parms may mention the arg
249 pointer, and there are explicit insns in the RTL that modify the
250 arg pointer. Thus we must ensure that such insns don't get
251 scheduled across each other because that would invalidate the
252 REG_EQUIV notes. One could argue that the REG_EQUIV notes are
253 wrong, but solving the problem in the scheduler will likely give
254 better code, so we do it here. */
255 static sbitmap reg_known_equiv_p;
257 /* True when scanning insns from the start of the rtl to the
258 NOTE_INSN_FUNCTION_BEG note. */
259 static bool copying_arguments;
262 /* The splay-tree used to store the various alias set entries. */
263 static GTY (()) vec<alias_set_entry, va_gc> *alias_sets;
265 /* Build a decomposed reference object for querying the alias-oracle
266 from the MEM rtx and store it in *REF.
267 Returns false if MEM is not suitable for the alias-oracle. */
269 static bool
270 ao_ref_from_mem (ao_ref *ref, const_rtx mem)
272 tree expr = MEM_EXPR (mem);
273 tree base;
275 if (!expr)
276 return false;
278 ao_ref_init (ref, expr);
280 /* Get the base of the reference and see if we have to reject or
281 adjust it. */
282 base = ao_ref_base (ref);
283 if (base == NULL_TREE)
284 return false;
286 /* The tree oracle doesn't like bases that are neither decls
287 nor indirect references of SSA names. */
288 if (!(DECL_P (base)
289 || (TREE_CODE (base) == MEM_REF
290 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
291 || (TREE_CODE (base) == TARGET_MEM_REF
292 && TREE_CODE (TMR_BASE (base)) == SSA_NAME)))
293 return false;
295 /* If this is a reference based on a partitioned decl replace the
296 base with a MEM_REF of the pointer representative we
297 created during stack slot partitioning. */
298 if (TREE_CODE (base) == VAR_DECL
299 && ! is_global_var (base)
300 && cfun->gimple_df->decls_to_pointers != NULL)
302 void *namep;
303 namep = pointer_map_contains (cfun->gimple_df->decls_to_pointers, base);
304 if (namep)
305 ref->base = build_simple_mem_ref (*(tree *)namep);
308 ref->ref_alias_set = MEM_ALIAS_SET (mem);
310 /* If MEM_OFFSET or MEM_SIZE is unknown, what we got from MEM_EXPR
311 is conservative, so trust it. */
312 if (!MEM_OFFSET_KNOWN_P (mem)
313 || !MEM_SIZE_KNOWN_P (mem))
314 return true;
316 /* If the base decl is a parameter we can have negative MEM_OFFSET in
317 case of promoted subregs on bigendian targets. Trust the MEM_EXPR
318 here. */
319 if (MEM_OFFSET (mem) < 0
320 && (MEM_SIZE (mem) + MEM_OFFSET (mem)) * BITS_PER_UNIT == ref->size)
321 return true;
323 /* Otherwise continue and refine size and offset we got from analyzing
324 MEM_EXPR by using MEM_SIZE and MEM_OFFSET. */
326 ref->offset += MEM_OFFSET (mem) * BITS_PER_UNIT;
327 ref->size = MEM_SIZE (mem) * BITS_PER_UNIT;
329 /* The MEM may extend into adjacent fields, so adjust max_size if
330 necessary. */
331 if (ref->max_size != -1
332 && ref->size > ref->max_size)
333 ref->max_size = ref->size;
335 /* If MEM_OFFSET and MEM_SIZE get us outside of the base object of
336 the MEM_EXPR punt. This happens for STRICT_ALIGNMENT targets a lot. */
337 if (MEM_EXPR (mem) != get_spill_slot_decl (false)
338 && (ref->offset < 0
339 || (DECL_P (ref->base)
340 && (!host_integerp (DECL_SIZE (ref->base), 1)
341 || (TREE_INT_CST_LOW (DECL_SIZE ((ref->base)))
342 < (unsigned HOST_WIDE_INT)(ref->offset + ref->size))))))
343 return false;
345 return true;
348 /* Query the alias-oracle on whether the two memory rtx X and MEM may
349 alias. If TBAA_P is set also apply TBAA. Returns true if the
350 two rtxen may alias, false otherwise. */
352 static bool
353 rtx_refs_may_alias_p (const_rtx x, const_rtx mem, bool tbaa_p)
355 ao_ref ref1, ref2;
357 if (!ao_ref_from_mem (&ref1, x)
358 || !ao_ref_from_mem (&ref2, mem))
359 return true;
361 return refs_may_alias_p_1 (&ref1, &ref2,
362 tbaa_p
363 && MEM_ALIAS_SET (x) != 0
364 && MEM_ALIAS_SET (mem) != 0);
367 /* Returns a pointer to the alias set entry for ALIAS_SET, if there is
368 such an entry, or NULL otherwise. */
370 static inline alias_set_entry
371 get_alias_set_entry (alias_set_type alias_set)
373 return (*alias_sets)[alias_set];
376 /* Returns nonzero if the alias sets for MEM1 and MEM2 are such that
377 the two MEMs cannot alias each other. */
379 static inline int
380 mems_in_disjoint_alias_sets_p (const_rtx mem1, const_rtx mem2)
382 /* Perform a basic sanity check. Namely, that there are no alias sets
383 if we're not using strict aliasing. This helps to catch bugs
384 whereby someone uses PUT_CODE, but doesn't clear MEM_ALIAS_SET, or
385 where a MEM is allocated in some way other than by the use of
386 gen_rtx_MEM, and the MEM_ALIAS_SET is not cleared. If we begin to
387 use alias sets to indicate that spilled registers cannot alias each
388 other, we might need to remove this check. */
389 gcc_assert (flag_strict_aliasing
390 || (!MEM_ALIAS_SET (mem1) && !MEM_ALIAS_SET (mem2)));
392 return ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1), MEM_ALIAS_SET (mem2));
395 /* Insert the NODE into the splay tree given by DATA. Used by
396 record_alias_subset via splay_tree_foreach. */
398 static int
399 insert_subset_children (splay_tree_node node, void *data)
401 splay_tree_insert ((splay_tree) data, node->key, node->value);
403 return 0;
406 /* Return true if the first alias set is a subset of the second. */
408 bool
409 alias_set_subset_of (alias_set_type set1, alias_set_type set2)
411 alias_set_entry ase;
413 /* Everything is a subset of the "aliases everything" set. */
414 if (set2 == 0)
415 return true;
417 /* Otherwise, check if set1 is a subset of set2. */
418 ase = get_alias_set_entry (set2);
419 if (ase != 0
420 && (ase->has_zero_child
421 || splay_tree_lookup (ase->children,
422 (splay_tree_key) set1)))
423 return true;
424 return false;
427 /* Return 1 if the two specified alias sets may conflict. */
430 alias_sets_conflict_p (alias_set_type set1, alias_set_type set2)
432 alias_set_entry ase;
434 /* The easy case. */
435 if (alias_sets_must_conflict_p (set1, set2))
436 return 1;
438 /* See if the first alias set is a subset of the second. */
439 ase = get_alias_set_entry (set1);
440 if (ase != 0
441 && (ase->has_zero_child
442 || splay_tree_lookup (ase->children,
443 (splay_tree_key) set2)))
444 return 1;
446 /* Now do the same, but with the alias sets reversed. */
447 ase = get_alias_set_entry (set2);
448 if (ase != 0
449 && (ase->has_zero_child
450 || splay_tree_lookup (ase->children,
451 (splay_tree_key) set1)))
452 return 1;
454 /* The two alias sets are distinct and neither one is the
455 child of the other. Therefore, they cannot conflict. */
456 return 0;
459 /* Return 1 if the two specified alias sets will always conflict. */
462 alias_sets_must_conflict_p (alias_set_type set1, alias_set_type set2)
464 if (set1 == 0 || set2 == 0 || set1 == set2)
465 return 1;
467 return 0;
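/* For example (informal): alias_sets_must_conflict_p (0, 5) and
   alias_sets_must_conflict_p (3, 3) return 1, whereas
   alias_sets_must_conflict_p (3, 5) returns 0 even if set 5 was
   recorded as a subset of set 3 -- "may conflict" is instead
   answered by alias_sets_conflict_p above.  */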
470 /* Return 1 if any MEM object of type T1 will always conflict (using the
471 dependency routines in this file) with any MEM object of type T2.
472 This is used when allocating temporary storage. If T1 and/or T2 are
473 NULL_TREE, it means we know nothing about the storage. */
476 objects_must_conflict_p (tree t1, tree t2)
478 alias_set_type set1, set2;
480 /* If neither has a type specified, we don't know if they'll conflict
481 because we may be using them to store objects of various types, for
482 example the argument and local variables areas of inlined functions. */
483 if (t1 == 0 && t2 == 0)
484 return 0;
486 /* If they are the same type, they must conflict. */
487 if (t1 == t2
488 /* Likewise if both are volatile. */
489 || (t1 != 0 && TYPE_VOLATILE (t1) && t2 != 0 && TYPE_VOLATILE (t2)))
490 return 1;
492 set1 = t1 ? get_alias_set (t1) : 0;
493 set2 = t2 ? get_alias_set (t2) : 0;
495 /* We can't use alias_sets_conflict_p because we must make sure
496 that every subtype of t1 will conflict with every subtype of
497 t2 for which a pair of subobjects of these respective subtypes
498 overlaps on the stack. */
499 return alias_sets_must_conflict_p (set1, set2);
502 /* Return true if all nested component references handled by
503 get_inner_reference in T are such that we should use the alias set
504 provided by the object at the heart of T.
506 This is true for non-addressable components (which don't have their
507 own alias set), as well as components of objects in alias set zero.
508 This latter point is a special case wherein we wish to override the
509 alias set used by the component, but we don't have per-FIELD_DECL
510 assignable alias sets. */
512 bool
513 component_uses_parent_alias_set (const_tree t)
515 while (1)
517 /* If we're at the end, it vacuously uses its own alias set. */
518 if (!handled_component_p (t))
519 return false;
521 switch (TREE_CODE (t))
523 case COMPONENT_REF:
524 if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1)))
525 return true;
526 break;
528 case ARRAY_REF:
529 case ARRAY_RANGE_REF:
530 if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0))))
531 return true;
532 break;
534 case REALPART_EXPR:
535 case IMAGPART_EXPR:
536 break;
538 default:
539 /* Bitfields and casts are never addressable. */
540 return true;
543 t = TREE_OPERAND (t, 0);
544 if (get_alias_set (TREE_TYPE (t)) == 0)
545 return true;
549 /* Return the alias set for the memory pointed to by T, which may be
550 either a type or an expression. Return -1 if there is nothing
551 special about dereferencing T. */
553 static alias_set_type
554 get_deref_alias_set_1 (tree t)
556 /* If we're not doing any alias analysis, just assume everything
557 aliases everything else. */
558 if (!flag_strict_aliasing)
559 return 0;
561 /* All we care about is the type. */
562 if (! TYPE_P (t))
563 t = TREE_TYPE (t);
565 /* If we have an INDIRECT_REF via a void pointer, we don't
566 know anything about what that might alias. Likewise if the
567 pointer is marked that way. */
568 if (TREE_CODE (TREE_TYPE (t)) == VOID_TYPE
569 || TYPE_REF_CAN_ALIAS_ALL (t))
570 return 0;
572 return -1;
575 /* Return the alias set for the memory pointed to by T, which may be
576 either a type or an expression. */
578 alias_set_type
579 get_deref_alias_set (tree t)
581 alias_set_type set = get_deref_alias_set_1 (t);
583 /* Fall back to the alias-set of the pointed-to type. */
584 if (set == -1)
586 if (! TYPE_P (t))
587 t = TREE_TYPE (t);
588 set = get_alias_set (TREE_TYPE (t));
591 return set;
594 /* Return the alias set for T, which may be either a type or an
595 expression. Call language-specific routine for help, if needed. */
597 alias_set_type
598 get_alias_set (tree t)
600 alias_set_type set;
602 /* If we're not doing any alias analysis, just assume everything
603 aliases everything else. Also return 0 if this or its type is
604 an error. */
605 if (! flag_strict_aliasing || t == error_mark_node
606 || (! TYPE_P (t)
607 && (TREE_TYPE (t) == 0 || TREE_TYPE (t) == error_mark_node)))
608 return 0;
610 /* We can be passed either an expression or a type. This and the
611 language-specific routine may make mutually-recursive calls to each other
612 to figure out what to do. At each juncture, we see if this is a tree
613 that the language may need to handle specially. First handle things that
614 aren't types. */
615 if (! TYPE_P (t))
617 tree inner;
619 /* Give the language a chance to do something with this tree
620 before we look at it. */
621 STRIP_NOPS (t);
622 set = lang_hooks.get_alias_set (t);
623 if (set != -1)
624 return set;
626 /* Get the base object of the reference. */
627 inner = t;
628 while (handled_component_p (inner))
630 /* If there is a VIEW_CONVERT_EXPR in the chain we cannot use
631 the type of any component references that wrap it to
632 determine the alias-set. */
633 if (TREE_CODE (inner) == VIEW_CONVERT_EXPR)
634 t = TREE_OPERAND (inner, 0);
635 inner = TREE_OPERAND (inner, 0);
638 /* Handle pointer dereferences here, they can override the
639 alias-set. */
640 if (INDIRECT_REF_P (inner))
642 set = get_deref_alias_set_1 (TREE_OPERAND (inner, 0));
643 if (set != -1)
644 return set;
646 else if (TREE_CODE (inner) == TARGET_MEM_REF)
647 return get_deref_alias_set (TMR_OFFSET (inner));
648 else if (TREE_CODE (inner) == MEM_REF)
650 set = get_deref_alias_set_1 (TREE_OPERAND (inner, 1));
651 if (set != -1)
652 return set;
655 /* If the innermost reference is a MEM_REF that has a
656 conversion embedded treat it like a VIEW_CONVERT_EXPR above,
657 using the memory access type for determining the alias-set. */
658 if (TREE_CODE (inner) == MEM_REF
659 && TYPE_MAIN_VARIANT (TREE_TYPE (inner))
660 != TYPE_MAIN_VARIANT
661 (TREE_TYPE (TREE_TYPE (TREE_OPERAND (inner, 1)))))
662 return get_deref_alias_set (TREE_OPERAND (inner, 1));
664 /* Otherwise, pick up the outermost object that we could have a pointer
665 to, processing conversions as above. */
666 while (component_uses_parent_alias_set (t))
668 t = TREE_OPERAND (t, 0);
669 STRIP_NOPS (t);
672 /* If we've already determined the alias set for a decl, just return
673 it. This is necessary for C++ anonymous unions, whose component
674 variables don't look like union members (boo!). */
675 if (TREE_CODE (t) == VAR_DECL
676 && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t)))
677 return MEM_ALIAS_SET (DECL_RTL (t));
679 /* Now all we care about is the type. */
680 t = TREE_TYPE (t);
683 /* Variant qualifiers don't affect the alias set, so get the main
684 variant. */
685 t = TYPE_MAIN_VARIANT (t);
687 /* Always use the canonical type as well. If this is a type that
688 requires structural comparisons to identify compatible types
689 use alias set zero. */
690 if (TYPE_STRUCTURAL_EQUALITY_P (t))
692 /* Allow the language to specify another alias set for this
693 type. */
694 set = lang_hooks.get_alias_set (t);
695 if (set != -1)
696 return set;
697 return 0;
700 t = TYPE_CANONICAL (t);
702 /* The canonical type should not require structural equality checks. */
703 gcc_checking_assert (!TYPE_STRUCTURAL_EQUALITY_P (t));
705 /* If this is a type with a known alias set, return it. */
706 if (TYPE_ALIAS_SET_KNOWN_P (t))
707 return TYPE_ALIAS_SET (t);
709 /* We don't want to set TYPE_ALIAS_SET for incomplete types. */
710 if (!COMPLETE_TYPE_P (t))
712 /* For arrays with unknown size the conservative answer is the
713 alias set of the element type. */
714 if (TREE_CODE (t) == ARRAY_TYPE)
715 return get_alias_set (TREE_TYPE (t));
717 /* But return zero as a conservative answer for incomplete types. */
718 return 0;
721 /* See if the language has special handling for this type. */
722 set = lang_hooks.get_alias_set (t);
723 if (set != -1)
724 return set;
726 /* There are no objects of FUNCTION_TYPE, so there's no point in
727 using up an alias set for them. (There are, of course, pointers
728 and references to functions, but that's different.) */
729 else if (TREE_CODE (t) == FUNCTION_TYPE || TREE_CODE (t) == METHOD_TYPE)
730 set = 0;
732 /* Unless the language specifies otherwise, let vector types alias
733 their components. This avoids some nasty type punning issues in
734 normal usage. And indeed lets vectors be treated more like an
735 array slice. */
736 else if (TREE_CODE (t) == VECTOR_TYPE)
737 set = get_alias_set (TREE_TYPE (t));
739 /* Unless the language specifies otherwise, treat array types the
740 same as their components. This avoids the asymmetry we get
741 through recording the components. Consider accessing a
742 character(kind=1) through a reference to a character(kind=1)[1:1].
743 Or consider if we want to assign integer(kind=4)[0:D.1387] and
744 integer(kind=4)[4] the same alias set or not.
745 Just be pragmatic here and make sure the array and its element
746 type get the same alias set assigned. */
747 else if (TREE_CODE (t) == ARRAY_TYPE && !TYPE_NONALIASED_COMPONENT (t))
748 set = get_alias_set (TREE_TYPE (t));
750 /* From the former common C and C++ langhook implementation:
752 Unfortunately, there is no canonical form of a pointer type.
753 In particular, if we have `typedef int I', then `int *', and
754 `I *' are different types. So, we have to pick a canonical
755 representative. We do this below.
757 Technically, this approach is actually more conservative than
758 it needs to be. In particular, `const int *' and `int *'
759 should be in different alias sets, according to the C and C++
760 standard, since their types are not the same, and so,
761 technically, an `int **' and `const int **' cannot point at
762 the same thing.
764 But, the standard is wrong. In particular, this code is
765 legal C++:
767 int *ip;
768 int **ipp = &ip;
769 const int* const* cipp = ipp;
770 And, it doesn't make sense for that to be legal unless you
771 can dereference IPP and CIPP. So, we ignore cv-qualifiers on
772 the pointed-to types. This issue has been reported to the
773 C++ committee.
775 In addition to the above canonicalization issue, with LTO
776 we should also canonicalize `T (*)[]' to `T *' avoiding
777 alias issues with pointer-to element types and pointer-to
778 array types.
780 Likewise we need to deal with the situation of incomplete
781 pointed-to types and make `*(struct X **)&a' and
782 `*(struct X {} **)&a' alias. Otherwise we will have to
783 guarantee that all pointer-to incomplete type variants
784 will be replaced by pointer-to complete type variants if
785 they are available.
787 With LTO the convenient situation of using `void *' to
788 access and store any pointer type will also become
789 more apparent (and `void *' is just another pointer-to
790 incomplete type). Assigning alias-set zero to `void *'
791 and all pointer-to incomplete types is not an appealing
792 solution. Assigning an effective alias-set zero affecting
793 only pointers might be appealing - by recording proper subset
794 relationships of all pointer alias-sets.
796 Pointer-to function types are another grey area which
797 needs caution. Globbing them all into one alias-set
798 or the above effective zero set would work.
800 For now just assign the same alias-set to all pointers.
801 That's simple and avoids all the above problems. */
802 else if (POINTER_TYPE_P (t)
803 && t != ptr_type_node)
804 set = get_alias_set (ptr_type_node);
806 /* Otherwise make a new alias set for this type. */
807 else
809 /* Each canonical type gets its own alias set, so canonical types
810 shouldn't form a tree. It doesn't really matter for types
811 we handle specially above, so only check it where it possibly
812 would result in a bogus alias set. */
813 gcc_checking_assert (TYPE_CANONICAL (t) == t);
815 set = new_alias_set ();
818 TYPE_ALIAS_SET (t) = set;
820 /* If this is an aggregate type or a complex type, we must record any
821 component aliasing information. */
822 if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE)
823 record_component_aliases (t);
825 return set;
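/* A few concrete consequences of the code above (an informal sketch,
   not an exhaustive specification):

     get_alias_set (build_pointer_type (integer_type_node))
       == get_alias_set (ptr_type_node)
         -- all pointer types currently share one alias set;
     get_alias_set (T) == 0 for an incomplete non-array type T
         -- the conservative answer;
     an ARRAY_TYPE without TYPE_NONALIASED_COMPONENT shares the
     alias set of its element type.  */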
828 /* Return a brand-new alias set. */
830 alias_set_type
831 new_alias_set (void)
833 if (flag_strict_aliasing)
835 if (alias_sets == 0)
836 vec_safe_push (alias_sets, (alias_set_entry) 0);
837 vec_safe_push (alias_sets, (alias_set_entry) 0);
838 return alias_sets->length () - 1;
840 else
841 return 0;
844 /* Indicate that things in SUBSET can alias things in SUPERSET, but that
845 not everything that aliases SUPERSET also aliases SUBSET. For example,
846 in C, a store to an `int' can alias a load of a structure containing an
847 `int', and vice versa. But it can't alias a load of a 'double' member
848 of the same structure. Here, the structure would be the SUPERSET and
849 `int' the SUBSET. This relationship is also described in the comment at
850 the beginning of this file.
852 This function should be called only once per SUPERSET/SUBSET pair.
854 It is illegal for SUPERSET to be zero; everything is implicitly a
855 subset of alias set zero. */
857 void
858 record_alias_subset (alias_set_type superset, alias_set_type subset)
860 alias_set_entry superset_entry;
861 alias_set_entry subset_entry;
863 /* It is possible in complex type situations for both sets to be the same,
864 in which case we can ignore this operation. */
865 if (superset == subset)
866 return;
868 gcc_assert (superset);
870 superset_entry = get_alias_set_entry (superset);
871 if (superset_entry == 0)
873 /* Create an entry for the SUPERSET, so that we have a place to
874 attach the SUBSET. */
875 superset_entry = ggc_alloc_cleared_alias_set_entry_d ();
876 superset_entry->alias_set = superset;
877 superset_entry->children
878 = splay_tree_new_ggc (splay_tree_compare_ints,
879 ggc_alloc_splay_tree_scalar_scalar_splay_tree_s,
880 ggc_alloc_splay_tree_scalar_scalar_splay_tree_node_s);
881 superset_entry->has_zero_child = 0;
882 (*alias_sets)[superset] = superset_entry;
885 if (subset == 0)
886 superset_entry->has_zero_child = 1;
887 else
889 subset_entry = get_alias_set_entry (subset);
890 /* If there is an entry for the subset, enter all of its children
891 (if they are not already present) as children of the SUPERSET. */
892 if (subset_entry)
894 if (subset_entry->has_zero_child)
895 superset_entry->has_zero_child = 1;
897 splay_tree_foreach (subset_entry->children, insert_subset_children,
898 superset_entry->children);
901 /* Enter the SUBSET itself as a child of the SUPERSET. */
902 splay_tree_insert (superset_entry->children,
903 (splay_tree_key) subset, 0);
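/* For instance, continuing the `struct S' example from the head of
   this file, record_component_aliases below makes the calls

       record_alias_subset (get_alias_set (S_type),
                            get_alias_set (integer_type_node));
       record_alias_subset (get_alias_set (S_type),
                            get_alias_set (double_type_node));

   (S_type standing for the RECORD_TYPE of `struct S'), after which
   accesses through `S' conflict with both `int' and `double'
   accesses, while `int' and `double' accesses still do not conflict
   with each other.  */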
907 /* Record that component types of TYPE, if any, are part of that type for
908 aliasing purposes. For record types, we only record component types
909 for fields that are not marked non-addressable. For array types, we
910 only record the component type if it is not marked non-aliased. */
912 void
913 record_component_aliases (tree type)
915 alias_set_type superset = get_alias_set (type);
916 tree field;
918 if (superset == 0)
919 return;
921 switch (TREE_CODE (type))
923 case RECORD_TYPE:
924 case UNION_TYPE:
925 case QUAL_UNION_TYPE:
926 /* Recursively record aliases for the base classes, if there are any. */
927 if (TYPE_BINFO (type))
929 int i;
930 tree binfo, base_binfo;
932 for (binfo = TYPE_BINFO (type), i = 0;
933 BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
934 record_alias_subset (superset,
935 get_alias_set (BINFO_TYPE (base_binfo)));
937 for (field = TYPE_FIELDS (type); field != 0; field = DECL_CHAIN (field))
938 if (TREE_CODE (field) == FIELD_DECL && !DECL_NONADDRESSABLE_P (field))
939 record_alias_subset (superset, get_alias_set (TREE_TYPE (field)));
940 break;
942 case COMPLEX_TYPE:
943 record_alias_subset (superset, get_alias_set (TREE_TYPE (type)));
944 break;
946 /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
947 element type. */
949 default:
950 break;
954 /* Allocate an alias set for use in storing and reading from the varargs
955 spill area. */
957 static GTY(()) alias_set_type varargs_set = -1;
959 alias_set_type
960 get_varargs_alias_set (void)
962 #if 1
963 /* We now lower VA_ARG_EXPR, and there's currently no way to attach the
964 varargs alias set to an INDIRECT_REF (FIXME!), so we can't
965 consistently use the varargs alias set for loads from the varargs
966 area. So don't use it anywhere. */
967 return 0;
968 #else
969 if (varargs_set == -1)
970 varargs_set = new_alias_set ();
972 return varargs_set;
973 #endif
976 /* Likewise, but used for the fixed portions of the frame, e.g., register
977 save areas. */
979 static GTY(()) alias_set_type frame_set = -1;
981 alias_set_type
982 get_frame_alias_set (void)
984 if (frame_set == -1)
985 frame_set = new_alias_set ();
987 return frame_set;
990 /* Create a new, unique base with id ID. */
992 static rtx
993 unique_base_value (HOST_WIDE_INT id)
995 return gen_rtx_ADDRESS (Pmode, id);
998 /* Return true if accesses based on any other base value cannot alias
999 those based on X. */
1001 static bool
1002 unique_base_value_p (rtx x)
1004 return GET_CODE (x) == ADDRESS && GET_MODE (x) == Pmode;
1007 /* Return true if X is known to be a base value. */
1009 static bool
1010 known_base_value_p (rtx x)
1012 switch (GET_CODE (x))
1014 case LABEL_REF:
1015 case SYMBOL_REF:
1016 return true;
1018 case ADDRESS:
1019 /* Arguments may or may not be bases; we don't know for sure. */
1020 return GET_MODE (x) != VOIDmode;
1022 default:
1023 return false;
1027 /* Inside SRC, the source of a SET, find a base address. */
1029 static rtx
1030 find_base_value (rtx src)
1032 unsigned int regno;
1034 #if defined (FIND_BASE_TERM)
1035 /* Try machine-dependent ways to find the base term. */
1036 src = FIND_BASE_TERM (src);
1037 #endif
1039 switch (GET_CODE (src))
1041 case SYMBOL_REF:
1042 case LABEL_REF:
1043 return src;
1045 case REG:
1046 regno = REGNO (src);
1047 /* At the start of a function, argument registers have known base
1048 values which may be lost later. Returning an ADDRESS
1049 expression here allows optimization based on argument values
1050 even when the argument registers are used for other purposes. */
1051 if (regno < FIRST_PSEUDO_REGISTER && copying_arguments)
1052 return new_reg_base_value[regno];
1054 /* If a pseudo has a known base value, return it. Do not do this
1055 for non-fixed hard regs since it can result in a circular
1056 dependency chain for registers which have values at function entry.
1058 The test above is not sufficient because the scheduler may move
1059 a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN. */
1060 if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno])
1061 && regno < vec_safe_length (reg_base_value))
1063 /* If we're inside init_alias_analysis, use new_reg_base_value
1064 to reduce the number of relaxation iterations. */
1065 if (new_reg_base_value && new_reg_base_value[regno]
1066 && DF_REG_DEF_COUNT (regno) == 1)
1067 return new_reg_base_value[regno];
1069 if ((*reg_base_value)[regno])
1070 return (*reg_base_value)[regno];
1073 return 0;
1075 case MEM:
1076 /* Check for an argument passed in memory. Only record in the
1077 copying-arguments block; it is too hard to track changes
1078 otherwise. */
1079 if (copying_arguments
1080 && (XEXP (src, 0) == arg_pointer_rtx
1081 || (GET_CODE (XEXP (src, 0)) == PLUS
1082 && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx)))
1083 return arg_base_value;
1084 return 0;
1086 case CONST:
1087 src = XEXP (src, 0);
1088 if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS)
1089 break;
1091 /* ... fall through ... */
1093 case PLUS:
1094 case MINUS:
1096 rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1);
1098 /* If either operand is a REG that is a known pointer, then it
1099 is the base. */
1100 if (REG_P (src_0) && REG_POINTER (src_0))
1101 return find_base_value (src_0);
1102 if (REG_P (src_1) && REG_POINTER (src_1))
1103 return find_base_value (src_1);
1105 /* If either operand is a REG, then see if we already have
1106 a known value for it. */
1107 if (REG_P (src_0))
1109 temp = find_base_value (src_0);
1110 if (temp != 0)
1111 src_0 = temp;
1114 if (REG_P (src_1))
1116 temp = find_base_value (src_1);
1117 if (temp != 0)
1118 src_1 = temp;
1121 /* If either base is a named object or a special address
1122 (like an argument or stack reference), then use it for the
1123 base term. */
1124 if (src_0 != 0 && known_base_value_p (src_0))
1125 return src_0;
1127 if (src_1 != 0 && known_base_value_p (src_1))
1128 return src_1;
1130 /* Guess which operand is the base address:
1131 If either operand is a symbol, then it is the base. If
1132 either operand is a CONST_INT, then the other is the base. */
1133 if (CONST_INT_P (src_1) || CONSTANT_P (src_0))
1134 return find_base_value (src_0);
1135 else if (CONST_INT_P (src_0) || CONSTANT_P (src_1))
1136 return find_base_value (src_1);
1138 return 0;
1141 case LO_SUM:
1142 /* The standard form is (lo_sum reg sym) so look only at the
1143 second operand. */
1144 return find_base_value (XEXP (src, 1));
1146 case AND:
1147 /* If the second operand is constant set the base
1148 address to the first operand. */
1149 if (CONST_INT_P (XEXP (src, 1)) && INTVAL (XEXP (src, 1)) != 0)
1150 return find_base_value (XEXP (src, 0));
1151 return 0;
1153 case TRUNCATE:
1154 /* As we do not know which address space the pointer is referring to, we can
1155 handle this only if the target does not support different pointer or
1156 address modes depending on the address space. */
1157 if (!target_default_pointer_address_modes_p ())
1158 break;
1159 if (GET_MODE_SIZE (GET_MODE (src)) < GET_MODE_SIZE (Pmode))
1160 break;
1161 /* Fall through. */
1162 case HIGH:
1163 case PRE_INC:
1164 case PRE_DEC:
1165 case POST_INC:
1166 case POST_DEC:
1167 case PRE_MODIFY:
1168 case POST_MODIFY:
1169 return find_base_value (XEXP (src, 0));
1171 case ZERO_EXTEND:
1172 case SIGN_EXTEND: /* used for NT/Alpha pointers */
1173 /* As we do not know which address space the pointer is referring to, we can
1174 handle this only if the target does not support different pointer or
1175 address modes depending on the address space. */
1176 if (!target_default_pointer_address_modes_p ())
1177 break;
1180 rtx temp = find_base_value (XEXP (src, 0));
1182 if (temp != 0 && CONSTANT_P (temp))
1183 temp = convert_memory_address (Pmode, temp);
1185 return temp;
1188 default:
1189 break;
1192 return 0;
1195 /* Called from init_alias_analysis indirectly through note_stores,
1196 or directly if DEST is a register with a REG_NOALIAS note attached.
1197 SET is null in the latter case. */
1199 /* While scanning insns to find base values, reg_seen[N] is nonzero if
1200 register N has been set in this function. */
1201 static sbitmap reg_seen;
1203 static void
1204 record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
1206 unsigned regno;
1207 rtx src;
1208 int n;
1210 if (!REG_P (dest))
1211 return;
1213 regno = REGNO (dest);
1215 gcc_checking_assert (regno < reg_base_value->length ());
1217 /* If this spans multiple hard registers, then we must indicate that every
1218 register has an unusable value. */
1219 if (regno < FIRST_PSEUDO_REGISTER)
1220 n = hard_regno_nregs[regno][GET_MODE (dest)];
1221 else
1222 n = 1;
1223 if (n != 1)
1225 while (--n >= 0)
1227 bitmap_set_bit (reg_seen, regno + n);
1228 new_reg_base_value[regno + n] = 0;
1230 return;
1233 if (set)
1235 /* A CLOBBER wipes out any old value but does not prevent a previously
1236 unset register from acquiring a base address (i.e. reg_seen is not
1237 set). */
1238 if (GET_CODE (set) == CLOBBER)
1240 new_reg_base_value[regno] = 0;
1241 return;
1243 src = SET_SRC (set);
1245 else
1247 /* There's a REG_NOALIAS note against DEST. */
1248 if (bitmap_bit_p (reg_seen, regno))
1250 new_reg_base_value[regno] = 0;
1251 return;
1253 bitmap_set_bit (reg_seen, regno);
1254 new_reg_base_value[regno] = unique_base_value (unique_id++);
1255 return;
1258 /* If this is not the first set of REGNO, see whether the new value
1259 is related to the old one. There are two cases of interest:
1261 (1) The register might be assigned an entirely new value
1262 that has the same base term as the original set.
1264 (2) The set might be a simple self-modification that
1265 cannot change REGNO's base value.
1267 If neither case holds, reject the original base value as invalid.
1268 Note that the following situation is not detected:
1270 extern int x, y; int *p = &x; p += (&y-&x);
1272 ANSI C does not allow computing the difference of addresses
1273 of distinct top level objects. */
1274 if (new_reg_base_value[regno] != 0
1275 && find_base_value (src) != new_reg_base_value[regno])
1276 switch (GET_CODE (src))
1278 case LO_SUM:
1279 case MINUS:
1280 if (XEXP (src, 0) != dest && XEXP (src, 1) != dest)
1281 new_reg_base_value[regno] = 0;
1282 break;
1283 case PLUS:
1284 /* If the value we add in the PLUS is also a valid base value,
1285 this might be the actual base value, and the original value
1286 an index. */
1288 rtx other = NULL_RTX;
1290 if (XEXP (src, 0) == dest)
1291 other = XEXP (src, 1);
1292 else if (XEXP (src, 1) == dest)
1293 other = XEXP (src, 0);
1295 if (! other || find_base_value (other))
1296 new_reg_base_value[regno] = 0;
1297 break;
1299 case AND:
1300 if (XEXP (src, 0) != dest || !CONST_INT_P (XEXP (src, 1)))
1301 new_reg_base_value[regno] = 0;
1302 break;
1303 default:
1304 new_reg_base_value[regno] = 0;
1305 break;
1307 /* If this is the first set of a register, record the value. */
1308 else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
1309 && ! bitmap_bit_p (reg_seen, regno) && new_reg_base_value[regno] == 0)
1310 new_reg_base_value[regno] = find_base_value (src);
1312 bitmap_set_bit (reg_seen, regno);
1315 /* Return REG_BASE_VALUE for REGNO. Selective scheduler uses this to avoid
1316 using hard registers with non-null REG_BASE_VALUE for renaming. */
1318 get_reg_base_value (unsigned int regno)
1320 return (*reg_base_value)[regno];
1323 /* If a value is known for REGNO, return it. */
1326 get_reg_known_value (unsigned int regno)
1328 if (regno >= FIRST_PSEUDO_REGISTER)
1330 regno -= FIRST_PSEUDO_REGISTER;
1331 if (regno < vec_safe_length (reg_known_value))
1332 return (*reg_known_value)[regno];
1334 return NULL;
1337 /* Set it. */
1339 static void
1340 set_reg_known_value (unsigned int regno, rtx val)
1342 if (regno >= FIRST_PSEUDO_REGISTER)
1344 regno -= FIRST_PSEUDO_REGISTER;
1345 if (regno < vec_safe_length (reg_known_value))
1346 (*reg_known_value)[regno] = val;
1350 /* Similarly for reg_known_equiv_p. */
1352 bool
1353 get_reg_known_equiv_p (unsigned int regno)
1355 if (regno >= FIRST_PSEUDO_REGISTER)
1357 regno -= FIRST_PSEUDO_REGISTER;
1358 if (regno < vec_safe_length (reg_known_value))
1359 return bitmap_bit_p (reg_known_equiv_p, regno);
1361 return false;
1364 static void
1365 set_reg_known_equiv_p (unsigned int regno, bool val)
1367 if (regno >= FIRST_PSEUDO_REGISTER)
1369 regno -= FIRST_PSEUDO_REGISTER;
1370 if (regno < vec_safe_length (reg_known_value))
1372 if (val)
1373 bitmap_set_bit (reg_known_equiv_p, regno);
1374 else
1375 bitmap_clear_bit (reg_known_equiv_p, regno);
1381 /* Returns a canonical version of X, from the point of view of alias
1382 analysis. (For example, if X is a MEM whose address is a register,
1383 and the register has a known value (say a SYMBOL_REF), then a MEM
1384 whose address is the SYMBOL_REF is returned.) */
1387 canon_rtx (rtx x)
1389 /* Recursively look for equivalences. */
1390 if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1392 rtx t = get_reg_known_value (REGNO (x));
1393 if (t == x)
1394 return x;
1395 if (t)
1396 return canon_rtx (t);
1399 if (GET_CODE (x) == PLUS)
1401 rtx x0 = canon_rtx (XEXP (x, 0));
1402 rtx x1 = canon_rtx (XEXP (x, 1));
1404 if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
1406 if (CONST_INT_P (x0))
1407 return plus_constant (GET_MODE (x), x1, INTVAL (x0));
1408 else if (CONST_INT_P (x1))
1409 return plus_constant (GET_MODE (x), x0, INTVAL (x1));
1410 return gen_rtx_PLUS (GET_MODE (x), x0, x1);
1414 /* This gives us much better alias analysis when called from
1415 the loop optimizer. Note we want to leave the original
1416 MEM alone, but need to return the canonicalized MEM with
1417 all the flags with their original values. */
1418 else if (MEM_P (x))
1419 x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0)));
1421 return x;
1424 /* Return 1 if X and Y are identical-looking rtx's.
1425 Expect that X and Y have already been canonicalized.
1427 We use the data in reg_known_value above to see if two registers with
1428 different numbers are, in fact, equivalent. */
1430 static int
1431 rtx_equal_for_memref_p (const_rtx x, const_rtx y)
1433 int i;
1434 int j;
1435 enum rtx_code code;
1436 const char *fmt;
1438 if (x == 0 && y == 0)
1439 return 1;
1440 if (x == 0 || y == 0)
1441 return 0;
1443 if (x == y)
1444 return 1;
1446 code = GET_CODE (x);
1447 /* Rtx's of different codes cannot be equal. */
1448 if (code != GET_CODE (y))
1449 return 0;
1451 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1452 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1454 if (GET_MODE (x) != GET_MODE (y))
1455 return 0;
1457 /* Some RTL can be compared without a recursive examination. */
1458 switch (code)
1460 case REG:
1461 return REGNO (x) == REGNO (y);
1463 case LABEL_REF:
1464 return XEXP (x, 0) == XEXP (y, 0);
1466 case SYMBOL_REF:
1467 return XSTR (x, 0) == XSTR (y, 0);
1469 case ENTRY_VALUE:
1470 /* This is magic, don't go through canonicalization et al. */
1471 return rtx_equal_p (ENTRY_VALUE_EXP (x), ENTRY_VALUE_EXP (y));
1473 case VALUE:
1474 CASE_CONST_UNIQUE:
1475 /* There's no need to compare the contents of CONST_DOUBLEs or
1476 CONST_INTs because pointer equality is a good enough
1477 comparison for these nodes. */
1478 return 0;
1480 default:
1481 break;
1484 /* canon_rtx knows how to handle plus. No need to canonicalize. */
1485 if (code == PLUS)
1486 return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
1487 && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
1488 || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
1489 && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
1490 /* For commutative operations, the RTXs match if the operands match in any
1491 order. Also handle the simple binary and unary cases without a loop. */
1492 if (COMMUTATIVE_P (x))
1494 rtx xop0 = canon_rtx (XEXP (x, 0));
1495 rtx yop0 = canon_rtx (XEXP (y, 0));
1496 rtx yop1 = canon_rtx (XEXP (y, 1));
1498 return ((rtx_equal_for_memref_p (xop0, yop0)
1499 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop1))
1500 || (rtx_equal_for_memref_p (xop0, yop1)
1501 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop0)));
1503 else if (NON_COMMUTATIVE_P (x))
1505 return (rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
1506 canon_rtx (XEXP (y, 0)))
1507 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)),
1508 canon_rtx (XEXP (y, 1))));
1510 else if (UNARY_P (x))
1511 return rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
1512 canon_rtx (XEXP (y, 0)));
1514 /* Compare the elements. If any pair of corresponding elements
1515 fail to match, return 0 for the whole thing.
1517 Limit cases to types which actually appear in addresses. */
1519 fmt = GET_RTX_FORMAT (code);
1520 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1522 switch (fmt[i])
1524 case 'i':
1525 if (XINT (x, i) != XINT (y, i))
1526 return 0;
1527 break;
1529 case 'E':
1530 /* Two vectors must have the same length. */
1531 if (XVECLEN (x, i) != XVECLEN (y, i))
1532 return 0;
1534 /* And the corresponding elements must match. */
1535 for (j = 0; j < XVECLEN (x, i); j++)
1536 if (rtx_equal_for_memref_p (canon_rtx (XVECEXP (x, i, j)),
1537 canon_rtx (XVECEXP (y, i, j))) == 0)
1538 return 0;
1539 break;
1541 case 'e':
1542 if (rtx_equal_for_memref_p (canon_rtx (XEXP (x, i)),
1543 canon_rtx (XEXP (y, i))) == 0)
1544 return 0;
1545 break;
1547 /* This can happen for asm operands. */
1548 case 's':
1549 if (strcmp (XSTR (x, i), XSTR (y, i)))
1550 return 0;
1551 break;
1553 /* This can happen for an asm which clobbers memory. */
1554 case '0':
1555 break;
1557 /* It is believed that rtx's at this level will never
1558 contain anything but integers and other rtx's,
1559 except for within LABEL_REFs and SYMBOL_REFs. */
1560 default:
1561 gcc_unreachable ();
1564 return 1;
1567 static rtx
1568 find_base_term (rtx x)
1570 cselib_val *val;
1571 struct elt_loc_list *l, *f;
1572 rtx ret;
1574 #if defined (FIND_BASE_TERM)
1575 /* Try machine-dependent ways to find the base term. */
1576 x = FIND_BASE_TERM (x);
1577 #endif
1579 switch (GET_CODE (x))
1581 case REG:
1582 return REG_BASE_VALUE (x);
1584 case TRUNCATE:
1585 /* As we do not know which address space the pointer is referring to, we can
1586 handle this only if the target does not support different pointer or
1587 address modes depending on the address space. */
1588 if (!target_default_pointer_address_modes_p ())
1589 return 0;
1590 if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (Pmode))
1591 return 0;
1592 /* Fall through. */
1593 case HIGH:
1594 case PRE_INC:
1595 case PRE_DEC:
1596 case POST_INC:
1597 case POST_DEC:
1598 case PRE_MODIFY:
1599 case POST_MODIFY:
1600 return find_base_term (XEXP (x, 0));
1602 case ZERO_EXTEND:
1603 case SIGN_EXTEND: /* Used for Alpha/NT pointers */
1604 /* As we do not know which address space the pointer is referring to, we can
1605 handle this only if the target does not support different pointer or
1606 address modes depending on the address space. */
1607 if (!target_default_pointer_address_modes_p ())
1608 return 0;
1611 rtx temp = find_base_term (XEXP (x, 0));
1613 if (temp != 0 && CONSTANT_P (temp))
1614 temp = convert_memory_address (Pmode, temp);
1616 return temp;
1619 case VALUE:
1620 val = CSELIB_VAL_PTR (x);
1621 ret = NULL_RTX;
1623 if (!val)
1624 return ret;
1626 if (cselib_sp_based_value_p (val))
1627 return static_reg_base_value[STACK_POINTER_REGNUM];
1629 f = val->locs;
1630 /* Temporarily reset val->locs to avoid infinite recursion. */
1631 val->locs = NULL;
1633 for (l = f; l; l = l->next)
1634 if (GET_CODE (l->loc) == VALUE
1635 && CSELIB_VAL_PTR (l->loc)->locs
1636 && !CSELIB_VAL_PTR (l->loc)->locs->next
1637 && CSELIB_VAL_PTR (l->loc)->locs->loc == x)
1638 continue;
1639 else if ((ret = find_base_term (l->loc)) != 0)
1640 break;
1642 val->locs = f;
1643 return ret;
1645 case LO_SUM:
1646 /* The standard form is (lo_sum reg sym) so look only at the
1647 second operand. */
1648 return find_base_term (XEXP (x, 1));
1650 case CONST:
1651 x = XEXP (x, 0);
1652 if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS)
1653 return 0;
1654 /* Fall through. */
1655 case PLUS:
1656 case MINUS:
1658 rtx tmp1 = XEXP (x, 0);
1659 rtx tmp2 = XEXP (x, 1);
1661 /* This is a little bit tricky since we have to determine which of
1662 the two operands represents the real base address. Otherwise this
1663 routine may return the index register instead of the base register.
1665 That may cause us to believe no aliasing was possible, when in
1666 fact aliasing is possible.
1668 We use a few simple tests to guess the base register. Additional
1669 tests can certainly be added. For example, if one of the operands
1670 is a shift or multiply, then it must be the index register and the
1671 other operand is the base register. */
1673 if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2))
1674 return find_base_term (tmp2);
1676 /* If either operand is known to be a pointer, then prefer it
1677 to determine the base term. */
1678 if (REG_P (tmp1) && REG_POINTER (tmp1))
1679 ;
1680 else if (REG_P (tmp2) && REG_POINTER (tmp2))
1681 {
1682 rtx tem = tmp1;
1683 tmp1 = tmp2;
1684 tmp2 = tem;
1685 }
1687 /* Go ahead and find the base term for both operands. If either base
1688 term is from a pointer or is a named object or a special address
1689 (like an argument or stack reference), then use it for the
1690 base term. */
1691 rtx base = find_base_term (tmp1);
1692 if (base != NULL_RTX
1693 && ((REG_P (tmp1) && REG_POINTER (tmp1))
1694 || known_base_value_p (base)))
1695 return base;
1696 base = find_base_term (tmp2);
1697 if (base != NULL_RTX
1698 && ((REG_P (tmp2) && REG_POINTER (tmp2))
1699 || known_base_value_p (base)))
1700 return base;
1702 /* We could not determine which of the two operands was the
1703 base register and which was the index. So we can determine
1704 nothing from the base alias check. */
1705 return 0;
1708 case AND:
1709 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) != 0)
1710 return find_base_term (XEXP (x, 0));
1711 return 0;
1713 case SYMBOL_REF:
1714 case LABEL_REF:
1715 return x;
1717 default:
1718 return 0;
1722 /* Return true if accesses to address X may alias accesses based
1723 on the stack pointer. */
1725 bool
1726 may_be_sp_based_p (rtx x)
1728 rtx base = find_base_term (x);
1729 return !base || base == static_reg_base_value[STACK_POINTER_REGNUM];
1732 /* Return 0 if the addresses X and Y are known to point to different
1733 objects, 1 if they might be pointers to the same object. */
1735 static int
1736 base_alias_check (rtx x, rtx x_base, rtx y, rtx y_base,
1737 enum machine_mode x_mode, enum machine_mode y_mode)
1739 /* If the address itself has no known base see if a known equivalent
1740 value has one. If either address still has no known base, nothing
1741 is known about aliasing. */
1742 if (x_base == 0)
1744 rtx x_c;
1746 if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x)
1747 return 1;
1749 x_base = find_base_term (x_c);
1750 if (x_base == 0)
1751 return 1;
1754 if (y_base == 0)
1756 rtx y_c;
1757 if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y)
1758 return 1;
1760 y_base = find_base_term (y_c);
1761 if (y_base == 0)
1762 return 1;
1765 /* If the base addresses are equal nothing is known about aliasing. */
1766 if (rtx_equal_p (x_base, y_base))
1767 return 1;
1769 /* The base addresses are different expressions. If they are not accessed
1770 via AND, there is no conflict. We can bring knowledge of object
1771 alignment into play here. For example, on alpha, "char a, b;" can
1772 alias one another, though "char a; long b;" cannot. AND addresses may
1773 implicitly alias surrounding objects; i.e. unaligned access in DImode
1774 via AND address can alias all surrounding object types except those
1775 with alignment 8 or higher. */
1776 if (GET_CODE (x) == AND && GET_CODE (y) == AND)
1777 return 1;
1778 if (GET_CODE (x) == AND
1779 && (!CONST_INT_P (XEXP (x, 1))
1780 || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
1781 return 1;
1782 if (GET_CODE (y) == AND
1783 && (!CONST_INT_P (XEXP (y, 1))
1784 || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
1785 return 1;
1787 /* Differing symbols not accessed via AND never alias. */
1788 if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
1789 return 0;
1791 if (unique_base_value_p (x_base) || unique_base_value_p (y_base))
1792 return 0;
1794 return 1;
1797 /* Callback for for_each_rtx, that returns 1 upon encountering a VALUE
1798 whose UID is greater than the int uid that D points to. */
1800 static int
1801 refs_newer_value_cb (rtx *x, void *d)
1803 if (GET_CODE (*x) == VALUE && CSELIB_VAL_PTR (*x)->uid > *(int *)d)
1804 return 1;
1806 return 0;
1809 /* Return TRUE if EXPR refers to a VALUE whose uid is greater than
1810 that of V. */
1812 static bool
1813 refs_newer_value_p (rtx expr, rtx v)
1815 int minuid = CSELIB_VAL_PTR (v)->uid;
1817 return for_each_rtx (&expr, refs_newer_value_cb, &minuid);
1820 /* Convert the address X into something we can use. This is done by returning
1821 it unchanged unless it is a value; in the latter case we call cselib to get
1822 a more useful rtx. */
1825 get_addr (rtx x)
1827 cselib_val *v;
1828 struct elt_loc_list *l;
1830 if (GET_CODE (x) != VALUE)
1831 return x;
1832 v = CSELIB_VAL_PTR (x);
1833 if (v)
1835 bool have_equivs = cselib_have_permanent_equivalences ();
1836 if (have_equivs)
1837 v = canonical_cselib_val (v);
1838 for (l = v->locs; l; l = l->next)
1839 if (CONSTANT_P (l->loc))
1840 return l->loc;
1841 for (l = v->locs; l; l = l->next)
1842 if (!REG_P (l->loc) && !MEM_P (l->loc)
1843 /* Avoid infinite recursion when potentially dealing with
1844 var-tracking artificial equivalences, by skipping the
1845 equivalences themselves, and not choosing expressions
1846 that refer to newer VALUEs. */
1847 && (!have_equivs
1848 || (GET_CODE (l->loc) != VALUE
1849 && !refs_newer_value_p (l->loc, x))))
1850 return l->loc;
1851 if (have_equivs)
1853 for (l = v->locs; l; l = l->next)
1854 if (REG_P (l->loc)
1855 || (GET_CODE (l->loc) != VALUE
1856 && !refs_newer_value_p (l->loc, x)))
1857 return l->loc;
1858 /* Return the canonical value. */
1859 return v->val_rtx;
1861 if (v->locs)
1862 return v->locs->loc;
1864 return x;
1867 /* Return the address of the (N_REFS + 1)th memory reference to ADDR
1868 where SIZE is the size in bytes of the memory reference. If ADDR
1869 is not modified by the memory reference then ADDR is returned. */
1871 static rtx
1872 addr_side_effect_eval (rtx addr, int size, int n_refs)
1874 int offset = 0;
1876 switch (GET_CODE (addr))
1878 case PRE_INC:
1879 offset = (n_refs + 1) * size;
1880 break;
1881 case PRE_DEC:
1882 offset = -(n_refs + 1) * size;
1883 break;
1884 case POST_INC:
1885 offset = n_refs * size;
1886 break;
1887 case POST_DEC:
1888 offset = -n_refs * size;
1889 break;
1891 default:
1892 return addr;
1895 if (offset)
1896 addr = gen_rtx_PLUS (GET_MODE (addr), XEXP (addr, 0),
1897 GEN_INT (offset));
1898 else
1899 addr = XEXP (addr, 0);
1900 addr = canon_rtx (addr);
1902 return addr;
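/* Worked example (editorial sketch): for a post-increment address such as
   (post_inc:SI (reg:SI 100)) with SIZE == 4 and N_REFS == 0, the switch
   above yields OFFSET == 0, so the function returns the canonicalized
   (reg:SI 100); with N_REFS == 1 it returns
   (plus:SI (reg:SI 100) (const_int 4)), the address seen by the second
   reference to the auto-incremented location.  */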
1905 /* Return TRUE if an object X sized at XSIZE bytes and another object
1906 Y sized at YSIZE bytes, starting C bytes after X, may overlap. If
1907 either size is zero, assume an overlap; otherwise use the
1908 absolute value of the sizes as the actual sizes. */
1910 static inline bool
1911 offset_overlap_p (HOST_WIDE_INT c, int xsize, int ysize)
1913 return (xsize == 0 || ysize == 0
1914 || (c >= 0
1915 ? (abs (xsize) > c)
1916 : (abs (ysize) > -c)));
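/* Worked example (editorial sketch): with XSIZE == 4, YSIZE == 8 and
   C == 2 (Y starts two bytes past X), C >= 0 and abs (XSIZE) > C, so an
   overlap is reported; with C == 4 the X access ends exactly where Y
   begins and the function returns false.  */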
1919 /* Return one if X and Y (memory addresses) reference the
1920 same location in memory or if the references overlap.
1921 Return zero if they do not overlap; otherwise return
1922 minus one, in which case they still might reference the same location.
1924 C is an offset accumulator. When
1925 C is nonzero, we are testing aliases between X and Y + C.
1926 XSIZE is the size in bytes of the X reference,
1927 similarly YSIZE is the size in bytes for Y.
1928 Expect that canon_rtx has been already called for X and Y.
1930 If XSIZE or YSIZE is zero, we do not know the amount of memory being
1931 referenced (the reference was BLKmode), so make the most pessimistic
1932 assumptions.
1934 If XSIZE or YSIZE is negative, we may access memory outside the object
1935 being referenced as a side effect. This can happen when using AND to
1936 align memory references, as is done on the Alpha.
1938 It would be nice to notice that varying addresses cannot conflict with fp if no
1939 local variables had their addresses taken, but that's too hard now.
1941 ??? Contrary to the tree alias oracle this does not return
1942 one for X + non-constant and Y + non-constant when X and Y are equal.
1943 If that is fixed the TBAA hack for union type-punning can be removed. */
1945 static int
1946 memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
1948 if (GET_CODE (x) == VALUE)
1950 if (REG_P (y))
1952 struct elt_loc_list *l = NULL;
1953 if (CSELIB_VAL_PTR (x))
1954 for (l = canonical_cselib_val (CSELIB_VAL_PTR (x))->locs;
1955 l; l = l->next)
1956 if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, y))
1957 break;
1958 if (l)
1959 x = y;
1960 else
1961 x = get_addr (x);
1963 /* Don't call get_addr if y is the same VALUE. */
1964 else if (x != y)
1965 x = get_addr (x);
1967 if (GET_CODE (y) == VALUE)
1969 if (REG_P (x))
1971 struct elt_loc_list *l = NULL;
1972 if (CSELIB_VAL_PTR (y))
1973 for (l = canonical_cselib_val (CSELIB_VAL_PTR (y))->locs;
1974 l; l = l->next)
1975 if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, x))
1976 break;
1977 if (l)
1978 y = x;
1979 else
1980 y = get_addr (y);
1982 /* Don't call get_addr if x is the same VALUE. */
1983 else if (y != x)
1984 y = get_addr (y);
1986 if (GET_CODE (x) == HIGH)
1987 x = XEXP (x, 0);
1988 else if (GET_CODE (x) == LO_SUM)
1989 x = XEXP (x, 1);
1990 else
1991 x = addr_side_effect_eval (x, abs (xsize), 0);
1992 if (GET_CODE (y) == HIGH)
1993 y = XEXP (y, 0);
1994 else if (GET_CODE (y) == LO_SUM)
1995 y = XEXP (y, 1);
1996 else
1997 y = addr_side_effect_eval (y, abs (ysize), 0);
1999 if (rtx_equal_for_memref_p (x, y))
2001 return offset_overlap_p (c, xsize, ysize);
2004 /* This code used to check for conflicts involving stack references and
2005 globals but the base address alias code now handles these cases. */
2007 if (GET_CODE (x) == PLUS)
2009 /* The fact that X is canonicalized means that this
2010 PLUS rtx is canonicalized. */
2011 rtx x0 = XEXP (x, 0);
2012 rtx x1 = XEXP (x, 1);
2014 if (GET_CODE (y) == PLUS)
2016 /* The fact that Y is canonicalized means that this
2017 PLUS rtx is canonicalized. */
2018 rtx y0 = XEXP (y, 0);
2019 rtx y1 = XEXP (y, 1);
2021 if (rtx_equal_for_memref_p (x1, y1))
2022 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
2023 if (rtx_equal_for_memref_p (x0, y0))
2024 return memrefs_conflict_p (xsize, x1, ysize, y1, c);
2025 if (CONST_INT_P (x1))
2027 if (CONST_INT_P (y1))
2028 return memrefs_conflict_p (xsize, x0, ysize, y0,
2029 c - INTVAL (x1) + INTVAL (y1));
2030 else
2031 return memrefs_conflict_p (xsize, x0, ysize, y,
2032 c - INTVAL (x1));
2034 else if (CONST_INT_P (y1))
2035 return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
2037 return -1;
2039 else if (CONST_INT_P (x1))
2040 return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
2042 else if (GET_CODE (y) == PLUS)
2044 /* The fact that Y is canonicalized means that this
2045 PLUS rtx is canonicalized. */
2046 rtx y0 = XEXP (y, 0);
2047 rtx y1 = XEXP (y, 1);
2049 if (CONST_INT_P (y1))
2050 return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
2051 else
2052 return -1;
2055 if (GET_CODE (x) == GET_CODE (y))
2056 switch (GET_CODE (x))
2058 case MULT:
2060 /* Handle cases where we expect the second operands to be the
2061 same, and check only whether the first operand would conflict
2062 or not. */
2063 rtx x0, y0;
2064 rtx x1 = canon_rtx (XEXP (x, 1));
2065 rtx y1 = canon_rtx (XEXP (y, 1));
2066 if (! rtx_equal_for_memref_p (x1, y1))
2067 return -1;
2068 x0 = canon_rtx (XEXP (x, 0));
2069 y0 = canon_rtx (XEXP (y, 0));
2070 if (rtx_equal_for_memref_p (x0, y0))
2071 return offset_overlap_p (c, xsize, ysize);
2073 /* Can't properly adjust our sizes. */
2074 if (!CONST_INT_P (x1))
2075 return -1;
2076 xsize /= INTVAL (x1);
2077 ysize /= INTVAL (x1);
2078 c /= INTVAL (x1);
2079 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
2082 default:
2083 break;
2086 /* Deal with alignment ANDs by adjusting offset and size so as to
2087 cover the maximum range, without taking any previously known
2088 alignment into account. Make a size negative after such an
2089 adjustment, so that, if we end up with e.g. two SYMBOL_REFs, we
2090 assume a potential overlap, because they may end up in contiguous
2091 memory locations and the stricter-alignment access may span over
2092 part of both. */
2093 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1)))
2095 HOST_WIDE_INT sc = INTVAL (XEXP (x, 1));
2096 unsigned HOST_WIDE_INT uc = sc;
2097 if (sc < 0 && -uc == (uc & -uc))
2099 if (xsize > 0)
2100 xsize = -xsize;
2101 if (xsize)
2102 xsize += sc + 1;
2103 c -= sc + 1;
2104 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2105 ysize, y, c);
2108 if (GET_CODE (y) == AND && CONST_INT_P (XEXP (y, 1)))
2110 HOST_WIDE_INT sc = INTVAL (XEXP (y, 1));
2111 unsigned HOST_WIDE_INT uc = sc;
2112 if (sc < 0 && -uc == (uc & -uc))
2114 if (ysize > 0)
2115 ysize = -ysize;
2116 if (ysize)
2117 ysize += sc + 1;
2118 c += sc + 1;
2119 return memrefs_conflict_p (xsize, x,
2120 ysize, canon_rtx (XEXP (y, 0)), c);
2124 if (CONSTANT_P (x))
2126 if (CONST_INT_P (x) && CONST_INT_P (y))
2128 c += (INTVAL (y) - INTVAL (x));
2129 return offset_overlap_p (c, xsize, ysize);
2132 if (GET_CODE (x) == CONST)
2134 if (GET_CODE (y) == CONST)
2135 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2136 ysize, canon_rtx (XEXP (y, 0)), c);
2137 else
2138 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2139 ysize, y, c);
2141 if (GET_CODE (y) == CONST)
2142 return memrefs_conflict_p (xsize, x, ysize,
2143 canon_rtx (XEXP (y, 0)), c);
2145 /* Assume a potential overlap for symbolic addresses that went
2146 through alignment adjustments (i.e., that have negative
2147 sizes), because we can't know how far they are from each
2148 other. */
2149 if (CONSTANT_P (y))
2150 return (xsize < 0 || ysize < 0 || offset_overlap_p (c, xsize, ysize));
2152 return -1;
2155 return -1;
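/* Worked example (editorial sketch): comparing a 4-byte reference at
   (plus (reg 100) (const_int 8)) against a 4-byte reference at
   (plus (reg 100) (const_int 16)), the PLUS case above sees the equal
   first operands and recurses on the constant parts, where the CONST_INT
   case folds them into C == 16 - 8 == 8; offset_overlap_p (8, 4, 4) then
   reports no overlap, so the result is 0 (a definite non-conflict).  */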
2158 /* Functions to compute memory dependencies.
2160 Since we process the insns in execution order, we can build tables
2161 to keep track of what registers are fixed (and not aliased), what registers
2162 are varying in known ways, and what registers are varying in unknown
2163 ways.
2165 If both memory references are volatile, then there must always be a
2166 dependence between the two references, since their order can not be
2167 changed. A volatile and non-volatile reference can be interchanged
2168 though.
2170 We also must allow AND addresses, because they may generate accesses
2171 outside the object being referenced. This is used to generate aligned
2172 addresses from unaligned addresses, for instance, the alpha
2173 storeqi_unaligned pattern. */
2175 /* Read dependence: X is read after read in MEM takes place. There can
2176 only be a dependence here if both reads are volatile, or if either is
2177 an explicit barrier. */
2179 static bool
2180 read_dependence (const_rtx mem, const_rtx x)
2182 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2183 return true;
2184 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2185 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2186 return true;
2187 return false;
2190 /* Return true if we can determine that the fields referenced cannot
2191 overlap for any pair of objects. */
2193 static bool
2194 nonoverlapping_component_refs_p (const_rtx rtlx, const_rtx rtly)
2196 const_tree x = MEM_EXPR (rtlx), y = MEM_EXPR (rtly);
2197 const_tree fieldx, fieldy, typex, typey, orig_y;
2199 if (!flag_strict_aliasing
2200 || !x || !y
2201 || TREE_CODE (x) != COMPONENT_REF
2202 || TREE_CODE (y) != COMPONENT_REF)
2203 return false;
2207 /* The comparison has to be done at a common type, since we don't
2208 know how the inheritance hierarchy works. */
2209 orig_y = y;
2212 fieldx = TREE_OPERAND (x, 1);
2213 typex = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldx));
2215 y = orig_y;
2218 fieldy = TREE_OPERAND (y, 1);
2219 typey = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldy));
2221 if (typex == typey)
2222 goto found;
2224 y = TREE_OPERAND (y, 0);
2226 while (y && TREE_CODE (y) == COMPONENT_REF);
2228 x = TREE_OPERAND (x, 0);
2230 while (x && TREE_CODE (x) == COMPONENT_REF);
2231 /* Never found a common type. */
2232 return false;
2234 found:
2235 /* If we're left with accessing different fields of a structure, then no
2236 possible overlap, unless they are both bitfields. */
2237 if (TREE_CODE (typex) == RECORD_TYPE && fieldx != fieldy)
2238 return !(DECL_BIT_FIELD (fieldx) && DECL_BIT_FIELD (fieldy));
2240 /* The comparison on the current field failed. If we're accessing
2241 a deeply nested structure, look at the next outer level. */
2242 x = TREE_OPERAND (x, 0);
2243 y = TREE_OPERAND (y, 0);
2245 while (x && y
2246 && TREE_CODE (x) == COMPONENT_REF
2247 && TREE_CODE (y) == COMPONENT_REF);
2249 return false;
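/* Illustrative example (editorial sketch): given

       struct s { int a; int b; } *p, *q;

   the accesses p->a and q->b are COMPONENT_REFs whose fields both live in
   the RECORD_TYPE struct s, so the loops above find a common type; the
   fields differ and neither is a bit-field, hence the function returns
   true and the two references can never overlap, whatever p and q point
   to.  */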
2252 /* Look at the bottom of the COMPONENT_REF list for a DECL, and return it. */
2254 static tree
2255 decl_for_component_ref (tree x)
2259 x = TREE_OPERAND (x, 0);
2261 while (x && TREE_CODE (x) == COMPONENT_REF);
2263 return x && DECL_P (x) ? x : NULL_TREE;
2266 /* Walk up the COMPONENT_REF list in X and adjust *OFFSET to compensate
2267 for the offset of the field reference. *KNOWN_P says whether the
2268 offset is known. */
2270 static void
2271 adjust_offset_for_component_ref (tree x, bool *known_p,
2272 HOST_WIDE_INT *offset)
2274 if (!*known_p)
2275 return;
2278 tree xoffset = component_ref_field_offset (x);
2279 tree field = TREE_OPERAND (x, 1);
2281 if (! host_integerp (xoffset, 1))
2283 *known_p = false;
2284 return;
2286 *offset += (tree_low_cst (xoffset, 1)
2287 + (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
2288 / BITS_PER_UNIT));
2290 x = TREE_OPERAND (x, 0);
2292 while (x && TREE_CODE (x) == COMPONENT_REF);
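/* Worked example (editorial sketch, with made-up layout numbers): for a
   reference x.inner.f where the member "inner" starts 8 bytes into x and
   "f" starts 4 bytes into inner, the loop above first adds 4 (f within
   inner) and then 8 (inner within x), so *OFFSET grows by 12, the byte
   offset of the access relative to the underlying declaration.  */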
2295 /* Return nonzero if we can determine the exprs corresponding to memrefs
2296 X and Y and they do not overlap.
2297 If LOOP_INVARIANT is set, skip offset-based disambiguation. */
2299 static int
2300 nonoverlapping_memrefs_p (const_rtx x, const_rtx y, bool loop_invariant)
2302 tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y);
2303 rtx rtlx, rtly;
2304 rtx basex, basey;
2305 bool moffsetx_known_p, moffsety_known_p;
2306 HOST_WIDE_INT moffsetx = 0, moffsety = 0;
2307 HOST_WIDE_INT offsetx = 0, offsety = 0, sizex, sizey, tem;
2309 /* Unless both have exprs, we can't tell anything. */
2310 if (exprx == 0 || expry == 0)
2311 return 0;
2313 /* For spill-slot accesses make sure we have valid offsets. */
2314 if ((exprx == get_spill_slot_decl (false)
2315 && ! MEM_OFFSET_KNOWN_P (x))
2316 || (expry == get_spill_slot_decl (false)
2317 && ! MEM_OFFSET_KNOWN_P (y)))
2318 return 0;
2320 /* If the field reference test failed, look at the DECLs involved. */
2321 moffsetx_known_p = MEM_OFFSET_KNOWN_P (x);
2322 if (moffsetx_known_p)
2323 moffsetx = MEM_OFFSET (x);
2324 if (TREE_CODE (exprx) == COMPONENT_REF)
2326 tree t = decl_for_component_ref (exprx);
2327 if (! t)
2328 return 0;
2329 adjust_offset_for_component_ref (exprx, &moffsetx_known_p, &moffsetx);
2330 exprx = t;
2333 moffsety_known_p = MEM_OFFSET_KNOWN_P (y);
2334 if (moffsety_known_p)
2335 moffsety = MEM_OFFSET (y);
2336 if (TREE_CODE (expry) == COMPONENT_REF)
2338 tree t = decl_for_component_ref (expry);
2339 if (! t)
2340 return 0;
2341 adjust_offset_for_component_ref (expry, &moffsety_known_p, &moffsety);
2342 expry = t;
2345 if (! DECL_P (exprx) || ! DECL_P (expry))
2346 return 0;
2348 /* With invalid code we can end up storing into the constant pool.
2349 Bail out to avoid ICEing when creating RTL for this.
2350 See gfortran.dg/lto/20091028-2_0.f90. */
2351 if (TREE_CODE (exprx) == CONST_DECL
2352 || TREE_CODE (expry) == CONST_DECL)
2353 return 1;
2355 rtlx = DECL_RTL (exprx);
2356 rtly = DECL_RTL (expry);
2358 /* If either RTL is not a MEM, it must be a REG or CONCAT, meaning they
2359 can't overlap unless they are the same because we never reuse that part
2360 of the stack frame used for locals for spilled pseudos. */
2361 if ((!MEM_P (rtlx) || !MEM_P (rtly))
2362 && ! rtx_equal_p (rtlx, rtly))
2363 return 1;
2365 /* If we have MEMs referring to different address spaces (which can
2366 potentially overlap), we cannot easily tell from the addresses
2367 whether the references overlap. */
2368 if (MEM_P (rtlx) && MEM_P (rtly)
2369 && MEM_ADDR_SPACE (rtlx) != MEM_ADDR_SPACE (rtly))
2370 return 0;
2372 /* Get the base and offsets of both decls. If either is a register, we
2373 know both are and are the same, so use that as the base. The only way
2374 we can avoid overlap is if we can deduce that they are nonoverlapping
2375 pieces of that decl, which is very rare. */
2376 basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx;
2377 if (GET_CODE (basex) == PLUS && CONST_INT_P (XEXP (basex, 1)))
2378 offsetx = INTVAL (XEXP (basex, 1)), basex = XEXP (basex, 0);
2380 basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly;
2381 if (GET_CODE (basey) == PLUS && CONST_INT_P (XEXP (basey, 1)))
2382 offsety = INTVAL (XEXP (basey, 1)), basey = XEXP (basey, 0);
2384 /* If the bases are different, we know they do not overlap if both
2385 are constants or if one is a constant and the other a pointer into the
2386 stack frame. Otherwise a different base means we can't tell if they
2387 overlap or not. */
2388 if (! rtx_equal_p (basex, basey))
2389 return ((CONSTANT_P (basex) && CONSTANT_P (basey))
2390 || (CONSTANT_P (basex) && REG_P (basey)
2391 && REGNO_PTR_FRAME_P (REGNO (basey)))
2392 || (CONSTANT_P (basey) && REG_P (basex)
2393 && REGNO_PTR_FRAME_P (REGNO (basex))));
2395 /* Offset-based disambiguation is not appropriate for the loop-invariant case. */
2396 if (loop_invariant)
2397 return 0;
2399 sizex = (!MEM_P (rtlx) ? (int) GET_MODE_SIZE (GET_MODE (rtlx))
2400 : MEM_SIZE_KNOWN_P (rtlx) ? MEM_SIZE (rtlx)
2401 : -1);
2402 sizey = (!MEM_P (rtly) ? (int) GET_MODE_SIZE (GET_MODE (rtly))
2403 : MEM_SIZE_KNOWN_P (rtly) ? MEM_SIZE (rtly)
2404 : -1);
2406 /* If we have an offset for either memref, it can update the values computed
2407 above. */
2408 if (moffsetx_known_p)
2409 offsetx += moffsetx, sizex -= moffsetx;
2410 if (moffsety_known_p)
2411 offsety += moffsety, sizey -= moffsety;
2413 /* If a memref has both a size and an offset, we can use the smaller size.
2414 We can't do this if the offset isn't known because we must view this
2415 memref as being anywhere inside the DECL's MEM. */
2416 if (MEM_SIZE_KNOWN_P (x) && moffsetx_known_p)
2417 sizex = MEM_SIZE (x);
2418 if (MEM_SIZE_KNOWN_P (y) && moffsety_known_p)
2419 sizey = MEM_SIZE (y);
2421 /* Put the values of the memref with the lower offset in X's values. */
2422 if (offsetx > offsety)
2424 tem = offsetx, offsetx = offsety, offsety = tem;
2425 tem = sizex, sizex = sizey, sizey = tem;
2428 /* If we don't know the size of the lower-offset value, we can't tell
2429 if they conflict. Otherwise, we do the test. */
2430 return sizex >= 0 && offsety >= offsetx + sizex;
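/* Worked example (editorial sketch): two accesses into the same local
   array, one 4 bytes wide at offset 0 and one 4 bytes wide at offset 16,
   share the same base, so control reaches the interval test above; after
   the swap that puts the lower offset into the X values it checks
   sizex >= 0 && 16 >= 0 + 4, which holds, and the references are reported
   as nonoverlapping.  */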
2433 /* Helper for true_dependence and canon_true_dependence.
2434 Checks for true dependence: X is read after store in MEM takes place.
2436 If MEM_CANONICALIZED is FALSE, then X_ADDR and MEM_ADDR should be
2437 NULL_RTX, and the canonical addresses of MEM and X are both computed
2438 here. If MEM_CANONICALIZED, then MEM must be already canonicalized.
2440 If X_ADDR is non-NULL, it is used in preference to XEXP (x, 0).
2442 Returns 1 if there is a true dependence, 0 otherwise. */
2444 static int
2445 true_dependence_1 (const_rtx mem, enum machine_mode mem_mode, rtx mem_addr,
2446 const_rtx x, rtx x_addr, bool mem_canonicalized)
2448 rtx base;
2449 int ret;
2451 gcc_checking_assert (mem_canonicalized ? (mem_addr != NULL_RTX)
2452 : (mem_addr == NULL_RTX && x_addr == NULL_RTX));
2454 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2455 return 1;
2457 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2458 This is used in epilogue deallocation functions, and in cselib. */
2459 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2460 return 1;
2461 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2462 return 1;
2463 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2464 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2465 return 1;
2467 /* Read-only memory is by definition never modified, and therefore can't
2468 conflict with anything. We don't expect to find read-only set on MEM,
2469 but stupid user tricks can produce them, so don't die. */
2470 if (MEM_READONLY_P (x))
2471 return 0;
2473 /* If we have MEMs referring to different address spaces (which can
2474 potentially overlap), we cannot easily tell from the addresses
2475 whether the references overlap. */
2476 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2477 return 1;
2479 if (! mem_addr)
2481 mem_addr = XEXP (mem, 0);
2482 if (mem_mode == VOIDmode)
2483 mem_mode = GET_MODE (mem);
2486 if (! x_addr)
2488 x_addr = XEXP (x, 0);
2489 if (!((GET_CODE (x_addr) == VALUE
2490 && GET_CODE (mem_addr) != VALUE
2491 && reg_mentioned_p (x_addr, mem_addr))
2492 || (GET_CODE (x_addr) != VALUE
2493 && GET_CODE (mem_addr) == VALUE
2494 && reg_mentioned_p (mem_addr, x_addr))))
2496 x_addr = get_addr (x_addr);
2497 if (! mem_canonicalized)
2498 mem_addr = get_addr (mem_addr);
2502 base = find_base_term (x_addr);
2503 if (base && (GET_CODE (base) == LABEL_REF
2504 || (GET_CODE (base) == SYMBOL_REF
2505 && CONSTANT_POOL_ADDRESS_P (base))))
2506 return 0;
2508 rtx mem_base = find_base_term (mem_addr);
2509 if (! base_alias_check (x_addr, base, mem_addr, mem_base,
2510 GET_MODE (x), mem_mode))
2511 return 0;
2513 x_addr = canon_rtx (x_addr);
2514 if (!mem_canonicalized)
2515 mem_addr = canon_rtx (mem_addr);
2517 if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
2518 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
2519 return ret;
2521 if (mems_in_disjoint_alias_sets_p (x, mem))
2522 return 0;
2524 if (nonoverlapping_memrefs_p (mem, x, false))
2525 return 0;
2527 if (nonoverlapping_component_refs_p (mem, x))
2528 return 0;
2530 return rtx_refs_may_alias_p (x, mem, true);
2533 /* True dependence: X is read after store in MEM takes place. */
2535 int
2536 true_dependence (const_rtx mem, enum machine_mode mem_mode, const_rtx x)
2538 return true_dependence_1 (mem, mem_mode, NULL_RTX,
2539 x, NULL_RTX, /*mem_canonicalized=*/false);
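/* Usage sketch (editorial addition): a client asking whether a load X may
   be moved above an earlier store MEM calls

       true_dependence (mem, GET_MODE (mem), x)

   and must keep the original order whenever the result is nonzero, since
   the load might then read the data written by the store.  MEM and X are
   the MEM rtxes of the store and of the load respectively.  */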
2542 /* Canonical true dependence: X is read after store in MEM takes place.
2543 Variant of true_dependence which assumes MEM has already been
2544 canonicalized (hence we no longer do that here).
2545 The mem_addr argument has been added, since true_dependence_1 computed
2546 this value prior to canonicalizing. */
2548 int
2549 canon_true_dependence (const_rtx mem, enum machine_mode mem_mode, rtx mem_addr,
2550 const_rtx x, rtx x_addr)
2552 return true_dependence_1 (mem, mem_mode, mem_addr,
2553 x, x_addr, /*mem_canonicalized=*/true);
2556 /* Returns nonzero if a write to X might alias a previous read from
2557 (or, if WRITEP is true, a write to) MEM.
2558 If MEM_CANONICALIZED is nonzero, CANON_MEM_ADDR is the canonicalized
2559 address of MEM, and MEM_MODE the mode for that access. */
2561 static int
2562 write_dependence_p (const_rtx mem, enum machine_mode mem_mode,
2563 rtx canon_mem_addr, const_rtx x,
2564 bool mem_canonicalized, bool writep)
2566 rtx x_addr, mem_addr;
2567 rtx base;
2568 int ret;
2570 gcc_checking_assert (mem_canonicalized ? (canon_mem_addr != NULL_RTX)
2571 : (canon_mem_addr == NULL_RTX && mem_mode == VOIDmode));
2573 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2574 return 1;
2576 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2577 This is used in epilogue deallocation functions. */
2578 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2579 return 1;
2580 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2581 return 1;
2582 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2583 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2584 return 1;
2586 /* A read from read-only memory can't conflict with read-write memory. */
2587 if (!writep && MEM_READONLY_P (mem))
2588 return 0;
2590 /* If we have MEMs referring to different address spaces (which can
2591 potentially overlap), we cannot easily tell from the addresses
2592 whether the references overlap. */
2593 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2594 return 1;
2596 x_addr = XEXP (x, 0);
2597 mem_addr = XEXP (mem, 0);
2598 if (!((GET_CODE (x_addr) == VALUE
2599 && GET_CODE (mem_addr) != VALUE
2600 && reg_mentioned_p (x_addr, mem_addr))
2601 || (GET_CODE (x_addr) != VALUE
2602 && GET_CODE (mem_addr) == VALUE
2603 && reg_mentioned_p (mem_addr, x_addr))))
2605 x_addr = get_addr (x_addr);
2606 mem_addr = get_addr (mem_addr);
2609 base = find_base_term (mem_addr);
2610 if (! writep
2611 && base
2612 && (GET_CODE (base) == LABEL_REF
2613 || (GET_CODE (base) == SYMBOL_REF
2614 && CONSTANT_POOL_ADDRESS_P (base))))
2615 return 0;
2617 rtx x_base = find_base_term (x_addr);
2618 if (! base_alias_check (x_addr, x_base, mem_addr, base, GET_MODE (x),
2619 GET_MODE (mem)))
2620 return 0;
2622 x_addr = canon_rtx (x_addr);
2623 if (mem_canonicalized)
2624 mem_addr = canon_mem_addr;
2625 else
2627 mem_addr = canon_rtx (mem_addr);
2628 mem_mode = GET_MODE (mem);
2631 if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
2632 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
2633 return ret;
2635 if (nonoverlapping_memrefs_p (x, mem, false))
2636 return 0;
2638 return rtx_refs_may_alias_p (x, mem, false);
2641 /* Anti dependence: X is written after read in MEM takes place. */
2643 int
2644 anti_dependence (const_rtx mem, const_rtx x)
2646 return write_dependence_p (mem, VOIDmode, NULL_RTX, x,
2647 /*mem_canonicalized=*/false, /*writep=*/false);
2650 /* Likewise, but we already have a canonicalized MEM_ADDR for MEM.
2651 Also, consider MEM in MEM_MODE (which might be from an enclosing
2652 STRICT_LOW_PART / ZERO_EXTRACT). */
2654 int
2655 canon_anti_dependence (const_rtx mem, enum machine_mode mem_mode,
2656 rtx mem_addr, const_rtx x)
2658 return write_dependence_p (mem, mem_mode, mem_addr, x,
2659 /*mem_canonicalized=*/true, /*writep=*/false);
2662 /* Output dependence: X is written after store in MEM takes place. */
2664 int
2665 output_dependence (const_rtx mem, const_rtx x)
2667 return write_dependence_p (mem, VOIDmode, NULL_RTX, x,
2668 /*mem_canonicalized=*/false, /*writep=*/true);
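/* Summary sketch (editorial addition): for a store *p = 1 followed by a
   read of *q and then a store *q = 2, the read-after-write pair is
   queried with true_dependence, the write-after-read pair with
   anti_dependence, and the two stores with output_dependence; each
   returns nonzero whenever the addresses p and q cannot be proven
   distinct.  */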
2673 /* Check whether X may be aliased with MEM. Don't do offset-based
2674 memory disambiguation & TBAA. */
2675 int
2676 may_alias_p (const_rtx mem, const_rtx x)
2678 rtx x_addr, mem_addr;
2680 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2681 return 1;
2683 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2684 This is used in epilogue deallocation functions. */
2685 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2686 return 1;
2687 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2688 return 1;
2689 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2690 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2691 return 1;
2693 /* Read-only memory is by definition never modified, and therefore can't
2694 conflict with anything. We don't expect to find read-only set on MEM,
2695 but stupid user tricks can produce them, so don't die. */
2696 if (MEM_READONLY_P (x))
2697 return 0;
2699 /* If we have MEMs referring to different address spaces (which can
2700 potentially overlap), we cannot easily tell from the addresses
2701 whether the references overlap. */
2702 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2703 return 1;
2705 x_addr = XEXP (x, 0);
2706 mem_addr = XEXP (mem, 0);
2707 if (!((GET_CODE (x_addr) == VALUE
2708 && GET_CODE (mem_addr) != VALUE
2709 && reg_mentioned_p (x_addr, mem_addr))
2710 || (GET_CODE (x_addr) != VALUE
2711 && GET_CODE (mem_addr) == VALUE
2712 && reg_mentioned_p (mem_addr, x_addr))))
2714 x_addr = get_addr (x_addr);
2715 mem_addr = get_addr (mem_addr);
2718 rtx x_base = find_base_term (x_addr);
2719 rtx mem_base = find_base_term (mem_addr);
2720 if (! base_alias_check (x_addr, x_base, mem_addr, mem_base,
2721 GET_MODE (x), GET_MODE (mem_addr)))
2722 return 0;
2724 x_addr = canon_rtx (x_addr);
2725 mem_addr = canon_rtx (mem_addr);
2727 if (nonoverlapping_memrefs_p (mem, x, true))
2728 return 0;
2730 /* TBAA is not valid for the loop-invariant case. */
2731 return rtx_refs_may_alias_p (x, mem, false);
2734 void
2735 init_alias_target (void)
2737 int i;
2739 if (!arg_base_value)
2740 arg_base_value = gen_rtx_ADDRESS (VOIDmode, 0);
2742 memset (static_reg_base_value, 0, sizeof static_reg_base_value);
2744 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2745 /* Check whether this register can hold an incoming pointer
2746 argument. FUNCTION_ARG_REGNO_P tests outgoing register
2747 numbers, so translate if necessary due to register windows. */
2748 if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i))
2749 && HARD_REGNO_MODE_OK (i, Pmode))
2750 static_reg_base_value[i] = arg_base_value;
2752 static_reg_base_value[STACK_POINTER_REGNUM]
2753 = unique_base_value (UNIQUE_BASE_VALUE_SP);
2754 static_reg_base_value[ARG_POINTER_REGNUM]
2755 = unique_base_value (UNIQUE_BASE_VALUE_ARGP);
2756 static_reg_base_value[FRAME_POINTER_REGNUM]
2757 = unique_base_value (UNIQUE_BASE_VALUE_FP);
2758 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
2759 static_reg_base_value[HARD_FRAME_POINTER_REGNUM]
2760 = unique_base_value (UNIQUE_BASE_VALUE_HFP);
2761 #endif
2764 /* Set MEMORY_MODIFIED when X modifies DATA (that is assumed
2765 to be a memory reference). */
2766 static bool memory_modified;
2767 static void
2768 memory_modified_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
2770 if (MEM_P (x))
2772 if (anti_dependence (x, (const_rtx)data) || output_dependence (x, (const_rtx)data))
2773 memory_modified = true;
2778 /* Return true when INSN possibly modifies the memory contents of MEM
2779 (i.e. the address can be modified). */
2780 bool
2781 memory_modified_in_insn_p (const_rtx mem, const_rtx insn)
2783 if (!INSN_P (insn))
2784 return false;
2785 memory_modified = false;
2786 note_stores (PATTERN (insn), memory_modified_1, CONST_CAST_RTX(mem));
2787 return memory_modified;
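/* Usage sketch (editorial addition): a typical caller scans forward from
   a load and stops at the first insn that might clobber the location,
   e.g.

       for (insn = NEXT_INSN (load_insn); insn; insn = NEXT_INSN (insn))
         if (memory_modified_in_insn_p (mem, insn))
           break;

   where MEM is the MEM rtx read by LOAD_INSN; the loop bound and the
   surrounding pass logic are only illustrative.  */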
2790 /* Return TRUE if the destination of a set is an rtx identical to
2791 ITEM. */
2792 static inline bool
2793 set_dest_equal_p (const_rtx set, const_rtx item)
2795 rtx dest = SET_DEST (set);
2796 return rtx_equal_p (dest, item);
2799 /* Like memory_modified_in_insn_p, but return TRUE if INSN will
2800 *DEFINITELY* modify the memory contents of MEM. */
2801 bool
2802 memory_must_be_modified_in_insn_p (const_rtx mem, const_rtx insn)
2804 if (!INSN_P (insn))
2805 return false;
2806 insn = PATTERN (insn);
2807 if (GET_CODE (insn) == SET)
2808 return set_dest_equal_p (insn, mem);
2809 else if (GET_CODE (insn) == PARALLEL)
2811 int i;
2812 for (i = 0; i < XVECLEN (insn, 0); i++)
2814 rtx sub = XVECEXP (insn, 0, i);
2815 if (GET_CODE (sub) == SET
2816 && set_dest_equal_p (sub, mem))
2817 return true;
2820 return false;
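/* Example (editorial sketch): for an insn whose pattern is
   (set (mem:SI (reg:SI 100)) (const_int 0)) and MEM equal to that same
   (mem:SI (reg:SI 100)), the SET case above matches via set_dest_equal_p
   and the function returns true; a PARALLEL containing such a SET is
   caught by the loop that follows.  */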
2823 /* Initialize the aliasing machinery. Initialize the REG_KNOWN_VALUE
2824 array. */
2826 void
2827 init_alias_analysis (void)
2829 unsigned int maxreg = max_reg_num ();
2830 int changed, pass;
2831 int i;
2832 unsigned int ui;
2833 rtx insn, val;
2834 int rpo_cnt;
2835 int *rpo;
2837 timevar_push (TV_ALIAS_ANALYSIS);
2839 vec_safe_grow_cleared (reg_known_value, maxreg - FIRST_PSEUDO_REGISTER);
2840 reg_known_equiv_p = sbitmap_alloc (maxreg - FIRST_PSEUDO_REGISTER);
2841 bitmap_clear (reg_known_equiv_p);
2843 /* If we have memory allocated from the previous run, use it. */
2844 if (old_reg_base_value)
2845 reg_base_value = old_reg_base_value;
2847 if (reg_base_value)
2848 reg_base_value->truncate (0);
2850 vec_safe_grow_cleared (reg_base_value, maxreg);
2852 new_reg_base_value = XNEWVEC (rtx, maxreg);
2853 reg_seen = sbitmap_alloc (maxreg);
2855 /* The basic idea is that each pass through this loop will use the
2856 "constant" information from the previous pass to propagate alias
2857 information through another level of assignments.
2859 The propagation is done on the CFG in reverse post-order, to propagate
2860 things forward as far as possible in each iteration.
2862 This could get expensive if the assignment chains are long. Maybe
2863 we should throttle the number of iterations, possibly based on
2864 the optimization level or flag_expensive_optimizations.
2866 We could propagate more information in the first pass by making use
2867 of DF_REG_DEF_COUNT to determine immediately that the alias information
2868 for a pseudo is "constant".
2870 A program with an uninitialized variable can cause an infinite loop
2871 here. Instead of doing a full dataflow analysis to detect such problems
2872 we just cap the number of iterations for the loop.
2874 The state of the arrays for the set chain in question does not matter
2875 since the program has undefined behavior. */
2877 rpo = XNEWVEC (int, n_basic_blocks);
2878 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
2880 pass = 0;
2883 /* Assume nothing will change this iteration of the loop. */
2884 changed = 0;
2886 /* We want to assign the same IDs each iteration of this loop, so
2887 start counting from one each iteration of the loop. */
2888 unique_id = 1;
2890 /* We're at the start of the function each iteration through the
2891 loop, so we're copying arguments. */
2892 copying_arguments = true;
2894 /* Wipe the potential alias information clean for this pass. */
2895 memset (new_reg_base_value, 0, maxreg * sizeof (rtx));
2897 /* Wipe the reg_seen array clean. */
2898 bitmap_clear (reg_seen);
2900 /* Mark all hard registers which may contain an address.
2901 The stack, frame and argument pointers may contain an address.
2902 An argument register which can hold a Pmode value may contain
2903 an address even if it is not in BASE_REGS.
2905 The address expression is VOIDmode for an argument and
2906 Pmode for other registers. */
2908 memcpy (new_reg_base_value, static_reg_base_value,
2909 FIRST_PSEUDO_REGISTER * sizeof (rtx));
2911 /* Walk the insns adding values to the new_reg_base_value array. */
2912 for (i = 0; i < rpo_cnt; i++)
2914 basic_block bb = BASIC_BLOCK (rpo[i]);
2915 FOR_BB_INSNS (bb, insn)
2917 if (NONDEBUG_INSN_P (insn))
2919 rtx note, set;
2921 #if defined (HAVE_prologue) || defined (HAVE_epilogue)
2922 /* The prologue/epilogue insns are not threaded onto the
2923 insn chain until after reload has completed. Thus,
2924 there is no sense wasting time checking if INSN is in
2925 the prologue/epilogue until after reload has completed. */
2926 if (reload_completed
2927 && prologue_epilogue_contains (insn))
2928 continue;
2929 #endif
2931 /* If this insn has a noalias note, process it. Otherwise,
2932 scan for sets. A simple set will have no side effects
2933 which could change the base value of any other register. */
2935 if (GET_CODE (PATTERN (insn)) == SET
2936 && REG_NOTES (insn) != 0
2937 && find_reg_note (insn, REG_NOALIAS, NULL_RTX))
2938 record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL);
2939 else
2940 note_stores (PATTERN (insn), record_set, NULL);
2942 set = single_set (insn);
2944 if (set != 0
2945 && REG_P (SET_DEST (set))
2946 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
2948 unsigned int regno = REGNO (SET_DEST (set));
2949 rtx src = SET_SRC (set);
2950 rtx t;
2952 note = find_reg_equal_equiv_note (insn);
2953 if (note && REG_NOTE_KIND (note) == REG_EQUAL
2954 && DF_REG_DEF_COUNT (regno) != 1)
2955 note = NULL_RTX;
2957 if (note != NULL_RTX
2958 && GET_CODE (XEXP (note, 0)) != EXPR_LIST
2959 && ! rtx_varies_p (XEXP (note, 0), 1)
2960 && ! reg_overlap_mentioned_p (SET_DEST (set),
2961 XEXP (note, 0)))
2963 set_reg_known_value (regno, XEXP (note, 0));
2964 set_reg_known_equiv_p (regno,
2965 REG_NOTE_KIND (note) == REG_EQUIV);
2967 else if (DF_REG_DEF_COUNT (regno) == 1
2968 && GET_CODE (src) == PLUS
2969 && REG_P (XEXP (src, 0))
2970 && (t = get_reg_known_value (REGNO (XEXP (src, 0))))
2971 && CONST_INT_P (XEXP (src, 1)))
2973 t = plus_constant (GET_MODE (src), t,
2974 INTVAL (XEXP (src, 1)));
2975 set_reg_known_value (regno, t);
2976 set_reg_known_equiv_p (regno, false);
2978 else if (DF_REG_DEF_COUNT (regno) == 1
2979 && ! rtx_varies_p (src, 1))
2981 set_reg_known_value (regno, src);
2982 set_reg_known_equiv_p (regno, false);
2986 else if (NOTE_P (insn)
2987 && NOTE_KIND (insn) == NOTE_INSN_FUNCTION_BEG)
2988 copying_arguments = false;
2992 /* Now propagate values from new_reg_base_value to reg_base_value. */
2993 gcc_assert (maxreg == (unsigned int) max_reg_num ());
2995 for (ui = 0; ui < maxreg; ui++)
2997 if (new_reg_base_value[ui]
2998 && new_reg_base_value[ui] != (*reg_base_value)[ui]
2999 && ! rtx_equal_p (new_reg_base_value[ui], (*reg_base_value)[ui]))
3001 (*reg_base_value)[ui] = new_reg_base_value[ui];
3002 changed = 1;
3006 while (changed && ++pass < MAX_ALIAS_LOOP_PASSES);
3007 XDELETEVEC (rpo);
3009 /* Fill in the remaining entries. */
3010 FOR_EACH_VEC_ELT (*reg_known_value, i, val)
3012 int regno = i + FIRST_PSEUDO_REGISTER;
3013 if (! val)
3014 set_reg_known_value (regno, regno_reg_rtx[regno]);
3017 /* Clean up. */
3018 free (new_reg_base_value);
3019 new_reg_base_value = 0;
3020 sbitmap_free (reg_seen);
3021 reg_seen = 0;
3022 timevar_pop (TV_ALIAS_ANALYSIS);
3025 /* Equate REG_BASE_VALUE (reg1) to REG_BASE_VALUE (reg2).
3026 Special API for var-tracking pass purposes. */
3028 void
3029 vt_equate_reg_base_value (const_rtx reg1, const_rtx reg2)
3031 (*reg_base_value)[REGNO (reg1)] = REG_BASE_VALUE (reg2);
3034 void
3035 end_alias_analysis (void)
3037 old_reg_base_value = reg_base_value;
3038 vec_free (reg_known_value);
3039 sbitmap_free (reg_known_equiv_p);
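/* Usage sketch (editorial addition): RTL passes that need the
   register-based alias information bracket their work with the pair
   above, e.g.

       init_alias_analysis ();
       for each insn of interest, query true_dependence and friends;
       end_alias_analysis ();

   as the instruction scheduler does before and after computing memory
   dependencies.  */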
3042 #include "gt-alias.h"