/* Alias analysis for GNU C
   Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
   2007, 2008, 2009, 2010 Free Software Foundation, Inc.
   Contributed by John Carr (jfc@mit.edu).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "function.h"
#include "alias.h"
#include "emit-rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "flags.h"
#include "output.h"
#include "toplev.h"
#include "cselib.h"
#include "splay-tree.h"
#include "ggc.h"
#include "langhooks.h"
#include "timevar.h"
#include "target.h"
#include "cgraph.h"
#include "tree-pass.h"
#include "ipa-type-escape.h"
#include "df.h"
#include "tree-ssa-alias.h"
#include "pointer-set.h"
#include "tree-flow.h"
/* The aliasing API provided here solves related but different problems:

   Say there exists (in C)

   struct X {
     struct Y y1;
     struct Z z2;
   } x1, *px1, *px2;

   struct Y y2, *py;
   struct Z z2, *pz;

   py = &px1->y1;
   px2 = &x1;

   Consider the four questions:

   Can a store to x1 interfere with px2->y1?
   Can a store to x1 interfere with px2->z2 (i.e., (*px2).z2)?
   Can a store to x1 change the value pointed to by py?
   Can a store to x1 change the value pointed to by pz?

   The answer to these questions can be yes, yes, yes, and maybe.

   The first two questions can be answered with a simple examination
   of the type system.  If structure X contains a field of type Y then
   a store through a pointer to an X can overwrite any field that is
   contained (recursively) in an X (unless we know that px1 != px2).

   The last two questions can be solved in the same way as the first
   two, but this is too conservative.  The observation is that in some
   cases we can know which (if any) fields are addressed and whether
   those addresses are used in bad ways.  This analysis may be
   language specific.  In C, arbitrary operations may be applied to
   pointers.  However, there is some indication that this may be too
   conservative for some C++ types.

   The pass ipa-type-escape does this analysis for the types whose
   instances do not escape across the compilation boundary.

   Historically in GCC, these two problems were combined and a single
   data structure was used to represent the solution to these
   problems.  We now have two similar but different data structures.
   The data structure used to solve the last two questions is similar
   to the first, but does not contain the fields whose addresses are
   never taken.  For types that do escape the compilation unit, the
   data structures will have identical information.  */
/* The alias sets assigned to MEMs assist the back-end in determining
   which MEMs can alias which other MEMs.  In general, two MEMs in
   different alias sets cannot alias each other, with one important
   exception.  Consider something like:

     struct S { int i; double d; };

   a store to an `S' can alias something of either type `int' or type
   `double'.  (However, a store to an `int' cannot alias a `double'
   and vice versa.)  We indicate this via a tree structure that looks
   like:

	       struct S
		/   \
	     |/_     _\|
	     int    double

   (The arrows are directed and point downwards.)
   In this situation we say the alias set for `struct S' is the
   `superset' and that those for `int' and `double' are `subsets'.

   To see whether two alias sets can point to the same memory, we must
   see if either alias set is a subset of the other.  We need not trace
   past immediate descendants, however, since we propagate all
   grandchildren up one level.

   Alias set zero is implicitly a superset of all other alias sets.
   However, there is no actual entry for alias set zero.  It is an
   error to attempt to explicitly construct a subset of zero.  */
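
/* Editorial sketch, not part of GCC: given the `struct S' example
   above, the routines below would be expected to answer roughly

     alias_set_type s_set = get_alias_set (s_type);
     alias_set_type i_set = get_alias_set (integer_type_node);
     alias_set_type d_set = get_alias_set (double_type_node);

     alias_sets_conflict_p (s_set, i_set)  => 1  (`int' is a subset of `S')
     alias_sets_conflict_p (s_set, d_set)  => 1  (`double' is a subset of `S')
     alias_sets_conflict_p (i_set, d_set)  => 0  (distinct scalar sets)
     alias_sets_conflict_p (0, i_set)      => 1  (set zero conflicts with all)

   where s_type stands for a hypothetical tree node for `struct S'.  */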
struct GTY(()) alias_set_entry_d {
  /* The alias set number, as stored in MEM_ALIAS_SET.  */
  alias_set_type alias_set;

  /* Nonzero if this alias set would have a child of zero: this
     effectively makes it the same as alias set zero.  */
  int has_zero_child;

  /* The children of the alias set.  These are not just the immediate
     children, but, in fact, all descendants.  So, if we have:

       struct T { struct S s; float f; }

     continuing our example above, the children here will be all of
     `int', `double', `float', and `struct S'.  */
  splay_tree GTY((param1_is (int), param2_is (int))) children;
};
typedef struct alias_set_entry_d *alias_set_entry;
static int rtx_equal_for_memref_p (const_rtx, const_rtx);
static int memrefs_conflict_p (int, rtx, int, rtx, HOST_WIDE_INT);
static void record_set (rtx, const_rtx, void *);
static int base_alias_check (rtx, rtx, enum machine_mode,
			     enum machine_mode);
static rtx find_base_value (rtx);
static int mems_in_disjoint_alias_sets_p (const_rtx, const_rtx);
static int insert_subset_children (splay_tree_node, void*);
static alias_set_entry get_alias_set_entry (alias_set_type);
static const_rtx fixed_scalar_and_varying_struct_p (const_rtx, const_rtx,
						    rtx, rtx,
						    bool (*) (const_rtx, bool));
static int aliases_everything_p (const_rtx);
static bool nonoverlapping_component_refs_p (const_tree, const_tree);
static tree decl_for_component_ref (tree);
static rtx adjust_offset_for_component_ref (tree, rtx);
static int write_dependence_p (const_rtx, const_rtx, int);

static void memory_modified_1 (rtx, const_rtx, void *);

/* Set up all info needed to perform alias analysis on memory references.  */

/* Returns the size in bytes of the mode of X.  */
#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))

/* Returns nonzero if MEM1 and MEM2 do not alias because they are in
   different alias sets.  We ignore alias sets in functions making use
   of variable arguments because the va_arg macros on some systems are
   not legal ANSI C.  */
#define DIFFERENT_ALIAS_SETS_P(MEM1, MEM2) \
  mems_in_disjoint_alias_sets_p (MEM1, MEM2)

/* Cap the number of passes we make over the insns propagating alias
   information through set chains.  10 is a completely arbitrary choice.  */
#define MAX_ALIAS_LOOP_PASSES 10

/* reg_base_value[N] gives an address to which register N is related.
   If all sets after the first add or subtract to the current value
   or otherwise modify it so it does not point to a different top level
   object, reg_base_value[N] is equal to the address part of the source
   of the first set.

   A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF.  ADDRESS
   expressions represent certain special values: function arguments and
   the stack, frame, and argument pointers.

   The contents of an ADDRESS are not normally used; the mode of the
   ADDRESS determines whether the ADDRESS is a function argument or some
   other special value.  Pointer equality, not rtx_equal_p, determines whether
   two ADDRESS expressions refer to the same base address.

   The only use of the contents of an ADDRESS is for determining if the
   current function performs nonlocal memory references for the
   purposes of marking the function as a constant function.  */

static GTY(()) VEC(rtx,gc) *reg_base_value;
static rtx *new_reg_base_value;

/* We preserve a copy of the old array around to reduce the amount of
   garbage produced; about 8% of the garbage produced was attributed
   to this array.  */
static GTY((deletable)) VEC(rtx,gc) *old_reg_base_value;

/* Static hunks of RTL used by the aliasing code; these are initialized
   once per function to avoid unnecessary RTL allocations.  */
static GTY (()) rtx static_reg_base_value[FIRST_PSEUDO_REGISTER];

#define REG_BASE_VALUE(X)				\
  (REGNO (X) < VEC_length (rtx, reg_base_value)		\
   ? VEC_index (rtx, reg_base_value, REGNO (X)) : 0)

/* Vector indexed by N giving the initial (unchanging) value known for
   pseudo-register N.  This array is initialized in init_alias_analysis,
   and does not change until end_alias_analysis is called.  */
static GTY((length("reg_known_value_size"))) rtx *reg_known_value;

/* Indicates number of valid entries in reg_known_value.  */
static GTY(()) unsigned int reg_known_value_size;

/* Vector recording for each reg_known_value whether it is due to a
   REG_EQUIV note.  Future passes (viz., reload) may replace the
   pseudo with the equivalent expression and so we account for the
   dependences that would be introduced if that happens.

   The REG_EQUIV notes created in assign_parms may mention the arg
   pointer, and there are explicit insns in the RTL that modify the
   arg pointer.  Thus we must ensure that such insns don't get
   scheduled across each other because that would invalidate the
   REG_EQUIV notes.  One could argue that the REG_EQUIV notes are
   wrong, but solving the problem in the scheduler will likely give
   better code, so we do it here.  */
static bool *reg_known_equiv_p;

/* True when scanning insns from the start of the rtl to the
   NOTE_INSN_FUNCTION_BEG note.  */
static bool copying_arguments;

DEF_VEC_P(alias_set_entry);
DEF_VEC_ALLOC_P(alias_set_entry,gc);

/* The splay-tree used to store the various alias set entries.  */
static GTY (()) VEC(alias_set_entry,gc) *alias_sets;

/* Build a decomposed reference object for querying the alias-oracle
   from the MEM rtx and store it in *REF.
   Returns false if MEM is not suitable for the alias-oracle.  */

static bool
ao_ref_from_mem (ao_ref *ref, const_rtx mem)
{
  tree expr = MEM_EXPR (mem);
  tree base;

  if (!expr)
    return false;

  ao_ref_init (ref, expr);

  /* Get the base of the reference and see if we have to reject or
     adjust it.  */
  base = ao_ref_base (ref);
  if (base == NULL_TREE)
    return false;

  /* The tree oracle doesn't like to have these.  */
  if (TREE_CODE (base) == FUNCTION_DECL
      || TREE_CODE (base) == LABEL_DECL)
    return false;

  /* If this is a pointer dereference of a non-SSA_NAME punt.
     ??? We could replace it with a pointer to anything.  */
  if (INDIRECT_REF_P (base)
      && TREE_CODE (TREE_OPERAND (base, 0)) != SSA_NAME)
    return false;

  /* If this is a reference based on a partitioned decl replace the
     base with an INDIRECT_REF of the pointer representative we
     created during stack slot partitioning.  */
  if (TREE_CODE (base) == VAR_DECL
      && ! TREE_STATIC (base)
      && cfun->gimple_df->decls_to_pointers != NULL)
    {
      void *namep;
      namep = pointer_map_contains (cfun->gimple_df->decls_to_pointers, base);
      if (namep)
	{
	  ref->base_alias_set = get_alias_set (base);
	  ref->base = build1 (INDIRECT_REF, TREE_TYPE (base), *(tree *)namep);
	}
    }

  ref->ref_alias_set = MEM_ALIAS_SET (mem);

  /* If MEM_OFFSET or MEM_SIZE are NULL we have to punt.
     Keep points-to related information though.  */
  if (!MEM_OFFSET (mem)
      || !MEM_SIZE (mem))
    {
      ref->ref = NULL_TREE;
      ref->offset = 0;
      ref->size = -1;
      ref->max_size = -1;
      return true;
    }

  /* If the base decl is a parameter we can have negative MEM_OFFSET in
     case of promoted subregs on bigendian targets.  Trust the MEM_EXPR
     here.  */
  if (INTVAL (MEM_OFFSET (mem)) < 0
      && ((INTVAL (MEM_SIZE (mem)) + INTVAL (MEM_OFFSET (mem)))
	  * BITS_PER_UNIT) == ref->size)
    return true;

  ref->offset += INTVAL (MEM_OFFSET (mem)) * BITS_PER_UNIT;
  ref->size = INTVAL (MEM_SIZE (mem)) * BITS_PER_UNIT;

  /* The MEM may extend into adjacent fields, so adjust max_size if
     necessary.  */
  if (ref->max_size != -1
      && ref->size > ref->max_size)
    ref->max_size = ref->size;

  /* If MEM_OFFSET and MEM_SIZE get us outside of the base object of
     the MEM_EXPR punt.  This happens for STRICT_ALIGNMENT targets a lot.  */
  if (MEM_EXPR (mem) != get_spill_slot_decl (false)
      && (ref->offset < 0
	  || (DECL_P (ref->base)
	      && (!host_integerp (DECL_SIZE (ref->base), 1)
		  || (TREE_INT_CST_LOW (DECL_SIZE ((ref->base)))
		      < (unsigned HOST_WIDE_INT)(ref->offset + ref->size))))))
    return false;

  return true;
}

/* Query the alias-oracle on whether the two memory rtx X and MEM may
   alias.  If TBAA_P is set also apply TBAA.  Returns true if the
   two rtxen may alias, false otherwise.  */

static bool
rtx_refs_may_alias_p (const_rtx x, const_rtx mem, bool tbaa_p)
{
  ao_ref ref1, ref2;

  if (!ao_ref_from_mem (&ref1, x)
      || !ao_ref_from_mem (&ref2, mem))
    return true;

  return refs_may_alias_p_1 (&ref1, &ref2, tbaa_p);
}

/* Returns a pointer to the alias set entry for ALIAS_SET, if there is
   such an entry, or NULL otherwise.  */

static inline alias_set_entry
get_alias_set_entry (alias_set_type alias_set)
{
  return VEC_index (alias_set_entry, alias_sets, alias_set);
}

/* Returns nonzero if the alias sets for MEM1 and MEM2 are such that
   the two MEMs cannot alias each other.  */

static inline int
mems_in_disjoint_alias_sets_p (const_rtx mem1, const_rtx mem2)
{
  /* Perform a basic sanity check.  Namely, that there are no alias sets
     if we're not using strict aliasing.  This helps to catch bugs
     whereby someone uses PUT_CODE, but doesn't clear MEM_ALIAS_SET, or
     where a MEM is allocated in some way other than by the use of
     gen_rtx_MEM, and the MEM_ALIAS_SET is not cleared.  If we begin to
     use alias sets to indicate that spilled registers cannot alias each
     other, we might need to remove this check.  */
  gcc_assert (flag_strict_aliasing
	      || (!MEM_ALIAS_SET (mem1) && !MEM_ALIAS_SET (mem2)));

  return ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1), MEM_ALIAS_SET (mem2));
}

/* Insert the NODE into the splay tree given by DATA.  Used by
   record_alias_subset via splay_tree_foreach.  */

static int
insert_subset_children (splay_tree_node node, void *data)
{
  splay_tree_insert ((splay_tree) data, node->key, node->value);

  return 0;
}

/* Return true if the first alias set is a subset of the second.  */

bool
alias_set_subset_of (alias_set_type set1, alias_set_type set2)
{
  alias_set_entry ase;

  /* Everything is a subset of the "aliases everything" set.  */
  if (set2 == 0)
    return true;

  /* Otherwise, check if set1 is a subset of set2.  */
  ase = get_alias_set_entry (set2);
  if (ase != 0
      && (ase->has_zero_child
	  || splay_tree_lookup (ase->children,
				(splay_tree_key) set1)))
    return true;
  return false;
}

/* Return 1 if the two specified alias sets may conflict.  */

int
alias_sets_conflict_p (alias_set_type set1, alias_set_type set2)
{
  alias_set_entry ase;

  /* The easy case.  */
  if (alias_sets_must_conflict_p (set1, set2))
    return 1;

  /* See if the first alias set is a subset of the second.  */
  ase = get_alias_set_entry (set1);
  if (ase != 0
      && (ase->has_zero_child
	  || splay_tree_lookup (ase->children,
				(splay_tree_key) set2)))
    return 1;

  /* Now do the same, but with the alias sets reversed.  */
  ase = get_alias_set_entry (set2);
  if (ase != 0
      && (ase->has_zero_child
	  || splay_tree_lookup (ase->children,
				(splay_tree_key) set1)))
    return 1;

  /* The two alias sets are distinct and neither one is the
     child of the other.  Therefore, they cannot conflict.  */
  return 0;
}

static int
walk_mems_2 (rtx *x, rtx mem)
{
  if (MEM_P (*x))
    {
      if (alias_sets_conflict_p (MEM_ALIAS_SET (*x), MEM_ALIAS_SET (mem)))
	return 1;

      return -1;
    }
  return 0;
}

static int
walk_mems_1 (rtx *x, rtx *pat)
{
  if (MEM_P (*x))
    {
      /* Visit all MEMs in *PAT and check independence.  */
      if (for_each_rtx (pat, (rtx_function) walk_mems_2, *x))
	/* Indicate that dependence was determined and stop traversal.  */
	return 1;

      return -1;
    }
  return 0;
}

/* Return 1 if two specified instructions have mem exprs with conflicting
   alias sets.  */
bool
insn_alias_sets_conflict_p (rtx insn1, rtx insn2)
{
  /* For each pair of MEMs in INSN1 and INSN2 check their independence.  */
  return for_each_rtx (&PATTERN (insn1), (rtx_function) walk_mems_1,
		       &PATTERN (insn2));
}

/* Return 1 if the two specified alias sets will always conflict.  */

int
alias_sets_must_conflict_p (alias_set_type set1, alias_set_type set2)
{
  if (set1 == 0 || set2 == 0 || set1 == set2)
    return 1;

  return 0;
}

/* Return 1 if any MEM object of type T1 will always conflict (using the
   dependency routines in this file) with any MEM object of type T2.
   This is used when allocating temporary storage.  If T1 and/or T2 are
   NULL_TREE, it means we know nothing about the storage.  */

int
objects_must_conflict_p (tree t1, tree t2)
{
  alias_set_type set1, set2;

  /* If neither has a type specified, we don't know if they'll conflict
     because we may be using them to store objects of various types, for
     example the argument and local variables areas of inlined functions.  */
  if (t1 == 0 && t2 == 0)
    return 0;

  /* If they are the same type, they must conflict.  */
  if (t1 == t2
      /* Likewise if both are volatile.  */
      || (t1 != 0 && TYPE_VOLATILE (t1) && t2 != 0 && TYPE_VOLATILE (t2)))
    return 1;

  set1 = t1 ? get_alias_set (t1) : 0;
  set2 = t2 ? get_alias_set (t2) : 0;

  /* We can't use alias_sets_conflict_p because we must make sure
     that every subtype of t1 will conflict with every subtype of
     t2 for which a pair of subobjects of these respective subtypes
     overlaps on the stack.  */
  return alias_sets_must_conflict_p (set1, set2);
}
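
/* Editorial sketch, not part of GCC: a caller allocating stack
   temporaries might only reuse a slot for a new object when the two
   types are guaranteed to conflict, so that later passes keep the
   accesses ordered, e.g.

     if (objects_must_conflict_p (TREE_TYPE (old_tmp), TREE_TYPE (new_tmp)))
       ... the slot may be shared ...

   where old_tmp and new_tmp stand for hypothetical trees for the old
   and new temporaries.  */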

/* Return true if all nested component references handled by
   get_inner_reference in T are such that we should use the alias set
   provided by the object at the heart of T.

   This is true for non-addressable components (which don't have their
   own alias set), as well as components of objects in alias set zero.
   This latter point is a special case wherein we wish to override the
   alias set used by the component, but we don't have per-FIELD_DECL
   assignable alias sets.  */

bool
component_uses_parent_alias_set (const_tree t)
{
  while (1)
    {
      /* If we're at the end, it vacuously uses its own alias set.  */
      if (!handled_component_p (t))
	return false;

      switch (TREE_CODE (t))
	{
	case COMPONENT_REF:
	  if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1)))
	    return true;
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	  if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0))))
	    return true;
	  break;

	case REALPART_EXPR:
	case IMAGPART_EXPR:
	  break;

	default:
	  /* Bitfields and casts are never addressable.  */
	  return true;
	}

      t = TREE_OPERAND (t, 0);
      if (get_alias_set (TREE_TYPE (t)) == 0)
	return true;
    }
}

/* Return the alias set for the memory pointed to by T, which may be
   either a type or an expression.  Return -1 if there is nothing
   special about dereferencing T.  */

static alias_set_type
get_deref_alias_set_1 (tree t)
{
  /* If we're not doing any alias analysis, just assume everything
     aliases everything else.  */
  if (!flag_strict_aliasing)
    return 0;

  /* All we care about is the type.  */
  if (! TYPE_P (t))
    t = TREE_TYPE (t);

  /* If we have an INDIRECT_REF via a void pointer, we don't
     know anything about what that might alias.  Likewise if the
     pointer is marked that way.  */
  if (TREE_CODE (TREE_TYPE (t)) == VOID_TYPE
      || TYPE_REF_CAN_ALIAS_ALL (t))
    return 0;

  return -1;
}

/* Return the alias set for the memory pointed to by T, which may be
   either a type or an expression.  */

alias_set_type
get_deref_alias_set (tree t)
{
  alias_set_type set = get_deref_alias_set_1 (t);

  /* Fall back to the alias-set of the pointed-to type.  */
  if (set == -1)
    {
      if (! TYPE_P (t))
	t = TREE_TYPE (t);
      set = get_alias_set (TREE_TYPE (t));
    }

  return set;
}
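
/* Editorial sketch, not part of GCC: for hypothetical declarations
   `void *vp' and `int *ip', the above behaves roughly as

     get_deref_alias_set (ptr_type_node)        => 0 (a void * may alias all)
     get_deref_alias_set (TREE_TYPE (ip_decl))  => the alias set of `int'

   with ip_decl a hypothetical VAR_DECL for `ip'; ptr_type_node is the
   tree for `void *'.  */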

/* Return the alias set for T, which may be either a type or an
   expression.  Call language-specific routine for help, if needed.  */

alias_set_type
get_alias_set (tree t)
{
  alias_set_type set;

  /* If we're not doing any alias analysis, just assume everything
     aliases everything else.  Also return 0 if this or its type is
     an error.  */
  if (! flag_strict_aliasing || t == error_mark_node
      || (! TYPE_P (t)
	  && (TREE_TYPE (t) == 0 || TREE_TYPE (t) == error_mark_node)))
    return 0;

  /* We can be passed either an expression or a type.  This and the
     language-specific routine may make mutually-recursive calls to each other
     to figure out what to do.  At each juncture, we see if this is a tree
     that the language may need to handle specially.  First handle things that
     aren't types.  */
  if (! TYPE_P (t))
    {
      tree inner;

      /* Remove any nops, then give the language a chance to do
	 something with this tree before we look at it.  */
      STRIP_NOPS (t);
      set = lang_hooks.get_alias_set (t);
      if (set != -1)
	return set;

      /* Retrieve the original memory reference if needed.  */
      if (TREE_CODE (t) == TARGET_MEM_REF)
	t = TMR_ORIGINAL (t);

      /* First see if the actual object referenced is an INDIRECT_REF from a
	 restrict-qualified pointer or a "void *".  */
      inner = t;
      while (handled_component_p (inner))
	{
	  inner = TREE_OPERAND (inner, 0);
	  STRIP_NOPS (inner);
	}

      if (INDIRECT_REF_P (inner))
	{
	  set = get_deref_alias_set_1 (TREE_OPERAND (inner, 0));
	  if (set != -1)
	    return set;
	}

      /* Otherwise, pick up the outermost object that we could have a pointer
	 to, processing conversions as above.  */
      while (component_uses_parent_alias_set (t))
	{
	  t = TREE_OPERAND (t, 0);
	  STRIP_NOPS (t);
	}

      /* If we've already determined the alias set for a decl, just return
	 it.  This is necessary for C++ anonymous unions, whose component
	 variables don't look like union members (boo!).  */
      if (TREE_CODE (t) == VAR_DECL
	  && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t)))
	return MEM_ALIAS_SET (DECL_RTL (t));

      /* Now all we care about is the type.  */
      t = TREE_TYPE (t);
    }

  /* Variant qualifiers don't affect the alias set, so get the main
     variant.  */
  t = TYPE_MAIN_VARIANT (t);

  /* Always use the canonical type as well.  If this is a type that
     requires structural comparisons to identify compatible types,
     use alias set zero.  */
  if (TYPE_STRUCTURAL_EQUALITY_P (t))
    {
      /* Allow the language to specify another alias set for this
	 type.  */
      set = lang_hooks.get_alias_set (t);
      if (set != -1)
	return set;
      return 0;
    }
  t = TYPE_CANONICAL (t);
  /* Canonical types shouldn't form a tree nor should the canonical
     type require structural equality checks.  */
  gcc_assert (!TYPE_STRUCTURAL_EQUALITY_P (t) && TYPE_CANONICAL (t) == t);

  /* If this is a type with a known alias set, return it.  */
  if (TYPE_ALIAS_SET_KNOWN_P (t))
    return TYPE_ALIAS_SET (t);

  /* We don't want to set TYPE_ALIAS_SET for incomplete types.  */
  if (!COMPLETE_TYPE_P (t))
    {
      /* For arrays with unknown size the conservative answer is the
	 alias set of the element type.  */
      if (TREE_CODE (t) == ARRAY_TYPE)
	return get_alias_set (TREE_TYPE (t));

      /* But return zero as a conservative answer for incomplete types.  */
      return 0;
    }

  /* See if the language has special handling for this type.  */
  set = lang_hooks.get_alias_set (t);
  if (set != -1)
    return set;

  /* There are no objects of FUNCTION_TYPE, so there's no point in
     using up an alias set for them.  (There are, of course, pointers
     and references to functions, but that's different.)  */
  else if (TREE_CODE (t) == FUNCTION_TYPE
	   || TREE_CODE (t) == METHOD_TYPE)
    set = 0;

  /* Unless the language specifies otherwise, let vector types alias
     their components.  This avoids some nasty type punning issues in
     normal usage.  And indeed lets vectors be treated more like an
     array slice.  */
  else if (TREE_CODE (t) == VECTOR_TYPE)
    set = get_alias_set (TREE_TYPE (t));

  /* Unless the language specifies otherwise, treat array types the
     same as their components.  This avoids the asymmetry we get
     through recording the components.  Consider accessing a
     character(kind=1) through a reference to a character(kind=1)[1:1].
     Or consider if we want to assign integer(kind=4)[0:D.1387] and
     integer(kind=4)[4] the same alias set or not.
     Just be pragmatic here and make sure the array and its element
     type get the same alias set assigned.  */
  else if (TREE_CODE (t) == ARRAY_TYPE
	   && !TYPE_NONALIASED_COMPONENT (t))
    set = get_alias_set (TREE_TYPE (t));

  else
    /* Otherwise make a new alias set for this type.  */
    set = new_alias_set ();

  TYPE_ALIAS_SET (t) = set;

  /* If this is an aggregate type, we must record any component aliasing
     information.  */
  if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE)
    record_component_aliases (t);

  return set;
}

/* Return a brand-new alias set.  */

alias_set_type
new_alias_set (void)
{
  if (flag_strict_aliasing)
    {
      if (alias_sets == 0)
	VEC_safe_push (alias_set_entry, gc, alias_sets, 0);
      VEC_safe_push (alias_set_entry, gc, alias_sets, 0);
      return VEC_length (alias_set_entry, alias_sets) - 1;
    }
  else
    return 0;
}

/* Indicate that things in SUBSET can alias things in SUPERSET, but that
   not everything that aliases SUPERSET also aliases SUBSET.  For example,
   in C, a store to an `int' can alias a load of a structure containing an
   `int', and vice versa.  But it can't alias a load of a 'double' member
   of the same structure.  Here, the structure would be the SUPERSET and
   `int' the SUBSET.  This relationship is also described in the comment at
   the beginning of this file.

   This function should be called only once per SUPERSET/SUBSET pair.

   It is illegal for SUPERSET to be zero; everything is implicitly a
   subset of alias set zero.  */

void
record_alias_subset (alias_set_type superset, alias_set_type subset)
{
  alias_set_entry superset_entry;
  alias_set_entry subset_entry;

  /* It is possible in complex type situations for both sets to be the same,
     in which case we can ignore this operation.  */
  if (superset == subset)
    return;

  gcc_assert (superset);

  superset_entry = get_alias_set_entry (superset);
  if (superset_entry == 0)
    {
      /* Create an entry for the SUPERSET, so that we have a place to
	 attach the SUBSET.  */
      superset_entry = ggc_alloc_cleared_alias_set_entry_d ();
      superset_entry->alias_set = superset;
      superset_entry->children
	= splay_tree_new_ggc (splay_tree_compare_ints,
			      ggc_alloc_splay_tree_scalar_scalar_splay_tree_s,
			      ggc_alloc_splay_tree_scalar_scalar_splay_tree_node_s);
      superset_entry->has_zero_child = 0;
      VEC_replace (alias_set_entry, alias_sets, superset, superset_entry);
    }

  if (subset == 0)
    superset_entry->has_zero_child = 1;
  else
    {
      subset_entry = get_alias_set_entry (subset);
      /* If there is an entry for the subset, enter all of its children
	 (if they are not already present) as children of the SUPERSET.  */
      if (subset_entry)
	{
	  if (subset_entry->has_zero_child)
	    superset_entry->has_zero_child = 1;

	  splay_tree_foreach (subset_entry->children, insert_subset_children,
			      superset_entry->children);
	}

      /* Enter the SUBSET itself as a child of the SUPERSET.  */
      splay_tree_insert (superset_entry->children,
			 (splay_tree_key) subset, 0);
    }
}
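
/* Editorial sketch, not part of GCC: for `struct S { int i; double d; };'
   the recording done by record_component_aliases below amounts to

     record_alias_subset (get_alias_set (s_type),
			  get_alias_set (integer_type_node));
     record_alias_subset (get_alias_set (s_type),
			  get_alias_set (double_type_node));

   with s_type a hypothetical tree for `struct S', called once per
   SUPERSET/SUBSET pair as required above.  */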

/* Record that component types of TYPE, if any, are part of that type for
   aliasing purposes.  For record types, we only record component types
   for fields that are not marked non-addressable.  For array types, we
   only record the component type if it is not marked non-aliased.  */

void
record_component_aliases (tree type)
{
  alias_set_type superset = get_alias_set (type);
  tree field;

  if (superset == 0)
    return;

  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      /* Recursively record aliases for the base classes, if there are any.  */
      if (TYPE_BINFO (type))
	{
	  int i;
	  tree binfo, base_binfo;

	  for (binfo = TYPE_BINFO (type), i = 0;
	       BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
	    record_alias_subset (superset,
				 get_alias_set (BINFO_TYPE (base_binfo)));
	}
      for (field = TYPE_FIELDS (type); field != 0; field = TREE_CHAIN (field))
	if (TREE_CODE (field) == FIELD_DECL && !DECL_NONADDRESSABLE_P (field))
	  record_alias_subset (superset, get_alias_set (TREE_TYPE (field)));
      break;

    case COMPLEX_TYPE:
      record_alias_subset (superset, get_alias_set (TREE_TYPE (type)));
      break;

      /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
	 element type.  */

    default:
      break;
    }
}

/* Allocate an alias set for use in storing and reading from the varargs
   spill area.  */

static GTY(()) alias_set_type varargs_set = -1;

alias_set_type
get_varargs_alias_set (void)
{
#if 1
  /* We now lower VA_ARG_EXPR, and there's currently no way to attach the
     varargs alias set to an INDIRECT_REF (FIXME!), so we can't
     consistently use the varargs alias set for loads from the varargs
     area.  So don't use it anywhere.  */
  return 0;
#else
  if (varargs_set == -1)
    varargs_set = new_alias_set ();

  return varargs_set;
#endif
}

/* Likewise, but used for the fixed portions of the frame, e.g., register
   save areas.  */

static GTY(()) alias_set_type frame_set = -1;

alias_set_type
get_frame_alias_set (void)
{
  if (frame_set == -1)
    frame_set = new_alias_set ();

  return frame_set;
}

/* Inside SRC, the source of a SET, find a base address.  */

static rtx
find_base_value (rtx src)
{
  unsigned int regno;

#if defined (FIND_BASE_TERM)
  /* Try machine-dependent ways to find the base term.  */
  src = FIND_BASE_TERM (src);
#endif

  switch (GET_CODE (src))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return src;

    case REG:
      regno = REGNO (src);
      /* At the start of a function, argument registers have known base
	 values which may be lost later.  Returning an ADDRESS
	 expression here allows optimization based on argument values
	 even when the argument registers are used for other purposes.  */
      if (regno < FIRST_PSEUDO_REGISTER && copying_arguments)
	return new_reg_base_value[regno];

      /* If a pseudo has a known base value, return it.  Do not do this
	 for non-fixed hard regs since it can result in a circular
	 dependency chain for registers which have values at function entry.

	 The test above is not sufficient because the scheduler may move
	 a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN.  */
      if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno])
	  && regno < VEC_length (rtx, reg_base_value))
	{
	  /* If we're inside init_alias_analysis, use new_reg_base_value
	     to reduce the number of relaxation iterations.  */
	  if (new_reg_base_value && new_reg_base_value[regno]
	      && DF_REG_DEF_COUNT (regno) == 1)
	    return new_reg_base_value[regno];

	  if (VEC_index (rtx, reg_base_value, regno))
	    return VEC_index (rtx, reg_base_value, regno);
	}

      return 0;

    case MEM:
      /* Check for an argument passed in memory.  Only record in the
	 copying-arguments block; it is too hard to track changes
	 otherwise.  */
      if (copying_arguments
	  && (XEXP (src, 0) == arg_pointer_rtx
	      || (GET_CODE (XEXP (src, 0)) == PLUS
		  && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx)))
	return gen_rtx_ADDRESS (VOIDmode, src);
      return 0;

    case CONST:
      src = XEXP (src, 0);
      if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS)
	break;

      /* ... fall through ...  */

    case PLUS:
    case MINUS:
      {
	rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1);

	/* If either operand is a REG that is a known pointer, then it
	   is the base.  */
	if (REG_P (src_0) && REG_POINTER (src_0))
	  return find_base_value (src_0);
	if (REG_P (src_1) && REG_POINTER (src_1))
	  return find_base_value (src_1);

	/* If either operand is a REG, then see if we already have
	   a known value for it.  */
	if (REG_P (src_0))
	  {
	    temp = find_base_value (src_0);
	    if (temp != 0)
	      src_0 = temp;
	  }

	if (REG_P (src_1))
	  {
	    temp = find_base_value (src_1);
	    if (temp != 0)
	      src_1 = temp;
	  }

	/* If either base is a named object or a special address
	   (like an argument or stack reference), then use it for the
	   base term.  */
	if (src_0 != 0
	    && (GET_CODE (src_0) == SYMBOL_REF
		|| GET_CODE (src_0) == LABEL_REF
		|| (GET_CODE (src_0) == ADDRESS
		    && GET_MODE (src_0) != VOIDmode)))
	  return src_0;

	if (src_1 != 0
	    && (GET_CODE (src_1) == SYMBOL_REF
		|| GET_CODE (src_1) == LABEL_REF
		|| (GET_CODE (src_1) == ADDRESS
		    && GET_MODE (src_1) != VOIDmode)))
	  return src_1;

	/* Guess which operand is the base address:
	   If either operand is a symbol, then it is the base.  If
	   either operand is a CONST_INT, then the other is the base.  */
	if (CONST_INT_P (src_1) || CONSTANT_P (src_0))
	  return find_base_value (src_0);
	else if (CONST_INT_P (src_0) || CONSTANT_P (src_1))
	  return find_base_value (src_1);

	return 0;
      }

    case LO_SUM:
      /* The standard form is (lo_sum reg sym) so look only at the
	 second operand.  */
      return find_base_value (XEXP (src, 1));

    case AND:
      /* If the second operand is constant set the base
	 address to the first operand.  */
      if (CONST_INT_P (XEXP (src, 1)) && INTVAL (XEXP (src, 1)) != 0)
	return find_base_value (XEXP (src, 0));
      return 0;

    case TRUNCATE:
      /* As we do not know which address space the pointer is referring to, we
	 can handle this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (!target_default_pointer_address_modes_p ())
	break;
      if (GET_MODE_SIZE (GET_MODE (src)) < GET_MODE_SIZE (Pmode))
	break;
      /* Fall through.  */
    case HIGH:
    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return find_base_value (XEXP (src, 0));

    case ZERO_EXTEND:
    case SIGN_EXTEND:	/* used for NT/Alpha pointers */
      /* As we do not know which address space the pointer is referring to, we
	 can handle this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (!target_default_pointer_address_modes_p ())
	break;

      {
	rtx temp = find_base_value (XEXP (src, 0));

	if (temp != 0 && CONSTANT_P (temp))
	  temp = convert_memory_address (Pmode, temp);

	return temp;
      }

    default:
      break;
    }

  return 0;
}

/* Called from init_alias_analysis indirectly through note_stores.  */

/* While scanning insns to find base values, reg_seen[N] is nonzero if
   register N has been set in this function.  */
static char *reg_seen;

/* Addresses which are known not to alias anything else are identified
   by a unique integer.  */
static int unique_id;

static void
record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
{
  unsigned regno;
  rtx src;
  int n;

  if (!REG_P (dest))
    return;

  regno = REGNO (dest);

  gcc_assert (regno < VEC_length (rtx, reg_base_value));

  /* If this spans multiple hard registers, then we must indicate that every
     register has an unusable value.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    n = hard_regno_nregs[regno][GET_MODE (dest)];
  else
    n = 1;
  if (n != 1)
    {
      while (--n >= 0)
	{
	  reg_seen[regno + n] = 1;
	  new_reg_base_value[regno + n] = 0;
	}
      return;
    }

  if (set)
    {
      /* A CLOBBER wipes out any old value but does not prevent a previously
	 unset register from acquiring a base address (i.e. reg_seen is not
	 set).  */
      if (GET_CODE (set) == CLOBBER)
	{
	  new_reg_base_value[regno] = 0;
	  return;
	}
      src = SET_SRC (set);
    }
  else
    {
      if (reg_seen[regno])
	{
	  new_reg_base_value[regno] = 0;
	  return;
	}
      reg_seen[regno] = 1;
      new_reg_base_value[regno] = gen_rtx_ADDRESS (Pmode,
						   GEN_INT (unique_id++));
      return;
    }

  /* If this is not the first set of REGNO, see whether the new value
     is related to the old one.  There are two cases of interest:

	(1) The register might be assigned an entirely new value
	    that has the same base term as the original set.

	(2) The set might be a simple self-modification that
	    cannot change REGNO's base value.

     If neither case holds, reject the original base value as invalid.
     Note that the following situation is not detected:

	 extern int x, y;  int *p = &x;  p += (&y-&x);

     ANSI C does not allow computing the difference of addresses
     of distinct top level objects.  */
  if (new_reg_base_value[regno] != 0
      && find_base_value (src) != new_reg_base_value[regno])
    switch (GET_CODE (src))
      {
      case LO_SUM:
      case MINUS:
	if (XEXP (src, 0) != dest && XEXP (src, 1) != dest)
	  new_reg_base_value[regno] = 0;
	break;
      case PLUS:
	/* If the value we add in the PLUS is also a valid base value,
	   this might be the actual base value, and the original value
	   an index.  */
	{
	  rtx other = NULL_RTX;

	  if (XEXP (src, 0) == dest)
	    other = XEXP (src, 1);
	  else if (XEXP (src, 1) == dest)
	    other = XEXP (src, 0);

	  if (! other || find_base_value (other))
	    new_reg_base_value[regno] = 0;
	  break;
	}
      case AND:
	if (XEXP (src, 0) != dest || !CONST_INT_P (XEXP (src, 1)))
	  new_reg_base_value[regno] = 0;
	break;
      default:
	new_reg_base_value[regno] = 0;
	break;
      }
  /* If this is the first set of a register, record the value.  */
  else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
	   && ! reg_seen[regno] && new_reg_base_value[regno] == 0)
    new_reg_base_value[regno] = find_base_value (src);

  reg_seen[regno] = 1;
}

/* If a value is known for REGNO, return it.  */

rtx
get_reg_known_value (unsigned int regno)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
	return reg_known_value[regno];
    }
  return NULL;
}

/* Set it.  */

static void
set_reg_known_value (unsigned int regno, rtx val)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
	reg_known_value[regno] = val;
    }
}

/* Similarly for reg_known_equiv_p.  */

bool
get_reg_known_equiv_p (unsigned int regno)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
	return reg_known_equiv_p[regno];
    }
  return false;
}

static void
set_reg_known_equiv_p (unsigned int regno, bool val)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < reg_known_value_size)
	reg_known_equiv_p[regno] = val;
    }
}

/* Returns a canonical version of X, from the point of view of alias
   analysis.  (For example, if X is a MEM whose address is a register,
   and the register has a known value (say a SYMBOL_REF), then a MEM
   whose address is the SYMBOL_REF is returned.)  */

rtx
canon_rtx (rtx x)
{
  /* Recursively look for equivalences.  */
  if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
    {
      rtx t = get_reg_known_value (REGNO (x));
      if (t == x)
	return x;
      if (t)
	return canon_rtx (t);
    }

  if (GET_CODE (x) == PLUS)
    {
      rtx x0 = canon_rtx (XEXP (x, 0));
      rtx x1 = canon_rtx (XEXP (x, 1));

      if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
	{
	  if (CONST_INT_P (x0))
	    return plus_constant (x1, INTVAL (x0));
	  else if (CONST_INT_P (x1))
	    return plus_constant (x0, INTVAL (x1));
	  return gen_rtx_PLUS (GET_MODE (x), x0, x1);
	}
    }

  /* This gives us much better alias analysis when called from
     the loop optimizer.  Note we want to leave the original
     MEM alone, but need to return the canonicalized MEM with
     all the flags with their original values.  */
  else if (MEM_P (x))
    x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0)));

  return x;
}
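
/* Editorial sketch, not part of GCC: if pseudo-register 100 has the
   known value (symbol_ref "x") recorded in reg_known_value, then
   roughly

     canon_rtx (gen_rtx_MEM (SImode, gen_rtx_REG (Pmode, 100)))

   returns a MEM whose address is (symbol_ref "x"), while the original
   MEM rtx is left untouched, as the comment above requires.  */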

/* Return 1 if X and Y are identical-looking rtx's.
   Expect that X and Y have already been canonicalized.

   We use the data in reg_known_value above to see if two registers with
   different numbers are, in fact, equivalent.  */

static int
rtx_equal_for_memref_p (const_rtx x, const_rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0 && y == 0)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  if (x == y)
    return 1;

  code = GET_CODE (x);
  /* Rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */
  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Some RTL can be compared without a recursive examination.  */
  switch (code)
    {
    case REG:
      return REGNO (x) == REGNO (y);

    case LABEL_REF:
      return XEXP (x, 0) == XEXP (y, 0);

    case SYMBOL_REF:
      return XSTR (x, 0) == XSTR (y, 0);

    case VALUE:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
      /* There's no need to compare the contents of CONST_DOUBLEs or
	 CONST_INTs because pointer equality is a good enough
	 comparison for these nodes.  */
      return 0;

    default:
      break;
    }

  /* canon_rtx knows how to handle plus.  No need to canonicalize.  */
  if (code == PLUS)
    return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
	     && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
	    || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
		&& rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
  /* For commutative operations, the RTXs match if the operands match in
     either order.  Also handle the simple binary and unary cases without
     a loop.  */
  if (COMMUTATIVE_P (x))
    {
      rtx xop0 = canon_rtx (XEXP (x, 0));
      rtx yop0 = canon_rtx (XEXP (y, 0));
      rtx yop1 = canon_rtx (XEXP (y, 1));

      return ((rtx_equal_for_memref_p (xop0, yop0)
	       && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop1))
	      || (rtx_equal_for_memref_p (xop0, yop1)
		  && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop0)));
    }
  else if (NON_COMMUTATIVE_P (x))
    {
      return (rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
				      canon_rtx (XEXP (y, 0)))
	      && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)),
					 canon_rtx (XEXP (y, 1))));
    }
  else if (UNARY_P (x))
    return rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
				   canon_rtx (XEXP (y, 0)));

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.

     Limit cases to types which actually appear in addresses.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (rtx_equal_for_memref_p (canon_rtx (XVECEXP (x, i, j)),
					canon_rtx (XVECEXP (y, i, j))) == 0)
	      return 0;
	  break;

	case 'e':
	  if (rtx_equal_for_memref_p (canon_rtx (XEXP (x, i)),
				      canon_rtx (XEXP (y, i))) == 0)
	    return 0;
	  break;

	  /* This can happen for asm operands.  */
	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	/* This can happen for an asm which clobbers memory.  */
	case '0':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  gcc_unreachable ();
	}
    }
  return 1;
}

rtx
find_base_term (rtx x)
{
  cselib_val *val;
  struct elt_loc_list *l;

#if defined (FIND_BASE_TERM)
  /* Try machine-dependent ways to find the base term.  */
  x = FIND_BASE_TERM (x);
#endif

  switch (GET_CODE (x))
    {
    case REG:
      return REG_BASE_VALUE (x);

    case TRUNCATE:
      /* As we do not know which address space the pointer is referring to, we
	 can handle this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (!target_default_pointer_address_modes_p ())
	return 0;
      if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (Pmode))
	return 0;
      /* Fall through.  */
    case HIGH:
    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return find_base_term (XEXP (x, 0));

    case ZERO_EXTEND:
    case SIGN_EXTEND:	/* Used for Alpha/NT pointers.  */
      /* As we do not know which address space the pointer is referring to, we
	 can handle this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (!target_default_pointer_address_modes_p ())
	return 0;

      {
	rtx temp = find_base_term (XEXP (x, 0));

	if (temp != 0 && CONSTANT_P (temp))
	  temp = convert_memory_address (Pmode, temp);

	return temp;
      }

    case VALUE:
      val = CSELIB_VAL_PTR (x);
      if (!val)
	return 0;
      for (l = val->locs; l; l = l->next)
	if ((x = find_base_term (l->loc)) != 0)
	  return x;
      return 0;

    case LO_SUM:
      /* The standard form is (lo_sum reg sym) so look only at the
	 second operand.  */
      return find_base_term (XEXP (x, 1));

    case CONST:
      x = XEXP (x, 0);
      if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS)
	return 0;
      /* Fall through.  */
    case PLUS:
    case MINUS:
      {
	rtx tmp1 = XEXP (x, 0);
	rtx tmp2 = XEXP (x, 1);

	/* This is a little bit tricky since we have to determine which of
	   the two operands represents the real base address.  Otherwise this
	   routine may return the index register instead of the base register.

	   That may cause us to believe no aliasing was possible, when in
	   fact aliasing is possible.

	   We use a few simple tests to guess the base register.  Additional
	   tests can certainly be added.  For example, if one of the operands
	   is a shift or multiply, then it must be the index register and the
	   other operand is the base register.  */

	if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2))
	  return find_base_term (tmp2);

	/* If either operand is known to be a pointer, then use it
	   to determine the base term.  */
	if (REG_P (tmp1) && REG_POINTER (tmp1))
	  {
	    rtx base = find_base_term (tmp1);
	    if (base)
	      return base;
	  }

	if (REG_P (tmp2) && REG_POINTER (tmp2))
	  {
	    rtx base = find_base_term (tmp2);
	    if (base)
	      return base;
	  }

	/* Neither operand was known to be a pointer.  Go ahead and find the
	   base term for both operands.  */
	tmp1 = find_base_term (tmp1);
	tmp2 = find_base_term (tmp2);

	/* If either base term is a named object or a special address
	   (like an argument or stack reference), then use it for the
	   base term.  */
	if (tmp1 != 0
	    && (GET_CODE (tmp1) == SYMBOL_REF
		|| GET_CODE (tmp1) == LABEL_REF
		|| (GET_CODE (tmp1) == ADDRESS
		    && GET_MODE (tmp1) != VOIDmode)))
	  return tmp1;

	if (tmp2 != 0
	    && (GET_CODE (tmp2) == SYMBOL_REF
		|| GET_CODE (tmp2) == LABEL_REF
		|| (GET_CODE (tmp2) == ADDRESS
		    && GET_MODE (tmp2) != VOIDmode)))
	  return tmp2;

	/* We could not determine which of the two operands was the
	   base register and which was the index.  So we can determine
	   nothing from the base alias check.  */
	return 0;
      }

    case AND:
      if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) != 0)
	return find_base_term (XEXP (x, 0));
      return 0;

    case SYMBOL_REF:
    case LABEL_REF:
      return x;

    default:
      return 0;
    }
}

/* Return 0 if the addresses X and Y are known to point to different
   objects, 1 if they might be pointers to the same object.  */

static int
base_alias_check (rtx x, rtx y, enum machine_mode x_mode,
		  enum machine_mode y_mode)
{
  rtx x_base = find_base_term (x);
  rtx y_base = find_base_term (y);

  /* If the address itself has no known base see if a known equivalent
     value has one.  If either address still has no known base, nothing
     is known about aliasing.  */
  if (x_base == 0)
    {
      rtx x_c;

      if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x)
	return 1;

      x_base = find_base_term (x_c);
      if (x_base == 0)
	return 1;
    }

  if (y_base == 0)
    {
      rtx y_c;
      if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y)
	return 1;

      y_base = find_base_term (y_c);
      if (y_base == 0)
	return 1;
    }

  /* If the base addresses are equal nothing is known about aliasing.  */
  if (rtx_equal_p (x_base, y_base))
    return 1;

  /* The base addresses are different expressions.  If they are not accessed
     via AND, there is no conflict.  We can bring knowledge of object
     alignment into play here.  For example, on alpha, "char a, b;" can
     alias one another, though "char a; long b;" cannot.  AND addresses may
     implicitly alias surrounding objects; i.e. unaligned access in DImode
     via AND address can alias all surrounding object types except those
     with alignment 8 or higher.  */
  if (GET_CODE (x) == AND && GET_CODE (y) == AND)
    return 1;
  if (GET_CODE (x) == AND
      && (!CONST_INT_P (XEXP (x, 1))
	  || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
    return 1;
  if (GET_CODE (y) == AND
      && (!CONST_INT_P (XEXP (y, 1))
	  || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
    return 1;

  /* Differing symbols not accessed via AND never alias.  */
  if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
    return 0;

  /* If one address is a stack reference there can be no alias:
     stack references using different base registers do not alias,
     a stack reference can not alias a parameter, and a stack reference
     can not alias a global.  */
  if ((GET_CODE (x_base) == ADDRESS && GET_MODE (x_base) == Pmode)
      || (GET_CODE (y_base) == ADDRESS && GET_MODE (y_base) == Pmode))
    return 0;

  return 1;
}

/* Convert the address X into something we can use.  This is done by returning
   it unchanged unless it is a value; in the latter case we call cselib to get
   a more useful rtx.  */

rtx
get_addr (rtx x)
{
  cselib_val *v;
  struct elt_loc_list *l;

  if (GET_CODE (x) != VALUE)
    return x;
  v = CSELIB_VAL_PTR (x);
  if (v)
    {
      for (l = v->locs; l; l = l->next)
	if (CONSTANT_P (l->loc))
	  return l->loc;
      for (l = v->locs; l; l = l->next)
	if (!REG_P (l->loc) && !MEM_P (l->loc))
	  return l->loc;
      if (v->locs)
	return v->locs->loc;
    }
  return x;
}

/* Return the address of the (N_REFS + 1)th memory reference to ADDR
   where SIZE is the size in bytes of the memory reference.  If ADDR
   is not modified by the memory reference then ADDR is returned.  */

static rtx
addr_side_effect_eval (rtx addr, int size, int n_refs)
{
  int offset = 0;

  switch (GET_CODE (addr))
    {
    case PRE_INC:
      offset = (n_refs + 1) * size;
      break;
    case PRE_DEC:
      offset = -(n_refs + 1) * size;
      break;
    case POST_INC:
      offset = n_refs * size;
      break;
    case POST_DEC:
      offset = -n_refs * size;
      break;

    default:
      return addr;
    }

  if (offset)
    addr = gen_rtx_PLUS (GET_MODE (addr), XEXP (addr, 0),
			 GEN_INT (offset));
  else
    addr = XEXP (addr, 0);
  addr = canon_rtx (addr);

  return addr;
}
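
/* Editorial sketch, not part of GCC: a worked example of the offsets
   computed above, for SIZE == 4:

     addr_side_effect_eval (addr, 4, 0) on (post_inc (reg))  => (reg)
     addr_side_effect_eval (addr, 4, 1) on (post_inc (reg))  => (reg) + 4
     addr_side_effect_eval (addr, 4, 1) on (pre_dec (reg))   => (reg) - 8

   i.e. a POST_INC advances by N_REFS * SIZE while a PRE_DEC retreats by
   (N_REFS + 1) * SIZE, matching the cases above.  */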
1763 /* Return one if X and Y (memory addresses) reference the
1764 same location in memory or if the references overlap.
1765 Return zero if they do not overlap, else return
1766 minus one in which case they still might reference the same location.
1768 C is an offset accumulator. When
1769 C is nonzero, we are testing aliases between X and Y + C.
1770 XSIZE is the size in bytes of the X reference,
1771 similarly YSIZE is the size in bytes for Y.
1772 Expect that canon_rtx has been already called for X and Y.
1774 If XSIZE or YSIZE is zero, we do not know the amount of memory being
1775 referenced (the reference was BLKmode), so make the most pessimistic
1776 assumptions.
1778 If XSIZE or YSIZE is negative, we may access memory outside the object
1779 being referenced as a side effect. This can happen when using AND to
1780 align memory references, as is done on the Alpha.
1782 Nice to notice that varying addresses cannot conflict with fp if no
1783 local variables had their addresses taken, but that's too hard now.
1785 ??? Contrary to the tree alias oracle this does not return
1786 one for X + non-constant and Y + non-constant when X and Y are equal.
1787 If that is fixed the TBAA hack for union type-punning can be removed. */
1789 static int
1790 memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
1792 if (GET_CODE (x) == VALUE)
1794 if (REG_P (y))
1796 struct elt_loc_list *l = NULL;
1797 if (CSELIB_VAL_PTR (x))
1798 for (l = CSELIB_VAL_PTR (x)->locs; l; l = l->next)
1799 if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, y))
1800 break;
1801 if (l)
1802 x = y;
1803 else
1804 x = get_addr (x);
1806 /* Don't call get_addr if y is the same VALUE. */
1807 else if (x != y)
1808 x = get_addr (x);
1810 if (GET_CODE (y) == VALUE)
1812 if (REG_P (x))
1814 struct elt_loc_list *l = NULL;
1815 if (CSELIB_VAL_PTR (y))
1816 for (l = CSELIB_VAL_PTR (y)->locs; l; l = l->next)
1817 if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, x))
1818 break;
1819 if (l)
1820 y = x;
1821 else
1822 y = get_addr (y);
1824 /* Don't call get_addr if x is the same VALUE. */
1825 else if (y != x)
1826 y = get_addr (y);
1828 if (GET_CODE (x) == HIGH)
1829 x = XEXP (x, 0);
1830 else if (GET_CODE (x) == LO_SUM)
1831 x = XEXP (x, 1);
1832 else
1833 x = addr_side_effect_eval (x, xsize, 0);
1834 if (GET_CODE (y) == HIGH)
1835 y = XEXP (y, 0);
1836 else if (GET_CODE (y) == LO_SUM)
1837 y = XEXP (y, 1);
1838 else
1839 y = addr_side_effect_eval (y, ysize, 0);
1841 if (rtx_equal_for_memref_p (x, y))
1843 if (xsize <= 0 || ysize <= 0)
1844 return 1;
1845 if (c >= 0 && xsize > c)
1846 return 1;
1847 if (c < 0 && ysize+c > 0)
1848 return 1;
1849 return 0;
1852 /* This code used to check for conflicts involving stack references and
1853 globals but the base address alias code now handles these cases. */
1855 if (GET_CODE (x) == PLUS)
1857 /* The fact that X is canonicalized means that this
1858 PLUS rtx is canonicalized. */
1859 rtx x0 = XEXP (x, 0);
1860 rtx x1 = XEXP (x, 1);
1862 if (GET_CODE (y) == PLUS)
1864 /* The fact that Y is canonicalized means that this
1865 PLUS rtx is canonicalized. */
1866 rtx y0 = XEXP (y, 0);
1867 rtx y1 = XEXP (y, 1);
1869 if (rtx_equal_for_memref_p (x1, y1))
1870 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
1871 if (rtx_equal_for_memref_p (x0, y0))
1872 return memrefs_conflict_p (xsize, x1, ysize, y1, c);
1873 if (CONST_INT_P (x1))
1875 if (CONST_INT_P (y1))
1876 return memrefs_conflict_p (xsize, x0, ysize, y0,
1877 c - INTVAL (x1) + INTVAL (y1));
1878 else
1879 return memrefs_conflict_p (xsize, x0, ysize, y,
1880 c - INTVAL (x1));
1882 else if (CONST_INT_P (y1))
1883 return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
1885 return -1;
1887 else if (CONST_INT_P (x1))
1888 return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
1890 else if (GET_CODE (y) == PLUS)
1892 /* The fact that Y is canonicalized means that this
1893 PLUS rtx is canonicalized. */
1894 rtx y0 = XEXP (y, 0);
1895 rtx y1 = XEXP (y, 1);
1897 if (CONST_INT_P (y1))
1898 return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
1899 else
1900 return -1;
1903 if (GET_CODE (x) == GET_CODE (y))
1904 switch (GET_CODE (x))
1906 case MULT:
1908 /* Handle cases where we expect the second operands to be the
1909 same, and check only whether the first operand would conflict
1910 or not. */
1911 rtx x0, y0;
1912 rtx x1 = canon_rtx (XEXP (x, 1));
1913 rtx y1 = canon_rtx (XEXP (y, 1));
1914 if (! rtx_equal_for_memref_p (x1, y1))
1915 return -1;
1916 x0 = canon_rtx (XEXP (x, 0));
1917 y0 = canon_rtx (XEXP (y, 0));
1918 if (rtx_equal_for_memref_p (x0, y0))
1919 return (xsize == 0 || ysize == 0
1920 || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
1922 /* Can't properly adjust our sizes. */
1923 if (!CONST_INT_P (x1))
1924 return -1;
1925 xsize /= INTVAL (x1);
1926 ysize /= INTVAL (x1);
1927 c /= INTVAL (x1);
1928 return memrefs_conflict_p (xsize, x0, ysize, y0, c);
1931 default:
1932 break;
1935 /* Treat an access through an AND (e.g. a subword access on an Alpha)
1936 as an access with indeterminate size. Assume that references
1937 besides AND are aligned, so if the size of the other reference is
1938 at least as large as the alignment, assume no other overlap. */
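/* For instance (hypothetical RTL, for illustration only): if X is
   (and:DI (reg R) (const_int -8)), the mask rounds the address down to
   an 8-byte boundary, so the access may begin below the unmasked
   address.  Unless the other reference is itself at least 8 bytes wide
   (and, being non-AND, is assumed aligned), XSIZE is forced to -1 here
   so that the most pessimistic assumption is made about the extent of
   X.  */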
1939 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1)))
1941 if (GET_CODE (y) == AND || ysize < -INTVAL (XEXP (x, 1)))
1942 xsize = -1;
1943 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)), ysize, y, c);
1945 if (GET_CODE (y) == AND && CONST_INT_P (XEXP (y, 1)))
1947 /* ??? If we are indexing far enough into the array/structure, we
1948 may yet be able to determine that the references cannot overlap. But we
1949 also need to know that we are far enough from the end not to overlap
1950 a following reference, so we do nothing with that for now. */
1951 if (GET_CODE (x) == AND || xsize < -INTVAL (XEXP (y, 1)))
1952 ysize = -1;
1953 return memrefs_conflict_p (xsize, x, ysize, canon_rtx (XEXP (y, 0)), c);
1956 if (CONSTANT_P (x))
1958 if (CONST_INT_P (x) && CONST_INT_P (y))
1960 c += (INTVAL (y) - INTVAL (x));
1961 return (xsize <= 0 || ysize <= 0
1962 || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
1965 if (GET_CODE (x) == CONST)
1967 if (GET_CODE (y) == CONST)
1968 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
1969 ysize, canon_rtx (XEXP (y, 0)), c);
1970 else
1971 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
1972 ysize, y, c);
1974 if (GET_CODE (y) == CONST)
1975 return memrefs_conflict_p (xsize, x, ysize,
1976 canon_rtx (XEXP (y, 0)), c);
1978 if (CONSTANT_P (y))
1979 return (xsize <= 0 || ysize <= 0
1980 || (rtx_equal_for_memref_p (x, y)
1981 && ((c >= 0 && xsize > c) || (c < 0 && ysize+c > 0))));
1983 return -1;
1986 return -1;
1989 /* Functions to compute memory dependencies.
1991 Since we process the insns in execution order, we can build tables
1992 to keep track of what registers are fixed (and not aliased), what registers
1993 are varying in known ways, and what registers are varying in unknown
1994 ways.
1996 If both memory references are volatile, then there must always be a
1997 dependence between the two references, since their order cannot be
1998 changed. A volatile and a non-volatile reference can be interchanged,
1999 though.
2001 A MEM_IN_STRUCT reference at a non-AND varying address can never
2002 conflict with a non-MEM_IN_STRUCT reference at a fixed address. We
2003 also must allow AND addresses, because they may generate accesses
2004 outside the object being referenced. This is used to generate
2005 aligned addresses from unaligned addresses, for instance by the
2006 Alpha storeqi_unaligned pattern. */
2008 /* Read dependence: X is read after read in MEM takes place. There can
2009 only be a dependence here if both reads are volatile. */
2012 read_dependence (const_rtx mem, const_rtx x)
2014 return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
2017 /* Returns MEM1 if and only if MEM1 is a scalar at a fixed address and
2018 MEM2 is a reference to a structure at a varying address, or returns
2019 MEM2 if vice versa. Otherwise, returns NULL_RTX. If a non-NULL
2020 value is returned, MEM1 and MEM2 can never alias. VARIES_P is used
2021 to decide whether or not an address may vary; it should return
2022 nonzero whenever variation is possible.
2023 MEM1_ADDR and MEM2_ADDR are the addresses of MEM1 and MEM2. */
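/* A hypothetical example: MEM1 is the stack slot of a scalar `int i'
   whose address never varies, and MEM2 is `p->f' where the pointer `p'
   varies.  Under -fstrict-aliasing the two can never refer to the same
   location, so MEM1 is returned.  (The names `i', `p' and `f' are
   illustrative only.)  */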
2025 static const_rtx
2026 fixed_scalar_and_varying_struct_p (const_rtx mem1, const_rtx mem2, rtx mem1_addr,
2027 rtx mem2_addr,
2028 bool (*varies_p) (const_rtx, bool))
2030 if (! flag_strict_aliasing)
2031 return NULL_RTX;
2033 if (MEM_ALIAS_SET (mem2)
2034 && MEM_SCALAR_P (mem1) && MEM_IN_STRUCT_P (mem2)
2035 && !varies_p (mem1_addr, 1) && varies_p (mem2_addr, 1))
2036 /* MEM1 is a scalar at a fixed address; MEM2 is a struct at a
2037 varying address. */
2038 return mem1;
2040 if (MEM_ALIAS_SET (mem1)
2041 && MEM_IN_STRUCT_P (mem1) && MEM_SCALAR_P (mem2)
2042 && varies_p (mem1_addr, 1) && !varies_p (mem2_addr, 1))
2043 /* MEM2 is a scalar at a fixed address; MEM1 is a struct at a
2044 varying address. */
2045 return mem2;
2047 return NULL_RTX;
2050 /* Returns nonzero if something about the mode or address format of MEM
2051 indicates that it might well alias *anything*. */
2053 static int
2054 aliases_everything_p (const_rtx mem)
2056 if (GET_CODE (XEXP (mem, 0)) == AND)
2057 /* If the address is an AND, it's very hard to know what it is
2058 actually pointing at. */
2059 return 1;
2061 return 0;
2064 /* Return true if we can determine that the fields referenced cannot
2065 overlap for any pair of objects. */
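/* For example, given
     struct A { int i; int j; } *p, *q;
   the references p->i and q->j name distinct FIELD_DECLs of the same
   RECORD_TYPE, so they cannot overlap even when p == q.  (Hypothetical
   declarations, for illustration.)  */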
2067 static bool
2068 nonoverlapping_component_refs_p (const_tree x, const_tree y)
2070 const_tree fieldx, fieldy, typex, typey, orig_y;
2072 if (!flag_strict_aliasing)
2073 return false;
2077 /* The comparison has to be done at a common type, since we don't
2078 know how the inheritance hierarchy works. */
2079 orig_y = y;
2082 fieldx = TREE_OPERAND (x, 1);
2083 typex = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldx));
2085 y = orig_y;
2088 fieldy = TREE_OPERAND (y, 1);
2089 typey = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldy));
2091 if (typex == typey)
2092 goto found;
2094 y = TREE_OPERAND (y, 0);
2096 while (y && TREE_CODE (y) == COMPONENT_REF);
2098 x = TREE_OPERAND (x, 0);
2100 while (x && TREE_CODE (x) == COMPONENT_REF);
2101 /* Never found a common type. */
2102 return false;
2104 found:
2105 /* If we're left with accessing different fields of a structure,
2106 then no overlap. */
2107 if (TREE_CODE (typex) == RECORD_TYPE
2108 && fieldx != fieldy)
2109 return true;
2111 /* The comparison on the current field failed. If we're accessing
2112 a deeply nested structure, look at the next outer level. */
2113 x = TREE_OPERAND (x, 0);
2114 y = TREE_OPERAND (y, 0);
2116 while (x && y
2117 && TREE_CODE (x) == COMPONENT_REF
2118 && TREE_CODE (y) == COMPONENT_REF);
2120 return false;
2123 /* Look at the bottom of the COMPONENT_REF list for a DECL, and return it. */
2125 static tree
2126 decl_for_component_ref (tree x)
2130 x = TREE_OPERAND (x, 0);
2132 while (x && TREE_CODE (x) == COMPONENT_REF);
2134 return x && DECL_P (x) ? x : NULL_TREE;
2137 /* Walk up the COMPONENT_REF list and adjust OFFSET to compensate for the
2138 offset of the field reference. */
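/* For example, for a reference x.a.b where field `a' starts 8 bytes
   into its record and field `b' starts 4 bytes into `a', an incoming
   OFFSET of 0 is adjusted to 12.  (A hypothetical layout, for
   illustration.)  */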
2140 static rtx
2141 adjust_offset_for_component_ref (tree x, rtx offset)
2143 HOST_WIDE_INT ioffset;
2145 if (! offset)
2146 return NULL_RTX;
2148 ioffset = INTVAL (offset);
2151 tree offset = component_ref_field_offset (x);
2152 tree field = TREE_OPERAND (x, 1);
2154 if (! host_integerp (offset, 1))
2155 return NULL_RTX;
2156 ioffset += (tree_low_cst (offset, 1)
2157 + (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
2158 / BITS_PER_UNIT));
2160 x = TREE_OPERAND (x, 0);
2162 while (x && TREE_CODE (x) == COMPONENT_REF);
2164 return GEN_INT (ioffset);
2167 /* Return nonzero if we can determine the exprs corresponding to memrefs
2168 X and Y and that they do not overlap. */
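/* For example (hypothetical frame layout): if X refers to a local `a'
   at frame offset 16 with size 4 and Y refers to a local `b' at frame
   offset 32 with size 8, the byte ranges [16,20) and [32,40) are
   disjoint and a nonzero value is returned.  */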
2171 nonoverlapping_memrefs_p (const_rtx x, const_rtx y)
2173 tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y);
2174 rtx rtlx, rtly;
2175 rtx basex, basey;
2176 rtx moffsetx, moffsety;
2177 HOST_WIDE_INT offsetx = 0, offsety = 0, sizex, sizey, tem;
2179 /* Unless both have exprs, we can't tell anything. */
2180 if (exprx == 0 || expry == 0)
2181 return 0;
2183 /* For spill-slot accesses make sure we have valid offsets. */
2184 if ((exprx == get_spill_slot_decl (false)
2185 && ! MEM_OFFSET (x))
2186 || (expry == get_spill_slot_decl (false)
2187 && ! MEM_OFFSET (y)))
2188 return 0;
2190 /* If both are field references, we may be able to determine something. */
2191 if (TREE_CODE (exprx) == COMPONENT_REF
2192 && TREE_CODE (expry) == COMPONENT_REF
2193 && nonoverlapping_component_refs_p (exprx, expry))
2194 return 1;
2197 /* If the field reference test failed, look at the DECLs involved. */
2198 moffsetx = MEM_OFFSET (x);
2199 if (TREE_CODE (exprx) == COMPONENT_REF)
2201 tree t = decl_for_component_ref (exprx);
2202 if (! t)
2203 return 0;
2204 moffsetx = adjust_offset_for_component_ref (exprx, moffsetx);
2205 exprx = t;
2208 moffsety = MEM_OFFSET (y);
2209 if (TREE_CODE (expry) == COMPONENT_REF)
2211 tree t = decl_for_component_ref (expry);
2212 if (! t)
2213 return 0;
2214 moffsety = adjust_offset_for_component_ref (expry, moffsety);
2215 expry = t;
2218 if (! DECL_P (exprx) || ! DECL_P (expry))
2219 return 0;
2221 /* With invalid code we can end up storing into the constant pool.
2222 Bail out to avoid ICEing when creating RTL for this.
2223 See gfortran.dg/lto/20091028-2_0.f90. */
2224 if (TREE_CODE (exprx) == CONST_DECL
2225 || TREE_CODE (expry) == CONST_DECL)
2226 return 1;
2228 rtlx = DECL_RTL (exprx);
2229 rtly = DECL_RTL (expry);
2231 /* If either RTL is not a MEM, it must be a REG or CONCAT, meaning they
2232 can't overlap unless they are the same, because we never reuse that part
2233 of the stack frame used for locals for spilled pseudos. */
2234 if ((!MEM_P (rtlx) || !MEM_P (rtly))
2235 && ! rtx_equal_p (rtlx, rtly))
2236 return 1;
2238 /* If we have MEMs referring to different address spaces (which can
2239 potentially overlap), we cannot easily tell from the addresses
2240 whether the references overlap. */
2241 if (MEM_P (rtlx) && MEM_P (rtly)
2242 && MEM_ADDR_SPACE (rtlx) != MEM_ADDR_SPACE (rtly))
2243 return 0;
2245 /* Get the base and offsets of both decls. If either is a register, we
2246 know both are and are the same, so use that as the base. The only way
2247 we can avoid overlap is if we can deduce that they are nonoverlapping
2248 pieces of that decl, which is very rare. */
2249 basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx;
2250 if (GET_CODE (basex) == PLUS && CONST_INT_P (XEXP (basex, 1)))
2251 offsetx = INTVAL (XEXP (basex, 1)), basex = XEXP (basex, 0);
2253 basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly;
2254 if (GET_CODE (basey) == PLUS && CONST_INT_P (XEXP (basey, 1)))
2255 offsety = INTVAL (XEXP (basey, 1)), basey = XEXP (basey, 0);
2257 /* If the bases are different, we know they do not overlap if both
2258 are constants or if one is a constant and the other a pointer into the
2259 stack frame. Otherwise a different base means we can't tell if they
2260 overlap or not. */
2261 if (! rtx_equal_p (basex, basey))
2262 return ((CONSTANT_P (basex) && CONSTANT_P (basey))
2263 || (CONSTANT_P (basex) && REG_P (basey)
2264 && REGNO_PTR_FRAME_P (REGNO (basey)))
2265 || (CONSTANT_P (basey) && REG_P (basex)
2266 && REGNO_PTR_FRAME_P (REGNO (basex))));
2268 sizex = (!MEM_P (rtlx) ? (int) GET_MODE_SIZE (GET_MODE (rtlx))
2269 : MEM_SIZE (rtlx) ? INTVAL (MEM_SIZE (rtlx))
2270 : -1);
2271 sizey = (!MEM_P (rtly) ? (int) GET_MODE_SIZE (GET_MODE (rtly))
2272 : MEM_SIZE (rtly) ? INTVAL (MEM_SIZE (rtly)) :
2273 -1);
2275 /* If we have an offset for either memref, it can update the values computed
2276 above. */
2277 if (moffsetx)
2278 offsetx += INTVAL (moffsetx), sizex -= INTVAL (moffsetx);
2279 if (moffsety)
2280 offsety += INTVAL (moffsety), sizey -= INTVAL (moffsety);
2282 /* If a memref has both a size and an offset, we can use the smaller size.
2283 We can't do this if the offset isn't known because we must view this
2284 memref as being anywhere inside the DECL's MEM. */
2285 if (MEM_SIZE (x) && moffsetx)
2286 sizex = INTVAL (MEM_SIZE (x));
2287 if (MEM_SIZE (y) && moffsety)
2288 sizey = INTVAL (MEM_SIZE (y));
2290 /* Put the values of the memref with the lower offset in X's values. */
2291 if (offsetx > offsety)
2293 tem = offsetx, offsetx = offsety, offsety = tem;
2294 tem = sizex, sizex = sizey, sizey = tem;
2297 /* If we don't know the size of the lower-offset value, we can't tell
2298 if they conflict. Otherwise, we do the test. */
2299 return sizex >= 0 && offsety >= offsetx + sizex;
2302 /* True dependence: X is read after store in MEM takes place. */
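/* Schematically (illustrative RTL only):
     (set (mem:SI (reg P)) (reg A))   ;; the store in MEM
     (set (reg B) (mem:SI (reg P)))   ;; the later read X
   The read must see the stored value, so the two references depend on
   each other and the insns must not be reordered.  */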
2305 true_dependence (const_rtx mem, enum machine_mode mem_mode, const_rtx x,
2306 bool (*varies) (const_rtx, bool))
2308 rtx x_addr, mem_addr;
2309 rtx base;
2310 int ret;
2312 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2313 return 1;
2315 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2316 This is used in epilogue deallocation functions, and in cselib. */
2317 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2318 return 1;
2319 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2320 return 1;
2321 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2322 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2323 return 1;
2325 /* Read-only memory is by definition never modified, and therefore can't
2326 conflict with anything. We don't expect to find read-only set on MEM,
2327 but stupid user tricks can produce them, so don't die. */
2328 if (MEM_READONLY_P (x))
2329 return 0;
2331 /* If we have MEMs referring to different address spaces (which can
2332 potentially overlap), we cannot easily tell from the addresses
2333 whether the references overlap. */
2334 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2335 return 1;
2337 if (mem_mode == VOIDmode)
2338 mem_mode = GET_MODE (mem);
2340 x_addr = XEXP (x, 0);
2341 mem_addr = XEXP (mem, 0);
2342 if (!((GET_CODE (x_addr) == VALUE
2343 && GET_CODE (mem_addr) != VALUE
2344 && reg_mentioned_p (x_addr, mem_addr))
2345 || (GET_CODE (x_addr) != VALUE
2346 && GET_CODE (mem_addr) == VALUE
2347 && reg_mentioned_p (mem_addr, x_addr))))
2349 x_addr = get_addr (x_addr);
2350 mem_addr = get_addr (mem_addr);
2353 base = find_base_term (x_addr);
2354 if (base && (GET_CODE (base) == LABEL_REF
2355 || (GET_CODE (base) == SYMBOL_REF
2356 && CONSTANT_POOL_ADDRESS_P (base))))
2357 return 0;
2359 if (! base_alias_check (x_addr, mem_addr, GET_MODE (x), mem_mode))
2360 return 0;
2362 x_addr = canon_rtx (x_addr);
2363 mem_addr = canon_rtx (mem_addr);
2365 if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
2366 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
2367 return ret;
2369 if (DIFFERENT_ALIAS_SETS_P (x, mem))
2370 return 0;
2372 if (nonoverlapping_memrefs_p (mem, x))
2373 return 0;
2375 if (aliases_everything_p (x))
2376 return 1;
2378 /* We cannot use aliases_everything_p to test MEM, since we must look
2379 at MEM_MODE, rather than GET_MODE (MEM). */
2380 if (mem_mode == QImode || GET_CODE (mem_addr) == AND)
2381 return 1;
2383 /* In true_dependence we also allow BLKmode to alias anything. Why
2384 don't we do this in anti_dependence and output_dependence? */
2385 if (mem_mode == BLKmode || GET_MODE (x) == BLKmode)
2386 return 1;
2388 if (fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr, varies))
2389 return 0;
2391 return rtx_refs_may_alias_p (x, mem, true);
2394 /* Canonical true dependence: X is read after store in MEM takes place.
2395 Variant of true_dependence which assumes MEM has already been
2396 canonicalized (hence we no longer do that here).
2397 The mem_addr argument has been added, since true_dependence computed
2398 this value prior to canonicalizing.
2399 If x_addr is non-NULL, it is used in preference to XEXP (x, 0). */
2402 canon_true_dependence (const_rtx mem, enum machine_mode mem_mode, rtx mem_addr,
2403 const_rtx x, rtx x_addr, bool (*varies) (const_rtx, bool))
2405 int ret;
2407 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2408 return 1;
2410 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2411 This is used in epilogue deallocation functions. */
2412 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2413 return 1;
2414 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2415 return 1;
2416 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2417 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2418 return 1;
2420 /* Read-only memory is by definition never modified, and therefore can't
2421 conflict with anything. We don't expect to find read-only set on MEM,
2422 but stupid user tricks can produce them, so don't die. */
2423 if (MEM_READONLY_P (x))
2424 return 0;
2426 /* If we have MEMs referring to different address spaces (which can
2427 potentially overlap), we cannot easily tell from the addresses
2428 whether the references overlap. */
2429 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2430 return 1;
2432 if (! x_addr)
2434 x_addr = XEXP (x, 0);
2435 if (!((GET_CODE (x_addr) == VALUE
2436 && GET_CODE (mem_addr) != VALUE
2437 && reg_mentioned_p (x_addr, mem_addr))
2438 || (GET_CODE (x_addr) != VALUE
2439 && GET_CODE (mem_addr) == VALUE
2440 && reg_mentioned_p (mem_addr, x_addr))))
2441 x_addr = get_addr (x_addr);
2444 if (! base_alias_check (x_addr, mem_addr, GET_MODE (x), mem_mode))
2445 return 0;
2447 x_addr = canon_rtx (x_addr);
2448 if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
2449 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
2450 return ret;
2452 if (DIFFERENT_ALIAS_SETS_P (x, mem))
2453 return 0;
2455 if (nonoverlapping_memrefs_p (x, mem))
2456 return 0;
2458 if (aliases_everything_p (x))
2459 return 1;
2461 /* We cannot use aliases_everything_p to test MEM, since we must look
2462 at MEM_MODE, rather than GET_MODE (MEM). */
2463 if (mem_mode == QImode || GET_CODE (mem_addr) == AND)
2464 return 1;
2466 /* In true_dependence we also allow BLKmode to alias anything. Why
2467 don't we do this in anti_dependence and output_dependence? */
2468 if (mem_mode == BLKmode || GET_MODE (x) == BLKmode)
2469 return 1;
2471 if (fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr, varies))
2472 return 0;
2474 return rtx_refs_may_alias_p (x, mem, true);
2477 /* Returns nonzero if a write to X might alias a previous read from
2478 (or, if WRITEP is nonzero, a write to) MEM. */
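/* Schematically (illustrative RTL only): with WRITEP == 0,
     (set (reg A) (mem:SI (reg P)))   ;; the earlier read from MEM
     (set (mem:SI (reg Q)) (reg B))   ;; the later write X
   is an anti-dependence whenever the two addresses may refer to the
   same location; with WRITEP != 0 the first insn is also a store and
   the pair is an output dependence, where the order determines the
   final contents.  */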
2480 static int
2481 write_dependence_p (const_rtx mem, const_rtx x, int writep)
2483 rtx x_addr, mem_addr;
2484 const_rtx fixed_scalar;
2485 rtx base;
2486 int ret;
2488 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2489 return 1;
2491 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2492 This is used in epilogue deallocation functions. */
2493 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2494 return 1;
2495 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2496 return 1;
2497 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2498 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2499 return 1;
2501 /* A read from read-only memory can't conflict with read-write memory. */
2502 if (!writep && MEM_READONLY_P (mem))
2503 return 0;
2505 /* If we have MEMs referring to different address spaces (which can
2506 potentially overlap), we cannot easily tell from the addresses
2507 whether the references overlap. */
2508 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2509 return 1;
2511 x_addr = XEXP (x, 0);
2512 mem_addr = XEXP (mem, 0);
2513 if (!((GET_CODE (x_addr) == VALUE
2514 && GET_CODE (mem_addr) != VALUE
2515 && reg_mentioned_p (x_addr, mem_addr))
2516 || (GET_CODE (x_addr) != VALUE
2517 && GET_CODE (mem_addr) == VALUE
2518 && reg_mentioned_p (mem_addr, x_addr))))
2520 x_addr = get_addr (x_addr);
2521 mem_addr = get_addr (mem_addr);
2524 if (! writep)
2526 base = find_base_term (mem_addr);
2527 if (base && (GET_CODE (base) == LABEL_REF
2528 || (GET_CODE (base) == SYMBOL_REF
2529 && CONSTANT_POOL_ADDRESS_P (base))))
2530 return 0;
2533 if (! base_alias_check (x_addr, mem_addr, GET_MODE (x),
2534 GET_MODE (mem)))
2535 return 0;
2537 x_addr = canon_rtx (x_addr);
2538 mem_addr = canon_rtx (mem_addr);
2540 if ((ret = memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
2541 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
2542 return ret;
2544 if (nonoverlapping_memrefs_p (x, mem))
2545 return 0;
2547 fixed_scalar
2548 = fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr,
2549 rtx_addr_varies_p);
2551 if ((fixed_scalar == mem && !aliases_everything_p (x))
2552 || (fixed_scalar == x && !aliases_everything_p (mem)))
2553 return 0;
2555 return rtx_refs_may_alias_p (x, mem, false);
2558 /* Anti dependence: X is written after read in MEM takes place. */
2561 anti_dependence (const_rtx mem, const_rtx x)
2563 return write_dependence_p (mem, x, /*writep=*/0);
2566 /* Output dependence: X is written after store in MEM takes place. */
2569 output_dependence (const_rtx mem, const_rtx x)
2571 return write_dependence_p (mem, x, /*writep=*/1);
2575 void
2576 init_alias_target (void)
2578 int i;
2580 memset (static_reg_base_value, 0, sizeof static_reg_base_value);
2582 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2583 /* Check whether this register can hold an incoming pointer
2584 argument. FUNCTION_ARG_REGNO_P tests outgoing register
2585 numbers, so translate if necessary due to register windows. */
2586 if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i))
2587 && HARD_REGNO_MODE_OK (i, Pmode))
2588 static_reg_base_value[i]
2589 = gen_rtx_ADDRESS (VOIDmode, gen_rtx_REG (Pmode, i));
2591 static_reg_base_value[STACK_POINTER_REGNUM]
2592 = gen_rtx_ADDRESS (Pmode, stack_pointer_rtx);
2593 static_reg_base_value[ARG_POINTER_REGNUM]
2594 = gen_rtx_ADDRESS (Pmode, arg_pointer_rtx);
2595 static_reg_base_value[FRAME_POINTER_REGNUM]
2596 = gen_rtx_ADDRESS (Pmode, frame_pointer_rtx);
2597 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2598 static_reg_base_value[HARD_FRAME_POINTER_REGNUM]
2599 = gen_rtx_ADDRESS (Pmode, hard_frame_pointer_rtx);
2600 #endif
2603 /* Set MEMORY_MODIFIED when X modifies DATA (which is assumed
2604 to be a memory reference). */
2605 static bool memory_modified;
2606 static void
2607 memory_modified_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
2609 if (MEM_P (x))
2611 if (anti_dependence (x, (const_rtx)data) || output_dependence (x, (const_rtx)data))
2612 memory_modified = true;
2617 /* Return true when INSN possibly modifies the memory contents of MEM
2618 (i.e. the addressed memory may be written). */
2619 bool
2620 memory_modified_in_insn_p (const_rtx mem, const_rtx insn)
2622 if (!INSN_P (insn))
2623 return false;
2624 memory_modified = false;
2625 note_stores (PATTERN (insn), memory_modified_1, CONST_CAST_RTX(mem));
2626 return memory_modified;
2629 /* Initialize the aliasing machinery. Initialize the REG_KNOWN_VALUE
2630 array. */
2632 void
2633 init_alias_analysis (void)
2635 unsigned int maxreg = max_reg_num ();
2636 int changed, pass;
2637 int i;
2638 unsigned int ui;
2639 rtx insn;
2641 timevar_push (TV_ALIAS_ANALYSIS);
2643 reg_known_value_size = maxreg - FIRST_PSEUDO_REGISTER;
2644 reg_known_value = ggc_alloc_cleared_vec_rtx (reg_known_value_size);
2645 reg_known_equiv_p = XCNEWVEC (bool, reg_known_value_size);
2647 /* If we have memory allocated from the previous run, use it. */
2648 if (old_reg_base_value)
2649 reg_base_value = old_reg_base_value;
2651 if (reg_base_value)
2652 VEC_truncate (rtx, reg_base_value, 0);
2654 VEC_safe_grow_cleared (rtx, gc, reg_base_value, maxreg);
2656 new_reg_base_value = XNEWVEC (rtx, maxreg);
2657 reg_seen = XNEWVEC (char, maxreg);
2659 /* The basic idea is that each pass through this loop will use the
2660 "constant" information from the previous pass to propagate alias
2661 information through another level of assignments.
2663 This could get expensive if the assignment chains are long. Maybe
2664 we should throttle the number of iterations, possibly based on
2665 the optimization level or flag_expensive_optimizations.
2667 We could propagate more information in the first pass by making use
2668 of DF_REG_DEF_COUNT to determine immediately that the alias information
2669 for a pseudo is "constant".
2671 A program with an uninitialized variable can cause an infinite loop
2672 here. Instead of doing a full dataflow analysis to detect such problems,
2673 we just cap the number of iterations for the loop.
2675 The state of the arrays for the set chain in question does not matter
2676 since the program has undefined behavior. */
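/* Roughly, for a chain such as
     p1 = &x;  p2 = p1;  p3 = p2;
   the first pass records a base value only for p1, the next pass
   propagates it to p2, and the one after that to p3, which is why the
   loop below repeats until nothing changes or MAX_ALIAS_LOOP_PASSES is
   hit.  (Hypothetical pseudo registers, for illustration.)  */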
2678 pass = 0;
2681 /* Assume nothing will change this iteration of the loop. */
2682 changed = 0;
2684 /* We want to assign the same IDs each iteration of this loop, so
2685 start counting from zero on each iteration.
2686 unique_id = 0;
2688 /* We're at the start of the function each iteration through the
2689 loop, so we're copying arguments. */
2690 copying_arguments = true;
2692 /* Wipe the potential alias information clean for this pass. */
2693 memset (new_reg_base_value, 0, maxreg * sizeof (rtx));
2695 /* Wipe the reg_seen array clean. */
2696 memset (reg_seen, 0, maxreg);
2698 /* Mark all hard registers which may contain an address.
2699 The stack, frame and argument pointers may contain an address.
2700 An argument register which can hold a Pmode value may contain
2701 an address even if it is not in BASE_REGS.
2703 The address expression is VOIDmode for an argument and
2704 Pmode for other registers. */
2706 memcpy (new_reg_base_value, static_reg_base_value,
2707 FIRST_PSEUDO_REGISTER * sizeof (rtx));
2709 /* Walk the insns adding values to the new_reg_base_value array. */
2710 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2712 if (INSN_P (insn))
2714 rtx note, set;
2716 #if defined (HAVE_prologue) || defined (HAVE_epilogue)
2717 /* The prologue/epilogue insns are not threaded onto the
2718 insn chain until after reload has completed. Thus,
2719 there is no sense wasting time checking if INSN is in
2720 the prologue/epilogue until after reload has completed. */
2721 if (reload_completed
2722 && prologue_epilogue_contains (insn))
2723 continue;
2724 #endif
2726 /* If this insn has a noalias note, process it. Otherwise,
2727 scan for sets. A simple set will have no side effects
2728 which could change the base value of any other register. */
2730 if (GET_CODE (PATTERN (insn)) == SET
2731 && REG_NOTES (insn) != 0
2732 && find_reg_note (insn, REG_NOALIAS, NULL_RTX))
2733 record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL);
2734 else
2735 note_stores (PATTERN (insn), record_set, NULL);
2737 set = single_set (insn);
2739 if (set != 0
2740 && REG_P (SET_DEST (set))
2741 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
2743 unsigned int regno = REGNO (SET_DEST (set));
2744 rtx src = SET_SRC (set);
2745 rtx t;
2747 note = find_reg_equal_equiv_note (insn);
2748 if (note && REG_NOTE_KIND (note) == REG_EQUAL
2749 && DF_REG_DEF_COUNT (regno) != 1)
2750 note = NULL_RTX;
2752 if (note != NULL_RTX
2753 && GET_CODE (XEXP (note, 0)) != EXPR_LIST
2754 && ! rtx_varies_p (XEXP (note, 0), 1)
2755 && ! reg_overlap_mentioned_p (SET_DEST (set),
2756 XEXP (note, 0)))
2758 set_reg_known_value (regno, XEXP (note, 0));
2759 set_reg_known_equiv_p (regno,
2760 REG_NOTE_KIND (note) == REG_EQUIV);
2762 else if (DF_REG_DEF_COUNT (regno) == 1
2763 && GET_CODE (src) == PLUS
2764 && REG_P (XEXP (src, 0))
2765 && (t = get_reg_known_value (REGNO (XEXP (src, 0))))
2766 && CONST_INT_P (XEXP (src, 1)))
2768 t = plus_constant (t, INTVAL (XEXP (src, 1)));
2769 set_reg_known_value (regno, t);
2770 set_reg_known_equiv_p (regno, 0);
2772 else if (DF_REG_DEF_COUNT (regno) == 1
2773 && ! rtx_varies_p (src, 1))
2775 set_reg_known_value (regno, src);
2776 set_reg_known_equiv_p (regno, 0);
2780 else if (NOTE_P (insn)
2781 && NOTE_KIND (insn) == NOTE_INSN_FUNCTION_BEG)
2782 copying_arguments = false;
2785 /* Now propagate values from new_reg_base_value to reg_base_value. */
2786 gcc_assert (maxreg == (unsigned int) max_reg_num ());
2788 for (ui = 0; ui < maxreg; ui++)
2790 if (new_reg_base_value[ui]
2791 && new_reg_base_value[ui] != VEC_index (rtx, reg_base_value, ui)
2792 && ! rtx_equal_p (new_reg_base_value[ui],
2793 VEC_index (rtx, reg_base_value, ui)))
2795 VEC_replace (rtx, reg_base_value, ui, new_reg_base_value[ui]);
2796 changed = 1;
2800 while (changed && ++pass < MAX_ALIAS_LOOP_PASSES);
2802 /* Fill in the remaining entries. */
2803 for (i = 0; i < (int)reg_known_value_size; i++)
2804 if (reg_known_value[i] == 0)
2805 reg_known_value[i] = regno_reg_rtx[i + FIRST_PSEUDO_REGISTER];
2807 /* Clean up. */
2808 free (new_reg_base_value);
2809 new_reg_base_value = 0;
2810 free (reg_seen);
2811 reg_seen = 0;
2812 timevar_pop (TV_ALIAS_ANALYSIS);
2815 void
2816 end_alias_analysis (void)
2818 old_reg_base_value = reg_base_value;
2819 ggc_free (reg_known_value);
2820 reg_known_value = 0;
2821 reg_known_value_size = 0;
2822 free (reg_known_equiv_p);
2823 reg_known_equiv_p = 0;
2826 #include "gt-alias.h"