/* Loop invariant motion.
   Copyright (C) 2003-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "tree-pass.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "gimple-iterator.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-affine.h"
#include "tree-ssa-propagate.h"
#include "trans-mem.h"
#include "gimple-fold.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-loop-niter.h"

/* TODO:  Support for predicated code motion.  I.e.

   while (1)
     {
       if (cond)
	 {
	   a = inv;
	   something;
	 }
     }

   where COND and INV are invariants, but evaluating INV may trap or be
   invalid for some other reason if !COND.  This may be transformed to

   if (cond)
     a = inv;
   while (1)
     {
       if (cond)
	 something;
     }  */

/* The auxiliary data kept for each statement.  */

struct lim_aux_data
{
  struct loop *max_loop;	/* The outermost loop in which the statement
				   is invariant.  */

  struct loop *tgt_loop;	/* The loop out of which we want to move the
				   invariant.  */

  struct loop *always_executed_in;
				/* The outermost loop for which we are sure
				   the statement is executed if the loop
				   is entered.  */

  unsigned cost;		/* Cost of the computation performed by the
				   statement.  */

  unsigned ref;			/* The simple_mem_ref in this stmt or 0.  */

  vec<gimple *> depends;	/* Vector of statements that must also be
				   hoisted out of the loop when this statement
				   is hoisted; i.e. those that define the
				   operands of the statement and are inside of
				   the MAX_LOOP loop.  */
};

/* Maps statements to their lim_aux_data.  */

static hash_map<gimple *, lim_aux_data *> *lim_aux_data_map;

/* Description of a memory reference location.  */

struct mem_ref_loc
{
  tree *ref;			/* The reference itself.  */
  gimple *stmt;			/* The statement in which it occurs.  */
};

/* Description of a memory reference.  */

struct im_mem_ref
{
  unsigned id : 30;		/* ID assigned to the memory reference
				   (its index in memory_accesses.refs_list).  */
  unsigned ref_canonical : 1;	/* Whether mem.ref was canonicalized.  */
  unsigned ref_decomposed : 1;	/* Whether the ref was hashed from mem.  */
  hashval_t hash;		/* Its hash value.  */

  /* The memory access itself and associated caching of alias-oracle
     query meta-data.  */
  ao_ref mem;

  bitmap stored;		/* The set of loops in which this memory
				   location is stored to.  */
  vec<mem_ref_loc> accesses_in_loop;
				/* The locations of the accesses.  Vector
				   indexed by the loop number.  */

  /* The following sets are computed on demand.  We keep both the set and
     its complement, so that we know whether the information was
     already computed or not.  */
  bitmap_head indep_loop;	/* The set of loops in which the memory
				   reference is independent, meaning:
				   If it is stored in the loop, this store
				     is independent of all other loads and
				     stores.
				   If it is only loaded, then it is independent
				     of all stores in the loop.  */
  bitmap_head dep_loop;		/* The complement of INDEP_LOOP.  */
};

/* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first
   to record (in)dependence against stores in the loop and its subloops, the
   second to record (in)dependence against all references in the loop
   and its subloops.  */
#define LOOP_DEP_BIT(loopnum, storedp) (2 * (loopnum) + (storedp ? 1 : 0))

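/* For example, the loop with number 3 uses bit 6 (LOOP_DEP_BIT (3, false))
   to record (in)dependence against stores in the loop and its subloops,
   and bit 7 (LOOP_DEP_BIT (3, true)) to record (in)dependence against all
   references there, so one bitmap answers both kinds of queries.  */
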
/* Mem_ref hashtable helpers.  */

struct mem_ref_hasher : nofree_ptr_hash <im_mem_ref>
{
  typedef ao_ref *compare_type;
  static inline hashval_t hash (const im_mem_ref *);
  static inline bool equal (const im_mem_ref *, const ao_ref *);
};

/* A hash function for struct im_mem_ref object MEM.  */

inline hashval_t
mem_ref_hasher::hash (const im_mem_ref *mem)
{
  return mem->hash;
}

/* An equality function for struct im_mem_ref object MEM1 with
   memory reference OBJ2.  */

inline bool
mem_ref_hasher::equal (const im_mem_ref *mem1, const ao_ref *obj2)
{
  if (obj2->max_size_known_p ())
    return (mem1->ref_decomposed
	    && operand_equal_p (mem1->mem.base, obj2->base, 0)
	    && known_eq (mem1->mem.offset, obj2->offset)
	    && known_eq (mem1->mem.size, obj2->size)
	    && known_eq (mem1->mem.max_size, obj2->max_size)
	    && mem1->mem.volatile_p == obj2->volatile_p
	    && (mem1->mem.ref_alias_set == obj2->ref_alias_set
		/* We are not canonicalizing alias-sets but for the
		   special case where we didn't canonicalize yet and the
		   incoming ref is an alias-set zero MEM we pick
		   the correct one already.  */
		|| (!mem1->ref_canonical
		    && (TREE_CODE (obj2->ref) == MEM_REF
			|| TREE_CODE (obj2->ref) == TARGET_MEM_REF)
		    && obj2->ref_alias_set == 0)
		/* Likewise if there's a canonical ref with alias-set zero.  */
		|| (mem1->ref_canonical && mem1->mem.ref_alias_set == 0))
	    && types_compatible_p (TREE_TYPE (mem1->mem.ref),
				   TREE_TYPE (obj2->ref)));
  else
    return operand_equal_p (mem1->mem.ref, obj2->ref, 0);
}

/* Description of memory accesses in loops.  */

static struct
{
  /* The hash table of memory references accessed in loops.  */
  hash_table<mem_ref_hasher> *refs;

  /* The list of memory references.  */
  vec<im_mem_ref *> refs_list;

  /* The set of memory references accessed in each loop.  */
  vec<bitmap_head> refs_in_loop;

  /* The set of memory references stored in each loop.  */
  vec<bitmap_head> refs_stored_in_loop;

  /* The set of memory references stored in each loop, including subloops.  */
  vec<bitmap_head> all_refs_stored_in_loop;

  /* Cache for expanding memory addresses.  */
  hash_map<tree, name_expansion *> *ttae_cache;
} memory_accesses;

/* Obstack for the bitmaps in the above data structures.  */
static bitmap_obstack lim_bitmap_obstack;
static obstack mem_ref_obstack;

static bool ref_indep_loop_p (struct loop *, im_mem_ref *);
static bool ref_always_accessed_p (struct loop *, im_mem_ref *, bool);

/* Minimum cost of an expensive expression.  */
#define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))

/* The outermost loop for which execution of the header guarantees that the
   block will be executed.  */
#define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
#define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))

/* ID of the shared unanalyzable mem.  */
#define UNANALYZABLE_MEM_ID 0

/* Whether the reference was analyzable.  */
#define MEM_ANALYZABLE(REF) ((REF)->id != UNANALYZABLE_MEM_ID)

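/* For instance, a block in the body of an inner loop that is reached on
   every iteration whenever the header of an enclosing loop runs has that
   enclosing loop as its ALWAYS_EXECUTED_IN, while a block only reached
   under a condition inside its own loop keeps NULL.  */
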
static struct lim_aux_data *
init_lim_data (gimple *stmt)
{
  lim_aux_data *p = XCNEW (struct lim_aux_data);
  lim_aux_data_map->put (stmt, p);

  return p;
}

static struct lim_aux_data *
get_lim_data (gimple *stmt)
{
  lim_aux_data **p = lim_aux_data_map->get (stmt);
  if (!p)
    return NULL;

  return *p;
}

/* Releases the memory occupied by DATA.  */

static void
free_lim_aux_data (struct lim_aux_data *data)
{
  data->depends.release ();
  free (data);
}

static void
clear_lim_data (gimple *stmt)
{
  lim_aux_data **p = lim_aux_data_map->get (stmt);
  if (!p)
    return;

  free_lim_aux_data (*p);
  *p = NULL;
}

/* The possibilities of statement movement.  */
enum move_pos
  {
    MOVE_IMPOSSIBLE,		/* No movement -- side effect expression.  */
    MOVE_PRESERVE_EXECUTION,	/* Must not cause the non-executed statement
				   to become executed -- memory accesses, ... */
    MOVE_POSSIBLE		/* Unlimited movement.  */
  };

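/* For example, a statement that may trap (see gimple_could_trap_p below)
   is MOVE_PRESERVE_EXECUTION, since hoisting it could make it execute on a
   path where it originally did not; a statement with volatile operands is
   MOVE_IMPOSSIBLE; plain arithmetic on SSA names is MOVE_POSSIBLE.  */
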
/* If it is possible to hoist the statement STMT unconditionally,
   returns MOVE_POSSIBLE.
   If it is possible to hoist the statement STMT, but we must avoid making
   it executed if it would not be executed in the original program (e.g.
   because it may trap), return MOVE_PRESERVE_EXECUTION.
   Otherwise return MOVE_IMPOSSIBLE.  */

enum move_pos
movement_possibility (gimple *stmt)
{
  tree lhs;
  enum move_pos ret = MOVE_POSSIBLE;

  if (flag_unswitch_loops
      && gimple_code (stmt) == GIMPLE_COND)
    {
      /* If we perform unswitching, force the operands of the invariant
	 condition to be moved out of the loop.  */
      return MOVE_POSSIBLE;
    }

  if (gimple_code (stmt) == GIMPLE_PHI
      && gimple_phi_num_args (stmt) <= 2
      && !virtual_operand_p (gimple_phi_result (stmt))
      && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
    return MOVE_POSSIBLE;

  if (gimple_get_lhs (stmt) == NULL_TREE)
    return MOVE_IMPOSSIBLE;

  if (gimple_vdef (stmt))
    return MOVE_IMPOSSIBLE;

  if (stmt_ends_bb_p (stmt)
      || gimple_has_volatile_ops (stmt)
      || gimple_has_side_effects (stmt)
      || stmt_could_throw_p (cfun, stmt))
    return MOVE_IMPOSSIBLE;

  if (is_gimple_call (stmt))
    {
      /* While a pure or const call is guaranteed to have no side effects,
	 we cannot move it arbitrarily.  Consider code like

	 char *s = something ();

	 while (1)
	   {
	     if (s)
	       t = strlen (s);
	     else
	       t = 0;
	   }

	 Here the strlen call cannot be moved out of the loop, even though
	 s is invariant.  In addition to possibly creating a call with
	 invalid arguments, moving out a function call that is not executed
	 may cause performance regressions in case the call is costly and
	 not executed at all.  */
      ret = MOVE_PRESERVE_EXECUTION;
      lhs = gimple_call_lhs (stmt);
    }
  else if (is_gimple_assign (stmt))
    lhs = gimple_assign_lhs (stmt);
  else
    return MOVE_IMPOSSIBLE;

  if (TREE_CODE (lhs) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
    return MOVE_IMPOSSIBLE;

  if (TREE_CODE (lhs) != SSA_NAME
      || gimple_could_trap_p (stmt))
    return MOVE_PRESERVE_EXECUTION;

  /* Non local loads in a transaction cannot be hoisted out.  Well,
     unless the load happens on every path out of the loop, but we
     don't take this into account yet.  */
  if (flag_tm
      && gimple_in_transaction (stmt)
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      if (DECL_P (rhs) && is_global_var (rhs))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "Cannot hoist conditional load of ");
	      print_generic_expr (dump_file, rhs, TDF_SLIM);
	      fprintf (dump_file, " because it is in a transaction.\n");
	    }
	  return MOVE_IMPOSSIBLE;
	}
    }

  return ret;
}

/* Suppose that operand DEF is used inside the LOOP.  Returns the outermost
   loop to which we could move the expression using DEF if it did not have
   other operands, i.e. the outermost loop enclosing LOOP in which the value
   of DEF is invariant.  */

static struct loop *
outermost_invariant_loop (tree def, struct loop *loop)
{
  gimple *def_stmt;
  basic_block def_bb;
  struct loop *max_loop;
  struct lim_aux_data *lim_data;

  if (!def)
    return superloop_at_depth (loop, 1);

  if (TREE_CODE (def) != SSA_NAME)
    {
      gcc_assert (is_gimple_min_invariant (def));
      return superloop_at_depth (loop, 1);
    }

  def_stmt = SSA_NAME_DEF_STMT (def);
  def_bb = gimple_bb (def_stmt);
  if (!def_bb)
    return superloop_at_depth (loop, 1);

  max_loop = find_common_loop (loop, def_bb->loop_father);

  lim_data = get_lim_data (def_stmt);
  if (lim_data != NULL && lim_data->max_loop != NULL)
    max_loop = find_common_loop (max_loop,
				 loop_outer (lim_data->max_loop));
  if (max_loop == loop)
    return NULL;
  max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);

  return max_loop;
}

/* DATA is a structure containing information associated with a statement
   inside LOOP.  DEF is one of the operands of this statement.

   Find the outermost loop enclosing LOOP in which the value of DEF is
   invariant and record this in the DATA->max_loop field.  If DEF itself is
   defined inside this loop as well (i.e. we need to hoist it out of the
   loop if we want to hoist the statement represented by DATA), record the
   statement in which DEF is defined to the DATA->depends list.
   Additionally if ADD_COST is true, add the cost of the computation of DEF
   to the DATA->cost.

   If DEF is not invariant in LOOP, return false.  Otherwise return true.  */

static bool
add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
		bool add_cost)
{
  gimple *def_stmt = SSA_NAME_DEF_STMT (def);
  basic_block def_bb = gimple_bb (def_stmt);
  struct loop *max_loop;
  struct lim_aux_data *def_data;

  if (!def_bb)
    return true;

  max_loop = outermost_invariant_loop (def, loop);
  if (!max_loop)
    return false;

  if (flow_loop_nested_p (data->max_loop, max_loop))
    data->max_loop = max_loop;

  def_data = get_lim_data (def_stmt);
  if (!def_data)
    return true;

  /* Only add the cost if the statement defining DEF is inside LOOP,
     i.e. if it is likely that by moving the invariants dependent
     on it, we will be able to avoid creating a new register for
     it (since it will be only used in these dependent invariants).  */
  if (add_cost
      && def_bb->loop_father == loop)
    data->cost += def_data->cost;

  data->depends.safe_push (def_stmt);

  return true;
}

/* Returns an estimate for the cost of statement STMT.  The values here
   are just ad-hoc constants, similar to costs for inlining.  */

static unsigned
stmt_cost (gimple *stmt)
{
  /* Always try to create possibilities for unswitching.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_PHI)
    return LIM_EXPENSIVE;

  /* We should be hoisting calls if possible.  */
  if (is_gimple_call (stmt))
    {
      tree fndecl;

      /* Unless the call is a builtin_constant_p; this always folds to a
	 constant, so moving it is useless.  */
      fndecl = gimple_call_fndecl (stmt);
      if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_CONSTANT_P))
	return 0;

      return LIM_EXPENSIVE;
    }

  /* Hoisting memory references out should almost surely be a win.  */
  if (gimple_references_memory_p (stmt))
    return LIM_EXPENSIVE;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return 1;

  switch (gimple_assign_rhs_code (stmt))
    {
    case MULT_EXPR:
    case WIDEN_MULT_EXPR:
    case WIDEN_MULT_PLUS_EXPR:
    case WIDEN_MULT_MINUS_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case RDIV_EXPR:
      /* Division and multiplication are usually expensive.  */
      return LIM_EXPENSIVE;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case WIDEN_LSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      /* Shifts and rotates are usually expensive.  */
      return LIM_EXPENSIVE;

    case CONSTRUCTOR:
      /* Make vector construction cost proportional to the number
	 of elements.  */
      return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));

    case SSA_NAME:
    case PAREN_EXPR:
      /* Whether or not something is wrapped inside a PAREN_EXPR
	 should not change move cost.  Nor should an intermediate
	 unpropagated SSA name copy.  */
      return 0;

    default:
      return 1;
    }
}

/* Finds the outermost loop between OUTER and LOOP in which the memory
   reference REF is independent.  If REF is not independent in LOOP, NULL
   is returned instead.  */

static struct loop *
outermost_indep_loop (struct loop *outer, struct loop *loop, im_mem_ref *ref)
{
  struct loop *aloop;

  if (ref->stored && bitmap_bit_p (ref->stored, loop->num))
    return NULL;

  for (aloop = outer;
       aloop != loop;
       aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
    if ((!ref->stored || !bitmap_bit_p (ref->stored, aloop->num))
	&& ref_indep_loop_p (aloop, ref))
      return aloop;

  if (ref_indep_loop_p (loop, ref))
    return loop;
  else
    return NULL;
}

/* If there is a simple load or store to a memory reference in STMT, returns
   the location of the memory reference, and sets IS_STORE according to
   whether it is a store or load.  Otherwise, returns NULL.  */

static tree *
simple_mem_ref_in_stmt (gimple *stmt, bool *is_store)
{
  tree *lhs, *rhs;

  /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns.  */
  if (!gimple_assign_single_p (stmt))
    return NULL;

  lhs = gimple_assign_lhs_ptr (stmt);
  rhs = gimple_assign_rhs1_ptr (stmt);

  if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
    {
      *is_store = false;
      return rhs;
    }
  else if (gimple_vdef (stmt)
	   && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
    {
      *is_store = true;
      return lhs;
    }
  else
    return NULL;
}

/* From a controlling predicate in DOM determine the arguments from
   the PHI node PHI that are chosen if the predicate evaluates to
   true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if
   they are non-NULL.  Returns true if the arguments can be determined,
   else return false.  */

static bool
extract_true_false_args_from_phi (basic_block dom, gphi *phi,
				  tree *true_arg_p, tree *false_arg_p)
{
  edge te, fe;
  if (! extract_true_false_controlled_edges (dom, gimple_bb (phi),
					     &te, &fe))
    return false;

  if (true_arg_p)
    *true_arg_p = PHI_ARG_DEF (phi, te->dest_idx);
  if (false_arg_p)
    *false_arg_p = PHI_ARG_DEF (phi, fe->dest_idx);

  return true;
}

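/* As a sketch, given the diamond

     if (c)		<- last stmt of DOM
       goto bb_t;
     else
       goto bb_f;
     ...
     x_1 = PHI <a (bb_t), b (bb_f)>

   the function stores a to *TRUE_ARG_P and b to *FALSE_ARG_P.  */
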
/* Determine the outermost loop to which it is possible to hoist a statement
   STMT and store it to LIM_DATA (STMT)->max_loop.  To do this we determine
   the outermost loop in which the value computed by STMT is invariant.
   If MUST_PRESERVE_EXEC is true, additionally choose such a loop that
   we preserve the fact whether STMT is executed.  It also fills other
   related information to LIM_DATA (STMT).

   The function returns false if STMT cannot be hoisted outside of the loop
   it is defined in, and true otherwise.  */

static bool
determine_max_movement (gimple *stmt, bool must_preserve_exec)
{
  basic_block bb = gimple_bb (stmt);
  struct loop *loop = bb->loop_father;
  struct loop *level;
  struct lim_aux_data *lim_data = get_lim_data (stmt);
  tree val;
  ssa_op_iter iter;

  if (must_preserve_exec)
    level = ALWAYS_EXECUTED_IN (bb);
  else
    level = superloop_at_depth (loop, 1);
  lim_data->max_loop = level;

  if (gphi *phi = dyn_cast <gphi *> (stmt))
    {
      use_operand_p use_p;
      unsigned min_cost = UINT_MAX;
      unsigned total_cost = 0;
      struct lim_aux_data *def_data;

      /* We will end up promoting dependencies to be unconditionally
	 evaluated.  For this reason the PHI cost (and thus the
	 cost we remove from the loop by doing the invariant motion)
	 is that of the cheapest PHI argument dependency chain.  */
      FOR_EACH_PHI_ARG (use_p, phi, iter, SSA_OP_USE)
	{
	  val = USE_FROM_PTR (use_p);

	  if (TREE_CODE (val) != SSA_NAME)
	    {
	      /* Assign const 1 to constants.  */
	      min_cost = MIN (min_cost, 1);
	      total_cost += 1;
	      continue;
	    }
	  if (!add_dependency (val, lim_data, loop, false))
	    return false;

	  gimple *def_stmt = SSA_NAME_DEF_STMT (val);
	  if (gimple_bb (def_stmt)
	      && gimple_bb (def_stmt)->loop_father == loop)
	    {
	      def_data = get_lim_data (def_stmt);
	      if (def_data)
		{
		  min_cost = MIN (min_cost, def_data->cost);
		  total_cost += def_data->cost;
		}
	    }
	}

      min_cost = MIN (min_cost, total_cost);
      lim_data->cost += min_cost;

      if (gimple_phi_num_args (phi) > 1)
	{
	  basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
	  gimple *cond;
	  if (gsi_end_p (gsi_last_bb (dom)))
	    return false;
	  cond = gsi_stmt (gsi_last_bb (dom));
	  if (gimple_code (cond) != GIMPLE_COND)
	    return false;
	  /* Verify that this is an extended form of a diamond and
	     the PHI arguments are completely controlled by the
	     predicate in DOM.  */
	  if (!extract_true_false_args_from_phi (dom, phi, NULL, NULL))
	    return false;

	  /* Fold in dependencies and cost of the condition.  */
	  FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
	    {
	      if (!add_dependency (val, lim_data, loop, false))
		return false;
	      def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
	      if (def_data)
		lim_data->cost += def_data->cost;
	    }

	  /* We want to avoid unconditionally executing very expensive
	     operations.  As costs for our dependencies cannot be
	     negative just claim we are not invariant for this case.
	     We also are not sure whether the control-flow inside the
	     loop will vanish.  */
	  if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
	      && !(min_cost != 0
		   && total_cost / min_cost <= 2))
	    return false;

	  /* Assume that the control-flow in the loop will vanish.
	     ??? We should verify this and not artificially increase
	     the cost if that is not the case.  */
	  lim_data->cost += stmt_cost (stmt);
	}

      return true;
    }
  else
    FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
      if (!add_dependency (val, lim_data, loop, true))
	return false;

  if (gimple_vuse (stmt))
    {
      im_mem_ref *ref
	= lim_data ? memory_accesses.refs_list[lim_data->ref] : NULL;

      if (ref
	  && MEM_ANALYZABLE (ref))
	{
	  lim_data->max_loop = outermost_indep_loop (lim_data->max_loop,
						     loop, ref);
	  if (!lim_data->max_loop)
	    return false;
	}
      else if (! add_dependency (gimple_vuse (stmt), lim_data, loop, false))
	return false;
    }

  lim_data->cost += stmt_cost (stmt);

  return true;
}

/* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
   and that one of the operands of this statement is computed by STMT.
   Ensure that STMT (together with all the statements that define its
   operands) is hoisted at least out of the loop LEVEL.  */

static void
set_level (gimple *stmt, struct loop *orig_loop, struct loop *level)
{
  struct loop *stmt_loop = gimple_bb (stmt)->loop_father;
  struct lim_aux_data *lim_data;
  gimple *dep_stmt;
  unsigned i;

  stmt_loop = find_common_loop (orig_loop, stmt_loop);
  lim_data = get_lim_data (stmt);
  if (lim_data != NULL && lim_data->tgt_loop != NULL)
    stmt_loop = find_common_loop (stmt_loop,
				  loop_outer (lim_data->tgt_loop));
  if (flow_loop_nested_p (stmt_loop, level))
    return;

  gcc_assert (level == lim_data->max_loop
	      || flow_loop_nested_p (lim_data->max_loop, level));

  lim_data->tgt_loop = level;
  FOR_EACH_VEC_ELT (lim_data->depends, i, dep_stmt)
    set_level (dep_stmt, orig_loop, level);
}

/* Determines an outermost loop from which we want to hoist the statement
   STMT.  For now we choose the outermost possible loop.  TODO -- use
   profiling information to set it more sanely.  */

static void
set_profitable_level (gimple *stmt)
{
  set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
}

/* Returns true if STMT is a call that has side effects.  */

static bool
nonpure_call_p (gimple *stmt)
{
  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  return gimple_has_side_effects (stmt);
}

/* Rewrite a/b to a*(1/b).  Return the invariant stmt to process.  */

static gimple *
rewrite_reciprocal (gimple_stmt_iterator *bsi)
{
  gassign *stmt, *stmt1, *stmt2;
  tree name, lhs, type;
  tree real_one;
  gimple_stmt_iterator gsi;

  stmt = as_a <gassign *> (gsi_stmt (*bsi));
  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);

  real_one = build_one_cst (type);

  name = make_temp_ssa_name (type, NULL, "reciptmp");
  stmt1 = gimple_build_assign (name, RDIV_EXPR, real_one,
			       gimple_assign_rhs2 (stmt));
  stmt2 = gimple_build_assign (lhs, MULT_EXPR, name,
			       gimple_assign_rhs1 (stmt));

  /* Replace division stmt with reciprocal and multiply stmts.
     The multiply stmt is not invariant, so update iterator
     and avoid rescanning.  */
  gsi = *bsi;
  gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
  gsi_replace (&gsi, stmt2, true);

  /* Continue processing with invariant reciprocal statement.  */
  return stmt1;
}

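/* As an illustration, for a loop with invariant divisor b_2 this turns

     x_3 = a_1 / b_2;

   into

     reciptmp_4 = 1.0e+0 / b_2;	<- invariant, candidate for hoisting
     x_3 = a_1 * reciptmp_4;
*/
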
/* Check if the pattern at *BSI is a bittest of the form
   (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0.  */

static gimple *
rewrite_bittest (gimple_stmt_iterator *bsi)
{
  gassign *stmt;
  gimple *stmt1;
  gassign *stmt2;
  gimple *use_stmt;
  gcond *cond_stmt;
  tree lhs, name, t, a, b;
  use_operand_p use;

  stmt = as_a <gassign *> (gsi_stmt (*bsi));
  lhs = gimple_assign_lhs (stmt);

  /* Verify that the single use of lhs is a comparison against zero.  */
  if (TREE_CODE (lhs) != SSA_NAME
      || !single_imm_use (lhs, &use, &use_stmt))
    return stmt;
  cond_stmt = dyn_cast <gcond *> (use_stmt);
  if (!cond_stmt)
    return stmt;
  if (gimple_cond_lhs (cond_stmt) != lhs
      || (gimple_cond_code (cond_stmt) != NE_EXPR
	  && gimple_cond_code (cond_stmt) != EQ_EXPR)
      || !integer_zerop (gimple_cond_rhs (cond_stmt)))
    return stmt;

  /* Get at the operands of the shift.  The rhs is TMP1 & 1.  */
  stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
  if (gimple_code (stmt1) != GIMPLE_ASSIGN)
    return stmt;

  /* There is a conversion in between possibly inserted by fold.  */
  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
    {
      t = gimple_assign_rhs1 (stmt1);
      if (TREE_CODE (t) != SSA_NAME
	  || !has_single_use (t))
	return stmt;
      stmt1 = SSA_NAME_DEF_STMT (t);
      if (gimple_code (stmt1) != GIMPLE_ASSIGN)
	return stmt;
    }

  /* Verify that B is loop invariant but A is not.  Verify that with
     all the stmt walking we are still in the same loop.  */
  if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
      || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
    return stmt;

  a = gimple_assign_rhs1 (stmt1);
  b = gimple_assign_rhs2 (stmt1);

  if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
      && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
    {
      gimple_stmt_iterator rsi;

      /* 1 << B */
      t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
		       build_int_cst (TREE_TYPE (a), 1), b);
      name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
      stmt1 = gimple_build_assign (name, t);

      /* A & (1 << B) */
      t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
      name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
      stmt2 = gimple_build_assign (name, t);

      /* Replace the SSA_NAME we compare against zero.  Adjust
	 the type of zero accordingly.  */
      SET_USE (use, name);
      gimple_cond_set_rhs (cond_stmt,
			   build_int_cst_type (TREE_TYPE (name),
					       0));

      /* Don't use gsi_replace here, none of the new assignments sets
	 the variable originally set in stmt.  Move bsi to stmt1, and
	 then remove the original stmt, so that we get a chance to
	 retain debug info for it.  */
      rsi = *bsi;
      gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
      gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
      gimple *to_release = gsi_stmt (rsi);
      gsi_remove (&rsi, true);
      release_defs (to_release);

      return stmt1;
    }

  return stmt;
}

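/* As an illustration, with B invariant and A varying inside the loop,

     T_1 = A_5 >> B_3;
     T_2 = T_1 & 1;
     if (T_2 != 0) ...

   is rewritten to

     shifttmp_6 = 1 << B_3;	<- invariant, candidate for hoisting
     shifttmp_7 = A_5 & shifttmp_6;
     if (shifttmp_7 != 0) ...
*/
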
/* For each statement determines the outermost loop in which it is invariant,
   statements on whose motion it depends and the cost of the computation.
   This information is stored to the LIM_DATA structure associated with
   each statement.  */
class invariantness_dom_walker : public dom_walker
{
public:
  invariantness_dom_walker (cdi_direction direction)
    : dom_walker (direction) {}

  virtual edge before_dom_children (basic_block);
};

/* Determine the outermost loops in which statements in basic block BB are
   invariant, and record them to the LIM_DATA associated with the statements.
   Callback for dom_walker.  */

edge
invariantness_dom_walker::before_dom_children (basic_block bb)
{
  enum move_pos pos;
  gimple_stmt_iterator bsi;
  gimple *stmt;
  bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
  struct loop *outermost = ALWAYS_EXECUTED_IN (bb);
  struct lim_aux_data *lim_data;

  if (!loop_outer (bb->loop_father))
    return NULL;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
	     bb->index, bb->loop_father->num, loop_depth (bb->loop_father));

  /* Look at PHI nodes, but only if there are at most two.
     ??? We could relax this further by post-processing the inserted
     code and transforming adjacent cond-exprs with the same predicate
     to control flow again.  */
  bsi = gsi_start_phis (bb);
  if (!gsi_end_p (bsi)
      && ((gsi_next (&bsi), gsi_end_p (bsi))
	  || (gsi_next (&bsi), gsi_end_p (bsi))))
    for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
      {
	stmt = gsi_stmt (bsi);

	pos = movement_possibility (stmt);
	if (pos == MOVE_IMPOSSIBLE)
	  continue;

	lim_data = get_lim_data (stmt);
	if (! lim_data)
	  lim_data = init_lim_data (stmt);
	lim_data->always_executed_in = outermost;

	if (!determine_max_movement (stmt, false))
	  {
	    lim_data->max_loop = NULL;
	    continue;
	  }

	if (dump_file && (dump_flags & TDF_DETAILS))
	  {
	    print_gimple_stmt (dump_file, stmt, 2);
	    fprintf (dump_file, "  invariant up to level %d, cost %d.\n\n",
		     loop_depth (lim_data->max_loop),
		     lim_data->cost);
	  }

	if (lim_data->cost >= LIM_EXPENSIVE)
	  set_profitable_level (stmt);
      }

  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
    {
      stmt = gsi_stmt (bsi);

      pos = movement_possibility (stmt);
      if (pos == MOVE_IMPOSSIBLE)
	{
	  if (nonpure_call_p (stmt))
	    {
	      maybe_never = true;
	      outermost = NULL;
	    }
	  /* Make sure to note always_executed_in for stores to make
	     store-motion work.  */
	  else if (stmt_makes_single_store (stmt))
	    {
	      struct lim_aux_data *lim_data = get_lim_data (stmt);
	      if (! lim_data)
		lim_data = init_lim_data (stmt);
	      lim_data->always_executed_in = outermost;
	    }
	  continue;
	}

      if (is_gimple_assign (stmt)
	  && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
	      == GIMPLE_BINARY_RHS))
	{
	  tree op0 = gimple_assign_rhs1 (stmt);
	  tree op1 = gimple_assign_rhs2 (stmt);
	  struct loop *ol1 = outermost_invariant_loop (op1,
					loop_containing_stmt (stmt));

	  /* If divisor is invariant, convert a/b to a*(1/b), allowing
	     reciprocal to be hoisted out of loop, saving expensive
	     divide.  */
	  if (pos == MOVE_POSSIBLE
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR
	      && flag_unsafe_math_optimizations
	      && !flag_trapping_math
	      && ol1 != NULL
	      && outermost_invariant_loop (op0, ol1) == NULL)
	    stmt = rewrite_reciprocal (&bsi);

	  /* If the shift count is invariant, convert (A >> B) & 1 to
	     A & (1 << B) allowing the bit mask to be hoisted out of the loop
	     saving an expensive shift.  */
	  if (pos == MOVE_POSSIBLE
	      && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
	      && integer_onep (op1)
	      && TREE_CODE (op0) == SSA_NAME
	      && has_single_use (op0))
	    stmt = rewrite_bittest (&bsi);
	}

      lim_data = get_lim_data (stmt);
      if (! lim_data)
	lim_data = init_lim_data (stmt);
      lim_data->always_executed_in = outermost;

      if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
	continue;

      if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
	{
	  lim_data->max_loop = NULL;
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  print_gimple_stmt (dump_file, stmt, 2);
	  fprintf (dump_file, "  invariant up to level %d, cost %d.\n\n",
		   loop_depth (lim_data->max_loop),
		   lim_data->cost);
	}

      if (lim_data->cost >= LIM_EXPENSIVE)
	set_profitable_level (stmt);
    }
  return NULL;
}

/* Hoist the statements in basic block BB out of the loops prescribed by
   data stored in LIM_DATA structures associated with each statement.
   Callback for walk_dominator_tree.  */

unsigned int
move_computations_worker (basic_block bb)
{
  struct loop *level;
  unsigned cost = 0;
  struct lim_aux_data *lim_data;
  unsigned int todo = 0;

  if (!loop_outer (bb->loop_father))
    return todo;

  for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
    {
      gassign *new_stmt;
      gphi *stmt = bsi.phi ();

      lim_data = get_lim_data (stmt);
      if (lim_data == NULL)
	{
	  gsi_next (&bsi);
	  continue;
	}

      cost = lim_data->cost;
      level = lim_data->tgt_loop;
      clear_lim_data (stmt);

      if (!level)
	{
	  gsi_next (&bsi);
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Moving PHI node\n");
	  print_gimple_stmt (dump_file, stmt, 0);
	  fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
		   cost, level->num);
	}

      if (gimple_phi_num_args (stmt) == 1)
	{
	  tree arg = PHI_ARG_DEF (stmt, 0);
	  new_stmt = gimple_build_assign (gimple_phi_result (stmt),
					  TREE_CODE (arg), arg);
	}
      else
	{
	  basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
	  gimple *cond = gsi_stmt (gsi_last_bb (dom));
	  tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
	  /* Get the PHI arguments corresponding to the true and false
	     edges of COND.  */
	  extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
	  gcc_assert (arg0 && arg1);
	  t = build2 (gimple_cond_code (cond), boolean_type_node,
		      gimple_cond_lhs (cond), gimple_cond_rhs (cond));
	  new_stmt = gimple_build_assign (gimple_phi_result (stmt),
					  COND_EXPR, t, arg0, arg1);
	  todo |= TODO_cleanup_cfg;
	}
      if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (new_stmt)))
	  && (!ALWAYS_EXECUTED_IN (bb)
	      || (ALWAYS_EXECUTED_IN (bb) != level
		  && !flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
	{
	  tree lhs = gimple_assign_lhs (new_stmt);
	  SSA_NAME_RANGE_INFO (lhs) = NULL;
	}
      gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
      remove_phi_node (&bsi, false);
    }

  for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
    {
      edge e;

      gimple *stmt = gsi_stmt (bsi);

      lim_data = get_lim_data (stmt);
      if (lim_data == NULL)
	{
	  gsi_next (&bsi);
	  continue;
	}

      cost = lim_data->cost;
      level = lim_data->tgt_loop;
      clear_lim_data (stmt);

      if (!level)
	{
	  gsi_next (&bsi);
	  continue;
	}

      /* We do not really want to move conditionals out of the loop; we just
	 placed it here to force its operands to be moved if necessary.  */
      if (gimple_code (stmt) == GIMPLE_COND)
	continue;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Moving statement\n");
	  print_gimple_stmt (dump_file, stmt, 0);
	  fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
		   cost, level->num);
	}

      e = loop_preheader_edge (level);
      gcc_assert (!gimple_vdef (stmt));
      if (gimple_vuse (stmt))
	{
	  /* The new VUSE is the one from the virtual PHI in the loop
	     header or the one already present.  */
	  gphi_iterator gsi2;
	  for (gsi2 = gsi_start_phis (e->dest);
	       !gsi_end_p (gsi2); gsi_next (&gsi2))
	    {
	      gphi *phi = gsi2.phi ();
	      if (virtual_operand_p (gimple_phi_result (phi)))
		{
		  gimple_set_vuse (stmt, PHI_ARG_DEF_FROM_EDGE (phi, e));
		  break;
		}
	    }
	}
      gsi_remove (&bsi, false);
      if (gimple_has_lhs (stmt)
	  && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME
	  && INTEGRAL_TYPE_P (TREE_TYPE (gimple_get_lhs (stmt)))
	  && (!ALWAYS_EXECUTED_IN (bb)
	      || !(ALWAYS_EXECUTED_IN (bb) == level
		   || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
	{
	  tree lhs = gimple_get_lhs (stmt);
	  SSA_NAME_RANGE_INFO (lhs) = NULL;
	}
      /* In case this is a stmt that is not unconditionally executed
	 when the target loop header is executed and the stmt may
	 invoke undefined integer or pointer overflow rewrite it to
	 unsigned arithmetic.  */
      if (is_gimple_assign (stmt)
	  && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))
	  && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (gimple_assign_lhs (stmt)))
	  && arith_code_with_undefined_signed_overflow
	       (gimple_assign_rhs_code (stmt))
	  && (!ALWAYS_EXECUTED_IN (bb)
	      || !(ALWAYS_EXECUTED_IN (bb) == level
		   || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
	gsi_insert_seq_on_edge (e, rewrite_to_defined_overflow (stmt));
      else
	gsi_insert_on_edge (e, stmt);
    }

  return todo;
}

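/* As an illustration of the PHI case above: a two-argument PHI whose
   arguments are controlled by an invariant condition, e.g.

     x_1 = PHI <a_2, b_3>	(controlled by "if (c_4)")

   is replaced by a COND_EXPR assignment inserted on the preheader edge
   of the target loop:

     x_1 = c_4 ? a_2 : b_3;
*/
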
/* Hoist the statements out of the loops prescribed by data stored in
   LIM_DATA structures associated with each statement.  */

static unsigned int
move_computations (void)
{
  int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
  int n = pre_and_rev_post_order_compute_fn (cfun, NULL, rpo, false);
  unsigned int todo = 0;

  for (int i = 0; i < n; ++i)
    todo |= move_computations_worker (BASIC_BLOCK_FOR_FN (cfun, rpo[i]));

  free (rpo);

  gsi_commit_edge_inserts ();
  if (need_ssa_update_p (cfun))
    rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);

  return todo;
}

/* Checks whether the statement defining variable *INDEX can be hoisted
   out of the loop passed in DATA.  Callback for for_each_index.  */

static bool
may_move_till (tree ref, tree *index, void *data)
{
  struct loop *loop = (struct loop *) data, *max_loop;

  /* If REF is an array reference, check also that the step and the lower
     bound is invariant in LOOP.  */
  if (TREE_CODE (ref) == ARRAY_REF)
    {
      tree step = TREE_OPERAND (ref, 3);
      tree lbound = TREE_OPERAND (ref, 2);

      max_loop = outermost_invariant_loop (step, loop);
      if (!max_loop)
	return false;

      max_loop = outermost_invariant_loop (lbound, loop);
      if (!max_loop)
	return false;
    }

  max_loop = outermost_invariant_loop (*index, loop);
  if (!max_loop)
    return false;

  return true;
}

/* If OP is SSA NAME, force the statement that defines it to be
   moved out of the LOOP.  ORIG_LOOP is the loop in which EXPR is used.  */

static void
force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
{
  gimple *stmt;

  if (!op
      || is_gimple_min_invariant (op))
    return;

  gcc_assert (TREE_CODE (op) == SSA_NAME);

  stmt = SSA_NAME_DEF_STMT (op);
  if (gimple_nop_p (stmt))
    return;

  set_level (stmt, orig_loop, loop);
}

/* Forces statement defining invariants in REF (and *INDEX) to be moved out of
   the LOOP.  The reference REF is used in the loop ORIG_LOOP.  Callback for
   for_each_index.  */

struct fmt_data
{
  struct loop *loop;
  struct loop *orig_loop;
};

static bool
force_move_till (tree ref, tree *index, void *data)
{
  struct fmt_data *fmt_data = (struct fmt_data *) data;

  if (TREE_CODE (ref) == ARRAY_REF)
    {
      tree step = TREE_OPERAND (ref, 3);
      tree lbound = TREE_OPERAND (ref, 2);

      force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
      force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
    }

  force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);

  return true;
}

/* A function to free the mem_ref object MEM.  */

static void
memref_free (struct im_mem_ref *mem)
{
  mem->accesses_in_loop.release ();
}

/* Allocates and returns a memory reference description for MEM whose hash
   value is HASH and id is ID.  */

static im_mem_ref *
mem_ref_alloc (ao_ref *mem, unsigned hash, unsigned id)
{
  im_mem_ref *ref = XOBNEW (&mem_ref_obstack, struct im_mem_ref);
  if (mem)
    ref->mem = *mem;
  else
    ao_ref_init (&ref->mem, error_mark_node);
  ref->id = id;
  ref->ref_canonical = false;
  ref->ref_decomposed = false;
  ref->hash = hash;
  ref->stored = NULL;
  bitmap_initialize (&ref->indep_loop, &lim_bitmap_obstack);
  bitmap_initialize (&ref->dep_loop, &lim_bitmap_obstack);
  ref->accesses_in_loop.create (1);

  return ref;
}

/* Records memory reference location *LOC to the memory reference
   description REF.  The reference occurs in statement STMT.  */

static void
record_mem_ref_loc (im_mem_ref *ref, gimple *stmt, tree *loc)
{
  mem_ref_loc aref;
  aref.stmt = stmt;
  aref.ref = loc;
  ref->accesses_in_loop.safe_push (aref);
}

/* Set the LOOP bit in REF stored bitmap and allocate that if
   necessary.  Return whether a bit was changed.  */

static bool
set_ref_stored_in_loop (im_mem_ref *ref, struct loop *loop)
{
  if (!ref->stored)
    ref->stored = BITMAP_ALLOC (&lim_bitmap_obstack);
  return bitmap_set_bit (ref->stored, loop->num);
}

1430 mark_ref_stored (im_mem_ref
*ref
, struct loop
*loop
)
1432 while (loop
!= current_loops
->tree_root
1433 && set_ref_stored_in_loop (ref
, loop
))
1434 loop
= loop_outer (loop
);
/* Gathers memory references in statement STMT in LOOP, storing the
   information about them in the memory_accesses structure.  Marks
   the vops accessed through unrecognized statements there as
   unanalyzable.  */

static void
gather_mem_refs_stmt (struct loop *loop, gimple *stmt)
{
  tree *mem = NULL;
  hashval_t hash;
  im_mem_ref **slot;
  im_mem_ref *ref;
  bool is_stored;
  unsigned id;

  if (!gimple_vuse (stmt))
    return;

  mem = simple_mem_ref_in_stmt (stmt, &is_stored);
  if (!mem)
    {
      /* We use the shared mem_ref for all unanalyzable refs.  */
      id = UNANALYZABLE_MEM_ID;
      ref = memory_accesses.refs_list[id];
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	}
      is_stored = gimple_vdef (stmt);
    }
  else
    {
      /* We are looking for equal refs that might differ in structure
	 such as a.b vs. MEM[&a + 4].  So we key off the ao_ref but
	 make sure we can canonicalize the ref in the hashtable if
	 non-operand_equal_p refs are found.  For the lookup we mark
	 the case we want strict equality with aor.max_size == -1.  */
      ao_ref aor;
      ao_ref_init (&aor, *mem);
      ao_ref_base (&aor);
      ao_ref_alias_set (&aor);
      HOST_WIDE_INT offset, size, max_size;
      poly_int64 saved_maxsize = aor.max_size, mem_off;
      tree mem_base;
      bool ref_decomposed;
      if (aor.max_size_known_p ()
	  && aor.offset.is_constant (&offset)
	  && aor.size.is_constant (&size)
	  && aor.max_size.is_constant (&max_size)
	  && size == max_size
	  && (size % BITS_PER_UNIT) == 0
	  /* We're canonicalizing to a MEM where TYPE_SIZE specifies the
	     size.  Make sure this is consistent with the extraction.  */
	  && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (*mem)))
	  && known_eq (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (*mem))),
		       aor.size)
	  && (mem_base = get_addr_base_and_unit_offset (aor.ref, &mem_off)))
	{
	  ref_decomposed = true;
	  hash = iterative_hash_expr (ao_ref_base (&aor), 0);
	  hash = iterative_hash_host_wide_int (offset, hash);
	  hash = iterative_hash_host_wide_int (size, hash);
	}
      else
	{
	  ref_decomposed = false;
	  hash = iterative_hash_expr (aor.ref, 0);
	  aor.max_size = -1;
	}
      slot = memory_accesses.refs->find_slot_with_hash (&aor, hash, INSERT);
      aor.max_size = saved_maxsize;
      if (*slot)
	{
	  if (!(*slot)->ref_canonical
	      && !operand_equal_p (*mem, (*slot)->mem.ref, 0))
	    {
	      /* If we didn't yet canonicalize the hashtable ref (which
		 we'll end up using for code insertion) and hit a second
		 equal ref that is not structurally equivalent create
		 a canonical ref which is a bare MEM_REF.  */
	      if (TREE_CODE (*mem) == MEM_REF
		  || TREE_CODE (*mem) == TARGET_MEM_REF)
		{
		  (*slot)->mem.ref = *mem;
		  (*slot)->mem.base_alias_set = ao_ref_base_alias_set (&aor);
		}
	      else
		{
		  tree ref_alias_type = reference_alias_ptr_type (*mem);
		  unsigned int ref_align = get_object_alignment (*mem);
		  tree ref_type = TREE_TYPE (*mem);
		  tree tmp = build_fold_addr_expr (unshare_expr (mem_base));
		  if (TYPE_ALIGN (ref_type) != ref_align)
		    ref_type = build_aligned_type (ref_type, ref_align);
		  (*slot)->mem.ref
		    = fold_build2 (MEM_REF, ref_type, tmp,
				   build_int_cst (ref_alias_type, mem_off));
		  if ((*slot)->mem.volatile_p)
		    TREE_THIS_VOLATILE ((*slot)->mem.ref) = 1;
		  gcc_checking_assert (TREE_CODE ((*slot)->mem.ref) == MEM_REF
				       && is_gimple_mem_ref_addr
					    (TREE_OPERAND ((*slot)->mem.ref,
							   0)));
		  (*slot)->mem.base_alias_set = (*slot)->mem.ref_alias_set;
		}
	      (*slot)->ref_canonical = true;
	    }
	  ref = *slot;
	  id = ref->id;
	}
      else
	{
	  id = memory_accesses.refs_list.length ();
	  ref = mem_ref_alloc (&aor, hash, id);
	  ref->ref_decomposed = ref_decomposed;
	  memory_accesses.refs_list.safe_push (ref);
	  *slot = ref;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Memory reference %u: ", id);
	      print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM);
	      fprintf (dump_file, "\n");
	    }
	}

      record_mem_ref_loc (ref, stmt, mem);
    }
  bitmap_set_bit (&memory_accesses.refs_in_loop[loop->num], ref->id);
  if (is_stored)
    {
      bitmap_set_bit (&memory_accesses.refs_stored_in_loop[loop->num],
		      ref->id);
      mark_ref_stored (ref, loop);
    }
  init_lim_data (stmt)->ref = ref->id;
}

static unsigned *bb_loop_postorder;

/* qsort sort function to sort blocks after their loop fathers postorder.  */

static int
sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_)
{
  basic_block bb1 = *(basic_block *)const_cast<void *>(bb1_);
  basic_block bb2 = *(basic_block *)const_cast<void *>(bb2_);
  struct loop *loop1 = bb1->loop_father;
  struct loop *loop2 = bb2->loop_father;
  if (loop1->num == loop2->num)
    return bb1->index - bb2->index;
  return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
}

/* qsort sort function to sort ref locs after their loop fathers postorder.  */

static int
sort_locs_in_loop_postorder_cmp (const void *loc1_, const void *loc2_)
{
  mem_ref_loc *loc1 = (mem_ref_loc *)const_cast<void *>(loc1_);
  mem_ref_loc *loc2 = (mem_ref_loc *)const_cast<void *>(loc2_);
  struct loop *loop1 = gimple_bb (loc1->stmt)->loop_father;
  struct loop *loop2 = gimple_bb (loc2->stmt)->loop_father;
  if (loop1->num == loop2->num)
    return 0;
  return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
}

/* Gathers memory references in loops.  */

static void
analyze_memory_references (void)
{
  gimple_stmt_iterator bsi;
  basic_block bb, *bbs;
  struct loop *loop, *outer;
  unsigned i, n;

  /* Collect all basic-blocks in loops and sort them after their
     loops postorder.  */
  i = 0;
  bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
  FOR_EACH_BB_FN (bb, cfun)
    if (bb->loop_father != current_loops->tree_root)
      bbs[i++] = bb;
  n = i;
  qsort (bbs, n, sizeof (basic_block), sort_bbs_in_loop_postorder_cmp);

  /* Visit blocks in loop postorder and assign mem-ref IDs in that order.
     That results in better locality for all the bitmaps.  */
  for (i = 0; i < n; ++i)
    {
      basic_block bb = bbs[i];
      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	gather_mem_refs_stmt (bb->loop_father, gsi_stmt (bsi));
    }

  /* Sort the location list of gathered memory references after their
     loop postorder number.  */
  im_mem_ref *ref;
  FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
    ref->accesses_in_loop.qsort (sort_locs_in_loop_postorder_cmp);

  free (bbs);
//  free (bb_loop_postorder);

  /* Propagate the information about accessed memory references up
     the loop hierarchy.  */
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      /* Finalize the overall touched references (including subloops).  */
      bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
		       &memory_accesses.refs_stored_in_loop[loop->num]);

      /* Propagate the information about accessed memory references up
	 the loop hierarchy.  */
      outer = loop_outer (loop);
      if (outer == current_loops->tree_root)
	continue;

      bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[outer->num],
		       &memory_accesses.all_refs_stored_in_loop[loop->num]);
    }
}

/* Returns true if MEM1 and MEM2 may alias.  TTAE_CACHE is used as a cache in
   tree_to_aff_combination_expand.  */

static bool
mem_refs_may_alias_p (im_mem_ref *mem1, im_mem_ref *mem2,
		      hash_map<tree, name_expansion *> **ttae_cache)
{
  /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
     object and their offsets differ in such a way that the locations cannot
     overlap, then they cannot alias.  */
  poly_widest_int size1, size2;
  aff_tree off1, off2;

  /* Perform basic offset and type-based disambiguation.  */
  if (!refs_may_alias_p_1 (&mem1->mem, &mem2->mem, true))
    return false;

  /* The expansion of addresses may be a bit expensive, thus we only do
     the check at -O2 and higher optimization levels.  */
  if (optimize < 2)
    return true;

  get_inner_reference_aff (mem1->mem.ref, &off1, &size1);
  get_inner_reference_aff (mem2->mem.ref, &off2, &size2);
  aff_combination_expand (&off1, ttae_cache);
  aff_combination_expand (&off2, ttae_cache);
  aff_combination_scale (&off1, -1);
  aff_combination_add (&off2, &off1);

  if (aff_comb_cannot_overlap_p (&off2, size1, size2))
    return false;

  return true;
}

/* Compare function for bsearch searching for reference locations
   in a loop.  */

static int
find_ref_loc_in_loop_cmp (const void *loop_, const void *loc_)
{
  struct loop *loop = (struct loop *)const_cast<void *>(loop_);
  mem_ref_loc *loc = (mem_ref_loc *)const_cast<void *>(loc_);
  struct loop *loc_loop = gimple_bb (loc->stmt)->loop_father;
  if (loop->num == loc_loop->num
      || flow_loop_nested_p (loop, loc_loop))
    return 0;
  return (bb_loop_postorder[loop->num] < bb_loop_postorder[loc_loop->num]
	  ? -1 : 1);
}

/* Iterates over all locations of REF in LOOP and its subloops calling
   fn.operator() with the location as argument.  When that operator
   returns true the iteration is stopped and true is returned.
   Otherwise false is returned.  */

template <typename FN>
static bool
for_all_locs_in_loop (struct loop *loop, im_mem_ref *ref, FN fn)
{
  unsigned i;
  mem_ref_loc *loc;

  /* Search for the cluster of locs in the accesses_in_loop vector
     which is sorted after postorder index of the loop father.  */
  loc = ref->accesses_in_loop.bsearch (loop, find_ref_loc_in_loop_cmp);
  if (!loc)
    return false;

  /* We have found one location inside loop or its sub-loops.  Iterate
     both forward and backward to cover the whole cluster.  */
  i = loc - ref->accesses_in_loop.address ();
  while (i > 0)
    {
      --i;
      mem_ref_loc *l = &ref->accesses_in_loop[i];
      if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
	break;
      if (fn (l))
	return true;
    }
  for (i = loc - ref->accesses_in_loop.address ();
       i < ref->accesses_in_loop.length (); ++i)
    {
      mem_ref_loc *l = &ref->accesses_in_loop[i];
      if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
	break;
      if (fn (l))
	return true;
    }

  return false;
}

1759 struct rewrite_mem_ref_loc
1761 rewrite_mem_ref_loc (tree tmp_var_
) : tmp_var (tmp_var_
) {}
1762 bool operator () (mem_ref_loc
*loc
);
1767 rewrite_mem_ref_loc::operator () (mem_ref_loc
*loc
)
1769 *loc
->ref
= tmp_var
;
1770 update_stmt (loc
->stmt
);
1774 /* Rewrites all references to REF in LOOP by variable TMP_VAR. */
1777 rewrite_mem_refs (struct loop
*loop
, im_mem_ref
*ref
, tree tmp_var
)
1779 for_all_locs_in_loop (loop
, ref
, rewrite_mem_ref_loc (tmp_var
));
/* Stores the first reference location in LOCP.  */

struct first_mem_ref_loc_1
{
  first_mem_ref_loc_1 (mem_ref_loc **locp_) : locp (locp_) {}
  bool operator () (mem_ref_loc *loc);
  mem_ref_loc **locp;
};

bool
first_mem_ref_loc_1::operator () (mem_ref_loc *loc)
{
  *locp = loc;
  return true;
}

/* Returns the first reference location to REF in LOOP.  */

static mem_ref_loc *
first_mem_ref_loc (struct loop *loop, im_mem_ref *ref)
{
  mem_ref_loc *locp = NULL;
  for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp));
  return locp;
}

struct prev_flag_edges
{
  /* Edge to insert new flag comparison code.  */
  edge append_cond_position;

  /* Edge for fall through from previous flag comparison.  */
  edge last_cond_fallthru;
};

/* Helper function for execute_sm.  Emit code to store TMP_VAR into
   MEM along edge EX.

   The store is only done if MEM has changed.  We do this so no
   changes to MEM occur on code paths that did not originally store
   into it.

   The common case for execute_sm will transform:

     for (...) {
       if (foo)
	 stuff;
       else
	 MEM = TMP_VAR;
     }

   into:

     lsm = MEM;
     for (...) {
       if (foo)
	 stuff;
       else
	 lsm = TMP_VAR;
     }
     MEM = lsm;

   This function will generate:

     lsm = MEM;

     lsm_flag = false;
     ...
     for (...) {
       if (foo)
	 stuff;
       else {
	 lsm = TMP_VAR;
	 lsm_flag = true;
       }
     }
     if (lsm_flag)	<--
       MEM = lsm;	<--
*/

static void
execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag,
		       edge preheader, hash_set <basic_block> *flag_bbs)
{
  basic_block new_bb, then_bb, old_dest;
  bool loop_has_only_one_exit;
  edge then_old_edge, orig_ex = ex;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
  bool irr = ex->flags & EDGE_IRREDUCIBLE_LOOP;

  profile_count count_sum = profile_count::zero ();
  int nbbs = 0, ncount = 0;
  profile_probability flag_probability = profile_probability::uninitialized ();

  /* Flag is set in FLAG_BBS.  Determine probability that flag will be true
     at loop exit.

     This code may look fancy, but it cannot update profile very realistically
     because we do not know the probability that flag will be true at given
     loop exit.

     We look for two interesting extremes
       - when exit is dominated by block setting the flag, we know it will
	 always be true.  This is a common case.
       - when all blocks setting the flag have very low frequency we know
	 it will likely be false.
     In all other cases we default to 2/3 for flag being true.  */

  for (hash_set<basic_block>::iterator it = flag_bbs->begin ();
       it != flag_bbs->end (); ++it)
    {
      if ((*it)->count.initialized_p ())
	count_sum += (*it)->count, ncount ++;
      if (dominated_by_p (CDI_DOMINATORS, ex->src, *it))
	flag_probability = profile_probability::always ();
      nbbs++;
    }

  profile_probability cap = profile_probability::always ().apply_scale (2, 3);

  if (flag_probability.initialized_p ())
    ;
  else if (ncount == nbbs
	   && preheader->count () >= count_sum && preheader->count ().nonzero_p ())
    {
      flag_probability = count_sum.probability_in (preheader->count ());
      if (flag_probability > cap)
	flag_probability = cap;
    }

  if (!flag_probability.initialized_p ())
    flag_probability = cap;

  /* ?? Insert store after previous store if applicable.  See note
     below.  */
  if (prev_edges)
    ex = prev_edges->append_cond_position;

  loop_has_only_one_exit = single_pred_p (ex->dest);

  if (loop_has_only_one_exit)
    ex = split_block_after_labels (ex->dest);
  else
    {
      for (gphi_iterator gpi = gsi_start_phis (ex->dest);
	   !gsi_end_p (gpi); gsi_next (&gpi))
	{
	  gphi *phi = gpi.phi ();
	  if (virtual_operand_p (gimple_phi_result (phi)))
	    continue;

	  /* When the destination has a non-virtual PHI node with multiple
	     predecessors make sure we preserve the PHI structure by
	     forcing a forwarder block so that hoisting of that PHI will
	     still work.  */
	  split_edge (ex);
	  break;
	}
    }

  old_dest = ex->dest;
  new_bb = split_edge (ex);
  then_bb = create_empty_bb (new_bb);
  then_bb->count = new_bb->count.apply_probability (flag_probability);
  if (irr)
    then_bb->flags = BB_IRREDUCIBLE_LOOP;
  add_bb_to_loop (then_bb, new_bb->loop_father);

  gsi = gsi_start_bb (new_bb);
  stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
			    NULL_TREE, NULL_TREE);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  gsi = gsi_start_bb (then_bb);
  /* Insert actual store.  */
  stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  edge e1 = single_succ_edge (new_bb);
  edge e2 = make_edge (new_bb, then_bb,
		       EDGE_TRUE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
  e2->probability = flag_probability;

  e1->flags |= EDGE_FALSE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0);
  e1->flags &= ~EDGE_FALLTHRU;

  e1->probability = flag_probability.invert ();

  then_old_edge = make_single_succ_edge (then_bb, old_dest,
			     EDGE_FALLTHRU | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));

  set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);

  if (prev_edges)
    {
      basic_block prevbb = prev_edges->last_cond_fallthru->src;
      redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
      set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
      set_immediate_dominator (CDI_DOMINATORS, old_dest,
			       recompute_dominator (CDI_DOMINATORS, old_dest));
    }

  /* ?? Because stores may alias, they must happen in the exact
     sequence they originally happened.  Save the position right after
     the (_lsm) store we just created so we can continue appending after
     it and maintain the original order.  */
  {
    struct prev_flag_edges *p;

    if (orig_ex->aux)
      orig_ex->aux = NULL;
    alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
    p = (struct prev_flag_edges *) orig_ex->aux;
    p->append_cond_position = then_old_edge;
    p->last_cond_fallthru = find_edge (new_bb, old_dest);
    orig_ex->aux = (void *) p;
  }

  if (!loop_has_only_one_exit)
    for (gphi_iterator gpi = gsi_start_phis (old_dest);
	 !gsi_end_p (gpi); gsi_next (&gpi))
      {
	gphi *phi = gpi.phi ();
	unsigned i;

	for (i = 0; i < gimple_phi_num_args (phi); i++)
	  if (gimple_phi_arg_edge (phi, i)->src == new_bb)
	    {
	      tree arg = gimple_phi_arg_def (phi, i);
	      add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
	    }
      }
}

/* When REF is set on the location, set flag indicating the store.  */

struct sm_set_flag_if_changed
{
  sm_set_flag_if_changed (tree flag_, hash_set <basic_block> *bbs_)
	 : flag (flag_), bbs (bbs_) {}
  bool operator () (mem_ref_loc *loc);
  tree flag;
  hash_set <basic_block> *bbs;
};

bool
sm_set_flag_if_changed::operator () (mem_ref_loc *loc)
{
  /* Only set the flag for writes.  */
  if (is_gimple_assign (loc->stmt)
      && gimple_assign_lhs_ptr (loc->stmt) == loc->ref)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (loc->stmt);
      gimple *stmt = gimple_build_assign (flag, boolean_true_node);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
      bbs->add (gimple_bb (stmt));
    }
  return false;
}

/* Helper function for execute_sm.  On every location where REF is
   set, set an appropriate flag indicating the store.  */

static tree
execute_sm_if_changed_flag_set (struct loop *loop, im_mem_ref *ref,
				hash_set <basic_block> *bbs)
{
  tree flag;
  char *str = get_lsm_tmp_name (ref->mem.ref, ~0, "_flag");
  flag = create_tmp_reg (boolean_type_node, str);
  for_all_locs_in_loop (loop, ref, sm_set_flag_if_changed (flag, bbs));
  return flag;
}

2058 /* Executes store motion of memory reference REF from LOOP.
2059 Exits from the LOOP are stored in EXITS. The initialization of the
2060 temporary variable is put to the preheader of the loop, and assignments
2061 to the reference from the temporary variable are emitted to exits. */
static void
execute_sm (struct loop *loop, vec<edge> exits, im_mem_ref *ref)
{
  tree tmp_var, store_flag = NULL_TREE;
  unsigned i;
  gassign *load;
  struct fmt_data fmt_data;
  edge ex;
  struct lim_aux_data *lim_data;
  bool multi_threaded_model_p = false;
  gimple_stmt_iterator gsi;
  hash_set<basic_block> flag_bbs;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Executing store motion of ");
      print_generic_expr (dump_file, ref->mem.ref);
      fprintf (dump_file, " from loop %d\n", loop->num);
    }

  tmp_var = create_tmp_reg (TREE_TYPE (ref->mem.ref),
			    get_lsm_tmp_name (ref->mem.ref, ~0));

  fmt_data.loop = loop;
  fmt_data.orig_loop = loop;
  for_each_index (&ref->mem.ref, force_move_till, &fmt_data);

  if (bb_in_transaction (loop_preheader_edge (loop)->src)
      || (! PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES)
	  && ! ref_always_accessed_p (loop, ref, true)))
    multi_threaded_model_p = true;

  if (multi_threaded_model_p)
    store_flag = execute_sm_if_changed_flag_set (loop, ref, &flag_bbs);

  rewrite_mem_refs (loop, ref, tmp_var);

  /* Emit the load code on a random exit edge or into the latch if
     the loop does not exit, so that we are sure it will be processed
     by move_computations after all dependencies.  */
  gsi = gsi_for_stmt (first_mem_ref_loc (loop, ref)->stmt);

  /* FIXME/TODO: For the multi-threaded variant, we could avoid this
     load altogether, since the store is predicated by a flag.  We
     could do the load only if it was originally in the loop.  */
  load = gimple_build_assign (tmp_var, unshare_expr (ref->mem.ref));
  lim_data = init_lim_data (load);
  lim_data->max_loop = loop;
  lim_data->tgt_loop = loop;
  gsi_insert_before (&gsi, load, GSI_SAME_STMT);

  if (multi_threaded_model_p)
    {
      load = gimple_build_assign (store_flag, boolean_false_node);
      lim_data = init_lim_data (load);
      lim_data->max_loop = loop;
      lim_data->tgt_loop = loop;
      gsi_insert_before (&gsi, load, GSI_SAME_STMT);
    }

  /* Sink the store to every exit from the loop.  */
  FOR_EACH_VEC_ELT (exits, i, ex)
    if (!multi_threaded_model_p)
      {
	gassign *store;
	store = gimple_build_assign (unshare_expr (ref->mem.ref), tmp_var);
	gsi_insert_on_edge (ex, store);
      }
    else
      execute_sm_if_changed (ex, ref->mem.ref, tmp_var, store_flag,
			     loop_preheader_edge (loop), &flag_bbs);
}
/* Hoists memory references MEM_REFS out of LOOP.  EXITS is the list of exit
   edges of the LOOP.  */

static void
hoist_memory_references (struct loop *loop, bitmap mem_refs,
			 vec<edge> exits)
{
  im_mem_ref *ref;
  unsigned i;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
    {
      ref = memory_accesses.refs_list[i];
      execute_sm (loop, exits, ref);
    }
}
struct ref_always_accessed
{
  ref_always_accessed (struct loop *loop_, bool stored_p_)
      : loop (loop_), stored_p (stored_p_) {}
  bool operator () (mem_ref_loc *loc);
  struct loop *loop;
  bool stored_p;
};
bool
ref_always_accessed::operator () (mem_ref_loc *loc)
{
  struct loop *must_exec;

  if (!get_lim_data (loc->stmt))
    return false;

  /* If we require an always executed store make sure the statement
     stores to the reference.  */
  if (stored_p)
    {
      tree lhs = gimple_get_lhs (loc->stmt);
      if (!lhs
	  || lhs != *loc->ref)
	return false;
    }

  must_exec = get_lim_data (loc->stmt)->always_executed_in;
  if (!must_exec)
    return false;

  if (must_exec == loop
      || flow_loop_nested_p (must_exec, loop))
    return true;

  return false;
}
/* Returns true if REF is always accessed in LOOP.  If STORED_P is true
   make sure REF is always stored to in LOOP.  */

static bool
ref_always_accessed_p (struct loop *loop, im_mem_ref *ref, bool stored_p)
{
  return for_all_locs_in_loop (loop, ref,
			       ref_always_accessed (loop, stored_p));
}
/* Returns true if REF1 and REF2 are independent.  */

static bool
refs_independent_p (im_mem_ref *ref1, im_mem_ref *ref2)
{
  if (ref1 == ref2)
    return true;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Querying dependency of refs %u and %u: ",
	     ref1->id, ref2->id);

  if (mem_refs_may_alias_p (ref1, ref2, &memory_accesses.ttae_cache))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "dependent.\n");
      return false;
    }
  else
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "independent.\n");
      return true;
    }
}
/* Mark REF dependent on stores or loads (according to STORED_P) in LOOP
   and its super-loops.  */

static void
record_dep_loop (struct loop *loop, im_mem_ref *ref, bool stored_p)
{
  /* We can propagate dependent-in-loop bits up the loop
     hierarchy to all outer loops.  */
  while (loop != current_loops->tree_root
	 && bitmap_set_bit (&ref->dep_loop,
			    LOOP_DEP_BIT (loop->num, stored_p)))
    loop = loop_outer (loop);
}
/* Returns true if REF is independent of all other memory
   references in LOOP.  */

static bool
ref_indep_loop_p_1 (struct loop *loop, im_mem_ref *ref, bool stored_p)
{
  stored_p |= (ref->stored && bitmap_bit_p (ref->stored, loop->num));

  bool indep_p = true;
  bitmap refs_to_check;

  if (stored_p)
    refs_to_check = &memory_accesses.refs_in_loop[loop->num];
  else
    refs_to_check = &memory_accesses.refs_stored_in_loop[loop->num];

  if (bitmap_bit_p (refs_to_check, UNANALYZABLE_MEM_ID))
    indep_p = false;
  else
    {
      if (bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
	return true;
      if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
	return false;

      struct loop *inner = loop->inner;
      while (inner)
	{
	  if (!ref_indep_loop_p_1 (inner, ref, stored_p))
	    {
	      indep_p = false;
	      break;
	    }
	  inner = inner->next;
	}

      if (indep_p)
	{
	  unsigned i;
	  bitmap_iterator bi;
	  EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
	    {
	      im_mem_ref *aref = memory_accesses.refs_list[i];
	      if (!refs_independent_p (ref, aref))
		{
		  indep_p = false;
		  break;
		}
	    }
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
	     ref->id, loop->num, indep_p ? "independent" : "dependent");

  /* Record the computed result in the cache.  */
  if (indep_p)
    {
      if (bitmap_set_bit (&ref->indep_loop,
			  LOOP_DEP_BIT (loop->num, stored_p))
	  && stored_p)
	{
	  /* If it's independent of all refs then it's independent of
	     stores, too.  */
	  bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, false));
	}
    }
  else
    {
      record_dep_loop (loop, ref, stored_p);
      if (!stored_p)
	{
	  /* If it's dependent on stores it's dependent on
	     all refs, too.  */
	  record_dep_loop (loop, ref, true);
	}
    }

  return indep_p;
}
/* Returns true if REF is independent of all other memory references in
   LOOP.  */

static bool
ref_indep_loop_p (struct loop *loop, im_mem_ref *ref)
{
  gcc_checking_assert (MEM_ANALYZABLE (ref));

  return ref_indep_loop_p_1 (loop, ref, false);
}
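/* Note: the query starts with STORED_P false; ref_indep_loop_p_1 raises
   it for any loop in which REF itself is stored, so a reference that is
   only loaded is checked against the stores in LOOP, while a stored
   reference is checked against all other references.  */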
/* Returns true if we can perform store motion of REF from LOOP.  */

static bool
can_sm_ref_p (struct loop *loop, im_mem_ref *ref)
{
  tree base;

  /* Can't hoist unanalyzable refs.  */
  if (!MEM_ANALYZABLE (ref))
    return false;

  /* It should be movable.  */
  if (!is_gimple_reg_type (TREE_TYPE (ref->mem.ref))
      || TREE_THIS_VOLATILE (ref->mem.ref)
      || !for_each_index (&ref->mem.ref, may_move_till, loop))
    return false;

  /* If it can throw, fail; we do not properly update EH info.  */
  if (tree_could_throw_p (ref->mem.ref))
    return false;

  /* If it can trap, it must be always executed in LOOP.
     Readonly memory locations may trap when storing to them, but
     tree_could_trap_p is a predicate for rvalues, so check that
     explicitly.  */
  base = get_base_address (ref->mem.ref);
  if ((tree_could_trap_p (ref->mem.ref)
       || (DECL_P (base) && TREE_READONLY (base)))
      && !ref_always_accessed_p (loop, ref, true))
    return false;

  /* And it must be independent of all other memory references
     in LOOP.  */
  if (!ref_indep_loop_p (loop, ref))
    return false;

  return true;
}
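/* For illustration, a reference that can_sm_ref_p rejects:

       for (i = 0; i < n; i++)
	 if (p)
	   *p = i;

   The store to *p is conditional, so *p is not always accessed in the
   loop; because *p may trap when P is NULL, store motion would emit an
   unconditional store on the loop exits and thus introduce a trap (and
   a store) the original program may never execute.  */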
/* Marks in REFS_TO_SM the references in LOOP for which store motion
   should be performed.  SM_EXECUTED is the set of references for which
   store motion was performed in one of the outer loops.  */

static void
find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
{
  bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
  unsigned i;
  bitmap_iterator bi;
  im_mem_ref *ref;

  EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
    {
      ref = memory_accesses.refs_list[i];
      if (can_sm_ref_p (loop, ref))
	bitmap_set_bit (refs_to_sm, i);
    }
}
/* Checks whether LOOP (with exits stored in EXITS array) is suitable
   for a store motion optimization (i.e. whether we can insert statements
   on its exits).  */

static bool
loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
		      vec<edge> exits)
{
  unsigned i;
  edge ex;

  FOR_EACH_VEC_ELT (exits, i, ex)
    if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
      return false;

  return true;
}
/* Try to perform store motion for all memory references modified inside
   LOOP.  SM_EXECUTED is the bitmap of the memory references for which
   store motion was executed in one of the outer loops.  */

static void
store_motion_loop (struct loop *loop, bitmap sm_executed)
{
  vec<edge> exits = get_loop_exit_edges (loop);
  struct loop *subloop;
  bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);

  if (loop_suitable_for_sm (loop, exits))
    {
      find_refs_for_sm (loop, sm_executed, sm_in_loop);
      hoist_memory_references (loop, sm_in_loop, exits);
    }
  exits.release ();

  bitmap_ior_into (sm_executed, sm_in_loop);
  for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
    store_motion_loop (subloop, sm_executed);
  bitmap_and_compl_into (sm_executed, sm_in_loop);
  BITMAP_FREE (sm_in_loop);
}
/* Try to perform store motion for all memory references modified inside
   loops.  */

static void
store_motion (void)
{
  struct loop *loop;
  bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);

  for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
    store_motion_loop (loop, sm_executed);

  BITMAP_FREE (sm_executed);
  gsi_commit_edge_inserts ();
}
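/* Note: the stores sunk by execute_sm were only queued on the exit
   edges with gsi_insert_on_edge; the single gsi_commit_edge_inserts
   call above splits edges where needed and materializes all queued
   statements at once.  */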
/* Fills ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e.
   for each such basic block bb records the outermost loop for which
   execution of its header implies execution of bb.  CONTAINS_CALL is
   the bitmap of blocks that contain a nonpure call.  */

static void
fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
{
  basic_block bb = NULL, *bbs, last = NULL;
  unsigned i;
  edge e;
  struct loop *inn_loop = loop;

  if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
    {
      bbs = get_loop_body_in_dom_order (loop);

      for (i = 0; i < loop->num_nodes; i++)
	{
	  edge_iterator ei;
	  bb = bbs[i];

	  if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
	    last = bb;

	  if (bitmap_bit_p (contains_call, bb->index))
	    break;

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      /* If there is an exit from this BB.  */
	      if (!flow_bb_inside_loop_p (loop, e->dest))
		break;
	      /* Or we enter a possibly non-finite loop.  */
	      if (flow_loop_nested_p (bb->loop_father,
				      e->dest->loop_father)
		  && ! finite_loop_p (e->dest->loop_father))
		break;
	    }
	  if (e)
	    break;

	  /* A loop might be infinite (TODO use simple loop analysis
	     to disprove this if possible).  */
	  if (bb->flags & BB_IRREDUCIBLE_LOOP)
	    break;

	  if (!flow_bb_inside_loop_p (inn_loop, bb))
	    break;

	  if (bb->loop_father->header == bb)
	    {
	      if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
		break;

	      /* In a loop that is always entered we may proceed anyway.
		 But record that we entered it and stop once we leave it.  */
	      inn_loop = bb->loop_father;
	    }
	}

      while (1)
	{
	  SET_ALWAYS_EXECUTED_IN (last, loop);
	  if (last == loop->header)
	    break;
	  last = get_immediate_dominator (CDI_DOMINATORS, last);
	}

      free (bbs);
    }

  for (loop = loop->inner; loop; loop = loop->next)
    fill_always_executed_in_1 (loop, contains_call);
}
/* Fills ALWAYS_EXECUTED_IN information for basic blocks, i.e.
   for each such basic block bb records the outermost loop for which
   execution of its header implies execution of bb.  */

static void
fill_always_executed_in (void)
{
  basic_block bb;
  struct loop *loop;

  auto_sbitmap contains_call (last_basic_block_for_fn (cfun));
  bitmap_clear (contains_call);
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  if (nonpure_call_p (gsi_stmt (gsi)))
	    break;
	}

      if (!gsi_end_p (gsi))
	bitmap_set_bit (contains_call, bb->index);
    }

  for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
    fill_always_executed_in_1 (loop, contains_call);
}
/* Compute the global information needed by the loop invariant motion pass.  */

static void
tree_ssa_lim_initialize (void)
{
  struct loop *loop;
  unsigned i;

  bitmap_obstack_initialize (&lim_bitmap_obstack);
  gcc_obstack_init (&mem_ref_obstack);
  lim_aux_data_map = new hash_map<gimple *, lim_aux_data *>;

  if (flag_tm)
    compute_transaction_bits ();

  alloc_aux_for_edges (0);

  memory_accesses.refs = new hash_table<mem_ref_hasher> (100);
  memory_accesses.refs_list.create (100);
  /* Allocate a special, unanalyzable mem-ref with ID zero.  */
  memory_accesses.refs_list.quick_push
    (mem_ref_alloc (NULL, 0, UNANALYZABLE_MEM_ID));

  memory_accesses.refs_in_loop.create (number_of_loops (cfun));
  memory_accesses.refs_in_loop.quick_grow (number_of_loops (cfun));
  memory_accesses.refs_stored_in_loop.create (number_of_loops (cfun));
  memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops (cfun));
  memory_accesses.all_refs_stored_in_loop.create (number_of_loops (cfun));
  memory_accesses.all_refs_stored_in_loop.quick_grow (number_of_loops (cfun));

  for (i = 0; i < number_of_loops (cfun); i++)
    {
      bitmap_initialize (&memory_accesses.refs_in_loop[i],
			 &lim_bitmap_obstack);
      bitmap_initialize (&memory_accesses.refs_stored_in_loop[i],
			 &lim_bitmap_obstack);
      bitmap_initialize (&memory_accesses.all_refs_stored_in_loop[i],
			 &lim_bitmap_obstack);
    }

  memory_accesses.ttae_cache = NULL;

  /* Initialize bb_loop_postorder with a mapping from loop->num to
     its postorder index.  */
  i = 0;
  bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun));
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    bb_loop_postorder[loop->num] = i++;
}
/* Cleans up after the invariant motion pass.  */

static void
tree_ssa_lim_finalize (void)
{
  basic_block bb;
  unsigned i;
  im_mem_ref *ref;

  free_aux_for_edges ();

  FOR_EACH_BB_FN (bb, cfun)
    SET_ALWAYS_EXECUTED_IN (bb, NULL);

  bitmap_obstack_release (&lim_bitmap_obstack);
  delete lim_aux_data_map;

  delete memory_accesses.refs;
  memory_accesses.refs = NULL;

  FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
    memref_free (ref);
  memory_accesses.refs_list.release ();
  obstack_free (&mem_ref_obstack, NULL);

  memory_accesses.refs_in_loop.release ();
  memory_accesses.refs_stored_in_loop.release ();
  memory_accesses.all_refs_stored_in_loop.release ();

  if (memory_accesses.ttae_cache)
    free_affine_expand_cache (&memory_accesses.ttae_cache);

  free (bb_loop_postorder);
}
/* Moves invariants from loops.  Only "expensive" invariants are moved out --
   i.e. those that are likely to be a win regardless of the register
   pressure.  */

static unsigned int
tree_ssa_lim (void)
{
  unsigned int todo;

  tree_ssa_lim_initialize ();

  /* Gathers information about memory accesses in the loops.  */
  analyze_memory_references ();

  /* Fills ALWAYS_EXECUTED_IN information for basic blocks.  */
  fill_always_executed_in ();

  /* For each statement determine the outermost loop in which it is
     invariant and the cost of computing the invariant.  */
  invariantness_dom_walker (CDI_DOMINATORS)
    .walk (cfun->cfg->x_entry_block_ptr);

  /* Execute store motion.  Force the necessary invariants to be moved
     out of the loops as well.  */
  store_motion ();

  /* Move the expressions that are expensive enough.  */
  todo = move_computations ();

  tree_ssa_lim_finalize ();

  return todo;
}
/* Loop invariant motion pass.  */

namespace {

const pass_data pass_data_lim =
{
  GIMPLE_PASS, /* type */
  "lim", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_LIM, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lim : public gimple_opt_pass
{
public:
  pass_lim (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lim, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_lim (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_loop_im != 0; }
  virtual unsigned int execute (function *);

}; // class pass_lim

unsigned int
pass_lim::execute (function *fun)
{
  bool in_loop_pipeline = scev_initialized_p ();
  if (!in_loop_pipeline)
    loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);

  if (number_of_loops (fun) <= 1)
    return 0;
  unsigned int todo = tree_ssa_lim ();

  if (!in_loop_pipeline)
    loop_optimizer_finalize ();
  else
    scev_reset ();

  return todo;
}

} // anon namespace

gimple_opt_pass *
make_pass_lim (gcc::context *ctxt)
{
  return new pass_lim (ctxt);
}