/* Loop invariant motion.
   Copyright (C) 2003-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "double-int.h"
#include "fold-const.h"
#include "hard-reg-set.h"
#include "dominance.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "hash-table.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-pass.h"
#include "tree-affine.h"
#include "tree-ssa-propagate.h"
#include "trans-mem.h"
#include "gimple-fold.h"
/* TODO:  Support for predicated code motion.  I.e.

   while (1)
     {
       if (cond)
	 break;
       a = inv;
     }

   Where COND and INV are invariants, but evaluating INV may trap or be
   invalid for some other reason if !COND.  This may be transformed to

   if (cond)
     a = inv;
   while (1)
     {
       if (cond)
	 break;
     }  */
/* The auxiliary data kept for each statement.  */

struct lim_aux_data
{
  struct loop *max_loop;	/* The outermost loop in which the statement
				   is invariant.  */

  struct loop *tgt_loop;	/* The loop out of which we want to move the
				   invariant.  */

  struct loop *always_executed_in;
				/* The outermost loop for which we are sure
				   the statement is executed if the loop
				   is entered.  */

  unsigned cost;		/* Cost of the computation performed by the
				   statement.  */

  vec<gimple> depends;		/* Vector of statements that must also be
				   hoisted out of the loop when this statement
				   is hoisted; i.e. those that define the
				   operands of the statement and are inside of
				   the MAX_LOOP loop.  */
};
/* Maps statements to their lim_aux_data.  */

static hash_map<gimple, lim_aux_data *> *lim_aux_data_map;
/* Description of a memory reference location.  */

typedef struct mem_ref_loc
{
  tree *ref;			/* The reference itself.  */
  gimple stmt;			/* The statement in which it occurs.  */
} *mem_ref_loc_p;
/* Description of a memory reference.  */

typedef struct im_mem_ref
{
  unsigned id;			/* ID assigned to the memory reference
				   (its index in memory_accesses.refs_list).  */
  hashval_t hash;		/* Its hash value.  */

  /* The memory access itself and associated caching of alias-oracle
     query meta-data.  */
  ao_ref mem;

  bitmap stored;		/* The set of loops in which this memory
				   location is stored.  */
  vec<mem_ref_loc> accesses_in_loop;
				/* The locations of the accesses.  Vector
				   indexed by the loop number.  */

  /* The following sets are computed on demand.  We keep both the set and
     its complement, so that we know whether the information was
     already computed or not.  */
  bitmap_head indep_loop;	/* The set of loops in which the memory
				   reference is independent, meaning:
				   If it is stored in the loop, this store
				     is independent of all other loads and
				     stores.
				   If it is only loaded, then it is independent
				     of all stores in the loop.  */
  bitmap_head dep_loop;		/* The complement of INDEP_LOOP.  */
} *mem_ref_p;
/* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first
   to record (in)dependence against stores in the loop and its subloops, the
   second to record (in)dependence against all references in the loop
   and its subloops.  */
#define LOOP_DEP_BIT(loopnum, storedp) (2 * (loopnum) + (storedp ? 1 : 0))
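
/* For instance (an illustrative note, not used by the pass itself):
   for loop number 3, LOOP_DEP_BIT (3, false) == 6 is the bit recording
   (in)dependence against stores only, and LOOP_DEP_BIT (3, true) == 7
   is the bit recording (in)dependence against all references.  */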
/* Mem_ref hashtable helpers.  */

struct mem_ref_hasher : typed_noop_remove <im_mem_ref>
{
  typedef im_mem_ref *value_type;
  typedef tree_node *compare_type;
  static inline hashval_t hash (const im_mem_ref *);
  static inline bool equal (const im_mem_ref *, const tree_node *);
};
/* A hash function for struct im_mem_ref object OBJ.  */

inline hashval_t
mem_ref_hasher::hash (const im_mem_ref *mem)
{
  return mem->hash;
}
/* An equality function for struct im_mem_ref object MEM1 with
   memory reference OBJ2.  */

inline bool
mem_ref_hasher::equal (const im_mem_ref *mem1, const tree_node *obj2)
{
  return operand_equal_p (mem1->mem.ref, (const_tree) obj2, 0);
}
/* Description of memory accesses in loops.  */

static struct
{
  /* The hash table of memory references accessed in loops.  */
  hash_table<mem_ref_hasher> *refs;

  /* The list of memory references.  */
  vec<mem_ref_p> refs_list;

  /* The set of memory references accessed in each loop.  */
  vec<bitmap_head> refs_in_loop;

  /* The set of memory references stored in each loop.  */
  vec<bitmap_head> refs_stored_in_loop;

  /* The set of memory references stored in each loop, including subloops.  */
  vec<bitmap_head> all_refs_stored_in_loop;

  /* Cache for expanding memory addresses.  */
  hash_map<tree, name_expansion *> *ttae_cache;
} memory_accesses;
/* Obstack for the bitmaps in the above data structures.  */
static bitmap_obstack lim_bitmap_obstack;
static obstack mem_ref_obstack;

static bool ref_indep_loop_p (struct loop *, mem_ref_p);
/* Minimum cost of an expensive expression.  */
#define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))

/* The outermost loop for which execution of the header guarantees that the
   block will be executed.  */
#define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
#define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))

/* ID of the shared unanalyzable mem.  */
#define UNANALYZABLE_MEM_ID 0

/* Whether the reference was analyzable.  */
#define MEM_ANALYZABLE(REF) ((REF)->id != UNANALYZABLE_MEM_ID)
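
/* Note (illustrative): refs_list[0] is a single shared im_mem_ref that
   stands in for every access the pass cannot analyze, so MEM_ANALYZABLE
   is false exactly for that entry; any loop whose reference bitmap
   contains bit 0 is conservatively treated as depending on everything.  */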
static struct lim_aux_data *
init_lim_data (gimple stmt)
{
  lim_aux_data *p = XCNEW (struct lim_aux_data);
  lim_aux_data_map->put (stmt, p);

  return p;
}

static struct lim_aux_data *
get_lim_data (gimple stmt)
{
  lim_aux_data **p = lim_aux_data_map->get (stmt);
  if (!p)
    return NULL;

  return *p;
}

/* Releases the memory occupied by DATA.  */

static void
free_lim_aux_data (struct lim_aux_data *data)
{
  data->depends.release ();
  free (data);
}

static void
clear_lim_data (gimple stmt)
{
  lim_aux_data **p = lim_aux_data_map->get (stmt);
  if (!p)
    return;

  free_lim_aux_data (*p);
  *p = NULL;
}
/* The possibilities of statement movement.  */
enum move_pos
  {
    MOVE_IMPOSSIBLE,		/* No movement -- side effect expression.  */
    MOVE_PRESERVE_EXECUTION,	/* Must not cause the non-executed statement
				   to become executed -- memory accesses, ... */
    MOVE_POSSIBLE		/* Unlimited movement.  */
  };
/* If it is possible to hoist the statement STMT unconditionally,
   returns MOVE_POSSIBLE.
   If it is possible to hoist the statement STMT, but we must avoid making
   it executed if it would not be executed in the original program (e.g.
   because it may trap), return MOVE_PRESERVE_EXECUTION.
   Otherwise return MOVE_IMPOSSIBLE.  */

enum move_pos
movement_possibility (gimple stmt)
{
  tree lhs;
  enum move_pos ret = MOVE_POSSIBLE;

  if (flag_unswitch_loops
      && gimple_code (stmt) == GIMPLE_COND)
    {
      /* If we perform unswitching, force the operands of the invariant
	 condition to be moved out of the loop.  */
      return MOVE_POSSIBLE;
    }

  if (gimple_code (stmt) == GIMPLE_PHI
      && gimple_phi_num_args (stmt) <= 2
      && !virtual_operand_p (gimple_phi_result (stmt))
      && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
    return MOVE_POSSIBLE;

  if (gimple_get_lhs (stmt) == NULL_TREE)
    return MOVE_IMPOSSIBLE;

  if (gimple_vdef (stmt))
    return MOVE_IMPOSSIBLE;

  if (stmt_ends_bb_p (stmt)
      || gimple_has_volatile_ops (stmt)
      || gimple_has_side_effects (stmt)
      || stmt_could_throw_p (stmt))
    return MOVE_IMPOSSIBLE;

  if (is_gimple_call (stmt))
    {
      /* While a pure or const call is guaranteed to have no side effects, we
	 cannot move it arbitrarily.  Consider code like

	 char *s = something ();

	 while (1)
	   {
	     if (s)
	       t = strlen (s);
	     else
	       t = 0;
	   }

	 Here the strlen call cannot be moved out of the loop, even though
	 s is invariant.  In addition to possibly creating a call with
	 invalid arguments, moving out a function call that is not executed
	 may cause performance regressions in case the call is costly and
	 not executed at all.  */
      ret = MOVE_PRESERVE_EXECUTION;
      lhs = gimple_call_lhs (stmt);
    }
  else if (is_gimple_assign (stmt))
    lhs = gimple_assign_lhs (stmt);
  else
    return MOVE_IMPOSSIBLE;

  if (TREE_CODE (lhs) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
    return MOVE_IMPOSSIBLE;

  if (TREE_CODE (lhs) != SSA_NAME
      || gimple_could_trap_p (stmt))
    return MOVE_PRESERVE_EXECUTION;

  /* Non-local loads in a transaction cannot be hoisted out.  Well,
     unless the load happens on every path out of the loop, but we
     don't take this into account yet.  */
  if (flag_tm
      && gimple_in_transaction (stmt)
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      if (DECL_P (rhs) && is_global_var (rhs))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "Cannot hoist conditional load of ");
	      print_generic_expr (dump_file, rhs, TDF_SLIM);
	      fprintf (dump_file, " because it is in a transaction.\n");
	    }
	  return MOVE_IMPOSSIBLE;
	}
    }

  return ret;
}
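
/* To illustrate the classification above (examples only, with made-up
   SSA names): an assignment "a_1 = b_2 + c_3" is MOVE_POSSIBLE, a load
   "a_1 = *p_2" that may trap is MOVE_PRESERVE_EXECUTION, and a store
   "*p_1 = a_2" (it carries a virtual definition) is MOVE_IMPOSSIBLE.  */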
/* Suppose that operand DEF is used inside LOOP.  Returns the outermost
   loop to which we could move the expression using DEF if it did not have
   other operands, i.e. the outermost loop enclosing LOOP in which the value
   of DEF is invariant.  */

static struct loop *
outermost_invariant_loop (tree def, struct loop *loop)
{
  gimple def_stmt;
  basic_block def_bb;
  struct loop *max_loop;
  struct lim_aux_data *lim_data;

  if (!def)
    return superloop_at_depth (loop, 1);

  if (TREE_CODE (def) != SSA_NAME)
    {
      gcc_assert (is_gimple_min_invariant (def));
      return superloop_at_depth (loop, 1);
    }

  def_stmt = SSA_NAME_DEF_STMT (def);
  def_bb = gimple_bb (def_stmt);
  if (!def_bb)
    return superloop_at_depth (loop, 1);

  max_loop = find_common_loop (loop, def_bb->loop_father);

  lim_data = get_lim_data (def_stmt);
  if (lim_data != NULL && lim_data->max_loop != NULL)
    max_loop = find_common_loop (max_loop,
				 loop_outer (lim_data->max_loop));
  if (max_loop == loop)
    return NULL;
  max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);

  return max_loop;
}
/* DATA is a structure containing information associated with a statement
   inside LOOP.  DEF is one of the operands of this statement.

   Find the outermost loop enclosing LOOP in which the value of DEF is
   invariant and record this in the DATA->max_loop field.  If DEF itself is
   defined inside this loop as well (i.e. we need to hoist it out of the loop
   if we want to hoist the statement represented by DATA), record the
   statement in which DEF is defined to the DATA->depends list.  Additionally
   if ADD_COST is true, add the cost of the computation of DEF to DATA->cost.

   If DEF is not invariant in LOOP, return false.  Otherwise return true.  */

static bool
add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
		bool add_cost)
{
  gimple def_stmt = SSA_NAME_DEF_STMT (def);
  basic_block def_bb = gimple_bb (def_stmt);
  struct loop *max_loop;
  struct lim_aux_data *def_data;

  if (!def_bb)
    return true;

  max_loop = outermost_invariant_loop (def, loop);
  if (!max_loop)
    return false;

  if (flow_loop_nested_p (data->max_loop, max_loop))
    data->max_loop = max_loop;

  def_data = get_lim_data (def_stmt);
  if (!def_data)
    return true;

  if (add_cost
      /* Only add the cost if the statement defining DEF is inside LOOP,
	 i.e. if it is likely that by moving the invariants dependent
	 on it, we will be able to avoid creating a new register for
	 it (since it will be only used in these dependent invariants).  */
      && def_bb->loop_father == loop)
    data->cost += def_data->cost;

  data->depends.safe_push (def_stmt);

  return true;
}
/* Returns an estimate of the cost of statement STMT.  The values here
   are just ad-hoc constants, similar to costs for inlining.  */

static unsigned
stmt_cost (gimple stmt)
{
  /* Always try to create possibilities for unswitching.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_PHI)
    return LIM_EXPENSIVE;

  /* We should be hoisting calls if possible.  */
  if (is_gimple_call (stmt))
    {
      tree fndecl;

      /* Unless the call is a builtin_constant_p; this always folds to a
	 constant, so moving it is useless.  */
      fndecl = gimple_call_fndecl (stmt);
      if (fndecl
	  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P)
	return 0;

      return LIM_EXPENSIVE;
    }

  /* Hoisting memory references out should almost surely be a win.  */
  if (gimple_references_memory_p (stmt))
    return LIM_EXPENSIVE;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return 1;

  switch (gimple_assign_rhs_code (stmt))
    {
    case MULT_EXPR:
    case WIDEN_MULT_EXPR:
    case WIDEN_MULT_PLUS_EXPR:
    case WIDEN_MULT_MINUS_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case TRUNC_MOD_EXPR:
    case RDIV_EXPR:
      /* Division and multiplication are usually expensive.  */
      return LIM_EXPENSIVE;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case WIDEN_LSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      /* Shifts and rotates are usually expensive.  */
      return LIM_EXPENSIVE;

    case CONSTRUCTOR:
      /* Make vector construction cost proportional to the number
	 of elements.  */
      return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));

    case SSA_NAME:
    case PAREN_EXPR:
      /* Whether or not something is wrapped inside a PAREN_EXPR
	 should not change move cost.  Nor should an intermediate
	 unpropagated SSA name copy.  */
      return 0;

    default:
      return 1;
    }
}
/* Finds the outermost loop between OUTER and LOOP in which the memory
   reference REF is independent.  If REF is not independent in LOOP, NULL
   is returned instead.  */

static struct loop *
outermost_indep_loop (struct loop *outer, struct loop *loop, mem_ref_p ref)
{
  struct loop *aloop;

  if (ref->stored && bitmap_bit_p (ref->stored, loop->num))
    return NULL;

  for (aloop = outer;
       aloop != loop;
       aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
    if ((!ref->stored || !bitmap_bit_p (ref->stored, aloop->num))
	&& ref_indep_loop_p (aloop, ref))
      return aloop;

  if (ref_indep_loop_p (loop, ref))
    return loop;
  else
    return NULL;
}
/* If there is a simple load or store to a memory reference in STMT, returns
   the location of the memory reference, and sets IS_STORE according to
   whether it is a store or load.  Otherwise, returns NULL.  */

static tree *
simple_mem_ref_in_stmt (gimple stmt, bool *is_store)
{
  tree *lhs, *rhs;

  /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns.  */
  if (!gimple_assign_single_p (stmt))
    return NULL;

  lhs = gimple_assign_lhs_ptr (stmt);
  rhs = gimple_assign_rhs1_ptr (stmt);

  if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
    {
      *is_store = false;
      return rhs;
    }
  else if (gimple_vdef (stmt)
	   && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
    {
      *is_store = true;
      return lhs;
    }
  else
    return NULL;
}
/* Returns the memory reference contained in STMT.  */

static mem_ref_p
mem_ref_in_stmt (gimple stmt)
{
  bool store;
  tree *mem = simple_mem_ref_in_stmt (stmt, &store);
  hashval_t hash;
  mem_ref_p ref;

  if (!mem)
    return NULL;
  gcc_assert (!store);

  hash = iterative_hash_expr (*mem, 0);
  ref = memory_accesses.refs->find_with_hash (*mem, hash);

  gcc_assert (ref != NULL);
  return ref;
}
/* From a controlling predicate in DOM determine the arguments from
   the PHI node PHI that are chosen if the predicate evaluates to
   true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if
   they are non-NULL.  Returns true if the arguments can be determined,
   else returns false.  */

static bool
extract_true_false_args_from_phi (basic_block dom, gphi *phi,
				  tree *true_arg_p, tree *false_arg_p)
{
  basic_block bb = gimple_bb (phi);
  edge true_edge, false_edge, tem;
  tree arg0 = NULL_TREE, arg1 = NULL_TREE;

  /* We have to verify that one edge into the PHI node is dominated
     by the true edge of the predicate block and the other edge
     dominated by the false edge.  This ensures that the PHI argument
     we are going to take is completely determined by the path we
     take from the predicate block.
     We can only use BB dominance checks below if the destination of
     the true/false edges are dominated by their edge, thus only
     have a single predecessor.  */
  extract_true_false_edges_from_block (dom, &true_edge, &false_edge);
  tem = EDGE_PRED (bb, 0);
  if (tem == true_edge
      || (single_pred_p (true_edge->dest)
	  && (tem->src == true_edge->dest
	      || dominated_by_p (CDI_DOMINATORS,
				 tem->src, true_edge->dest))))
    arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
  else if (tem == false_edge
	   || (single_pred_p (false_edge->dest)
	       && (tem->src == false_edge->dest
		   || dominated_by_p (CDI_DOMINATORS,
				      tem->src, false_edge->dest))))
    arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
  else
    return false;
  tem = EDGE_PRED (bb, 1);
  if (tem == true_edge
      || (single_pred_p (true_edge->dest)
	  && (tem->src == true_edge->dest
	      || dominated_by_p (CDI_DOMINATORS,
				 tem->src, true_edge->dest))))
    arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
  else if (tem == false_edge
	   || (single_pred_p (false_edge->dest)
	       && (tem->src == false_edge->dest
		   || dominated_by_p (CDI_DOMINATORS,
				      tem->src, false_edge->dest))))
    arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
  else
    return false;
  if (!arg0 || !arg1)
    return false;

  if (true_arg_p)
    *true_arg_p = arg0;
  if (false_arg_p)
    *false_arg_p = arg1;

  return true;
}
/* Determine the outermost loop to which it is possible to hoist a statement
   STMT and store it to LIM_DATA (STMT)->max_loop.  To do this we determine
   the outermost loop in which the value computed by STMT is invariant.
   If MUST_PRESERVE_EXEC is true, additionally choose the loop so that
   we preserve whether STMT is executed.  It also fills other related
   information to LIM_DATA (STMT).

   The function returns false if STMT cannot be hoisted outside of the loop
   it is defined in, and true otherwise.  */

static bool
determine_max_movement (gimple stmt, bool must_preserve_exec)
{
  basic_block bb = gimple_bb (stmt);
  struct loop *loop = bb->loop_father;
  struct loop *level;
  struct lim_aux_data *lim_data = get_lim_data (stmt);
  tree val;
  ssa_op_iter iter;

  if (must_preserve_exec)
    level = ALWAYS_EXECUTED_IN (bb);
  else
    level = superloop_at_depth (loop, 1);
  lim_data->max_loop = level;

  if (gphi *phi = dyn_cast <gphi *> (stmt))
    {
      use_operand_p use_p;
      unsigned min_cost = UINT_MAX;
      unsigned total_cost = 0;
      struct lim_aux_data *def_data;

      /* We will end up promoting dependencies to be unconditionally
	 evaluated.  For this reason the PHI cost (and thus the
	 cost we remove from the loop by doing the invariant motion)
	 is that of the cheapest PHI argument dependency chain.  */
      FOR_EACH_PHI_ARG (use_p, phi, iter, SSA_OP_USE)
	{
	  val = USE_FROM_PTR (use_p);

	  if (TREE_CODE (val) != SSA_NAME)
	    {
	      /* Assign const 1 to constants.  */
	      min_cost = MIN (min_cost, 1);
	      total_cost += 1;
	      continue;
	    }
	  if (!add_dependency (val, lim_data, loop, false))
	    return false;

	  gimple def_stmt = SSA_NAME_DEF_STMT (val);
	  if (gimple_bb (def_stmt)
	      && gimple_bb (def_stmt)->loop_father == loop)
	    {
	      def_data = get_lim_data (def_stmt);
	      if (def_data)
		{
		  min_cost = MIN (min_cost, def_data->cost);
		  total_cost += def_data->cost;
		}
	    }
	}

      min_cost = MIN (min_cost, total_cost);
      lim_data->cost += min_cost;

      if (gimple_phi_num_args (phi) > 1)
	{
	  basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
	  gimple cond;
	  if (gsi_end_p (gsi_last_bb (dom)))
	    return false;
	  cond = gsi_stmt (gsi_last_bb (dom));
	  if (gimple_code (cond) != GIMPLE_COND)
	    return false;
	  /* Verify that this is an extended form of a diamond and
	     the PHI arguments are completely controlled by the
	     predicate in DOM.  */
	  if (!extract_true_false_args_from_phi (dom, phi, NULL, NULL))
	    return false;

	  /* Fold in dependencies and cost of the condition.  */
	  FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
	    {
	      if (!add_dependency (val, lim_data, loop, false))
		return false;
	      def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
	      if (def_data)
		total_cost += def_data->cost;
	    }

	  /* We want to avoid unconditionally executing very expensive
	     operations.  As costs for our dependencies cannot be
	     negative just claim we are not invariant for this case.
	     We also are not sure whether the control-flow inside the
	     loop will vanish.  */
	  if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
	      && !(min_cost != 0
		   && total_cost / min_cost <= 2))
	    return false;

	  /* Assume that the control-flow in the loop will vanish.
	     ??? We should verify this and not artificially increase
	     the cost if that is not the case.  */
	  lim_data->cost += stmt_cost (stmt);
	}

      return true;
    }
  else
    FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
      if (!add_dependency (val, lim_data, loop, true))
	return false;

  if (gimple_vuse (stmt))
    {
      mem_ref_p ref = mem_ref_in_stmt (stmt);

      if (ref)
	{
	  lim_data->max_loop
	    = outermost_indep_loop (lim_data->max_loop, loop, ref);
	  if (!lim_data->max_loop)
	    return false;
	}
      else if ((val = gimple_vuse (stmt)) != NULL_TREE)
	{
	  if (!add_dependency (val, lim_data, loop, false))
	    return false;
	}
    }

  lim_data->cost += stmt_cost (stmt);

  return true;
}
/* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
   and that one of the operands of this statement is computed by STMT.
   Ensure that STMT (together with all the statements that define its
   operands) is hoisted at least out of the loop LEVEL.  */

static void
set_level (gimple stmt, struct loop *orig_loop, struct loop *level)
{
  struct loop *stmt_loop = gimple_bb (stmt)->loop_father;
  struct lim_aux_data *lim_data;
  gimple dep_stmt;
  unsigned i;

  stmt_loop = find_common_loop (orig_loop, stmt_loop);
  lim_data = get_lim_data (stmt);
  if (lim_data != NULL && lim_data->tgt_loop != NULL)
    stmt_loop = find_common_loop (stmt_loop,
				  loop_outer (lim_data->tgt_loop));
  if (flow_loop_nested_p (stmt_loop, level))
    return;

  gcc_assert (level == lim_data->max_loop
	      || flow_loop_nested_p (lim_data->max_loop, level));

  lim_data->tgt_loop = level;
  FOR_EACH_VEC_ELT (lim_data->depends, i, dep_stmt)
    set_level (dep_stmt, orig_loop, level);
}
/* Determines an outermost loop from which we want to hoist the statement
   STMT.  For now we choose the outermost possible loop.  TODO -- use
   profiling information to set it more sanely.  */

static void
set_profitable_level (gimple stmt)
{
  set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
}
/* Returns true if STMT is a call that has side effects.  */

static bool
nonpure_call_p (gimple stmt)
{
  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  return gimple_has_side_effects (stmt);
}
/* Rewrite a/b to a*(1/b).  Return the invariant stmt to process.  */

static gimple
rewrite_reciprocal (gimple_stmt_iterator *bsi)
{
  gassign *stmt, *stmt1, *stmt2;
  tree name, lhs, type;
  tree real_one;
  gimple_stmt_iterator gsi;

  stmt = as_a <gassign *> (gsi_stmt (*bsi));
  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);

  real_one = build_one_cst (type);

  name = make_temp_ssa_name (type, NULL, "reciptmp");
  stmt1 = gimple_build_assign (name, RDIV_EXPR, real_one,
			       gimple_assign_rhs2 (stmt));
  stmt2 = gimple_build_assign (lhs, MULT_EXPR, name,
			       gimple_assign_rhs1 (stmt));

  /* Replace division stmt with reciprocal and multiply stmts.
     The multiply stmt is not invariant, so update iterator
     and avoid rescanning.  */
  gsi = *bsi;
  gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
  gsi_replace (&gsi, stmt2, true);

  /* Continue processing with invariant reciprocal statement.  */
  return stmt1;
}
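
/* For example (an illustrative GIMPLE sketch; the SSA names are made up):
   inside a loop where b_2 is invariant,

     x_1 = a_3 / b_2;

   becomes

     reciptmp_4 = 1.0e+0 / b_2;    <- invariant, may be hoisted
     x_1 = a_3 * reciptmp_4;

   and processing continues from the reciprocal statement.  */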
/* Check if the pattern at *BSI is a bittest of the form
   (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0.  */

static gimple
rewrite_bittest (gimple_stmt_iterator *bsi)
{
  gassign *stmt;
  gimple stmt1;
  gassign *stmt2;
  gimple use_stmt;
  gcond *cond_stmt;
  tree lhs, name, t, a, b;
  use_operand_p use;

  stmt = as_a <gassign *> (gsi_stmt (*bsi));
  lhs = gimple_assign_lhs (stmt);

  /* Verify that the single use of lhs is a comparison against zero.  */
  if (TREE_CODE (lhs) != SSA_NAME
      || !single_imm_use (lhs, &use, &use_stmt))
    return stmt;
  cond_stmt = dyn_cast <gcond *> (use_stmt);
  if (!cond_stmt)
    return stmt;
  if (gimple_cond_lhs (cond_stmt) != lhs
      || (gimple_cond_code (cond_stmt) != NE_EXPR
	  && gimple_cond_code (cond_stmt) != EQ_EXPR)
      || !integer_zerop (gimple_cond_rhs (cond_stmt)))
    return stmt;

  /* Get at the operands of the shift.  The rhs is TMP1 & 1.  */
  stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
  if (gimple_code (stmt1) != GIMPLE_ASSIGN)
    return stmt;

  /* There is a conversion in between possibly inserted by fold.  */
  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
    {
      t = gimple_assign_rhs1 (stmt1);
      if (TREE_CODE (t) != SSA_NAME
	  || !has_single_use (t))
	return stmt;
      stmt1 = SSA_NAME_DEF_STMT (t);
      if (gimple_code (stmt1) != GIMPLE_ASSIGN)
	return stmt;
    }

  /* Verify that B is loop invariant but A is not.  Verify that with
     all the stmt walking we are still in the same loop.  */
  if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
      || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
    return stmt;

  a = gimple_assign_rhs1 (stmt1);
  b = gimple_assign_rhs2 (stmt1);

  if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
      && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
    {
      gimple_stmt_iterator rsi;

      /* 1 << B.  */
      t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
		       build_int_cst (TREE_TYPE (a), 1), b);
      name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
      stmt1 = gimple_build_assign (name, t);

      /* A & (1 << B).  */
      t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
      name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
      stmt2 = gimple_build_assign (name, t);

      /* Replace the SSA_NAME we compare against zero.  Adjust
	 the type of zero accordingly.  */
      SET_USE (use, name);
      gimple_cond_set_rhs (cond_stmt,
			   build_int_cst_type (TREE_TYPE (name), 0));

      /* Don't use gsi_replace here, none of the new assignments sets
	 the variable originally set in stmt.  Move bsi to stmt1, and
	 then remove the original stmt, so that we get a chance to
	 retain debug info for it.  */
      rsi = *bsi;
      gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
      gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
      gsi_remove (&rsi, true);

      return stmt1;
    }

  return stmt;
}
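
/* For example (an illustrative sketch; the SSA names are made up):
   with B_2 invariant but A_5 not,

     tmp_1 = A_5 >> B_2;
     tmp_3 = tmp_1 & 1;
     if (tmp_3 != 0) ...

   is rewritten to

     shifttmp_6 = 1 << B_2;    <- invariant, may be hoisted
     shifttmp_7 = A_5 & shifttmp_6;
     if (shifttmp_7 != 0) ...  */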
/* For each statement determines the outermost loop in which it is invariant,
   the statements on whose motion it depends, and the cost of the computation.
   This information is stored to the LIM_DATA structure associated with
   each statement.  */

class invariantness_dom_walker : public dom_walker
{
public:
  invariantness_dom_walker (cdi_direction direction)
    : dom_walker (direction) {}

  virtual void before_dom_children (basic_block);
};
/* Determine the outermost loops in which statements in basic block BB are
   invariant, and record them to the LIM_DATA associated with the statements.
   Callback for dom_walker.  */

void
invariantness_dom_walker::before_dom_children (basic_block bb)
{
  enum move_pos pos;
  gimple_stmt_iterator bsi;
  gimple stmt;
  bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
  struct loop *outermost = ALWAYS_EXECUTED_IN (bb);
  struct lim_aux_data *lim_data;

  if (!loop_outer (bb->loop_father))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
	     bb->index, bb->loop_father->num, loop_depth (bb->loop_father));

  /* Look at PHI nodes, but only if there are at most two.
     ??? We could relax this further by post-processing the inserted
     code and transforming adjacent cond-exprs with the same predicate
     to control flow again.  */
  bsi = gsi_start_phis (bb);
  if (!gsi_end_p (bsi)
      && ((gsi_next (&bsi), gsi_end_p (bsi))
	  || (gsi_next (&bsi), gsi_end_p (bsi))))
    for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
      {
	stmt = gsi_stmt (bsi);

	pos = movement_possibility (stmt);
	if (pos == MOVE_IMPOSSIBLE)
	  continue;

	lim_data = init_lim_data (stmt);
	lim_data->always_executed_in = outermost;

	if (!determine_max_movement (stmt, false))
	  {
	    lim_data->max_loop = NULL;
	    continue;
	  }

	if (dump_file && (dump_flags & TDF_DETAILS))
	  {
	    print_gimple_stmt (dump_file, stmt, 2, 0);
	    fprintf (dump_file, "  invariant up to level %d, cost %d.\n\n",
		     loop_depth (lim_data->max_loop),
		     lim_data->cost);
	  }

	if (lim_data->cost >= LIM_EXPENSIVE)
	  set_profitable_level (stmt);
      }

  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
    {
      stmt = gsi_stmt (bsi);

      pos = movement_possibility (stmt);
      if (pos == MOVE_IMPOSSIBLE)
	{
	  if (nonpure_call_p (stmt))
	    {
	      maybe_never = true;
	      outermost = NULL;
	    }
	  /* Make sure to note always_executed_in for stores to make
	     store-motion work.  */
	  else if (stmt_makes_single_store (stmt))
	    {
	      struct lim_aux_data *lim_data = init_lim_data (stmt);
	      lim_data->always_executed_in = outermost;
	    }
	  continue;
	}

      if (is_gimple_assign (stmt)
	  && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
	      == GIMPLE_BINARY_RHS))
	{
	  tree op0 = gimple_assign_rhs1 (stmt);
	  tree op1 = gimple_assign_rhs2 (stmt);
	  struct loop *ol1 = outermost_invariant_loop (op1,
					loop_containing_stmt (stmt));

	  /* If the divisor is invariant, convert a/b to a*(1/b), allowing
	     the reciprocal to be hoisted out of the loop, saving an
	     expensive divide.  */
	  if (pos == MOVE_POSSIBLE
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR
	      && flag_unsafe_math_optimizations
	      && !flag_trapping_math
	      && ol1 != NULL
	      && outermost_invariant_loop (op0, ol1) == NULL)
	    stmt = rewrite_reciprocal (&bsi);

	  /* If the shift count is invariant, convert (A >> B) & 1 to
	     A & (1 << B), allowing the bit mask to be hoisted out of the
	     loop and saving an expensive shift.  */
	  if (pos == MOVE_POSSIBLE
	      && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
	      && integer_onep (op1)
	      && TREE_CODE (op0) == SSA_NAME
	      && has_single_use (op0))
	    stmt = rewrite_bittest (&bsi);
	}

      lim_data = init_lim_data (stmt);
      lim_data->always_executed_in = outermost;

      if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
	continue;

      if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
	{
	  lim_data->max_loop = NULL;
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  print_gimple_stmt (dump_file, stmt, 2, 0);
	  fprintf (dump_file, "  invariant up to level %d, cost %d.\n\n",
		   loop_depth (lim_data->max_loop),
		   lim_data->cost);
	}

      if (lim_data->cost >= LIM_EXPENSIVE)
	set_profitable_level (stmt);
    }
}
class move_computations_dom_walker : public dom_walker
{
public:
  move_computations_dom_walker (cdi_direction direction)
    : dom_walker (direction), todo_ (0) {}

  virtual void before_dom_children (basic_block);

  unsigned int todo_;
};
/* Hoist the statements in basic block BB out of the loops prescribed by
   data stored in LIM_DATA structures associated with each statement.
   Callback for walk_dominator_tree.  */

void
move_computations_dom_walker::before_dom_children (basic_block bb)
{
  struct loop *level;
  unsigned cost = 0;
  struct lim_aux_data *lim_data;

  if (!loop_outer (bb->loop_father))
    return;

  for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
    {
      gassign *new_stmt;
      gphi *stmt = bsi.phi ();

      lim_data = get_lim_data (stmt);
      if (lim_data == NULL)
	{
	  gsi_next (&bsi);
	  continue;
	}

      cost = lim_data->cost;
      level = lim_data->tgt_loop;
      clear_lim_data (stmt);

      if (!level)
	{
	  gsi_next (&bsi);
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Moving PHI node\n");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	  fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
		   cost, level->num);
	}

      if (gimple_phi_num_args (stmt) == 1)
	{
	  tree arg = PHI_ARG_DEF (stmt, 0);
	  new_stmt = gimple_build_assign (gimple_phi_result (stmt),
					  TREE_CODE (arg), arg);
	}
      else
	{
	  basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
	  gimple cond = gsi_stmt (gsi_last_bb (dom));
	  tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
	  /* Get the PHI arguments corresponding to the true and false
	     edges of COND.  */
	  extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
	  gcc_assert (arg0 && arg1);
	  t = build2 (gimple_cond_code (cond), boolean_type_node,
		      gimple_cond_lhs (cond), gimple_cond_rhs (cond));
	  new_stmt = gimple_build_assign (gimple_phi_result (stmt),
					  COND_EXPR, t, arg0, arg1);
	  todo_ |= TODO_cleanup_cfg;
	}
      if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (new_stmt)))
	  && (!ALWAYS_EXECUTED_IN (bb)
	      || (ALWAYS_EXECUTED_IN (bb) != level
		  && !flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
	{
	  tree lhs = gimple_assign_lhs (new_stmt);
	  SSA_NAME_RANGE_INFO (lhs) = NULL;
	  SSA_NAME_ANTI_RANGE_P (lhs) = 0;
	}
      gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
      remove_phi_node (&bsi, false);
    }

  for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
    {
      edge e;

      gimple stmt = gsi_stmt (bsi);

      lim_data = get_lim_data (stmt);
      if (lim_data == NULL)
	{
	  gsi_next (&bsi);
	  continue;
	}

      cost = lim_data->cost;
      level = lim_data->tgt_loop;
      clear_lim_data (stmt);

      if (!level)
	{
	  gsi_next (&bsi);
	  continue;
	}

      /* We do not really want to move conditionals out of the loop; we just
	 placed it here to force its operands to be moved if necessary.  */
      if (gimple_code (stmt) == GIMPLE_COND)
	{
	  gsi_next (&bsi);
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Moving statement\n");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	  fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
		   cost, level->num);
	}

      e = loop_preheader_edge (level);
      gcc_assert (!gimple_vdef (stmt));
      if (gimple_vuse (stmt))
	{
	  /* The new VUSE is the one from the virtual PHI in the loop
	     header or the one already present.  */
	  gphi_iterator gsi2;
	  for (gsi2 = gsi_start_phis (e->dest);
	       !gsi_end_p (gsi2); gsi_next (&gsi2))
	    {
	      gphi *phi = gsi2.phi ();
	      if (virtual_operand_p (gimple_phi_result (phi)))
		{
		  gimple_set_vuse (stmt, PHI_ARG_DEF_FROM_EDGE (phi, e));
		  break;
		}
	    }
	}
      gsi_remove (&bsi, false);
      if (gimple_has_lhs (stmt)
	  && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME
	  && INTEGRAL_TYPE_P (TREE_TYPE (gimple_get_lhs (stmt)))
	  && (!ALWAYS_EXECUTED_IN (bb)
	      || !(ALWAYS_EXECUTED_IN (bb) == level
		   || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
	{
	  tree lhs = gimple_get_lhs (stmt);
	  SSA_NAME_RANGE_INFO (lhs) = NULL;
	  SSA_NAME_ANTI_RANGE_P (lhs) = 0;
	}
      /* In case this is a stmt that is not unconditionally executed
	 when the target loop header is executed and the stmt may
	 invoke undefined integer or pointer overflow, rewrite it to
	 unsigned arithmetic.  */
      if (is_gimple_assign (stmt)
	  && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))
	  && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (gimple_assign_lhs (stmt)))
	  && arith_code_with_undefined_signed_overflow
	       (gimple_assign_rhs_code (stmt))
	  && (!ALWAYS_EXECUTED_IN (bb)
	      || !(ALWAYS_EXECUTED_IN (bb) == level
		   || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
	gsi_insert_seq_on_edge (e, rewrite_to_defined_overflow (stmt));
      else
	gsi_insert_on_edge (e, stmt);
    }
}
/* Hoist the statements out of the loops prescribed by data stored in
   LIM_DATA structures associated with each statement.  */

static unsigned int
move_computations (void)
{
  move_computations_dom_walker walker (CDI_DOMINATORS);
  walker.walk (cfun->cfg->x_entry_block_ptr);

  gsi_commit_edge_inserts ();
  if (need_ssa_update_p (cfun))
    rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);

  return walker.todo_;
}
/* Checks whether the statement defining variable *INDEX can be hoisted
   out of the loop passed in DATA.  Callback for for_each_index.  */

static bool
may_move_till (tree ref, tree *index, void *data)
{
  struct loop *loop = (struct loop *) data, *max_loop;

  /* If REF is an array reference, check also that the step and the lower
     bound are invariant in LOOP.  */
  if (TREE_CODE (ref) == ARRAY_REF)
    {
      tree step = TREE_OPERAND (ref, 3);
      tree lbound = TREE_OPERAND (ref, 2);

      max_loop = outermost_invariant_loop (step, loop);
      if (!max_loop)
	return false;

      max_loop = outermost_invariant_loop (lbound, loop);
      if (!max_loop)
	return false;
    }

  max_loop = outermost_invariant_loop (*index, loop);
  if (!max_loop)
    return false;

  return true;
}
/* If OP is an SSA NAME, force the statement that defines it to be
   moved out of the LOOP.  ORIG_LOOP is the loop in which EXPR is used.  */

static void
force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
{
  gimple stmt;

  if (!op
      || is_gimple_min_invariant (op))
    return;

  gcc_assert (TREE_CODE (op) == SSA_NAME);

  stmt = SSA_NAME_DEF_STMT (op);
  if (gimple_nop_p (stmt))
    return;

  set_level (stmt, orig_loop, loop);
}
/* Forces statements defining invariants in REF (and *INDEX) to be moved out
   of the LOOP.  The reference REF is used in the loop ORIG_LOOP.  Callback
   for for_each_index.  */

struct fmt_data
{
  struct loop *loop;
  struct loop *orig_loop;
};

static bool
force_move_till (tree ref, tree *index, void *data)
{
  struct fmt_data *fmt_data = (struct fmt_data *) data;

  if (TREE_CODE (ref) == ARRAY_REF)
    {
      tree step = TREE_OPERAND (ref, 3);
      tree lbound = TREE_OPERAND (ref, 2);

      force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
      force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
    }

  force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);

  return true;
}
/* A function to free the mem_ref object OBJ.  */

static void
memref_free (struct im_mem_ref *mem)
{
  mem->accesses_in_loop.release ();
}
/* Allocates and returns a memory reference description for MEM whose hash
   value is HASH and id is ID.  */

static mem_ref_p
mem_ref_alloc (tree mem, unsigned hash, unsigned id)
{
  mem_ref_p ref = XOBNEW (&mem_ref_obstack, struct im_mem_ref);
  ao_ref_init (&ref->mem, mem);
  ref->id = id;
  ref->hash = hash;
  ref->stored = NULL;
  bitmap_initialize (&ref->indep_loop, &lim_bitmap_obstack);
  bitmap_initialize (&ref->dep_loop, &lim_bitmap_obstack);
  ref->accesses_in_loop.create (1);

  return ref;
}
/* Records memory reference location *LOC in LOOP to the memory reference
   description REF.  The reference occurs in statement STMT.  */

static void
record_mem_ref_loc (mem_ref_p ref, gimple stmt, tree *loc)
{
  mem_ref_loc aref;
  aref.stmt = stmt;
  aref.ref = loc;
  ref->accesses_in_loop.safe_push (aref);
}
/* Set the LOOP bit in REF's stored bitmap and allocate that if
   necessary.  Return whether a bit was changed.  */

static bool
set_ref_stored_in_loop (mem_ref_p ref, struct loop *loop)
{
  if (!ref->stored)
    ref->stored = BITMAP_ALLOC (&lim_bitmap_obstack);
  return bitmap_set_bit (ref->stored, loop->num);
}
/* Marks reference REF as stored in LOOP.  */

static void
mark_ref_stored (mem_ref_p ref, struct loop *loop)
{
  while (loop != current_loops->tree_root
	 && set_ref_stored_in_loop (ref, loop))
    loop = loop_outer (loop);
}
/* Gathers memory references in statement STMT in LOOP, storing the
   information about them in the memory_accesses structure.  Marks
   the vops accessed through unrecognized statements there as
   well.  */

static void
gather_mem_refs_stmt (struct loop *loop, gimple stmt)
{
  tree *mem = NULL;
  hashval_t hash;
  im_mem_ref **slot;
  mem_ref_p ref;
  bool is_stored;
  unsigned id;

  if (!gimple_vuse (stmt))
    return;

  mem = simple_mem_ref_in_stmt (stmt, &is_stored);
  if (!mem)
    {
      /* We use the shared mem_ref for all unanalyzable refs.  */
      id = UNANALYZABLE_MEM_ID;
      ref = memory_accesses.refs_list[id];
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	}
      is_stored = gimple_vdef (stmt);
    }
  else
    {
      hash = iterative_hash_expr (*mem, 0);
      slot = memory_accesses.refs->find_slot_with_hash (*mem, hash, INSERT);
      if (*slot)
	{
	  ref = (mem_ref_p) *slot;
	  id = ref->id;
	}
      else
	{
	  id = memory_accesses.refs_list.length ();
	  ref = mem_ref_alloc (*mem, hash, id);
	  memory_accesses.refs_list.safe_push (ref);
	  *slot = ref;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Memory reference %u: ", id);
	      print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM);
	      fprintf (dump_file, "\n");
	    }
	}

      record_mem_ref_loc (ref, stmt, mem);
    }
  bitmap_set_bit (&memory_accesses.refs_in_loop[loop->num], ref->id);
  if (is_stored)
    {
      bitmap_set_bit (&memory_accesses.refs_stored_in_loop[loop->num],
		      ref->id);
      mark_ref_stored (ref, loop);
    }
}
static unsigned *bb_loop_postorder;

/* qsort sort function to sort blocks after their loop fathers postorder.  */

static int
sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_)
{
  basic_block bb1 = *(basic_block *) const_cast<void *> (bb1_);
  basic_block bb2 = *(basic_block *) const_cast<void *> (bb2_);
  struct loop *loop1 = bb1->loop_father;
  struct loop *loop2 = bb2->loop_father;
  if (loop1->num == loop2->num)
    return 0;
  return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
}

/* qsort sort function to sort ref locs after their loop fathers postorder.  */

static int
sort_locs_in_loop_postorder_cmp (const void *loc1_, const void *loc2_)
{
  mem_ref_loc *loc1 = (mem_ref_loc *) const_cast<void *> (loc1_);
  mem_ref_loc *loc2 = (mem_ref_loc *) const_cast<void *> (loc2_);
  struct loop *loop1 = gimple_bb (loc1->stmt)->loop_father;
  struct loop *loop2 = gimple_bb (loc2->stmt)->loop_father;
  if (loop1->num == loop2->num)
    return 0;
  return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
}
/* Gathers memory references in loops.  */

static void
analyze_memory_references (void)
{
  gimple_stmt_iterator bsi;
  basic_block bb, *bbs;
  struct loop *loop, *outer;
  mem_ref_p ref;
  unsigned i, n;

  /* Collect all basic-blocks in loops and sort them after their
     loops postorder.  */
  i = 0;
  bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
  FOR_EACH_BB_FN (bb, cfun)
    if (bb->loop_father != current_loops->tree_root)
      bbs[i++] = bb;
  n = i;
  qsort (bbs, n, sizeof (basic_block), sort_bbs_in_loop_postorder_cmp);

  /* Visit blocks in loop postorder and assign mem-ref IDs in that order.
     That results in better locality for all the bitmaps.  */
  for (i = 0; i < n; ++i)
    {
      basic_block bb = bbs[i];
      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	gather_mem_refs_stmt (bb->loop_father, gsi_stmt (bsi));
    }

  /* Sort the location list of gathered memory references after their
     loop postorder number.  */
  FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
    ref->accesses_in_loop.qsort (sort_locs_in_loop_postorder_cmp);

  free (bbs);
//  free (bb_loop_postorder);

  /* Propagate the information about accessed memory references up
     the loop hierarchy.  */
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      /* Finalize the overall touched references (including subloops).  */
      bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
		       &memory_accesses.refs_stored_in_loop[loop->num]);

      /* Propagate the information about accessed memory references up
	 the loop hierarchy.  */
      outer = loop_outer (loop);
      if (outer == current_loops->tree_root)
	continue;

      bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[outer->num],
		       &memory_accesses.all_refs_stored_in_loop[loop->num]);
    }
}
/* Returns true if MEM1 and MEM2 may alias.  TTAE_CACHE is used as a cache in
   tree_to_aff_combination_expand.  */

static bool
mem_refs_may_alias_p (mem_ref_p mem1, mem_ref_p mem2,
		      hash_map<tree, name_expansion *> **ttae_cache)
{
  /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
     object and their offsets differ in such a way that the locations cannot
     overlap, then they cannot alias.  */
  widest_int size1, size2;
  aff_tree off1, off2;

  /* Perform basic offset and type-based disambiguation.  */
  if (!refs_may_alias_p_1 (&mem1->mem, &mem2->mem, true))
    return false;

  /* The expansion of addresses may be a bit expensive, thus we only do
     the check at -O2 and higher optimization levels.  */
  if (optimize < 2)
    return true;

  get_inner_reference_aff (mem1->mem.ref, &off1, &size1);
  get_inner_reference_aff (mem2->mem.ref, &off2, &size2);
  aff_combination_expand (&off1, ttae_cache);
  aff_combination_expand (&off2, ttae_cache);
  aff_combination_scale (&off1, -1);
  aff_combination_add (&off2, &off1);

  if (aff_comb_cannot_overlap_p (&off2, size1, size2))
    return false;

  return true;
}
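
/* For example (illustrative): for accesses a[i] and a[i + 1] with a
   4-byte element type, the expanded affine offsets are BASE + 4*i and
   BASE + 4*i + 4, each of size 4; their difference is the constant 4,
   which is at least the access size, so aff_comb_cannot_overlap_p can
   disambiguate the two references even though they share a base.  */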
/* Compare function for bsearch searching for reference locations
   in a loop.  */

static int
find_ref_loc_in_loop_cmp (const void *loop_, const void *loc_)
{
  struct loop *loop = (struct loop *) const_cast<void *> (loop_);
  mem_ref_loc *loc = (mem_ref_loc *) const_cast<void *> (loc_);
  struct loop *loc_loop = gimple_bb (loc->stmt)->loop_father;
  if (loop->num == loc_loop->num
      || flow_loop_nested_p (loop, loc_loop))
    return 0;
  return (bb_loop_postorder[loop->num] < bb_loop_postorder[loc_loop->num]
	  ? -1 : 1);
}
1698 /* Iterates over all locations of REF in LOOP and its subloops calling
1699 fn.operator() with the location as argument. When that operator
1700 returns true the iteration is stopped and true is returned.
1701 Otherwise false is returned. */
1703 template <typename FN
>
1705 for_all_locs_in_loop (struct loop
*loop
, mem_ref_p ref
, FN fn
)
1710 /* Search for the cluster of locs in the accesses_in_loop vector
1711 which is sorted after postorder index of the loop father. */
1712 loc
= ref
->accesses_in_loop
.bsearch (loop
, find_ref_loc_in_loop_cmp
);
1716 /* We have found one location inside loop or its sub-loops. Iterate
1717 both forward and backward to cover the whole cluster. */
1718 i
= loc
- ref
->accesses_in_loop
.address ();
1722 mem_ref_loc_p l
= &ref
->accesses_in_loop
[i
];
1723 if (!flow_bb_inside_loop_p (loop
, gimple_bb (l
->stmt
)))
1728 for (i
= loc
- ref
->accesses_in_loop
.address ();
1729 i
< ref
->accesses_in_loop
.length (); ++i
)
1731 mem_ref_loc_p l
= &ref
->accesses_in_loop
[i
];
1732 if (!flow_bb_inside_loop_p (loop
, gimple_bb (l
->stmt
)))
1741 /* Rewrites location LOC by TMP_VAR. */
1743 struct rewrite_mem_ref_loc
1745 rewrite_mem_ref_loc (tree tmp_var_
) : tmp_var (tmp_var_
) {}
1746 bool operator () (mem_ref_loc_p loc
);
1751 rewrite_mem_ref_loc::operator () (mem_ref_loc_p loc
)
1753 *loc
->ref
= tmp_var
;
1754 update_stmt (loc
->stmt
);
1758 /* Rewrites all references to REF in LOOP by variable TMP_VAR. */
1761 rewrite_mem_refs (struct loop
*loop
, mem_ref_p ref
, tree tmp_var
)
1763 for_all_locs_in_loop (loop
, ref
, rewrite_mem_ref_loc (tmp_var
));
1766 /* Stores the first reference location in LOCP. */
1768 struct first_mem_ref_loc_1
1770 first_mem_ref_loc_1 (mem_ref_loc_p
*locp_
) : locp (locp_
) {}
1771 bool operator () (mem_ref_loc_p loc
);
1772 mem_ref_loc_p
*locp
;
1776 first_mem_ref_loc_1::operator () (mem_ref_loc_p loc
)
1782 /* Returns the first reference location to REF in LOOP. */
1784 static mem_ref_loc_p
1785 first_mem_ref_loc (struct loop
*loop
, mem_ref_p ref
)
1787 mem_ref_loc_p locp
= NULL
;
1788 for_all_locs_in_loop (loop
, ref
, first_mem_ref_loc_1 (&locp
));
1792 struct prev_flag_edges
{
1793 /* Edge to insert new flag comparison code. */
1794 edge append_cond_position
;
1796 /* Edge for fall through from previous flag comparison. */
1797 edge last_cond_fallthru
;
1800 /* Helper function for execute_sm. Emit code to store TMP_VAR into
1803 The store is only done if MEM has changed. We do this so no
1804 changes to MEM occur on code paths that did not originally store
1807 The common case for execute_sm will transform:
1827 This function will generate:
1846 execute_sm_if_changed (edge ex
, tree mem
, tree tmp_var
, tree flag
)
1848 basic_block new_bb
, then_bb
, old_dest
;
1849 bool loop_has_only_one_exit
;
1850 edge then_old_edge
, orig_ex
= ex
;
1851 gimple_stmt_iterator gsi
;
1853 struct prev_flag_edges
*prev_edges
= (struct prev_flag_edges
*) ex
->aux
;
1854 bool irr
= ex
->flags
& EDGE_IRREDUCIBLE_LOOP
;
1856 /* ?? Insert store after previous store if applicable. See note
1859 ex
= prev_edges
->append_cond_position
;
1861 loop_has_only_one_exit
= single_pred_p (ex
->dest
);
1863 if (loop_has_only_one_exit
)
1864 ex
= split_block_after_labels (ex
->dest
);
1866 old_dest
= ex
->dest
;
1867 new_bb
= split_edge (ex
);
1868 then_bb
= create_empty_bb (new_bb
);
1870 then_bb
->flags
= BB_IRREDUCIBLE_LOOP
;
1871 add_bb_to_loop (then_bb
, new_bb
->loop_father
);
1873 gsi
= gsi_start_bb (new_bb
);
1874 stmt
= gimple_build_cond (NE_EXPR
, flag
, boolean_false_node
,
1875 NULL_TREE
, NULL_TREE
);
1876 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
1878 gsi
= gsi_start_bb (then_bb
);
1879 /* Insert actual store. */
1880 stmt
= gimple_build_assign (unshare_expr (mem
), tmp_var
);
1881 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
1883 make_edge (new_bb
, then_bb
,
1884 EDGE_TRUE_VALUE
| (irr
? EDGE_IRREDUCIBLE_LOOP
: 0));
1885 make_edge (new_bb
, old_dest
,
1886 EDGE_FALSE_VALUE
| (irr
? EDGE_IRREDUCIBLE_LOOP
: 0));
1887 then_old_edge
= make_edge (then_bb
, old_dest
,
1888 EDGE_FALLTHRU
| (irr
? EDGE_IRREDUCIBLE_LOOP
: 0));
1890 set_immediate_dominator (CDI_DOMINATORS
, then_bb
, new_bb
);
1894 basic_block prevbb
= prev_edges
->last_cond_fallthru
->src
;
1895 redirect_edge_succ (prev_edges
->last_cond_fallthru
, new_bb
);
1896 set_immediate_dominator (CDI_DOMINATORS
, new_bb
, prevbb
);
1897 set_immediate_dominator (CDI_DOMINATORS
, old_dest
,
1898 recompute_dominator (CDI_DOMINATORS
, old_dest
));
1901 /* ?? Because stores may alias, they must happen in the exact
1902 sequence they originally happened. Save the position right after
1903 the (_lsm) store we just created so we can continue appending after
1904 it and maintain the original order. */
1906 struct prev_flag_edges
*p
;
1909 orig_ex
->aux
= NULL
;
1910 alloc_aux_for_edge (orig_ex
, sizeof (struct prev_flag_edges
));
1911 p
= (struct prev_flag_edges
*) orig_ex
->aux
;
1912 p
->append_cond_position
= then_old_edge
;
1913 p
->last_cond_fallthru
= find_edge (new_bb
, old_dest
);
1914 orig_ex
->aux
= (void *) p
;
1917 if (!loop_has_only_one_exit
)
1918 for (gphi_iterator gpi
= gsi_start_phis (old_dest
);
1919 !gsi_end_p (gpi
); gsi_next (&gpi
))
1921 gphi
*phi
= gpi
.phi ();
1924 for (i
= 0; i
< gimple_phi_num_args (phi
); i
++)
1925 if (gimple_phi_arg_edge (phi
, i
)->src
== new_bb
)
1927 tree arg
= gimple_phi_arg_def (phi
, i
);
1928 add_phi_arg (phi
, arg
, then_old_edge
, UNKNOWN_LOCATION
);
1932 /* Remove the original fall through edge. This was the
1933 single_succ_edge (new_bb). */
1934 EDGE_SUCC (new_bb
, 0)->flags
&= ~EDGE_FALLTHRU
;
1937 /* When REF is set on the location, set flag indicating the store. */
1939 struct sm_set_flag_if_changed
1941 sm_set_flag_if_changed (tree flag_
) : flag (flag_
) {}
1942 bool operator () (mem_ref_loc_p loc
);
1947 sm_set_flag_if_changed::operator () (mem_ref_loc_p loc
)
1949 /* Only set the flag for writes. */
1950 if (is_gimple_assign (loc
->stmt
)
1951 && gimple_assign_lhs_ptr (loc
->stmt
) == loc
->ref
)
1953 gimple_stmt_iterator gsi
= gsi_for_stmt (loc
->stmt
);
1954 gimple stmt
= gimple_build_assign (flag
, boolean_true_node
);
1955 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
1960 /* Helper function for execute_sm. On every location where REF is
1961 set, set an appropriate flag indicating the store. */
1964 execute_sm_if_changed_flag_set (struct loop
*loop
, mem_ref_p ref
)
1967 char *str
= get_lsm_tmp_name (ref
->mem
.ref
, ~0, "_flag");
1968 flag
= create_tmp_reg (boolean_type_node
, str
);
1969 for_all_locs_in_loop (loop
, ref
, sm_set_flag_if_changed (flag
));
1973 /* Executes store motion of memory reference REF from LOOP.
1974 Exits from the LOOP are stored in EXITS. The initialization of the
1975 temporary variable is put to the preheader of the loop, and assignments
1976 to the reference from the temporary variable are emitted to exits. */
1979 execute_sm (struct loop
*loop
, vec
<edge
> exits
, mem_ref_p ref
)
1981 tree tmp_var
, store_flag
= NULL_TREE
;
1984 struct fmt_data fmt_data
;
1986 struct lim_aux_data
*lim_data
;
1987 bool multi_threaded_model_p
= false;
1988 gimple_stmt_iterator gsi
;
1990 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1992 fprintf (dump_file
, "Executing store motion of ");
1993 print_generic_expr (dump_file
, ref
->mem
.ref
, 0);
1994 fprintf (dump_file
, " from loop %d\n", loop
->num
);
1997 tmp_var
= create_tmp_reg (TREE_TYPE (ref
->mem
.ref
),
1998 get_lsm_tmp_name (ref
->mem
.ref
, ~0));
2000 fmt_data
.loop
= loop
;
2001 fmt_data
.orig_loop
= loop
;
2002 for_each_index (&ref
->mem
.ref
, force_move_till
, &fmt_data
);
2004 if (bb_in_transaction (loop_preheader_edge (loop
)->src
)
2005 || !PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES
))
2006 multi_threaded_model_p
= true;
2008 if (multi_threaded_model_p
)
2009 store_flag
= execute_sm_if_changed_flag_set (loop
, ref
);
2011 rewrite_mem_refs (loop
, ref
, tmp_var
);
2013 /* Emit the load code on a random exit edge or into the latch if
2014 the loop does not exit, so that we are sure it will be processed
2015 by move_computations after all dependencies. */
2016 gsi
= gsi_for_stmt (first_mem_ref_loc (loop
, ref
)->stmt
);
2018 /* FIXME/TODO: For the multi-threaded variant, we could avoid this
2019 load altogether, since the store is predicated by a flag. We
2020 could, do the load only if it was originally in the loop. */
2021 load
= gimple_build_assign (tmp_var
, unshare_expr (ref
->mem
.ref
));
2022 lim_data
= init_lim_data (load
);
2023 lim_data
->max_loop
= loop
;
2024 lim_data
->tgt_loop
= loop
;
2025 gsi_insert_before (&gsi
, load
, GSI_SAME_STMT
);
2027 if (multi_threaded_model_p
)
2029 load
= gimple_build_assign (store_flag
, boolean_false_node
);
2030 lim_data
= init_lim_data (load
);
2031 lim_data
->max_loop
= loop
;
2032 lim_data
->tgt_loop
= loop
;
2033 gsi_insert_before (&gsi
, load
, GSI_SAME_STMT
);
2036 /* Sink the store to every exit from the loop. */
2037 FOR_EACH_VEC_ELT (exits
, i
, ex
)
2038 if (!multi_threaded_model_p
)
2041 store
= gimple_build_assign (unshare_expr (ref
->mem
.ref
), tmp_var
);
2042 gsi_insert_on_edge (ex
, store
);
2045 execute_sm_if_changed (ex
, ref
->mem
.ref
, tmp_var
, store_flag
);
2048 /* Hoists memory references MEM_REFS out of LOOP. EXITS is the list of exit
2049 edges of the LOOP. */
2052 hoist_memory_references (struct loop
*loop
, bitmap mem_refs
,
2059 EXECUTE_IF_SET_IN_BITMAP (mem_refs
, 0, i
, bi
)
2061 ref
= memory_accesses
.refs_list
[i
];
2062 execute_sm (loop
, exits
, ref
);
2066 struct ref_always_accessed
2068 ref_always_accessed (struct loop
*loop_
, bool stored_p_
)
2069 : loop (loop_
), stored_p (stored_p_
) {}
2070 bool operator () (mem_ref_loc_p loc
);
2076 ref_always_accessed::operator () (mem_ref_loc_p loc
)
2078 struct loop
*must_exec
;
2080 if (!get_lim_data (loc
->stmt
))
2083 /* If we require an always executed store make sure the statement
2084 stores to the reference. */
2087 tree lhs
= gimple_get_lhs (loc
->stmt
);
2089 || lhs
!= *loc
->ref
)
2093 must_exec
= get_lim_data (loc
->stmt
)->always_executed_in
;
2097 if (must_exec
== loop
2098 || flow_loop_nested_p (must_exec
, loop
))
2104 /* Returns true if REF is always accessed in LOOP. If STORED_P is true
2105 make sure REF is always stored to in LOOP. */
2108 ref_always_accessed_p (struct loop
*loop
, mem_ref_p ref
, bool stored_p
)
2110 return for_all_locs_in_loop (loop
, ref
,
2111 ref_always_accessed (loop
, stored_p
));
2114 /* Returns true if REF1 and REF2 are independent. */
2117 refs_independent_p (mem_ref_p ref1
, mem_ref_p ref2
)
2122 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2123 fprintf (dump_file
, "Querying dependency of refs %u and %u: ",
2124 ref1
->id
, ref2
->id
);
2126 if (mem_refs_may_alias_p (ref1
, ref2
, &memory_accesses
.ttae_cache
))
2128 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2129 fprintf (dump_file
, "dependent.\n");
2134 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2135 fprintf (dump_file
, "independent.\n");
2140 /* Mark REF dependent on stores or loads (according to STORED_P) in LOOP
2141 and its super-loops. */
2144 record_dep_loop (struct loop
*loop
, mem_ref_p ref
, bool stored_p
)
2146 /* We can propagate dependent-in-loop bits up the loop
2147 hierarchy to all outer loops. */
2148 while (loop
!= current_loops
->tree_root
2149 && bitmap_set_bit (&ref
->dep_loop
, LOOP_DEP_BIT (loop
->num
, stored_p
)))
2150 loop
= loop_outer (loop
);
2153 /* Returns true if REF is independent on all other memory references in
2157 ref_indep_loop_p_1 (struct loop
*loop
, mem_ref_p ref
, bool stored_p
)
2159 bitmap refs_to_check
;
2165 refs_to_check
= &memory_accesses
.refs_in_loop
[loop
->num
];
2167 refs_to_check
= &memory_accesses
.refs_stored_in_loop
[loop
->num
];
2169 if (bitmap_bit_p (refs_to_check
, UNANALYZABLE_MEM_ID
))
2172 EXECUTE_IF_SET_IN_BITMAP (refs_to_check
, 0, i
, bi
)
2174 aref
= memory_accesses
.refs_list
[i
];
2175 if (!refs_independent_p (ref
, aref
))
/* Returns true if REF is independent of all other memory references in
   LOOP.  Wrapper over ref_indep_loop_p_1, caching its results.  */

static bool
ref_indep_loop_p_2 (struct loop *loop, mem_ref_p ref, bool stored_p)
{
  stored_p |= (ref->stored && bitmap_bit_p (ref->stored, loop->num));

  if (bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
    return true;
  if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
    return false;

  struct loop *inner = loop->inner;
  while (inner)
    {
      if (!ref_indep_loop_p_2 (inner, ref, stored_p))
	return false;
      inner = inner->next;
    }

  bool indep_p = ref_indep_loop_p_1 (loop, ref, stored_p);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
	     ref->id, loop->num, indep_p ? "independent" : "dependent");

  /* Record the computed result in the cache.  */
  if (indep_p)
    {
      if (bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p))
	  && stored_p)
	{
	  /* If it's independent against all refs, then it's independent
	     against stores, too.  */
	  bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, false));
	}
    }
  else
    {
      record_dep_loop (loop, ref, stored_p);
      if (!stored_p)
	{
	  /* If it's dependent against stores, it's dependent against
	     all refs, too.  */
	  record_dep_loop (loop, ref, true);
	}
    }

  return indep_p;
}
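/* The cache thus has three states per (loop, stored_p) pair: neither bit
   set (not yet computed), the indep_loop bit set (known independent), or
   the dep_loop bit set (known dependent).  The extra updates above keep
   the cache consistent: independence against all refs implies
   independence against stores alone, and dependence against stores alone
   implies dependence against all refs.  */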
/* Returns true if REF is independent of all other memory references in
   LOOP.  */

static bool
ref_indep_loop_p (struct loop *loop, mem_ref_p ref)
{
  gcc_checking_assert (MEM_ANALYZABLE (ref));

  return ref_indep_loop_p_2 (loop, ref, false);
}
/* Returns true if we can perform store motion of REF from LOOP.  */

static bool
can_sm_ref_p (struct loop *loop, mem_ref_p ref)
{
  tree base;

  /* Can't hoist unanalyzable refs.  */
  if (!MEM_ANALYZABLE (ref))
    return false;

  /* It should be movable.  */
  if (!is_gimple_reg_type (TREE_TYPE (ref->mem.ref))
      || TREE_THIS_VOLATILE (ref->mem.ref)
      || !for_each_index (&ref->mem.ref, may_move_till, loop))
    return false;

  /* If it can throw, fail; we do not properly update EH info.  */
  if (tree_could_throw_p (ref->mem.ref))
    return false;

  /* If it can trap, it must be always executed in LOOP.
     Readonly memory locations may trap when storing to them, but
     tree_could_trap_p is a predicate for rvalues, so check that
     explicitly.  */
  base = get_base_address (ref->mem.ref);
  if ((tree_could_trap_p (ref->mem.ref)
       || (DECL_P (base) && TREE_READONLY (base)))
      && !ref_always_accessed_p (loop, ref, true))
    return false;

  /* And it must be independent of all other memory references
     in LOOP.  */
  if (!ref_indep_loop_p (loop, ref))
    return false;

  return true;
}
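/* As an example of the trap check above (hypothetical input): in

     for (i = 0; i < n; i++)
       if (cond)
	 *p = i;

   the store is not executed on every iteration, so if *p may trap,
   moving the store to the loop exits could introduce a trap on a path
   that never stored before; ref_always_accessed_p guards against
   this.  */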
/* Marks in REFS_TO_SM the references in LOOP for which store motion
   should be performed.  SM_EXECUTED is the set of references for which
   store motion was performed in one of the outer loops.  */

static void
find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
{
  bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
  unsigned i;
  bitmap_iterator bi;
  mem_ref_p ref;

  EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
    {
      ref = memory_accesses.refs_list[i];
      if (can_sm_ref_p (loop, ref))
	bitmap_set_bit (refs_to_sm, i);
    }
}
/* Checks whether LOOP (with exits stored in the EXITS array) is suitable
   for a store motion optimization, i.e. whether we can insert statements
   on its exits.  */

static bool
loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
		      vec<edge> exits)
{
  unsigned i;
  edge ex;

  FOR_EACH_VEC_ELT (exits, i, ex)
    if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
      return false;

  return true;
}
/* Try to perform store motion for all memory references modified inside
   LOOP.  SM_EXECUTED is the bitmap of the memory references for which
   store motion was executed in one of the outer loops.  */

static void
store_motion_loop (struct loop *loop, bitmap sm_executed)
{
  vec<edge> exits = get_loop_exit_edges (loop);
  struct loop *subloop;
  bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);

  if (loop_suitable_for_sm (loop, exits))
    {
      find_refs_for_sm (loop, sm_executed, sm_in_loop);
      hoist_memory_references (loop, sm_in_loop, exits);
    }
  exits.release ();

  bitmap_ior_into (sm_executed, sm_in_loop);
  for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
    store_motion_loop (subloop, sm_executed);
  bitmap_and_compl_into (sm_executed, sm_in_loop);
  BITMAP_FREE (sm_in_loop);
}
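/* Note the bookkeeping above: SM_IN_LOOP is merged into SM_EXECUTED
   before recursing, so the subloops skip references already handled
   here, and subtracted again afterwards.  SM_EXECUTED therefore acts as
   a stack of the references handled in the enclosing loops.  */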
/* Try to perform store motion for all memory references modified inside
   loops.  */

static void
store_motion (void)
{
  struct loop *loop;
  bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);

  for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
    store_motion_loop (loop, sm_executed);

  BITMAP_FREE (sm_executed);
  gsi_commit_edge_inserts ();
}
/* Fills ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e.
   for each such basic block bb records the outermost loop for which
   execution of its header implies execution of bb.  CONTAINS_CALL is the
   bitmap of blocks that contain a nonpure call.  */

static void
fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
{
  basic_block bb = NULL, *bbs, last = NULL;
  unsigned i;
  edge e;
  struct loop *inn_loop = loop;

  if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
    {
      bbs = get_loop_body_in_dom_order (loop);

      for (i = 0; i < loop->num_nodes; i++)
	{
	  edge_iterator ei;
	  bb = bbs[i];

	  if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
	    last = bb;

	  if (bitmap_bit_p (contains_call, bb->index))
	    break;

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    if (!flow_bb_inside_loop_p (loop, e->dest))
	      break;
	  if (e)
	    break;

	  /* A loop might be infinite (TODO use simple loop analysis
	     to disprove this if possible).  */
	  if (bb->flags & BB_IRREDUCIBLE_LOOP)
	    break;

	  if (!flow_bb_inside_loop_p (inn_loop, bb))
	    break;

	  if (bb->loop_father->header == bb)
	    {
	      if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
		break;

	      /* In a loop that is always entered we may proceed anyway.
		 But record that we entered it and stop once we leave it.  */
	      inn_loop = bb->loop_father;
	    }
	}

      while (1)
	{
	  SET_ALWAYS_EXECUTED_IN (last, loop);
	  if (last == loop->header)
	    break;
	  last = get_immediate_dominator (CDI_DOMINATORS, last);
	}

      free (bbs);
    }

  for (loop = loop->inner; loop; loop = loop->next)
    fill_always_executed_in_1 (loop, contains_call);
}
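/* An illustration (hypothetical CFG): in

     for (;;)
       {
	 if (cond)    <-- always executed, dominates the latch
	   a = f ();  <-- executed only when cond holds
	 b++;         <-- always executed, dominates the latch
       }

   the header, the condition block and the block incrementing b get
   ALWAYS_EXECUTED_IN set to the loop, while the conditional arm does
   not; statements there may trap and hence may not be hoisted.  */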
/* Fills ALWAYS_EXECUTED_IN information for all basic blocks, i.e.
   for each basic block bb records the outermost loop for which execution
   of its header implies execution of bb.  */

static void
fill_always_executed_in (void)
{
  basic_block bb;
  struct loop *loop;

  sbitmap contains_call = sbitmap_alloc (last_basic_block_for_fn (cfun));
  bitmap_clear (contains_call);
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	if (nonpure_call_p (gsi_stmt (gsi)))
	  break;

      if (!gsi_end_p (gsi))
	bitmap_set_bit (contains_call, bb->index);
    }

  for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
    fill_always_executed_in_1 (loop, contains_call);

  sbitmap_free (contains_call);
}
/* Compute the global information needed by the loop invariant motion pass.  */

static void
tree_ssa_lim_initialize (void)
{
  struct loop *loop;
  unsigned i;

  bitmap_obstack_initialize (&lim_bitmap_obstack);
  gcc_obstack_init (&mem_ref_obstack);
  lim_aux_data_map = new hash_map<gimple, lim_aux_data *>;

  if (flag_tm)
    compute_transaction_bits ();

  alloc_aux_for_edges (0);

  memory_accesses.refs = new hash_table<mem_ref_hasher> (100);
  memory_accesses.refs_list.create (100);
  /* Allocate a special, unanalyzable mem-ref with ID zero.  */
  memory_accesses.refs_list.quick_push
    (mem_ref_alloc (error_mark_node, 0, UNANALYZABLE_MEM_ID));

  memory_accesses.refs_in_loop.create (number_of_loops (cfun));
  memory_accesses.refs_in_loop.quick_grow (number_of_loops (cfun));
  memory_accesses.refs_stored_in_loop.create (number_of_loops (cfun));
  memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops (cfun));
  memory_accesses.all_refs_stored_in_loop.create (number_of_loops (cfun));
  memory_accesses.all_refs_stored_in_loop.quick_grow (number_of_loops (cfun));

  for (i = 0; i < number_of_loops (cfun); i++)
    {
      bitmap_initialize (&memory_accesses.refs_in_loop[i],
			 &lim_bitmap_obstack);
      bitmap_initialize (&memory_accesses.refs_stored_in_loop[i],
			 &lim_bitmap_obstack);
      bitmap_initialize (&memory_accesses.all_refs_stored_in_loop[i],
			 &lim_bitmap_obstack);
    }

  memory_accesses.ttae_cache = NULL;

  /* Initialize bb_loop_postorder with a mapping from loop->num to
     its postorder index.  */
  i = 0;
  bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun));
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    bb_loop_postorder[loop->num] = i++;
}
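/* Because FOR_EACH_LOOP with LI_FROM_INNERMOST visits inner loops first,
   an inner loop receives a smaller postorder index than any loop that
   contains it; the memory-access analysis relies on this ordering, e.g.
   when sorting the basic blocks of loop bodies.  */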
/* Cleans up after the invariant motion pass.  */

static void
tree_ssa_lim_finalize (void)
{
  basic_block bb;
  unsigned i;
  mem_ref_p ref;

  free_aux_for_edges ();

  FOR_EACH_BB_FN (bb, cfun)
    SET_ALWAYS_EXECUTED_IN (bb, NULL);

  bitmap_obstack_release (&lim_bitmap_obstack);
  delete lim_aux_data_map;

  delete memory_accesses.refs;
  memory_accesses.refs = NULL;

  FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
    memref_free (ref);
  memory_accesses.refs_list.release ();
  obstack_free (&mem_ref_obstack, NULL);

  memory_accesses.refs_in_loop.release ();
  memory_accesses.refs_stored_in_loop.release ();
  memory_accesses.all_refs_stored_in_loop.release ();

  if (memory_accesses.ttae_cache)
    free_affine_expand_cache (&memory_accesses.ttae_cache);

  free (bb_loop_postorder);
}
/* Moves invariants from loops.  Only "expensive" invariants are moved
   out -- i.e. those that are likely to be a win regardless of the
   register pressure.  */

static unsigned int
tree_ssa_lim (void)
{
  unsigned int todo;

  tree_ssa_lim_initialize ();

  /* Gathers information about memory accesses in the loops.  */
  analyze_memory_references ();

  /* Fills ALWAYS_EXECUTED_IN information for basic blocks.  */
  fill_always_executed_in ();

  /* For each statement determine the outermost loop in which it is
     invariant and the cost of computing the invariant.  */
  invariantness_dom_walker (CDI_DOMINATORS)
    .walk (cfun->cfg->x_entry_block_ptr);

  /* Execute store motion.  Force the necessary invariants to be moved
     out of the loops as well.  */
  store_motion ();

  /* Move the expressions that are expensive enough.  */
  todo = move_computations ();

  tree_ssa_lim_finalize ();

  return todo;
}
/* Loop invariant motion pass.  */

namespace {

const pass_data pass_data_lim =
{
  GIMPLE_PASS, /* type */
  "lim", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_LIM, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lim : public gimple_opt_pass
{
public:
  pass_lim (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lim, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_lim (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_loop_im != 0; }
  virtual unsigned int execute (function *);

}; // class pass_lim

unsigned int
pass_lim::execute (function *fun)
{
  if (number_of_loops (fun) <= 1)
    return 0;

  return tree_ssa_lim ();
}

} // anon namespace

gimple_opt_pass *
make_pass_lim (gcc::context *ctxt)
{
  return new pass_lim (ctxt);
}
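/* The pass is gated on flag_tree_loop_im (-ftree-loop-im, enabled by
   default) and only does work when the function contains at least one
   loop; its decisions can be inspected with -fdump-tree-lim-details.  */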