[official-gcc.git] / gcc / tree-ssa-loop-im.c
1 /* Loop invariant motion.
2 Copyright (C) 2003-2013 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "tm_p.h"
26 #include "basic-block.h"
27 #include "gimple-pretty-print.h"
28 #include "tree-ssa.h"
29 #include "cfgloop.h"
30 #include "domwalk.h"
31 #include "params.h"
32 #include "tree-pass.h"
33 #include "flags.h"
34 #include "hash-table.h"
35 #include "tree-affine.h"
36 #include "pointer-set.h"
37 #include "tree-ssa-propagate.h"
39 /* TODO: Support for predicated code motion. I.e.
41 while (1)
43 if (cond)
45 a = inv;
46 something;
50 Where COND and INV are invariants, but evaluating INV may trap or be
 51 invalid for some other reason if !COND.  This may be transformed to
53 if (cond)
54 a = inv;
55 while (1)
57 if (cond)
58 something;
59 } */
61 /* The auxiliary data kept for each statement. */
63 struct lim_aux_data
 65   struct loop *max_loop;	/* The outermost loop in which the statement
 66 				   is invariant.  */
 68   struct loop *tgt_loop;	/* The loop out of which we want to move the
 69 				   invariant.  */
 71   struct loop *always_executed_in;
 72 				/* The outermost loop for which we are sure
 73 				   the statement is executed if the loop
 74 				   is entered.  */
76 unsigned cost; /* Cost of the computation performed by the
77 statement. */
 79   vec<gimple> depends;		/* Vector of statements that must also be
 80 				   hoisted out of the loop when this statement
81 is hoisted; i.e. those that define the
82 operands of the statement and are inside of
83 the MAX_LOOP loop. */
86 /* Maps statements to their lim_aux_data. */
88 static struct pointer_map_t *lim_aux_data_map;
90 /* Description of a memory reference location. */
92 typedef struct mem_ref_loc
94 tree *ref; /* The reference itself. */
 95   gimple stmt;			/* The statement in which it occurs.  */
96 } *mem_ref_loc_p;
99 /* Description of a memory reference. */
101 typedef struct mem_ref
103 unsigned id; /* ID assigned to the memory reference
104 (its index in memory_accesses.refs_list) */
105 hashval_t hash; /* Its hash value. */
107 /* The memory access itself and associated caching of alias-oracle
108 query meta-data. */
109 ao_ref mem;
 111   bitmap_head stored;		/* The set of loops in which this memory location
112 is stored to. */
113 vec<vec<mem_ref_loc> > accesses_in_loop;
114 /* The locations of the accesses. Vector
115 indexed by the loop number. */
117 /* The following sets are computed on demand. We keep both set and
118 its complement, so that we know whether the information was
119 already computed or not. */
 120   bitmap_head indep_loop;	/* The set of loops in which the memory
 121 				   reference is independent, meaning:
 122 				   If it is stored in the loop, this store
 123 				   is independent of all other loads and
 124 				   stores.
 125 				   If it is only loaded, then it is independent
 126 				   of all stores in the loop.  */
127 bitmap_head dep_loop; /* The complement of INDEP_LOOP. */
128 } *mem_ref_p;
130 /* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first
131 to record (in)dependence against stores in the loop and its subloops, the
132 second to record (in)dependence against all references in the loop
133 and its subloops. */
134 #define LOOP_DEP_BIT(loopnum, storedp) (2 * (loopnum) + (storedp ? 1 : 0))
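/* For instance, for the loop with number 3, LOOP_DEP_BIT (3, false) == 6
   is the bit recording (in)dependence against stores, and
   LOOP_DEP_BIT (3, true) == 7 the bit recording (in)dependence against
   all references in the loop and its subloops.  */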
136 /* Mem_ref hashtable helpers. */
138 struct mem_ref_hasher : typed_noop_remove <mem_ref>
140 typedef mem_ref value_type;
141 typedef tree_node compare_type;
142 static inline hashval_t hash (const value_type *);
143 static inline bool equal (const value_type *, const compare_type *);
146 /* A hash function for struct mem_ref object OBJ. */
148 inline hashval_t
149 mem_ref_hasher::hash (const value_type *mem)
151 return mem->hash;
154 /* An equality function for struct mem_ref object MEM1 with
155 memory reference OBJ2. */
157 inline bool
158 mem_ref_hasher::equal (const value_type *mem1, const compare_type *obj2)
160 return operand_equal_p (mem1->mem.ref, (const_tree) obj2, 0);
164 /* Description of memory accesses in loops. */
166 static struct
168 /* The hash table of memory references accessed in loops. */
169 hash_table <mem_ref_hasher> refs;
171 /* The list of memory references. */
172 vec<mem_ref_p> refs_list;
174 /* The set of memory references accessed in each loop. */
175 vec<bitmap_head> refs_in_loop;
177 /* The set of memory references stored in each loop. */
178 vec<bitmap_head> refs_stored_in_loop;
 180   /* The set of memory references stored in each loop, including subloops.  */
181 vec<bitmap_head> all_refs_stored_in_loop;
183 /* Cache for expanding memory addresses. */
184 struct pointer_map_t *ttae_cache;
185 } memory_accesses;
187 /* Obstack for the bitmaps in the above data structures. */
188 static bitmap_obstack lim_bitmap_obstack;
190 static bool ref_indep_loop_p (struct loop *, mem_ref_p);
192 /* Minimum cost of an expensive expression. */
193 #define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))
195 /* The outermost loop for which execution of the header guarantees that the
196 block will be executed. */
197 #define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
198 #define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))
200 /* ID of the shared unanalyzable mem. */
201 #define UNANALYZABLE_MEM_ID 0
203 /* Whether the reference was analyzable. */
204 #define MEM_ANALYZABLE(REF) ((REF)->id != UNANALYZABLE_MEM_ID)
206 static struct lim_aux_data *
207 init_lim_data (gimple stmt)
209 void **p = pointer_map_insert (lim_aux_data_map, stmt);
211 *p = XCNEW (struct lim_aux_data);
212 return (struct lim_aux_data *) *p;
215 static struct lim_aux_data *
216 get_lim_data (gimple stmt)
218 void **p = pointer_map_contains (lim_aux_data_map, stmt);
219 if (!p)
220 return NULL;
222 return (struct lim_aux_data *) *p;
225 /* Releases the memory occupied by DATA. */
227 static void
228 free_lim_aux_data (struct lim_aux_data *data)
230 data->depends.release ();
231 free (data);
234 static void
235 clear_lim_data (gimple stmt)
237 void **p = pointer_map_contains (lim_aux_data_map, stmt);
238 if (!p)
239 return;
241 free_lim_aux_data ((struct lim_aux_data *) *p);
242 *p = NULL;
246 /* The possibilities of statement movement. */
247 enum move_pos
249 MOVE_IMPOSSIBLE, /* No movement -- side effect expression. */
250 MOVE_PRESERVE_EXECUTION, /* Must not cause the non-executed statement
 251 				   to become executed -- memory accesses, ... */
252 MOVE_POSSIBLE /* Unlimited movement. */
256 /* If it is possible to hoist the statement STMT unconditionally,
257 returns MOVE_POSSIBLE.
258 If it is possible to hoist the statement STMT, but we must avoid making
259 it executed if it would not be executed in the original program (e.g.
260 because it may trap), return MOVE_PRESERVE_EXECUTION.
261 Otherwise return MOVE_IMPOSSIBLE. */
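/* For instance, a simple arithmetic statement such as a_1 = b_2 + c_3 is
   MOVE_POSSIBLE, a load x_1 = *p_2 that might trap is
   MOVE_PRESERVE_EXECUTION, and a store *p_1 = x_2 (it has a virtual
   definition) is MOVE_IMPOSSIBLE.  */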
263 enum move_pos
264 movement_possibility (gimple stmt)
266 tree lhs;
267 enum move_pos ret = MOVE_POSSIBLE;
269 if (flag_unswitch_loops
270 && gimple_code (stmt) == GIMPLE_COND)
272 /* If we perform unswitching, force the operands of the invariant
273 condition to be moved out of the loop. */
274 return MOVE_POSSIBLE;
277 if (gimple_code (stmt) == GIMPLE_PHI
278 && gimple_phi_num_args (stmt) <= 2
279 && !virtual_operand_p (gimple_phi_result (stmt))
280 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
281 return MOVE_POSSIBLE;
283 if (gimple_get_lhs (stmt) == NULL_TREE)
284 return MOVE_IMPOSSIBLE;
286 if (gimple_vdef (stmt))
287 return MOVE_IMPOSSIBLE;
289 if (stmt_ends_bb_p (stmt)
290 || gimple_has_volatile_ops (stmt)
291 || gimple_has_side_effects (stmt)
292 || stmt_could_throw_p (stmt))
293 return MOVE_IMPOSSIBLE;
295 if (is_gimple_call (stmt))
 297       /* While a pure or const call is guaranteed to have no side effects, we
298 cannot move it arbitrarily. Consider code like
300 char *s = something ();
302 while (1)
304 if (s)
305 t = strlen (s);
306 else
307 t = 0;
310 Here the strlen call cannot be moved out of the loop, even though
311 s is invariant. In addition to possibly creating a call with
312 invalid arguments, moving out a function call that is not executed
313 may cause performance regressions in case the call is costly and
314 not executed at all. */
315 ret = MOVE_PRESERVE_EXECUTION;
316 lhs = gimple_call_lhs (stmt);
318 else if (is_gimple_assign (stmt))
319 lhs = gimple_assign_lhs (stmt);
320 else
321 return MOVE_IMPOSSIBLE;
323 if (TREE_CODE (lhs) == SSA_NAME
324 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
325 return MOVE_IMPOSSIBLE;
327 if (TREE_CODE (lhs) != SSA_NAME
328 || gimple_could_trap_p (stmt))
329 return MOVE_PRESERVE_EXECUTION;
331 /* Non local loads in a transaction cannot be hoisted out. Well,
332 unless the load happens on every path out of the loop, but we
333 don't take this into account yet. */
334 if (flag_tm
335 && gimple_in_transaction (stmt)
336 && gimple_assign_single_p (stmt))
338 tree rhs = gimple_assign_rhs1 (stmt);
339 if (DECL_P (rhs) && is_global_var (rhs))
341 if (dump_file)
343 fprintf (dump_file, "Cannot hoist conditional load of ");
344 print_generic_expr (dump_file, rhs, TDF_SLIM);
345 fprintf (dump_file, " because it is in a transaction.\n");
347 return MOVE_IMPOSSIBLE;
351 return ret;
354 /* Suppose that operand DEF is used inside the LOOP. Returns the outermost
 355    loop to which we could move the expression using DEF if it did not have
 356    other operands, i.e. the outermost loop enclosing LOOP in which the value
357 of DEF is invariant. */
359 static struct loop *
360 outermost_invariant_loop (tree def, struct loop *loop)
362 gimple def_stmt;
363 basic_block def_bb;
364 struct loop *max_loop;
365 struct lim_aux_data *lim_data;
367 if (!def)
368 return superloop_at_depth (loop, 1);
370 if (TREE_CODE (def) != SSA_NAME)
372 gcc_assert (is_gimple_min_invariant (def));
373 return superloop_at_depth (loop, 1);
376 def_stmt = SSA_NAME_DEF_STMT (def);
377 def_bb = gimple_bb (def_stmt);
378 if (!def_bb)
379 return superloop_at_depth (loop, 1);
381 max_loop = find_common_loop (loop, def_bb->loop_father);
383 lim_data = get_lim_data (def_stmt);
384 if (lim_data != NULL && lim_data->max_loop != NULL)
385 max_loop = find_common_loop (max_loop,
386 loop_outer (lim_data->max_loop));
387 if (max_loop == loop)
388 return NULL;
389 max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);
391 return max_loop;
394 /* DATA is a structure containing information associated with a statement
395 inside LOOP. DEF is one of the operands of this statement.
 397    Find the outermost loop enclosing LOOP in which the value of DEF is invariant
 398    and record this in the DATA->max_loop field.  If DEF itself is defined inside
 399    this loop as well (i.e. we need to hoist it out of the loop if we want
 400    to hoist the statement represented by DATA), record the statement in which
 401    DEF is defined in the DATA->depends list.  Additionally, if ADD_COST is true,
 402    add the cost of the computation of DEF to DATA->cost.
 404    If DEF is not invariant in LOOP, return false.  Otherwise return true.  */
406 static bool
407 add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
408 bool add_cost)
410 gimple def_stmt = SSA_NAME_DEF_STMT (def);
411 basic_block def_bb = gimple_bb (def_stmt);
412 struct loop *max_loop;
413 struct lim_aux_data *def_data;
415 if (!def_bb)
416 return true;
418 max_loop = outermost_invariant_loop (def, loop);
419 if (!max_loop)
420 return false;
422 if (flow_loop_nested_p (data->max_loop, max_loop))
423 data->max_loop = max_loop;
425 def_data = get_lim_data (def_stmt);
426 if (!def_data)
427 return true;
429 if (add_cost
430 /* Only add the cost if the statement defining DEF is inside LOOP,
431 i.e. if it is likely that by moving the invariants dependent
432 on it, we will be able to avoid creating a new register for
433 it (since it will be only used in these dependent invariants). */
434 && def_bb->loop_father == loop)
435 data->cost += def_data->cost;
437 data->depends.safe_push (def_stmt);
439 return true;
442 /* Returns an estimate for a cost of statement STMT. The values here
443 are just ad-hoc constants, similar to costs for inlining. */
445 static unsigned
446 stmt_cost (gimple stmt)
448 /* Always try to create possibilities for unswitching. */
449 if (gimple_code (stmt) == GIMPLE_COND
450 || gimple_code (stmt) == GIMPLE_PHI)
451 return LIM_EXPENSIVE;
453 /* We should be hoisting calls if possible. */
454 if (is_gimple_call (stmt))
456 tree fndecl;
458 /* Unless the call is a builtin_constant_p; this always folds to a
459 constant, so moving it is useless. */
460 fndecl = gimple_call_fndecl (stmt);
461 if (fndecl
462 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
463 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P)
464 return 0;
466 return LIM_EXPENSIVE;
469 /* Hoisting memory references out should almost surely be a win. */
470 if (gimple_references_memory_p (stmt))
471 return LIM_EXPENSIVE;
473 if (gimple_code (stmt) != GIMPLE_ASSIGN)
474 return 1;
476 switch (gimple_assign_rhs_code (stmt))
478 case MULT_EXPR:
479 case WIDEN_MULT_EXPR:
480 case WIDEN_MULT_PLUS_EXPR:
481 case WIDEN_MULT_MINUS_EXPR:
482 case DOT_PROD_EXPR:
483 case FMA_EXPR:
484 case TRUNC_DIV_EXPR:
485 case CEIL_DIV_EXPR:
486 case FLOOR_DIV_EXPR:
487 case ROUND_DIV_EXPR:
488 case EXACT_DIV_EXPR:
489 case CEIL_MOD_EXPR:
490 case FLOOR_MOD_EXPR:
491 case ROUND_MOD_EXPR:
492 case TRUNC_MOD_EXPR:
493 case RDIV_EXPR:
494 /* Division and multiplication are usually expensive. */
495 return LIM_EXPENSIVE;
497 case LSHIFT_EXPR:
498 case RSHIFT_EXPR:
499 case WIDEN_LSHIFT_EXPR:
500 case LROTATE_EXPR:
501 case RROTATE_EXPR:
502 /* Shifts and rotates are usually expensive. */
503 return LIM_EXPENSIVE;
505 case CONSTRUCTOR:
506 /* Make vector construction cost proportional to the number
507 of elements. */
508 return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));
510 case SSA_NAME:
511 case PAREN_EXPR:
512 /* Whether or not something is wrapped inside a PAREN_EXPR
513 should not change move cost. Nor should an intermediate
514 unpropagated SSA name copy. */
515 return 0;
517 default:
518 return 1;
 522 /* Finds the outermost loop between OUTER and LOOP in which the memory reference
523 REF is independent. If REF is not independent in LOOP, NULL is returned
524 instead. */
526 static struct loop *
527 outermost_indep_loop (struct loop *outer, struct loop *loop, mem_ref_p ref)
529 struct loop *aloop;
531 if (bitmap_bit_p (&ref->stored, loop->num))
532 return NULL;
534 for (aloop = outer;
535 aloop != loop;
536 aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
537 if (!bitmap_bit_p (&ref->stored, aloop->num)
538 && ref_indep_loop_p (aloop, ref))
539 return aloop;
541 if (ref_indep_loop_p (loop, ref))
542 return loop;
543 else
544 return NULL;
547 /* If there is a simple load or store to a memory reference in STMT, returns
548 the location of the memory reference, and sets IS_STORE according to whether
549 it is a store or load. Otherwise, returns NULL. */
551 static tree *
552 simple_mem_ref_in_stmt (gimple stmt, bool *is_store)
554 tree *lhs, *rhs;
556 /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns. */
557 if (!gimple_assign_single_p (stmt))
558 return NULL;
560 lhs = gimple_assign_lhs_ptr (stmt);
561 rhs = gimple_assign_rhs1_ptr (stmt);
563 if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
565 *is_store = false;
566 return rhs;
568 else if (gimple_vdef (stmt)
569 && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
571 *is_store = true;
572 return lhs;
574 else
575 return NULL;
578 /* Returns the memory reference contained in STMT. */
580 static mem_ref_p
581 mem_ref_in_stmt (gimple stmt)
583 bool store;
584 tree *mem = simple_mem_ref_in_stmt (stmt, &store);
585 hashval_t hash;
586 mem_ref_p ref;
588 if (!mem)
589 return NULL;
590 gcc_assert (!store);
592 hash = iterative_hash_expr (*mem, 0);
593 ref = memory_accesses.refs.find_with_hash (*mem, hash);
595 gcc_assert (ref != NULL);
596 return ref;
599 /* From a controlling predicate in DOM determine the arguments from
600 the PHI node PHI that are chosen if the predicate evaluates to
601 true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if
602 they are non-NULL. Returns true if the arguments can be determined,
603 else return false. */
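/* For instance, given a diamond whose predicate block DOM ends in
   "if (cond)" and whose join block contains x_3 = PHI <a_1, b_2>, where
   the edge carrying a_1 is reached through the true edge and the edge
   carrying b_2 through the false edge, *TRUE_ARG_P is set to a_1 and
   *FALSE_ARG_P to b_2.  */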
605 static bool
606 extract_true_false_args_from_phi (basic_block dom, gimple phi,
607 tree *true_arg_p, tree *false_arg_p)
609 basic_block bb = gimple_bb (phi);
610 edge true_edge, false_edge, tem;
611 tree arg0 = NULL_TREE, arg1 = NULL_TREE;
613 /* We have to verify that one edge into the PHI node is dominated
614 by the true edge of the predicate block and the other edge
615 dominated by the false edge. This ensures that the PHI argument
616 we are going to take is completely determined by the path we
617 take from the predicate block.
618 We can only use BB dominance checks below if the destination of
619 the true/false edges are dominated by their edge, thus only
620 have a single predecessor. */
621 extract_true_false_edges_from_block (dom, &true_edge, &false_edge);
622 tem = EDGE_PRED (bb, 0);
623 if (tem == true_edge
624 || (single_pred_p (true_edge->dest)
625 && (tem->src == true_edge->dest
626 || dominated_by_p (CDI_DOMINATORS,
627 tem->src, true_edge->dest))))
628 arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
629 else if (tem == false_edge
630 || (single_pred_p (false_edge->dest)
631 && (tem->src == false_edge->dest
632 || dominated_by_p (CDI_DOMINATORS,
633 tem->src, false_edge->dest))))
634 arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
635 else
636 return false;
637 tem = EDGE_PRED (bb, 1);
638 if (tem == true_edge
639 || (single_pred_p (true_edge->dest)
640 && (tem->src == true_edge->dest
641 || dominated_by_p (CDI_DOMINATORS,
642 tem->src, true_edge->dest))))
643 arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
644 else if (tem == false_edge
645 || (single_pred_p (false_edge->dest)
646 && (tem->src == false_edge->dest
647 || dominated_by_p (CDI_DOMINATORS,
648 tem->src, false_edge->dest))))
649 arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
650 else
651 return false;
652 if (!arg0 || !arg1)
653 return false;
655 if (true_arg_p)
656 *true_arg_p = arg0;
657 if (false_arg_p)
658 *false_arg_p = arg1;
660 return true;
 663 /* Determine the outermost loop to which it is possible to hoist a statement
 664    STMT and store it in LIM_DATA (STMT)->max_loop.  To do this we determine
 665    the outermost loop in which the value computed by STMT is invariant.
 666    If MUST_PRESERVE_EXEC is true, additionally choose a loop such that
 667    we do not change whether STMT is executed.  It also fills in other related
 668    information in LIM_DATA (STMT).
670 The function returns false if STMT cannot be hoisted outside of the loop it
671 is defined in, and true otherwise. */
673 static bool
674 determine_max_movement (gimple stmt, bool must_preserve_exec)
676 basic_block bb = gimple_bb (stmt);
677 struct loop *loop = bb->loop_father;
678 struct loop *level;
679 struct lim_aux_data *lim_data = get_lim_data (stmt);
680 tree val;
681 ssa_op_iter iter;
683 if (must_preserve_exec)
684 level = ALWAYS_EXECUTED_IN (bb);
685 else
686 level = superloop_at_depth (loop, 1);
687 lim_data->max_loop = level;
689 if (gimple_code (stmt) == GIMPLE_PHI)
691 use_operand_p use_p;
692 unsigned min_cost = UINT_MAX;
693 unsigned total_cost = 0;
694 struct lim_aux_data *def_data;
696 /* We will end up promoting dependencies to be unconditionally
697 evaluated. For this reason the PHI cost (and thus the
698 cost we remove from the loop by doing the invariant motion)
699 is that of the cheapest PHI argument dependency chain. */
700 FOR_EACH_PHI_ARG (use_p, stmt, iter, SSA_OP_USE)
702 val = USE_FROM_PTR (use_p);
703 if (TREE_CODE (val) != SSA_NAME)
704 continue;
705 if (!add_dependency (val, lim_data, loop, false))
706 return false;
707 def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
708 if (def_data)
710 min_cost = MIN (min_cost, def_data->cost);
711 total_cost += def_data->cost;
715 lim_data->cost += min_cost;
717 if (gimple_phi_num_args (stmt) > 1)
719 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
720 gimple cond;
721 if (gsi_end_p (gsi_last_bb (dom)))
722 return false;
723 cond = gsi_stmt (gsi_last_bb (dom));
724 if (gimple_code (cond) != GIMPLE_COND)
725 return false;
726 /* Verify that this is an extended form of a diamond and
727 the PHI arguments are completely controlled by the
728 predicate in DOM. */
729 if (!extract_true_false_args_from_phi (dom, stmt, NULL, NULL))
730 return false;
732 /* Fold in dependencies and cost of the condition. */
733 FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
735 if (!add_dependency (val, lim_data, loop, false))
736 return false;
737 def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
738 if (def_data)
739 total_cost += def_data->cost;
742 /* We want to avoid unconditionally executing very expensive
743 operations. As costs for our dependencies cannot be
 744 	 negative, just claim we are not invariant in this case.
745 We also are not sure whether the control-flow inside the
746 loop will vanish. */
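      /* As a numeric illustration (assuming the default value of 20 for the
	 lim-expensive parameter): a PHI whose two argument chains cost 5 and
	 50 gives min_cost == 5 and total_cost == 55; then 55 - 5 >= 40 and
	 55 / 5 > 2, so we give up on making the PHI invariant.  */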
747 if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
748 && !(min_cost != 0
749 && total_cost / min_cost <= 2))
750 return false;
752 /* Assume that the control-flow in the loop will vanish.
753 ??? We should verify this and not artificially increase
754 the cost if that is not the case. */
755 lim_data->cost += stmt_cost (stmt);
758 return true;
760 else
761 FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
762 if (!add_dependency (val, lim_data, loop, true))
763 return false;
765 if (gimple_vuse (stmt))
767 mem_ref_p ref = mem_ref_in_stmt (stmt);
769 if (ref)
771 lim_data->max_loop
772 = outermost_indep_loop (lim_data->max_loop, loop, ref);
773 if (!lim_data->max_loop)
774 return false;
776 else
778 if ((val = gimple_vuse (stmt)) != NULL_TREE)
780 if (!add_dependency (val, lim_data, loop, false))
781 return false;
786 lim_data->cost += stmt_cost (stmt);
788 return true;
791 /* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
792 and that one of the operands of this statement is computed by STMT.
793 Ensure that STMT (together with all the statements that define its
794 operands) is hoisted at least out of the loop LEVEL. */
796 static void
797 set_level (gimple stmt, struct loop *orig_loop, struct loop *level)
799 struct loop *stmt_loop = gimple_bb (stmt)->loop_father;
800 struct lim_aux_data *lim_data;
801 gimple dep_stmt;
802 unsigned i;
804 stmt_loop = find_common_loop (orig_loop, stmt_loop);
805 lim_data = get_lim_data (stmt);
806 if (lim_data != NULL && lim_data->tgt_loop != NULL)
807 stmt_loop = find_common_loop (stmt_loop,
808 loop_outer (lim_data->tgt_loop));
809 if (flow_loop_nested_p (stmt_loop, level))
810 return;
812 gcc_assert (level == lim_data->max_loop
813 || flow_loop_nested_p (lim_data->max_loop, level));
815 lim_data->tgt_loop = level;
816 FOR_EACH_VEC_ELT (lim_data->depends, i, dep_stmt)
817 set_level (dep_stmt, orig_loop, level);
 820 /* Determines an outermost loop from which we want to hoist the statement STMT.
 821    For now we choose the outermost possible loop.  TODO -- use profiling
822 information to set it more sanely. */
824 static void
825 set_profitable_level (gimple stmt)
827 set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
830 /* Returns true if STMT is a call that has side effects. */
832 static bool
833 nonpure_call_p (gimple stmt)
835 if (gimple_code (stmt) != GIMPLE_CALL)
836 return false;
838 return gimple_has_side_effects (stmt);
841 /* Rewrite a/b to a*(1/b). Return the invariant stmt to process. */
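/* As a sketch of the intended effect: inside a loop,

     x = a / b;

   with loop-invariant b is rewritten to

     reciptmp = 1 / b;
     x = a * reciptmp;

   so that the division becomes invariant and can be hoisted, leaving only
   the multiplication in the loop.  */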
843 static gimple
844 rewrite_reciprocal (gimple_stmt_iterator *bsi)
846 gimple stmt, stmt1, stmt2;
847 tree name, lhs, type;
848 tree real_one;
849 gimple_stmt_iterator gsi;
851 stmt = gsi_stmt (*bsi);
852 lhs = gimple_assign_lhs (stmt);
853 type = TREE_TYPE (lhs);
855 real_one = build_one_cst (type);
857 name = make_temp_ssa_name (type, NULL, "reciptmp");
858 stmt1 = gimple_build_assign_with_ops (RDIV_EXPR, name, real_one,
859 gimple_assign_rhs2 (stmt));
861 stmt2 = gimple_build_assign_with_ops (MULT_EXPR, lhs, name,
862 gimple_assign_rhs1 (stmt));
864 /* Replace division stmt with reciprocal and multiply stmts.
865 The multiply stmt is not invariant, so update iterator
866 and avoid rescanning. */
867 gsi = *bsi;
868 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
869 gsi_replace (&gsi, stmt2, true);
871 /* Continue processing with invariant reciprocal statement. */
872 return stmt1;
875 /* Check if the pattern at *BSI is a bittest of the form
876 (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0. */
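/* As a sketch: for a loop-varying A and loop-invariant B, the test

     tmp = A >> B;
     if (tmp & 1)  ...

   becomes

     mask = 1 << B;	<-- invariant, can be hoisted
     if (A & mask)  ...

   replacing a shift executed on every iteration by one performed outside
   the loop.  */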
878 static gimple
879 rewrite_bittest (gimple_stmt_iterator *bsi)
881 gimple stmt, use_stmt, stmt1, stmt2;
882 tree lhs, name, t, a, b;
883 use_operand_p use;
885 stmt = gsi_stmt (*bsi);
886 lhs = gimple_assign_lhs (stmt);
888 /* Verify that the single use of lhs is a comparison against zero. */
889 if (TREE_CODE (lhs) != SSA_NAME
890 || !single_imm_use (lhs, &use, &use_stmt)
891 || gimple_code (use_stmt) != GIMPLE_COND)
892 return stmt;
893 if (gimple_cond_lhs (use_stmt) != lhs
894 || (gimple_cond_code (use_stmt) != NE_EXPR
895 && gimple_cond_code (use_stmt) != EQ_EXPR)
896 || !integer_zerop (gimple_cond_rhs (use_stmt)))
897 return stmt;
899 /* Get at the operands of the shift. The rhs is TMP1 & 1. */
900 stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
901 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
902 return stmt;
904 /* There is a conversion in between possibly inserted by fold. */
905 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
907 t = gimple_assign_rhs1 (stmt1);
908 if (TREE_CODE (t) != SSA_NAME
909 || !has_single_use (t))
910 return stmt;
911 stmt1 = SSA_NAME_DEF_STMT (t);
912 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
913 return stmt;
916 /* Verify that B is loop invariant but A is not. Verify that with
917 all the stmt walking we are still in the same loop. */
918 if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
919 || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
920 return stmt;
922 a = gimple_assign_rhs1 (stmt1);
923 b = gimple_assign_rhs2 (stmt1);
925 if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
926 && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
928 gimple_stmt_iterator rsi;
930 /* 1 << B */
931 t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
932 build_int_cst (TREE_TYPE (a), 1), b);
933 name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
934 stmt1 = gimple_build_assign (name, t);
936 /* A & (1 << B) */
937 t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
938 name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
939 stmt2 = gimple_build_assign (name, t);
941 /* Replace the SSA_NAME we compare against zero. Adjust
942 the type of zero accordingly. */
943 SET_USE (use, name);
944 gimple_cond_set_rhs (use_stmt, build_int_cst_type (TREE_TYPE (name), 0));
946 /* Don't use gsi_replace here, none of the new assignments sets
947 the variable originally set in stmt. Move bsi to stmt1, and
948 then remove the original stmt, so that we get a chance to
949 retain debug info for it. */
950 rsi = *bsi;
951 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
952 gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
953 gsi_remove (&rsi, true);
955 return stmt1;
958 return stmt;
 961 /* For each statement determines the outermost loop in which it is invariant,
 962    the statements on whose motion it depends and the cost of the computation.
 963    This information is stored in the LIM_DATA structure associated with
 964    each statement.  */
965 class invariantness_dom_walker : public dom_walker
967 public:
968 invariantness_dom_walker (cdi_direction direction)
969 : dom_walker (direction) {}
971 virtual void before_dom_children (basic_block);
 974 /* Determine the outermost loops in which statements in basic block BB are
975 invariant, and record them to the LIM_DATA associated with the statements.
976 Callback for dom_walker. */
978 void
979 invariantness_dom_walker::before_dom_children (basic_block bb)
981 enum move_pos pos;
982 gimple_stmt_iterator bsi;
983 gimple stmt;
984 bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
985 struct loop *outermost = ALWAYS_EXECUTED_IN (bb);
986 struct lim_aux_data *lim_data;
988 if (!loop_outer (bb->loop_father))
989 return;
991 if (dump_file && (dump_flags & TDF_DETAILS))
992 fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
993 bb->index, bb->loop_father->num, loop_depth (bb->loop_father));
 995   /* Look at PHI nodes, but only if there are at most two.
996 ??? We could relax this further by post-processing the inserted
997 code and transforming adjacent cond-exprs with the same predicate
998 to control flow again. */
999 bsi = gsi_start_phis (bb);
1000 if (!gsi_end_p (bsi)
1001 && ((gsi_next (&bsi), gsi_end_p (bsi))
1002 || (gsi_next (&bsi), gsi_end_p (bsi))))
1003 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1005 stmt = gsi_stmt (bsi);
1007 pos = movement_possibility (stmt);
1008 if (pos == MOVE_IMPOSSIBLE)
1009 continue;
1011 lim_data = init_lim_data (stmt);
1012 lim_data->always_executed_in = outermost;
1014 if (!determine_max_movement (stmt, false))
1016 lim_data->max_loop = NULL;
1017 continue;
1020 if (dump_file && (dump_flags & TDF_DETAILS))
1022 print_gimple_stmt (dump_file, stmt, 2, 0);
1023 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1024 loop_depth (lim_data->max_loop),
1025 lim_data->cost);
1028 if (lim_data->cost >= LIM_EXPENSIVE)
1029 set_profitable_level (stmt);
1032 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1034 stmt = gsi_stmt (bsi);
1036 pos = movement_possibility (stmt);
1037 if (pos == MOVE_IMPOSSIBLE)
1039 if (nonpure_call_p (stmt))
1041 maybe_never = true;
1042 outermost = NULL;
1044 /* Make sure to note always_executed_in for stores to make
1045 store-motion work. */
1046 else if (stmt_makes_single_store (stmt))
1048 struct lim_aux_data *lim_data = init_lim_data (stmt);
1049 lim_data->always_executed_in = outermost;
1051 continue;
1054 if (is_gimple_assign (stmt)
1055 && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
1056 == GIMPLE_BINARY_RHS))
1058 tree op0 = gimple_assign_rhs1 (stmt);
1059 tree op1 = gimple_assign_rhs2 (stmt);
1060 struct loop *ol1 = outermost_invariant_loop (op1,
1061 loop_containing_stmt (stmt));
 1063 	  /* If the divisor is invariant, convert a/b to a*(1/b), allowing the
 1064 	     reciprocal to be hoisted out of the loop, saving an expensive divide.  */
1065 if (pos == MOVE_POSSIBLE
1066 && gimple_assign_rhs_code (stmt) == RDIV_EXPR
1067 && flag_unsafe_math_optimizations
1068 && !flag_trapping_math
1069 && ol1 != NULL
1070 && outermost_invariant_loop (op0, ol1) == NULL)
1071 stmt = rewrite_reciprocal (&bsi);
1073 /* If the shift count is invariant, convert (A >> B) & 1 to
1074 A & (1 << B) allowing the bit mask to be hoisted out of the loop
1075 saving an expensive shift. */
1076 if (pos == MOVE_POSSIBLE
1077 && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
1078 && integer_onep (op1)
1079 && TREE_CODE (op0) == SSA_NAME
1080 && has_single_use (op0))
1081 stmt = rewrite_bittest (&bsi);
1084 lim_data = init_lim_data (stmt);
1085 lim_data->always_executed_in = outermost;
1087 if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
1088 continue;
1090 if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
1092 lim_data->max_loop = NULL;
1093 continue;
1096 if (dump_file && (dump_flags & TDF_DETAILS))
1098 print_gimple_stmt (dump_file, stmt, 2, 0);
1099 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1100 loop_depth (lim_data->max_loop),
1101 lim_data->cost);
1104 if (lim_data->cost >= LIM_EXPENSIVE)
1105 set_profitable_level (stmt);
1109 class move_computations_dom_walker : public dom_walker
1111 public:
1112 move_computations_dom_walker (cdi_direction direction)
1113 : dom_walker (direction), todo_ (0) {}
1115 virtual void before_dom_children (basic_block);
1117 unsigned int todo_;
1120 /* Hoist the statements in basic block BB out of the loops prescribed by
1121 data stored in LIM_DATA structures associated with each statement. Callback
1122 for walk_dominator_tree. */
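/* For illustration: a hoistable two-argument PHI such as

     x_1 = PHI <a_2 (true edge), b_3 (false edge)>

   whose arguments are controlled by "if (cond)" in the immediate dominator
   is rewritten below into

     x_1 = cond ? a_2 : b_3;

   and the new statement is inserted on the preheader edge of the target
   loop.  */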
1124 void
1125 move_computations_dom_walker::before_dom_children (basic_block bb)
1127 struct loop *level;
1128 gimple_stmt_iterator bsi;
1129 gimple stmt;
1130 unsigned cost = 0;
1131 struct lim_aux_data *lim_data;
1133 if (!loop_outer (bb->loop_father))
1134 return;
1136 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
1138 gimple new_stmt;
1139 stmt = gsi_stmt (bsi);
1141 lim_data = get_lim_data (stmt);
1142 if (lim_data == NULL)
1144 gsi_next (&bsi);
1145 continue;
1148 cost = lim_data->cost;
1149 level = lim_data->tgt_loop;
1150 clear_lim_data (stmt);
1152 if (!level)
1154 gsi_next (&bsi);
1155 continue;
1158 if (dump_file && (dump_flags & TDF_DETAILS))
1160 fprintf (dump_file, "Moving PHI node\n");
1161 print_gimple_stmt (dump_file, stmt, 0, 0);
1162 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1163 cost, level->num);
1166 if (gimple_phi_num_args (stmt) == 1)
1168 tree arg = PHI_ARG_DEF (stmt, 0);
1169 new_stmt = gimple_build_assign_with_ops (TREE_CODE (arg),
1170 gimple_phi_result (stmt),
1171 arg, NULL_TREE);
1172 SSA_NAME_DEF_STMT (gimple_phi_result (stmt)) = new_stmt;
1174 else
1176 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
1177 gimple cond = gsi_stmt (gsi_last_bb (dom));
1178 tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
1179 /* Get the PHI arguments corresponding to the true and false
1180 edges of COND. */
1181 extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
1182 gcc_assert (arg0 && arg1);
1183 t = build2 (gimple_cond_code (cond), boolean_type_node,
1184 gimple_cond_lhs (cond), gimple_cond_rhs (cond));
1185 new_stmt = gimple_build_assign_with_ops (COND_EXPR,
1186 gimple_phi_result (stmt),
1187 t, arg0, arg1);
1188 SSA_NAME_DEF_STMT (gimple_phi_result (stmt)) = new_stmt;
1189 todo_ |= TODO_cleanup_cfg;
1191 gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
1192 remove_phi_node (&bsi, false);
1195 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
1197 edge e;
1199 stmt = gsi_stmt (bsi);
1201 lim_data = get_lim_data (stmt);
1202 if (lim_data == NULL)
1204 gsi_next (&bsi);
1205 continue;
1208 cost = lim_data->cost;
1209 level = lim_data->tgt_loop;
1210 clear_lim_data (stmt);
1212 if (!level)
1214 gsi_next (&bsi);
1215 continue;
 1218       /* We do not really want to move the conditional out of the loop; we just
 1219 	 placed it here to force its operands to be moved if necessary.  */
1220 if (gimple_code (stmt) == GIMPLE_COND)
1221 continue;
1223 if (dump_file && (dump_flags & TDF_DETAILS))
1225 fprintf (dump_file, "Moving statement\n");
1226 print_gimple_stmt (dump_file, stmt, 0, 0);
1227 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1228 cost, level->num);
1231 e = loop_preheader_edge (level);
1232 gcc_assert (!gimple_vdef (stmt));
1233 if (gimple_vuse (stmt))
1235 /* The new VUSE is the one from the virtual PHI in the loop
1236 header or the one already present. */
1237 gimple_stmt_iterator gsi2;
1238 for (gsi2 = gsi_start_phis (e->dest);
1239 !gsi_end_p (gsi2); gsi_next (&gsi2))
1241 gimple phi = gsi_stmt (gsi2);
1242 if (virtual_operand_p (gimple_phi_result (phi)))
1244 gimple_set_vuse (stmt, PHI_ARG_DEF_FROM_EDGE (phi, e));
1245 break;
1249 gsi_remove (&bsi, false);
1250 gsi_insert_on_edge (e, stmt);
1254 /* Hoist the statements out of the loops prescribed by data stored in
 1255    LIM_DATA structures associated with each statement.  */
1257 static unsigned int
1258 move_computations (void)
1260 move_computations_dom_walker walker (CDI_DOMINATORS);
1261 walker.walk (cfun->cfg->x_entry_block_ptr);
1263 gsi_commit_edge_inserts ();
1264 if (need_ssa_update_p (cfun))
1265 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
1267 return walker.todo_;
1270 /* Checks whether the statement defining variable *INDEX can be hoisted
1271 out of the loop passed in DATA. Callback for for_each_index. */
1273 static bool
1274 may_move_till (tree ref, tree *index, void *data)
1276 struct loop *loop = (struct loop *) data, *max_loop;
1278 /* If REF is an array reference, check also that the step and the lower
 1279    bound are invariant in LOOP.  */
1280 if (TREE_CODE (ref) == ARRAY_REF)
1282 tree step = TREE_OPERAND (ref, 3);
1283 tree lbound = TREE_OPERAND (ref, 2);
1285 max_loop = outermost_invariant_loop (step, loop);
1286 if (!max_loop)
1287 return false;
1289 max_loop = outermost_invariant_loop (lbound, loop);
1290 if (!max_loop)
1291 return false;
1294 max_loop = outermost_invariant_loop (*index, loop);
1295 if (!max_loop)
1296 return false;
1298 return true;
 1301 /* If OP is an SSA_NAME, force the statement that defines it to be
 1302    moved out of the LOOP.  ORIG_LOOP is the loop in which OP is used.  */
1304 static void
1305 force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
1307 gimple stmt;
1309 if (!op
1310 || is_gimple_min_invariant (op))
1311 return;
1313 gcc_assert (TREE_CODE (op) == SSA_NAME);
1315 stmt = SSA_NAME_DEF_STMT (op);
1316 if (gimple_nop_p (stmt))
1317 return;
1319 set_level (stmt, orig_loop, loop);
 1322 /* Forces the statements defining invariants in REF (and *INDEX) to be moved out of
1323 the LOOP. The reference REF is used in the loop ORIG_LOOP. Callback for
1324 for_each_index. */
1326 struct fmt_data
1328 struct loop *loop;
1329 struct loop *orig_loop;
1332 static bool
1333 force_move_till (tree ref, tree *index, void *data)
1335 struct fmt_data *fmt_data = (struct fmt_data *) data;
1337 if (TREE_CODE (ref) == ARRAY_REF)
1339 tree step = TREE_OPERAND (ref, 3);
1340 tree lbound = TREE_OPERAND (ref, 2);
1342 force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
1343 force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
1346 force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);
1348 return true;
1351 /* A function to free the mem_ref object OBJ. */
1353 static void
1354 memref_free (struct mem_ref *mem)
1356 unsigned i;
1357 vec<mem_ref_loc> *accs;
1359 FOR_EACH_VEC_ELT (mem->accesses_in_loop, i, accs)
1360 accs->release ();
1361 mem->accesses_in_loop.release ();
1363 free (mem);
1366 /* Allocates and returns a memory reference description for MEM whose hash
1367 value is HASH and id is ID. */
1369 static mem_ref_p
1370 mem_ref_alloc (tree mem, unsigned hash, unsigned id)
1372 mem_ref_p ref = XNEW (struct mem_ref);
1373 ao_ref_init (&ref->mem, mem);
1374 ref->id = id;
1375 ref->hash = hash;
1376 bitmap_initialize (&ref->stored, &lim_bitmap_obstack);
1377 bitmap_initialize (&ref->indep_loop, &lim_bitmap_obstack);
1378 bitmap_initialize (&ref->dep_loop, &lim_bitmap_obstack);
1379 ref->accesses_in_loop.create (0);
1381 return ref;
1384 /* Records memory reference location *LOC in LOOP to the memory reference
1385 description REF. The reference occurs in statement STMT. */
1387 static void
1388 record_mem_ref_loc (mem_ref_p ref, struct loop *loop, gimple stmt, tree *loc)
1390 mem_ref_loc aref;
1392 if (ref->accesses_in_loop.length ()
1393 <= (unsigned) loop->num)
1394 ref->accesses_in_loop.safe_grow_cleared (loop->num + 1);
1396 aref.stmt = stmt;
1397 aref.ref = loc;
1398 ref->accesses_in_loop[loop->num].safe_push (aref);
1401 /* Marks reference REF as stored in LOOP. */
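/* Note that the walk below also marks all loops enclosing LOOP, stopping
   as soon as it reaches a loop whose bit is already set; bitmap_set_bit
   returns false in that case.  */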
1403 static void
1404 mark_ref_stored (mem_ref_p ref, struct loop *loop)
1406 while (loop != current_loops->tree_root
1407 && bitmap_set_bit (&ref->stored, loop->num))
1408 loop = loop_outer (loop);
1411 /* Gathers memory references in statement STMT in LOOP, storing the
1412 information about them in the memory_accesses structure. Marks
1413 the vops accessed through unrecognized statements there as
1414 well. */
1416 static void
1417 gather_mem_refs_stmt (struct loop *loop, gimple stmt)
1419 tree *mem = NULL;
1420 hashval_t hash;
1421 mem_ref **slot;
1422 mem_ref_p ref;
1423 bool is_stored;
1424 unsigned id;
1426 if (!gimple_vuse (stmt))
1427 return;
1429 mem = simple_mem_ref_in_stmt (stmt, &is_stored);
1430 if (!mem)
1432 /* We use the shared mem_ref for all unanalyzable refs. */
1433 id = UNANALYZABLE_MEM_ID;
1434 ref = memory_accesses.refs_list[id];
1435 if (dump_file && (dump_flags & TDF_DETAILS))
1437 fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
1438 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1440 is_stored = gimple_vdef (stmt);
1442 else
1444 hash = iterative_hash_expr (*mem, 0);
1445 slot = memory_accesses.refs.find_slot_with_hash (*mem, hash, INSERT);
1446 if (*slot)
1448 ref = (mem_ref_p) *slot;
1449 id = ref->id;
1451 else
1453 id = memory_accesses.refs_list.length ();
1454 ref = mem_ref_alloc (*mem, hash, id);
1455 memory_accesses.refs_list.safe_push (ref);
1456 *slot = ref;
1458 if (dump_file && (dump_flags & TDF_DETAILS))
1460 fprintf (dump_file, "Memory reference %u: ", id);
1461 print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM);
1462 fprintf (dump_file, "\n");
1466 record_mem_ref_loc (ref, loop, stmt, mem);
1468 bitmap_set_bit (&memory_accesses.refs_in_loop[loop->num], ref->id);
1469 if (is_stored)
1471 bitmap_set_bit (&memory_accesses.refs_stored_in_loop[loop->num], ref->id);
1472 mark_ref_stored (ref, loop);
1474 return;
1477 static unsigned *bb_loop_postorder;
 1479 /* qsort comparison function to sort blocks by their loop father's postorder.  */
1481 static int
1482 sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_)
1484 basic_block bb1 = *(basic_block *)const_cast<void *>(bb1_);
1485 basic_block bb2 = *(basic_block *)const_cast<void *>(bb2_);
1486 struct loop *loop1 = bb1->loop_father;
1487 struct loop *loop2 = bb2->loop_father;
1488 if (loop1->num == loop2->num)
1489 return 0;
1490 return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
1493 /* Gathers memory references in loops. */
1495 static void
1496 analyze_memory_references (void)
1498 gimple_stmt_iterator bsi;
1499 basic_block bb, *bbs;
1500 struct loop *loop, *outer;
1501 loop_iterator li;
1502 unsigned i, n;
1504 /* Initialize bb_loop_postorder with a mapping from loop->num to
1505 its postorder index. */
1506 i = 0;
1507 bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun));
1508 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1509 bb_loop_postorder[loop->num] = i++;
 1510   /* Collect all basic-blocks in loops and sort them by their
 1511      loop's postorder.  */
1512 i = 0;
1513 bbs = XNEWVEC (basic_block, n_basic_blocks - NUM_FIXED_BLOCKS);
1514 FOR_EACH_BB (bb)
1515 if (bb->loop_father != current_loops->tree_root)
1516 bbs[i++] = bb;
1517 n = i;
1518 qsort (bbs, n, sizeof (basic_block), sort_bbs_in_loop_postorder_cmp);
1519 free (bb_loop_postorder);
1521 /* Visit blocks in loop postorder and assign mem-ref IDs in that order.
1522 That results in better locality for all the bitmaps. */
1523 for (i = 0; i < n; ++i)
1525 basic_block bb = bbs[i];
1526 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1527 gather_mem_refs_stmt (bb->loop_father, gsi_stmt (bsi));
1530 free (bbs);
1532 /* Propagate the information about accessed memory references up
1533 the loop hierarchy. */
1534 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1536 /* Finalize the overall touched references (including subloops). */
1537 bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
1538 &memory_accesses.refs_stored_in_loop[loop->num]);
1540 /* Propagate the information about accessed memory references up
1541 the loop hierarchy. */
1542 outer = loop_outer (loop);
1543 if (outer == current_loops->tree_root)
1544 continue;
1546 bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[outer->num],
1547 &memory_accesses.all_refs_stored_in_loop[loop->num]);
1551 /* Returns true if MEM1 and MEM2 may alias. TTAE_CACHE is used as a cache in
1552 tree_to_aff_combination_expand. */
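/* As an illustrative example: for two accesses a[i] and a[i + 1] with
   4-byte elements, the expanded affine offsets are 4*i and 4*i + 4; their
   difference is the constant 4, which is not smaller than the access size,
   so aff_comb_cannot_overlap_p can conclude that the locations do not
   overlap and the references do not alias.  */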
1554 static bool
1555 mem_refs_may_alias_p (mem_ref_p mem1, mem_ref_p mem2,
1556 struct pointer_map_t **ttae_cache)
1558 /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
 1559      object and their offsets differ in such a way that the locations cannot
1560 overlap, then they cannot alias. */
1561 double_int size1, size2;
1562 aff_tree off1, off2;
1564 /* Perform basic offset and type-based disambiguation. */
1565 if (!refs_may_alias_p_1 (&mem1->mem, &mem2->mem, true))
1566 return false;
1568 /* The expansion of addresses may be a bit expensive, thus we only do
1569 the check at -O2 and higher optimization levels. */
1570 if (optimize < 2)
1571 return true;
1573 get_inner_reference_aff (mem1->mem.ref, &off1, &size1);
1574 get_inner_reference_aff (mem2->mem.ref, &off2, &size2);
1575 aff_combination_expand (&off1, ttae_cache);
1576 aff_combination_expand (&off2, ttae_cache);
1577 aff_combination_scale (&off1, double_int_minus_one);
1578 aff_combination_add (&off2, &off1);
1580 if (aff_comb_cannot_overlap_p (&off2, size1, size2))
1581 return false;
1583 return true;
1586 /* Iterates over all locations of REF in LOOP and its subloops calling
1587 fn.operator() with the location as argument. When that operator
1588 returns true the iteration is stopped and true is returned.
1589 Otherwise false is returned. */
1591 template <typename FN>
1592 static bool
1593 for_all_locs_in_loop (struct loop *loop, mem_ref_p ref, FN fn)
1595 unsigned i;
1596 mem_ref_loc_p loc;
1597 struct loop *subloop;
1599 if (ref->accesses_in_loop.length () > (unsigned) loop->num)
1600 FOR_EACH_VEC_ELT (ref->accesses_in_loop[loop->num], i, loc)
1601 if (fn (loc))
1602 return true;
1604 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
1605 if (for_all_locs_in_loop (subloop, ref, fn))
1606 return true;
1608 return false;
1611 /* Rewrites location LOC by TMP_VAR. */
1613 struct rewrite_mem_ref_loc
1615 rewrite_mem_ref_loc (tree tmp_var_) : tmp_var (tmp_var_) {}
1616 bool operator () (mem_ref_loc_p loc);
1617 tree tmp_var;
1620 bool
1621 rewrite_mem_ref_loc::operator () (mem_ref_loc_p loc)
1623 *loc->ref = tmp_var;
1624 update_stmt (loc->stmt);
1625 return false;
1628 /* Rewrites all references to REF in LOOP by variable TMP_VAR. */
1630 static void
1631 rewrite_mem_refs (struct loop *loop, mem_ref_p ref, tree tmp_var)
1633 for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var));
1636 /* Stores the first reference location in LOCP. */
1638 struct first_mem_ref_loc_1
1640 first_mem_ref_loc_1 (mem_ref_loc_p *locp_) : locp (locp_) {}
1641 bool operator () (mem_ref_loc_p loc);
1642 mem_ref_loc_p *locp;
1645 bool
1646 first_mem_ref_loc_1::operator () (mem_ref_loc_p loc)
1648 *locp = loc;
1649 return true;
1652 /* Returns the first reference location to REF in LOOP. */
1654 static mem_ref_loc_p
1655 first_mem_ref_loc (struct loop *loop, mem_ref_p ref)
1657 mem_ref_loc_p locp = NULL;
1658 for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp));
1659 return locp;
1662 struct prev_flag_edges {
1663 /* Edge to insert new flag comparison code. */
1664 edge append_cond_position;
1666 /* Edge for fall through from previous flag comparison. */
1667 edge last_cond_fallthru;
1670 /* Helper function for execute_sm. Emit code to store TMP_VAR into
1671 MEM along edge EX.
1673 The store is only done if MEM has changed. We do this so no
1674 changes to MEM occur on code paths that did not originally store
1675 into it.
1677 The common case for execute_sm will transform:
1679 for (...) {
1680 if (foo)
1681 stuff;
1682 else
1683 MEM = TMP_VAR;
1686 into:
1688 lsm = MEM;
1689 for (...) {
1690 if (foo)
1691 stuff;
1692 else
1693 lsm = TMP_VAR;
1695 MEM = lsm;
1697 This function will generate:
1699 lsm = MEM;
1701 lsm_flag = false;
1703 for (...) {
1704 if (foo)
1705 stuff;
1706 else {
1707 lsm = TMP_VAR;
1708 lsm_flag = true;
1711 if (lsm_flag) <--
1712 MEM = lsm; <--
1715 static void
1716 execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag)
1718 basic_block new_bb, then_bb, old_dest;
1719 bool loop_has_only_one_exit;
1720 edge then_old_edge, orig_ex = ex;
1721 gimple_stmt_iterator gsi;
1722 gimple stmt;
1723 struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
1725 /* ?? Insert store after previous store if applicable. See note
1726 below. */
1727 if (prev_edges)
1728 ex = prev_edges->append_cond_position;
1730 loop_has_only_one_exit = single_pred_p (ex->dest);
1732 if (loop_has_only_one_exit)
1733 ex = split_block_after_labels (ex->dest);
1735 old_dest = ex->dest;
1736 new_bb = split_edge (ex);
1737 then_bb = create_empty_bb (new_bb);
1738 if (current_loops && new_bb->loop_father)
1739 add_bb_to_loop (then_bb, new_bb->loop_father);
1741 gsi = gsi_start_bb (new_bb);
1742 stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
1743 NULL_TREE, NULL_TREE);
1744 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1746 gsi = gsi_start_bb (then_bb);
1747 /* Insert actual store. */
1748 stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
1749 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1751 make_edge (new_bb, then_bb, EDGE_TRUE_VALUE);
1752 make_edge (new_bb, old_dest, EDGE_FALSE_VALUE);
1753 then_old_edge = make_edge (then_bb, old_dest, EDGE_FALLTHRU);
1755 set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);
1757 if (prev_edges)
1759 basic_block prevbb = prev_edges->last_cond_fallthru->src;
1760 redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
1761 set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
1762 set_immediate_dominator (CDI_DOMINATORS, old_dest,
1763 recompute_dominator (CDI_DOMINATORS, old_dest));
1766 /* ?? Because stores may alias, they must happen in the exact
1767 sequence they originally happened. Save the position right after
1768 the (_lsm) store we just created so we can continue appending after
1769 it and maintain the original order. */
1771 struct prev_flag_edges *p;
1773 if (orig_ex->aux)
1774 orig_ex->aux = NULL;
1775 alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
1776 p = (struct prev_flag_edges *) orig_ex->aux;
1777 p->append_cond_position = then_old_edge;
1778 p->last_cond_fallthru = find_edge (new_bb, old_dest);
1779 orig_ex->aux = (void *) p;
1782 if (!loop_has_only_one_exit)
1783 for (gsi = gsi_start_phis (old_dest); !gsi_end_p (gsi); gsi_next (&gsi))
1785 gimple phi = gsi_stmt (gsi);
1786 unsigned i;
1788 for (i = 0; i < gimple_phi_num_args (phi); i++)
1789 if (gimple_phi_arg_edge (phi, i)->src == new_bb)
1791 tree arg = gimple_phi_arg_def (phi, i);
1792 add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
1793 update_stmt (phi);
1796 /* Remove the original fall through edge. This was the
1797 single_succ_edge (new_bb). */
1798 EDGE_SUCC (new_bb, 0)->flags &= ~EDGE_FALLTHRU;
1801 /* When REF is set on the location, set flag indicating the store. */
1803 struct sm_set_flag_if_changed
1805 sm_set_flag_if_changed (tree flag_) : flag (flag_) {}
1806 bool operator () (mem_ref_loc_p loc);
1807 tree flag;
1810 bool
1811 sm_set_flag_if_changed::operator () (mem_ref_loc_p loc)
1813 /* Only set the flag for writes. */
1814 if (is_gimple_assign (loc->stmt)
1815 && gimple_assign_lhs_ptr (loc->stmt) == loc->ref)
1817 gimple_stmt_iterator gsi = gsi_for_stmt (loc->stmt);
1818 gimple stmt = gimple_build_assign (flag, boolean_true_node);
1819 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1821 return false;
1824 /* Helper function for execute_sm. On every location where REF is
1825 set, set an appropriate flag indicating the store. */
1827 static tree
1828 execute_sm_if_changed_flag_set (struct loop *loop, mem_ref_p ref)
1830 tree flag;
1831 char *str = get_lsm_tmp_name (ref->mem.ref, ~0, "_flag");
1832 flag = create_tmp_reg (boolean_type_node, str);
1833 for_all_locs_in_loop (loop, ref, sm_set_flag_if_changed (flag));
1834 return flag;
1837 /* Executes store motion of memory reference REF from LOOP.
1838 Exits from the LOOP are stored in EXITS. The initialization of the
 1839    temporary variable is put in the preheader of the loop, and assignments
1840 to the reference from the temporary variable are emitted to exits. */
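/* In the simple single-threaded model this roughly turns

     for (...)
       { ... = *p; ...; *p = ...; }

   into

     tmp = *p;
     for (...)
       { ... = tmp; ...; tmp = ...; }
     *p = tmp;		<-- on every exit edge

   while the flag-based variant illustrated before execute_sm_if_changed
   is used when the store must not happen on paths that did not store
   originally.  */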
1842 static void
1843 execute_sm (struct loop *loop, vec<edge> exits, mem_ref_p ref)
1845 tree tmp_var, store_flag;
1846 unsigned i;
1847 gimple load;
1848 struct fmt_data fmt_data;
1849 edge ex;
1850 struct lim_aux_data *lim_data;
1851 bool multi_threaded_model_p = false;
1852 gimple_stmt_iterator gsi;
1854 if (dump_file && (dump_flags & TDF_DETAILS))
1856 fprintf (dump_file, "Executing store motion of ");
1857 print_generic_expr (dump_file, ref->mem.ref, 0);
1858 fprintf (dump_file, " from loop %d\n", loop->num);
1861 tmp_var = create_tmp_reg (TREE_TYPE (ref->mem.ref),
1862 get_lsm_tmp_name (ref->mem.ref, ~0));
1864 fmt_data.loop = loop;
1865 fmt_data.orig_loop = loop;
1866 for_each_index (&ref->mem.ref, force_move_till, &fmt_data);
1868 if (block_in_transaction (loop_preheader_edge (loop)->src)
1869 || !PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES))
1870 multi_threaded_model_p = true;
1872 if (multi_threaded_model_p)
1873 store_flag = execute_sm_if_changed_flag_set (loop, ref);
1875 rewrite_mem_refs (loop, ref, tmp_var);
1877 /* Emit the load code on a random exit edge or into the latch if
1878 the loop does not exit, so that we are sure it will be processed
1879 by move_computations after all dependencies. */
1880 gsi = gsi_for_stmt (first_mem_ref_loc (loop, ref)->stmt);
1882 /* FIXME/TODO: For the multi-threaded variant, we could avoid this
1883 load altogether, since the store is predicated by a flag. We
 1884      could do the load only if it was originally in the loop.  */
1885 load = gimple_build_assign (tmp_var, unshare_expr (ref->mem.ref));
1886 lim_data = init_lim_data (load);
1887 lim_data->max_loop = loop;
1888 lim_data->tgt_loop = loop;
1889 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
1891 if (multi_threaded_model_p)
1893 load = gimple_build_assign (store_flag, boolean_false_node);
1894 lim_data = init_lim_data (load);
1895 lim_data->max_loop = loop;
1896 lim_data->tgt_loop = loop;
1897 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
1900 /* Sink the store to every exit from the loop. */
1901 FOR_EACH_VEC_ELT (exits, i, ex)
1902 if (!multi_threaded_model_p)
1904 gimple store;
1905 store = gimple_build_assign (unshare_expr (ref->mem.ref), tmp_var);
1906 gsi_insert_on_edge (ex, store);
1908 else
1909 execute_sm_if_changed (ex, ref->mem.ref, tmp_var, store_flag);
1912 /* Hoists memory references MEM_REFS out of LOOP. EXITS is the list of exit
1913 edges of the LOOP. */
1915 static void
1916 hoist_memory_references (struct loop *loop, bitmap mem_refs,
1917 vec<edge> exits)
1919 mem_ref_p ref;
1920 unsigned i;
1921 bitmap_iterator bi;
1923 EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
1925 ref = memory_accesses.refs_list[i];
1926 execute_sm (loop, exits, ref);
1930 struct ref_always_accessed
1932 ref_always_accessed (struct loop *loop_, tree base_, bool stored_p_)
1933 : loop (loop_), base (base_), stored_p (stored_p_) {}
1934 bool operator () (mem_ref_loc_p loc);
1935 struct loop *loop;
1936 tree base;
1937 bool stored_p;
1940 bool
1941 ref_always_accessed::operator () (mem_ref_loc_p loc)
1943 struct loop *must_exec;
1945 if (!get_lim_data (loc->stmt))
1946 return false;
1948 /* If we require an always executed store make sure the statement
1949 stores to the reference. */
1950 if (stored_p)
1952 tree lhs;
1953 if (!gimple_get_lhs (loc->stmt))
1954 return false;
1955 lhs = get_base_address (gimple_get_lhs (loc->stmt));
1956 if (!lhs)
1957 return false;
1958 if (INDIRECT_REF_P (lhs)
1959 || TREE_CODE (lhs) == MEM_REF)
1960 lhs = TREE_OPERAND (lhs, 0);
1961 if (lhs != base)
1962 return false;
1965 must_exec = get_lim_data (loc->stmt)->always_executed_in;
1966 if (!must_exec)
1967 return false;
1969 if (must_exec == loop
1970 || flow_loop_nested_p (must_exec, loop))
1971 return true;
1973 return false;
1976 /* Returns true if REF is always accessed in LOOP. If STORED_P is true
1977 make sure REF is always stored to in LOOP. */
1979 static bool
1980 ref_always_accessed_p (struct loop *loop, mem_ref_p ref, bool stored_p)
1982 tree base = ao_ref_base (&ref->mem);
1983 if (TREE_CODE (base) == MEM_REF)
1984 base = TREE_OPERAND (base, 0);
1986 return for_all_locs_in_loop (loop, ref,
1987 ref_always_accessed (loop, base, stored_p));
1990 /* Returns true if REF1 and REF2 are independent. */
1992 static bool
1993 refs_independent_p (mem_ref_p ref1, mem_ref_p ref2)
1995 if (ref1 == ref2)
1996 return true;
1998 if (dump_file && (dump_flags & TDF_DETAILS))
1999 fprintf (dump_file, "Querying dependency of refs %u and %u: ",
2000 ref1->id, ref2->id);
2002 if (mem_refs_may_alias_p (ref1, ref2, &memory_accesses.ttae_cache))
2004 if (dump_file && (dump_flags & TDF_DETAILS))
2005 fprintf (dump_file, "dependent.\n");
2006 return false;
2008 else
2010 if (dump_file && (dump_flags & TDF_DETAILS))
2011 fprintf (dump_file, "independent.\n");
2012 return true;
2016 /* Mark REF dependent on stores or loads (according to STORED_P) in LOOP
2017 and its super-loops. */
2019 static void
2020 record_dep_loop (struct loop *loop, mem_ref_p ref, bool stored_p)
2022 /* We can propagate dependent-in-loop bits up the loop
2023 hierarchy to all outer loops. */
2024 while (loop != current_loops->tree_root
2025 && bitmap_set_bit (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2026 loop = loop_outer (loop);
2029 /* Returns true if REF is independent of all other memory references in
2030 LOOP. */
2032 static bool
2033 ref_indep_loop_p_1 (struct loop *loop, mem_ref_p ref, bool stored_p)
2035 bitmap refs_to_check;
2036 unsigned i;
2037 bitmap_iterator bi;
2038 mem_ref_p aref;
2040 if (stored_p)
2041 refs_to_check = &memory_accesses.refs_in_loop[loop->num];
2042 else
2043 refs_to_check = &memory_accesses.refs_stored_in_loop[loop->num];
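  /* Ref 0 is the special unanalyzable mem-ref allocated in
     tree_ssa_lim_initialize; if the loop contains such an access we have
     to conservatively assume a dependence.  */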
2045 if (bitmap_bit_p (refs_to_check, UNANALYZABLE_MEM_ID))
2046 return false;
2048 EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
2050 aref = memory_accesses.refs_list[i];
2051 if (!refs_independent_p (ref, aref))
2052 return false;
2055 return true;
2058 /* Returns true if REF is independent of all other memory references in
2059 LOOP. Wrapper over ref_indep_loop_p_1, caching its results. */
2061 static bool
2062 ref_indep_loop_p_2 (struct loop *loop, mem_ref_p ref, bool stored_p)
2064 stored_p |= bitmap_bit_p (&ref->stored, loop->num);
2066 if (bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2067 return true;
2068 if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2069 return false;
2071 struct loop *inner = loop->inner;
2072 while (inner)
2074 if (!ref_indep_loop_p_2 (inner, ref, stored_p))
2075 return false;
2076 inner = inner->next;
2079 bool indep_p = ref_indep_loop_p_1 (loop, ref, stored_p);
2081 if (dump_file && (dump_flags & TDF_DETAILS))
2082 fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
2083 ref->id, loop->num, indep_p ? "independent" : "dependent");
2085 /* Record the computed result in the cache. */
2086 if (indep_p)
2088 if (bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p))
2089 && stored_p)
2091 /* If it's independent of all refs then it's independent
2092 of stores, too. */
2093 bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, false));
2096 else
2098 record_dep_loop (loop, ref, stored_p);
2099 if (!stored_p)
2101 /* If it's dependent on stores it's dependent on
2102 all refs, too. */
2103 record_dep_loop (loop, ref, true);
2107 return indep_p;
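/* Note the asymmetry in the caching done by ref_indep_loop_p_2: a
   dependence is propagated by record_dep_loop to all outer loops (and a
   dependence found for the weaker, not-stored query is recorded for the
   stored query as well, since the latter checks a superset of
   references), whereas an independence is cached only for the loop and
   variant actually queried.  */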
2110 /* Returns true if REF is independent of all other memory references in
2111 LOOP. */
2113 static bool
2114 ref_indep_loop_p (struct loop *loop, mem_ref_p ref)
2116 gcc_checking_assert (MEM_ANALYZABLE (ref));
2118 return ref_indep_loop_p_2 (loop, ref, false);
2121 /* Returns true if we can perform store motion of REF from LOOP. */
2123 static bool
2124 can_sm_ref_p (struct loop *loop, mem_ref_p ref)
2126 tree base;
2128 /* Can't hoist unanalyzable refs. */
2129 if (!MEM_ANALYZABLE (ref))
2130 return false;
2132 /* It should be movable. */
2133 if (!is_gimple_reg_type (TREE_TYPE (ref->mem.ref))
2134 || TREE_THIS_VOLATILE (ref->mem.ref)
2135 || !for_each_index (&ref->mem.ref, may_move_till, loop))
2136 return false;
2138 /* If it can throw, fail; we do not properly update EH info. */
2139 if (tree_could_throw_p (ref->mem.ref))
2140 return false;
2142 /* If it can trap, it must always be executed in LOOP.
2143 Readonly memory locations may trap when storing to them, but
2144 tree_could_trap_p is a predicate for rvalues, so check that
2145 explicitly. */
2146 base = get_base_address (ref->mem.ref);
2147 if ((tree_could_trap_p (ref->mem.ref)
2148 || (DECL_P (base) && TREE_READONLY (base)))
2149 && !ref_always_accessed_p (loop, ref, true))
2150 return false;
2152 /* And it must be independent of all other memory references
2153 in LOOP. */
2154 if (!ref_indep_loop_p (loop, ref))
2155 return false;
2157 return true;
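/* As a concrete (made-up) example of the trapping condition above: in

     for (i = 0; i < n; i++)
       if (a[i] > 0)
	 *p = a[i];

   the store to *p does not necessarily happen when the loop is entered,
   so performing store motion would introduce a load and a store of *p
   that the original program might never have executed; can_sm_ref_p
   therefore rejects the reference unless ref_always_accessed_p proves
   the access always executed.  */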
2160 /* Marks the references in LOOP for which store motion should be performed
2161 in REFS_TO_SM. SM_EXECUTED is the set of references for which store
2162 motion was performed in one of the outer loops. */
2164 static void
2165 find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
2167 bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
2168 unsigned i;
2169 bitmap_iterator bi;
2170 mem_ref_p ref;
2172 EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
2174 ref = memory_accesses.refs_list[i];
2175 if (can_sm_ref_p (loop, ref))
2176 bitmap_set_bit (refs_to_sm, i);
2180 /* Checks whether LOOP (with exits stored in EXITS array) is suitable
2181 for a store motion optimization (i.e. whether we can insert statements
2182 on its exits). */
2184 static bool
2185 loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
2186 vec<edge> exits)
2188 unsigned i;
2189 edge ex;
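  /* We cannot insert statements on abnormal or EH exit edges, so a loop
     with such exits is not suitable for store motion.  */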
2191 FOR_EACH_VEC_ELT (exits, i, ex)
2192 if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
2193 return false;
2195 return true;
2198 /* Try to perform store motion for all memory references modified inside
2199 LOOP. SM_EXECUTED is the bitmap of the memory references for which
2200 store motion was executed in one of the outer loops. */
2202 static void
2203 store_motion_loop (struct loop *loop, bitmap sm_executed)
2205 vec<edge> exits = get_loop_exit_edges (loop);
2206 struct loop *subloop;
2207 bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);
2209 if (loop_suitable_for_sm (loop, exits))
2211 find_refs_for_sm (loop, sm_executed, sm_in_loop);
2212 hoist_memory_references (loop, sm_in_loop, exits);
2214 exits.release ();
2216 bitmap_ior_into (sm_executed, sm_in_loop);
2217 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
2218 store_motion_loop (subloop, sm_executed);
2219 bitmap_and_compl_into (sm_executed, sm_in_loop);
2220 BITMAP_FREE (sm_in_loop);
2223 /* Try to perform store motion for all memory references modified inside
2224 loops. */
2226 static void
2227 store_motion (void)
2229 struct loop *loop;
2230 bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);
2232 for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
2233 store_motion_loop (loop, sm_executed);
2235 BITMAP_FREE (sm_executed);
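  /* The stores sunk onto exit edges by execute_sm were only queued with
     gsi_insert_on_edge; commit them now, splitting the edges where
     necessary.  */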
2236 gsi_commit_edge_inserts ();
2239 /* Fills ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e.
2240 for each such basic block bb records the outermost loop for which execution
2241 of its header implies execution of bb. CONTAINS_CALL is the bitmap of
2242 blocks that contain a nonpure call. */
2244 static void
2245 fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
2247 basic_block bb = NULL, *bbs, last = NULL;
2248 unsigned i;
2249 edge e;
2250 struct loop *inn_loop = loop;
2252 if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
2254 bbs = get_loop_body_in_dom_order (loop);
2256 for (i = 0; i < loop->num_nodes; i++)
2258 edge_iterator ei;
2259 bb = bbs[i];
2261 if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2262 last = bb;
2264 if (bitmap_bit_p (contains_call, bb->index))
2265 break;
2267 FOR_EACH_EDGE (e, ei, bb->succs)
2268 if (!flow_bb_inside_loop_p (loop, e->dest))
2269 break;
2270 if (e)
2271 break;
2273 /* A loop might be infinite (TODO use simple loop analysis
2274 to disprove this if possible). */
2275 if (bb->flags & BB_IRREDUCIBLE_LOOP)
2276 break;
2278 if (!flow_bb_inside_loop_p (inn_loop, bb))
2279 break;
2281 if (bb->loop_father->header == bb)
2283 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2284 break;
2286 /* In a loop that is always entered we may proceed anyway.
2287 But record that we entered it and stop once we leave it. */
2288 inn_loop = bb->loop_father;
2292 while (1)
2294 SET_ALWAYS_EXECUTED_IN (last, loop);
2295 if (last == loop->header)
2296 break;
2297 last = get_immediate_dominator (CDI_DOMINATORS, last);
2300 free (bbs);
2303 for (loop = loop->inner; loop; loop = loop->next)
2304 fill_always_executed_in_1 (loop, contains_call);
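/* For illustration, a made-up example (assuming the exit test is in the
   latch and no block contains a nonpure call):

     do
       {
	 x = ...;	<- always executed once the loop is entered
	 if (cond)
	   y = ...;	<- not always executed
	 z = ...;	<- dominates the latch, always executed
       }
     while (test);

   Here the blocks holding the assignments to x and z get
   ALWAYS_EXECUTED_IN set to the loop, while the block holding the
   assignment to y keeps NULL.  */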
2307 /* Fills ALWAYS_EXECUTED_IN information for basic blocks, i.e.
2308 for each such basic block bb records the outermost loop for which execution
2309 of its header implies execution of bb. */
2311 static void
2312 fill_always_executed_in (void)
2314 sbitmap contains_call = sbitmap_alloc (last_basic_block);
2315 basic_block bb;
2316 struct loop *loop;
2318 bitmap_clear (contains_call);
2319 FOR_EACH_BB (bb)
2321 gimple_stmt_iterator gsi;
2322 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2324 if (nonpure_call_p (gsi_stmt (gsi)))
2325 break;
2328 if (!gsi_end_p (gsi))
2329 bitmap_set_bit (contains_call, bb->index);
2332 for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
2333 fill_always_executed_in_1 (loop, contains_call);
2335 sbitmap_free (contains_call);
2339 /* Compute the global information needed by the loop invariant motion pass. */
2341 static void
2342 tree_ssa_lim_initialize (void)
2344 unsigned i;
2346 bitmap_obstack_initialize (&lim_bitmap_obstack);
2347 lim_aux_data_map = pointer_map_create ();
2349 if (flag_tm)
2350 compute_transaction_bits ();
2352 alloc_aux_for_edges (0);
2354 memory_accesses.refs.create (100);
2355 memory_accesses.refs_list.create (100);
2356 /* Allocate a special, unanalyzable mem-ref with ID zero. */
2357 memory_accesses.refs_list.quick_push
2358 (mem_ref_alloc (error_mark_node, 0, UNANALYZABLE_MEM_ID));
2360 memory_accesses.refs_in_loop.create (number_of_loops (cfun));
2361 memory_accesses.refs_in_loop.quick_grow (number_of_loops (cfun));
2362 memory_accesses.refs_stored_in_loop.create (number_of_loops (cfun));
2363 memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2364 memory_accesses.all_refs_stored_in_loop.create (number_of_loops (cfun));
2365 memory_accesses.all_refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2367 for (i = 0; i < number_of_loops (cfun); i++)
2369 bitmap_initialize (&memory_accesses.refs_in_loop[i],
2370 &lim_bitmap_obstack);
2371 bitmap_initialize (&memory_accesses.refs_stored_in_loop[i],
2372 &lim_bitmap_obstack);
2373 bitmap_initialize (&memory_accesses.all_refs_stored_in_loop[i],
2374 &lim_bitmap_obstack);
2377 memory_accesses.ttae_cache = NULL;
2380 /* Cleans up after the invariant motion pass. */
2382 static void
2383 tree_ssa_lim_finalize (void)
2385 basic_block bb;
2386 unsigned i;
2387 mem_ref_p ref;
2389 free_aux_for_edges ();
2391 FOR_EACH_BB (bb)
2392 SET_ALWAYS_EXECUTED_IN (bb, NULL);
2394 bitmap_obstack_release (&lim_bitmap_obstack);
2395 pointer_map_destroy (lim_aux_data_map);
2397 memory_accesses.refs.dispose ();
2399 FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
2400 memref_free (ref);
2401 memory_accesses.refs_list.release ();
2403 memory_accesses.refs_in_loop.release ();
2404 memory_accesses.refs_stored_in_loop.release ();
2405 memory_accesses.all_refs_stored_in_loop.release ();
2407 if (memory_accesses.ttae_cache)
2408 free_affine_expand_cache (&memory_accesses.ttae_cache);
2411 /* Moves invariants from loops. Only "expensive" invariants are moved out --
2412 i.e. those that are likely to be a win regardless of the register pressure. */
2414 unsigned int
2415 tree_ssa_lim (void)
2417 unsigned int todo;
2419 tree_ssa_lim_initialize ();
2421 /* Gathers information about memory accesses in the loops. */
2422 analyze_memory_references ();
2424 /* Fills ALWAYS_EXECUTED_IN information for basic blocks. */
2425 fill_always_executed_in ();
2427 /* For each statement determine the outermost loop in which it is
2428 invariant and the cost of computing the invariant. */
2429 invariantness_dom_walker (CDI_DOMINATORS)
2430 .walk (cfun->cfg->x_entry_block_ptr);
2432 /* Execute store motion. Force the necessary invariants to be moved
2433 out of the loops as well. */
2434 store_motion ();
2436 /* Move the expressions that are expensive enough. */
2437 todo = move_computations ();
2439 tree_ssa_lim_finalize ();
2441 return todo;
2444 /* Loop invariant motion pass. */
2446 static unsigned int
2447 tree_ssa_loop_im (void)
2449 if (number_of_loops (cfun) <= 1)
2450 return 0;
2452 return tree_ssa_lim ();
2455 static bool
2456 gate_tree_ssa_loop_im (void)
2458 return flag_tree_loop_im != 0;
2461 namespace {
2463 const pass_data pass_data_lim =
2465 GIMPLE_PASS, /* type */
2466 "lim", /* name */
2467 OPTGROUP_LOOP, /* optinfo_flags */
2468 true, /* has_gate */
2469 true, /* has_execute */
2470 TV_LIM, /* tv_id */
2471 PROP_cfg, /* properties_required */
2472 0, /* properties_provided */
2473 0, /* properties_destroyed */
2474 0, /* todo_flags_start */
2475 0, /* todo_flags_finish */
2478 class pass_lim : public gimple_opt_pass
2480 public:
2481 pass_lim (gcc::context *ctxt)
2482 : gimple_opt_pass (pass_data_lim, ctxt)
2485 /* opt_pass methods: */
2486 opt_pass * clone () { return new pass_lim (m_ctxt); }
2487 bool gate () { return gate_tree_ssa_loop_im (); }
2488 unsigned int execute () { return tree_ssa_loop_im (); }
2490 }; // class pass_lim
2492 } // anon namespace
2494 gimple_opt_pass *
2495 make_pass_lim (gcc::context *ctxt)
2497 return new pass_lim (ctxt);