Fix cygwin performance loss on linpack.
[official-gcc.git] / gcc / tree-ssa-loop-im.c
blob 30b53ce19894faf84c6699be4ce6ad9fd23d4bc8
1 /* Loop invariant motion.
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "tree.h"
25 #include "gimple.h"
26 #include "cfghooks.h"
27 #include "tree-pass.h"
28 #include "ssa.h"
29 #include "gimple-pretty-print.h"
30 #include "fold-const.h"
31 #include "cfganal.h"
32 #include "tree-eh.h"
33 #include "gimplify.h"
34 #include "gimple-iterator.h"
35 #include "tree-cfg.h"
36 #include "tree-ssa-loop-manip.h"
37 #include "tree-ssa-loop.h"
38 #include "tree-into-ssa.h"
39 #include "cfgloop.h"
40 #include "domwalk.h"
41 #include "params.h"
42 #include "tree-affine.h"
43 #include "tree-ssa-propagate.h"
44 #include "trans-mem.h"
45 #include "gimple-fold.h"
47 /* TODO: Support for predicated code motion. I.e.
49 while (1)
51 if (cond)
53 a = inv;
54 something;
58 Where COND and INV are invariants, but evaluating INV may trap or be
59 invalid for some other reason if !COND. This may be transformed to
61 if (cond)
62 a = inv;
63 while (1)
65 if (cond)
66 something;
67 } */
69 /* The auxiliary data kept for each statement. */
71 struct lim_aux_data
73 struct loop *max_loop; /* The outermost loop in which the statement
74 is invariant. */
76 struct loop *tgt_loop; /* The loop out of which we want to move the
77 invariant. */
79 struct loop *always_executed_in;
80 /* The outermost loop for which we are sure
81 the statement is executed if the loop
82 is entered. */
84 unsigned cost; /* Cost of the computation performed by the
85 statement. */
87 vec<gimple *> depends; /* Vector of statements that must be also
88 hoisted out of the loop when this statement
89 is hoisted; i.e. those that define the
90 operands of the statement and are inside of
91 the MAX_LOOP loop. */
94 /* Maps statements to their lim_aux_data. */
96 static hash_map<gimple *, lim_aux_data *> *lim_aux_data_map;
98 /* Description of a memory reference location. */
100 struct mem_ref_loc
102 tree *ref; /* The reference itself. */
103 gimple *stmt; /* The statement in which it occurs. */
107 /* Description of a memory reference. */
109 struct im_mem_ref
111 unsigned id; /* ID assigned to the memory reference
112 (its index in memory_accesses.refs_list) */
113 hashval_t hash; /* Its hash value. */
115 /* The memory access itself and associated caching of alias-oracle
116 query meta-data. */
117 ao_ref mem;
119 bitmap stored; /* The set of loops in which this memory location
120 is stored to. */
121 vec<mem_ref_loc> accesses_in_loop;
122 /* The locations of the accesses. Vector
123 indexed by the loop number. */
125 /* The following sets are computed on demand. We keep both set and
126 its complement, so that we know whether the information was
127 already computed or not. */
128 bitmap_head indep_loop; /* The set of loops in which the memory
129 reference is independent, meaning:
130 If it is stored in the loop, this store
131 is independent of all other loads and
132 stores.
133 If it is only loaded, then it is independent
134 of all stores in the loop. */
135 bitmap_head dep_loop; /* The complement of INDEP_LOOP. */
138 /* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first
139 to record (in)dependence against stores in the loop and its subloops, the
140 second to record (in)dependence against all references in the loop
141 and its subloops. */
142 #define LOOP_DEP_BIT(loopnum, storedp) (2 * (loopnum) + (storedp ? 1 : 0))
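/* For instance (an illustrative reading of the encoding above): for loop
   number 3, bit LOOP_DEP_BIT (3, false) == 6 records (in)dependence against
   stores in loop 3 and its subloops, while bit LOOP_DEP_BIT (3, true) == 7
   records (in)dependence against all references there.  */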
144 /* Mem_ref hashtable helpers. */
146 struct mem_ref_hasher : nofree_ptr_hash <im_mem_ref>
148 typedef tree_node *compare_type;
149 static inline hashval_t hash (const im_mem_ref *);
150 static inline bool equal (const im_mem_ref *, const tree_node *);
153 /* A hash function for struct im_mem_ref object OBJ. */
155 inline hashval_t
156 mem_ref_hasher::hash (const im_mem_ref *mem)
158 return mem->hash;
161 /* An equality function for struct im_mem_ref object MEM1 with
162 memory reference OBJ2. */
164 inline bool
165 mem_ref_hasher::equal (const im_mem_ref *mem1, const tree_node *obj2)
167 return operand_equal_p (mem1->mem.ref, (const_tree) obj2, 0);
171 /* Description of memory accesses in loops. */
173 static struct
175 /* The hash table of memory references accessed in loops. */
176 hash_table<mem_ref_hasher> *refs;
178 /* The list of memory references. */
179 vec<im_mem_ref *> refs_list;
181 /* The set of memory references accessed in each loop. */
182 vec<bitmap_head> refs_in_loop;
184 /* The set of memory references stored in each loop. */
185 vec<bitmap_head> refs_stored_in_loop;
187 /* The set of memory references stored in each loop, including subloops. */
188 vec<bitmap_head> all_refs_stored_in_loop;
190 /* Cache for expanding memory addresses. */
191 hash_map<tree, name_expansion *> *ttae_cache;
192 } memory_accesses;
194 /* Obstack for the bitmaps in the above data structures. */
195 static bitmap_obstack lim_bitmap_obstack;
196 static obstack mem_ref_obstack;
198 static bool ref_indep_loop_p (struct loop *, im_mem_ref *);
200 /* Minimum cost of an expensive expression. */
201 #define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))
203 /* The outermost loop for which execution of the header guarantees that the
204 block will be executed. */
205 #define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
206 #define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))
208 /* ID of the shared unanalyzable mem. */
209 #define UNANALYZABLE_MEM_ID 0
211 /* Whether the reference was analyzable. */
212 #define MEM_ANALYZABLE(REF) ((REF)->id != UNANALYZABLE_MEM_ID)
214 static struct lim_aux_data *
215 init_lim_data (gimple *stmt)
217 lim_aux_data *p = XCNEW (struct lim_aux_data);
218 lim_aux_data_map->put (stmt, p);
220 return p;
223 static struct lim_aux_data *
224 get_lim_data (gimple *stmt)
226 lim_aux_data **p = lim_aux_data_map->get (stmt);
227 if (!p)
228 return NULL;
230 return *p;
233 /* Releases the memory occupied by DATA. */
235 static void
236 free_lim_aux_data (struct lim_aux_data *data)
238 data->depends.release ();
239 free (data);
242 static void
243 clear_lim_data (gimple *stmt)
245 lim_aux_data **p = lim_aux_data_map->get (stmt);
246 if (!p)
247 return;
249 free_lim_aux_data (*p);
250 *p = NULL;
254 /* The possibilities of statement movement. */
255 enum move_pos
257 MOVE_IMPOSSIBLE, /* No movement -- side effect expression. */
258 MOVE_PRESERVE_EXECUTION, /* Must not cause the non-executed statement
259 to become executed -- memory accesses, ... */
260 MOVE_POSSIBLE /* Unlimited movement. */
264 /* If it is possible to hoist the statement STMT unconditionally,
265 returns MOVE_POSSIBLE.
266 If it is possible to hoist the statement STMT, but we must avoid making
267 it executed if it would not be executed in the original program (e.g.
268 because it may trap), return MOVE_PRESERVE_EXECUTION.
269 Otherwise return MOVE_IMPOSSIBLE. */
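/* For illustration: a plain invariant arithmetic statement is typically
   MOVE_POSSIBLE; a pure or const call, or a statement that may trap, is
   MOVE_PRESERVE_EXECUTION (see the strlen example below); a statement with a
   virtual definition or other side effects is MOVE_IMPOSSIBLE.  */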
271 enum move_pos
272 movement_possibility (gimple *stmt)
274 tree lhs;
275 enum move_pos ret = MOVE_POSSIBLE;
277 if (flag_unswitch_loops
278 && gimple_code (stmt) == GIMPLE_COND)
280 /* If we perform unswitching, force the operands of the invariant
281 condition to be moved out of the loop. */
282 return MOVE_POSSIBLE;
285 if (gimple_code (stmt) == GIMPLE_PHI
286 && gimple_phi_num_args (stmt) <= 2
287 && !virtual_operand_p (gimple_phi_result (stmt))
288 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
289 return MOVE_POSSIBLE;
291 if (gimple_get_lhs (stmt) == NULL_TREE)
292 return MOVE_IMPOSSIBLE;
294 if (gimple_vdef (stmt))
295 return MOVE_IMPOSSIBLE;
297 if (stmt_ends_bb_p (stmt)
298 || gimple_has_volatile_ops (stmt)
299 || gimple_has_side_effects (stmt)
300 || stmt_could_throw_p (stmt))
301 return MOVE_IMPOSSIBLE;
303 if (is_gimple_call (stmt))
305 /* While a pure or const call is guaranteed to have no side effects, we
306 cannot move it arbitrarily. Consider code like
308 char *s = something ();
310 while (1)
312 if (s)
313 t = strlen (s);
314 else
315 t = 0;
318 Here the strlen call cannot be moved out of the loop, even though
319 s is invariant. In addition to possibly creating a call with
320 invalid arguments, moving out a function call that is not executed
321 may cause performance regressions in case the call is costly and
322 not executed at all. */
323 ret = MOVE_PRESERVE_EXECUTION;
324 lhs = gimple_call_lhs (stmt);
326 else if (is_gimple_assign (stmt))
327 lhs = gimple_assign_lhs (stmt);
328 else
329 return MOVE_IMPOSSIBLE;
331 if (TREE_CODE (lhs) == SSA_NAME
332 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
333 return MOVE_IMPOSSIBLE;
335 if (TREE_CODE (lhs) != SSA_NAME
336 || gimple_could_trap_p (stmt))
337 return MOVE_PRESERVE_EXECUTION;
339 /* Non local loads in a transaction cannot be hoisted out. Well,
340 unless the load happens on every path out of the loop, but we
341 don't take this into account yet. */
342 if (flag_tm
343 && gimple_in_transaction (stmt)
344 && gimple_assign_single_p (stmt))
346 tree rhs = gimple_assign_rhs1 (stmt);
347 if (DECL_P (rhs) && is_global_var (rhs))
349 if (dump_file)
351 fprintf (dump_file, "Cannot hoist conditional load of ");
352 print_generic_expr (dump_file, rhs, TDF_SLIM);
353 fprintf (dump_file, " because it is in a transaction.\n");
355 return MOVE_IMPOSSIBLE;
359 return ret;
362 /* Suppose that operand DEF is used inside the LOOP. Returns the outermost
363 loop to which we could move the expression using DEF if it did not have
364 other operands, i.e. the outermost loop enclosing LOOP in which the value
365 of DEF is invariant. */
367 static struct loop *
368 outermost_invariant_loop (tree def, struct loop *loop)
370 gimple *def_stmt;
371 basic_block def_bb;
372 struct loop *max_loop;
373 struct lim_aux_data *lim_data;
375 if (!def)
376 return superloop_at_depth (loop, 1);
378 if (TREE_CODE (def) != SSA_NAME)
380 gcc_assert (is_gimple_min_invariant (def));
381 return superloop_at_depth (loop, 1);
384 def_stmt = SSA_NAME_DEF_STMT (def);
385 def_bb = gimple_bb (def_stmt);
386 if (!def_bb)
387 return superloop_at_depth (loop, 1);
389 max_loop = find_common_loop (loop, def_bb->loop_father);
391 lim_data = get_lim_data (def_stmt);
392 if (lim_data != NULL && lim_data->max_loop != NULL)
393 max_loop = find_common_loop (max_loop,
394 loop_outer (lim_data->max_loop));
395 if (max_loop == loop)
396 return NULL;
397 max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);
399 return max_loop;
402 /* DATA is a structure containing information associated with a statement
403 inside LOOP. DEF is one of the operands of this statement.
405 Find the outermost loop enclosing LOOP in which the value of DEF is invariant
406 and record this in the DATA->max_loop field. If DEF itself is defined inside
407 this loop as well (i.e. we need to hoist it out of the loop if we want
408 to hoist the statement represented by DATA), record the statement in which
409 DEF is defined in the DATA->depends list. Additionally, if ADD_COST is true,
410 add the cost of the computation of DEF to DATA->cost.
412 If DEF is not invariant in LOOP, return false. Otherwise return true. */
414 static bool
415 add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
416 bool add_cost)
418 gimple *def_stmt = SSA_NAME_DEF_STMT (def);
419 basic_block def_bb = gimple_bb (def_stmt);
420 struct loop *max_loop;
421 struct lim_aux_data *def_data;
423 if (!def_bb)
424 return true;
426 max_loop = outermost_invariant_loop (def, loop);
427 if (!max_loop)
428 return false;
430 if (flow_loop_nested_p (data->max_loop, max_loop))
431 data->max_loop = max_loop;
433 def_data = get_lim_data (def_stmt);
434 if (!def_data)
435 return true;
437 if (add_cost
438 /* Only add the cost if the statement defining DEF is inside LOOP,
439 i.e. if it is likely that by moving the invariants dependent
440 on it, we will be able to avoid creating a new register for
441 it (since it will be only used in these dependent invariants). */
442 && def_bb->loop_father == loop)
443 data->cost += def_data->cost;
445 data->depends.safe_push (def_stmt);
447 return true;
450 /* Returns an estimate for a cost of statement STMT. The values here
451 are just ad-hoc constants, similar to costs for inlining. */
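/* For example, per the cases below: conditions, PHIs, memory references,
   calls (except builtin_constant_p, which costs 0), multiplications,
   divisions, shifts and rotates all cost LIM_EXPENSIVE; a vector CONSTRUCTOR
   costs one per element; a plain SSA copy or PAREN_EXPR costs 0; everything
   else costs 1.  */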
453 static unsigned
454 stmt_cost (gimple *stmt)
456 /* Always try to create possibilities for unswitching. */
457 if (gimple_code (stmt) == GIMPLE_COND
458 || gimple_code (stmt) == GIMPLE_PHI)
459 return LIM_EXPENSIVE;
461 /* We should be hoisting calls if possible. */
462 if (is_gimple_call (stmt))
464 tree fndecl;
466 /* Unless the call is a builtin_constant_p; this always folds to a
467 constant, so moving it is useless. */
468 fndecl = gimple_call_fndecl (stmt);
469 if (fndecl
470 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
471 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P)
472 return 0;
474 return LIM_EXPENSIVE;
477 /* Hoisting memory references out should almost surely be a win. */
478 if (gimple_references_memory_p (stmt))
479 return LIM_EXPENSIVE;
481 if (gimple_code (stmt) != GIMPLE_ASSIGN)
482 return 1;
484 switch (gimple_assign_rhs_code (stmt))
486 case MULT_EXPR:
487 case WIDEN_MULT_EXPR:
488 case WIDEN_MULT_PLUS_EXPR:
489 case WIDEN_MULT_MINUS_EXPR:
490 case DOT_PROD_EXPR:
491 case FMA_EXPR:
492 case TRUNC_DIV_EXPR:
493 case CEIL_DIV_EXPR:
494 case FLOOR_DIV_EXPR:
495 case ROUND_DIV_EXPR:
496 case EXACT_DIV_EXPR:
497 case CEIL_MOD_EXPR:
498 case FLOOR_MOD_EXPR:
499 case ROUND_MOD_EXPR:
500 case TRUNC_MOD_EXPR:
501 case RDIV_EXPR:
502 /* Division and multiplication are usually expensive. */
503 return LIM_EXPENSIVE;
505 case LSHIFT_EXPR:
506 case RSHIFT_EXPR:
507 case WIDEN_LSHIFT_EXPR:
508 case LROTATE_EXPR:
509 case RROTATE_EXPR:
510 /* Shifts and rotates are usually expensive. */
511 return LIM_EXPENSIVE;
513 case CONSTRUCTOR:
514 /* Make vector construction cost proportional to the number
515 of elements. */
516 return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));
518 case SSA_NAME:
519 case PAREN_EXPR:
520 /* Whether or not something is wrapped inside a PAREN_EXPR
521 should not change move cost. Nor should an intermediate
522 unpropagated SSA name copy. */
523 return 0;
525 default:
526 return 1;
530 /* Finds the outermost loop between OUTER and LOOP in which the memory reference
531 REF is independent. If REF is not independent in LOOP, NULL is returned
532 instead. */
534 static struct loop *
535 outermost_indep_loop (struct loop *outer, struct loop *loop, im_mem_ref *ref)
537 struct loop *aloop;
539 if (ref->stored && bitmap_bit_p (ref->stored, loop->num))
540 return NULL;
542 for (aloop = outer;
543 aloop != loop;
544 aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
545 if ((!ref->stored || !bitmap_bit_p (ref->stored, aloop->num))
546 && ref_indep_loop_p (aloop, ref))
547 return aloop;
549 if (ref_indep_loop_p (loop, ref))
550 return loop;
551 else
552 return NULL;
555 /* If there is a simple load or store to a memory reference in STMT, returns
556 the location of the memory reference, and sets IS_STORE according to whether
557 it is a store or load. Otherwise, returns NULL. */
559 static tree *
560 simple_mem_ref_in_stmt (gimple *stmt, bool *is_store)
562 tree *lhs, *rhs;
564 /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns. */
565 if (!gimple_assign_single_p (stmt))
566 return NULL;
568 lhs = gimple_assign_lhs_ptr (stmt);
569 rhs = gimple_assign_rhs1_ptr (stmt);
571 if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
573 *is_store = false;
574 return rhs;
576 else if (gimple_vdef (stmt)
577 && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
579 *is_store = true;
580 return lhs;
582 else
583 return NULL;
586 /* Returns the memory reference contained in STMT. */
588 static im_mem_ref *
589 mem_ref_in_stmt (gimple *stmt)
591 bool store;
592 tree *mem = simple_mem_ref_in_stmt (stmt, &store);
593 hashval_t hash;
594 im_mem_ref *ref;
596 if (!mem)
597 return NULL;
598 gcc_assert (!store);
600 hash = iterative_hash_expr (*mem, 0);
601 ref = memory_accesses.refs->find_with_hash (*mem, hash);
603 gcc_assert (ref != NULL);
604 return ref;
607 /* From a controlling predicate in DOM determine the arguments from
608 the PHI node PHI that are chosen if the predicate evaluates to
609 true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if
610 they are non-NULL. Returns true if the arguments can be determined,
611 else return false. */
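/* For example (names purely illustrative): for a PHI like
   x_1 = PHI <a_2 (bb_t), b_3 (bb_f)>, where bb_t and bb_f are reached from
   DOM on the true and false edges of its ending condition, *TRUE_ARG_P would
   be set to a_2 and *FALSE_ARG_P to b_3.  */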
613 static bool
614 extract_true_false_args_from_phi (basic_block dom, gphi *phi,
615 tree *true_arg_p, tree *false_arg_p)
617 edge te, fe;
618 if (! extract_true_false_controlled_edges (dom, gimple_bb (phi),
619 &te, &fe))
620 return false;
622 if (true_arg_p)
623 *true_arg_p = PHI_ARG_DEF (phi, te->dest_idx);
624 if (false_arg_p)
625 *false_arg_p = PHI_ARG_DEF (phi, fe->dest_idx);
627 return true;
630 /* Determine the outermost loop to which it is possible to hoist a statement
631 STMT and store it to LIM_DATA (STMT)->max_loop. To do this we determine
632 the outermost loop in which the value computed by STMT is invariant.
633 If MUST_PRESERVE_EXEC is true, additionally choose a loop such that
634 whether STMT is executed is preserved. It also fills in other related
635 information in LIM_DATA (STMT).
637 The function returns false if STMT cannot be hoisted outside of the loop it
638 is defined in, and true otherwise. */
640 static bool
641 determine_max_movement (gimple *stmt, bool must_preserve_exec)
643 basic_block bb = gimple_bb (stmt);
644 struct loop *loop = bb->loop_father;
645 struct loop *level;
646 struct lim_aux_data *lim_data = get_lim_data (stmt);
647 tree val;
648 ssa_op_iter iter;
650 if (must_preserve_exec)
651 level = ALWAYS_EXECUTED_IN (bb);
652 else
653 level = superloop_at_depth (loop, 1);
654 lim_data->max_loop = level;
656 if (gphi *phi = dyn_cast <gphi *> (stmt))
658 use_operand_p use_p;
659 unsigned min_cost = UINT_MAX;
660 unsigned total_cost = 0;
661 struct lim_aux_data *def_data;
663 /* We will end up promoting dependencies to be unconditionally
664 evaluated. For this reason the PHI cost (and thus the
665 cost we remove from the loop by doing the invariant motion)
666 is that of the cheapest PHI argument dependency chain. */
667 FOR_EACH_PHI_ARG (use_p, phi, iter, SSA_OP_USE)
669 val = USE_FROM_PTR (use_p);
671 if (TREE_CODE (val) != SSA_NAME)
673 /* Assign cost 1 to constants. */
674 min_cost = MIN (min_cost, 1);
675 total_cost += 1;
676 continue;
678 if (!add_dependency (val, lim_data, loop, false))
679 return false;
681 gimple *def_stmt = SSA_NAME_DEF_STMT (val);
682 if (gimple_bb (def_stmt)
683 && gimple_bb (def_stmt)->loop_father == loop)
685 def_data = get_lim_data (def_stmt);
686 if (def_data)
688 min_cost = MIN (min_cost, def_data->cost);
689 total_cost += def_data->cost;
694 min_cost = MIN (min_cost, total_cost);
695 lim_data->cost += min_cost;
697 if (gimple_phi_num_args (phi) > 1)
699 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
700 gimple *cond;
701 if (gsi_end_p (gsi_last_bb (dom)))
702 return false;
703 cond = gsi_stmt (gsi_last_bb (dom));
704 if (gimple_code (cond) != GIMPLE_COND)
705 return false;
706 /* Verify that this is an extended form of a diamond and
707 the PHI arguments are completely controlled by the
708 predicate in DOM. */
709 if (!extract_true_false_args_from_phi (dom, phi, NULL, NULL))
710 return false;
712 /* Fold in dependencies and cost of the condition. */
713 FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
715 if (!add_dependency (val, lim_data, loop, false))
716 return false;
717 def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
718 if (def_data)
719 total_cost += def_data->cost;
722 /* We want to avoid unconditionally executing very expensive
723 operations. As costs for our dependencies cannot be
724 negative, just claim we are not invariant for this case.
725 We also are not sure whether the control-flow inside the
726 loop will vanish. */
727 if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
728 && !(min_cost != 0
729 && total_cost / min_cost <= 2))
730 return false;
732 /* Assume that the control-flow in the loop will vanish.
733 ??? We should verify this and not artificially increase
734 the cost if that is not the case. */
735 lim_data->cost += stmt_cost (stmt);
738 return true;
740 else
741 FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
742 if (!add_dependency (val, lim_data, loop, true))
743 return false;
745 if (gimple_vuse (stmt))
747 im_mem_ref *ref = mem_ref_in_stmt (stmt);
749 if (ref)
751 lim_data->max_loop
752 = outermost_indep_loop (lim_data->max_loop, loop, ref);
753 if (!lim_data->max_loop)
754 return false;
756 else
758 if ((val = gimple_vuse (stmt)) != NULL_TREE)
760 if (!add_dependency (val, lim_data, loop, false))
761 return false;
766 lim_data->cost += stmt_cost (stmt);
768 return true;
771 /* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
772 and that one of the operands of this statement is computed by STMT.
773 Ensure that STMT (together with all the statements that define its
774 operands) is hoisted at least out of the loop LEVEL. */
776 static void
777 set_level (gimple *stmt, struct loop *orig_loop, struct loop *level)
779 struct loop *stmt_loop = gimple_bb (stmt)->loop_father;
780 struct lim_aux_data *lim_data;
781 gimple *dep_stmt;
782 unsigned i;
784 stmt_loop = find_common_loop (orig_loop, stmt_loop);
785 lim_data = get_lim_data (stmt);
786 if (lim_data != NULL && lim_data->tgt_loop != NULL)
787 stmt_loop = find_common_loop (stmt_loop,
788 loop_outer (lim_data->tgt_loop));
789 if (flow_loop_nested_p (stmt_loop, level))
790 return;
792 gcc_assert (level == lim_data->max_loop
793 || flow_loop_nested_p (lim_data->max_loop, level));
795 lim_data->tgt_loop = level;
796 FOR_EACH_VEC_ELT (lim_data->depends, i, dep_stmt)
797 set_level (dep_stmt, orig_loop, level);
800 /* Determines an outermost loop from which we want to hoist the statement STMT.
801 For now we choose the outermost possible loop. TODO -- use profiling
802 information to set it more sanely. */
804 static void
805 set_profitable_level (gimple *stmt)
807 set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
810 /* Returns true if STMT is a call that has side effects. */
812 static bool
813 nonpure_call_p (gimple *stmt)
815 if (gimple_code (stmt) != GIMPLE_CALL)
816 return false;
818 return gimple_has_side_effects (stmt);
821 /* Rewrite a/b to a*(1/b). Return the invariant stmt to process. */
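/* Illustrative sketch, assuming the divisor is the loop-invariant operand:

     x = a / b;

   becomes

     reciptmp = 1.0 / b;
     x = a * reciptmp;

   so that the reciprocal computation can later be hoisted out of the loop.  */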
823 static gimple *
824 rewrite_reciprocal (gimple_stmt_iterator *bsi)
826 gassign *stmt, *stmt1, *stmt2;
827 tree name, lhs, type;
828 tree real_one;
829 gimple_stmt_iterator gsi;
831 stmt = as_a <gassign *> (gsi_stmt (*bsi));
832 lhs = gimple_assign_lhs (stmt);
833 type = TREE_TYPE (lhs);
835 real_one = build_one_cst (type);
837 name = make_temp_ssa_name (type, NULL, "reciptmp");
838 stmt1 = gimple_build_assign (name, RDIV_EXPR, real_one,
839 gimple_assign_rhs2 (stmt));
840 stmt2 = gimple_build_assign (lhs, MULT_EXPR, name,
841 gimple_assign_rhs1 (stmt));
843 /* Replace division stmt with reciprocal and multiply stmts.
844 The multiply stmt is not invariant, so update iterator
845 and avoid rescanning. */
846 gsi = *bsi;
847 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
848 gsi_replace (&gsi, stmt2, true);
850 /* Continue processing with invariant reciprocal statement. */
851 return stmt1;
854 /* Check if the pattern at *BSI is a bittest of the form
855 (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0. */
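/* A rough sketch, assuming B is loop invariant while A is not:

     tmp = A >> B;
     if ((tmp & 1) != 0) ...

   becomes

     shifttmp = 1 << B;
     if ((A & shifttmp) != 0) ...

   which lets the mask computation 1 << B be hoisted out of the loop.  */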
857 static gimple *
858 rewrite_bittest (gimple_stmt_iterator *bsi)
860 gassign *stmt;
861 gimple *stmt1;
862 gassign *stmt2;
863 gimple *use_stmt;
864 gcond *cond_stmt;
865 tree lhs, name, t, a, b;
866 use_operand_p use;
868 stmt = as_a <gassign *> (gsi_stmt (*bsi));
869 lhs = gimple_assign_lhs (stmt);
871 /* Verify that the single use of lhs is a comparison against zero. */
872 if (TREE_CODE (lhs) != SSA_NAME
873 || !single_imm_use (lhs, &use, &use_stmt))
874 return stmt;
875 cond_stmt = dyn_cast <gcond *> (use_stmt);
876 if (!cond_stmt)
877 return stmt;
878 if (gimple_cond_lhs (cond_stmt) != lhs
879 || (gimple_cond_code (cond_stmt) != NE_EXPR
880 && gimple_cond_code (cond_stmt) != EQ_EXPR)
881 || !integer_zerop (gimple_cond_rhs (cond_stmt)))
882 return stmt;
884 /* Get at the operands of the shift. The rhs is TMP1 & 1. */
885 stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
886 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
887 return stmt;
889 /* There is a conversion in between possibly inserted by fold. */
890 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
892 t = gimple_assign_rhs1 (stmt1);
893 if (TREE_CODE (t) != SSA_NAME
894 || !has_single_use (t))
895 return stmt;
896 stmt1 = SSA_NAME_DEF_STMT (t);
897 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
898 return stmt;
901 /* Verify that B is loop invariant but A is not. Verify that with
902 all the stmt walking we are still in the same loop. */
903 if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
904 || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
905 return stmt;
907 a = gimple_assign_rhs1 (stmt1);
908 b = gimple_assign_rhs2 (stmt1);
910 if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
911 && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
913 gimple_stmt_iterator rsi;
915 /* 1 << B */
916 t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
917 build_int_cst (TREE_TYPE (a), 1), b);
918 name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
919 stmt1 = gimple_build_assign (name, t);
921 /* A & (1 << B) */
922 t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
923 name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
924 stmt2 = gimple_build_assign (name, t);
926 /* Replace the SSA_NAME we compare against zero. Adjust
927 the type of zero accordingly. */
928 SET_USE (use, name);
929 gimple_cond_set_rhs (cond_stmt,
930 build_int_cst_type (TREE_TYPE (name),
931 0));
933 /* Don't use gsi_replace here, none of the new assignments sets
934 the variable originally set in stmt. Move bsi to stmt1, and
935 then remove the original stmt, so that we get a chance to
936 retain debug info for it. */
937 rsi = *bsi;
938 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
939 gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
940 gimple *to_release = gsi_stmt (rsi);
941 gsi_remove (&rsi, true);
942 release_defs (to_release);
944 return stmt1;
947 return stmt;
950 /* For each statement determines the outermost loop in which it is invariant,
951 statements on whose motion it depends and the cost of the computation.
952 This information is stored in the LIM_DATA structure associated with
953 each statement. */
954 class invariantness_dom_walker : public dom_walker
956 public:
957 invariantness_dom_walker (cdi_direction direction)
958 : dom_walker (direction) {}
960 virtual void before_dom_children (basic_block);
963 /* Determine the outermost loops in which statements in basic block BB are
964 invariant, and record them in the LIM_DATA associated with the statements.
965 Callback for dom_walker. */
967 void
968 invariantness_dom_walker::before_dom_children (basic_block bb)
970 enum move_pos pos;
971 gimple_stmt_iterator bsi;
972 gimple *stmt;
973 bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
974 struct loop *outermost = ALWAYS_EXECUTED_IN (bb);
975 struct lim_aux_data *lim_data;
977 if (!loop_outer (bb->loop_father))
978 return;
980 if (dump_file && (dump_flags & TDF_DETAILS))
981 fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
982 bb->index, bb->loop_father->num, loop_depth (bb->loop_father));
984 /* Look at PHI nodes, but only if there are at most two.
985 ??? We could relax this further by post-processing the inserted
986 code and transforming adjacent cond-exprs with the same predicate
987 to control flow again. */
988 bsi = gsi_start_phis (bb);
989 if (!gsi_end_p (bsi)
990 && ((gsi_next (&bsi), gsi_end_p (bsi))
991 || (gsi_next (&bsi), gsi_end_p (bsi))))
992 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
994 stmt = gsi_stmt (bsi);
996 pos = movement_possibility (stmt);
997 if (pos == MOVE_IMPOSSIBLE)
998 continue;
1000 lim_data = init_lim_data (stmt);
1001 lim_data->always_executed_in = outermost;
1003 if (!determine_max_movement (stmt, false))
1005 lim_data->max_loop = NULL;
1006 continue;
1009 if (dump_file && (dump_flags & TDF_DETAILS))
1011 print_gimple_stmt (dump_file, stmt, 2, 0);
1012 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1013 loop_depth (lim_data->max_loop),
1014 lim_data->cost);
1017 if (lim_data->cost >= LIM_EXPENSIVE)
1018 set_profitable_level (stmt);
1021 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1023 stmt = gsi_stmt (bsi);
1025 pos = movement_possibility (stmt);
1026 if (pos == MOVE_IMPOSSIBLE)
1028 if (nonpure_call_p (stmt))
1030 maybe_never = true;
1031 outermost = NULL;
1033 /* Make sure to note always_executed_in for stores to make
1034 store-motion work. */
1035 else if (stmt_makes_single_store (stmt))
1037 struct lim_aux_data *lim_data = init_lim_data (stmt);
1038 lim_data->always_executed_in = outermost;
1040 continue;
1043 if (is_gimple_assign (stmt)
1044 && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
1045 == GIMPLE_BINARY_RHS))
1047 tree op0 = gimple_assign_rhs1 (stmt);
1048 tree op1 = gimple_assign_rhs2 (stmt);
1049 struct loop *ol1 = outermost_invariant_loop (op1,
1050 loop_containing_stmt (stmt));
1052 /* If the divisor is invariant, convert a/b to a*(1/b), allowing the reciprocal
1053 to be hoisted out of the loop, saving an expensive divide. */
1054 if (pos == MOVE_POSSIBLE
1055 && gimple_assign_rhs_code (stmt) == RDIV_EXPR
1056 && flag_unsafe_math_optimizations
1057 && !flag_trapping_math
1058 && ol1 != NULL
1059 && outermost_invariant_loop (op0, ol1) == NULL)
1060 stmt = rewrite_reciprocal (&bsi);
1062 /* If the shift count is invariant, convert (A >> B) & 1 to
1063 A & (1 << B) allowing the bit mask to be hoisted out of the loop
1064 saving an expensive shift. */
1065 if (pos == MOVE_POSSIBLE
1066 && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
1067 && integer_onep (op1)
1068 && TREE_CODE (op0) == SSA_NAME
1069 && has_single_use (op0))
1070 stmt = rewrite_bittest (&bsi);
1073 lim_data = init_lim_data (stmt);
1074 lim_data->always_executed_in = outermost;
1076 if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
1077 continue;
1079 if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
1081 lim_data->max_loop = NULL;
1082 continue;
1085 if (dump_file && (dump_flags & TDF_DETAILS))
1087 print_gimple_stmt (dump_file, stmt, 2, 0);
1088 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1089 loop_depth (lim_data->max_loop),
1090 lim_data->cost);
1093 if (lim_data->cost >= LIM_EXPENSIVE)
1094 set_profitable_level (stmt);
1098 class move_computations_dom_walker : public dom_walker
1100 public:
1101 move_computations_dom_walker (cdi_direction direction)
1102 : dom_walker (direction), todo_ (0) {}
1104 virtual void before_dom_children (basic_block);
1106 unsigned int todo_;
1109 /* Hoist the statements in basic block BB out of the loops prescribed by
1110 data stored in LIM_DATA structures associated with each statement. Callback
1111 for walk_dominator_tree. */
1113 void
1114 move_computations_dom_walker::before_dom_children (basic_block bb)
1116 struct loop *level;
1117 unsigned cost = 0;
1118 struct lim_aux_data *lim_data;
1120 if (!loop_outer (bb->loop_father))
1121 return;
1123 for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
1125 gassign *new_stmt;
1126 gphi *stmt = bsi.phi ();
1128 lim_data = get_lim_data (stmt);
1129 if (lim_data == NULL)
1131 gsi_next (&bsi);
1132 continue;
1135 cost = lim_data->cost;
1136 level = lim_data->tgt_loop;
1137 clear_lim_data (stmt);
1139 if (!level)
1141 gsi_next (&bsi);
1142 continue;
1145 if (dump_file && (dump_flags & TDF_DETAILS))
1147 fprintf (dump_file, "Moving PHI node\n");
1148 print_gimple_stmt (dump_file, stmt, 0, 0);
1149 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1150 cost, level->num);
1153 if (gimple_phi_num_args (stmt) == 1)
1155 tree arg = PHI_ARG_DEF (stmt, 0);
1156 new_stmt = gimple_build_assign (gimple_phi_result (stmt),
1157 TREE_CODE (arg), arg);
1159 else
1161 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
1162 gimple *cond = gsi_stmt (gsi_last_bb (dom));
1163 tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
1164 /* Get the PHI arguments corresponding to the true and false
1165 edges of COND. */
1166 extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
1167 gcc_assert (arg0 && arg1);
1168 t = build2 (gimple_cond_code (cond), boolean_type_node,
1169 gimple_cond_lhs (cond), gimple_cond_rhs (cond));
1170 new_stmt = gimple_build_assign (gimple_phi_result (stmt),
1171 COND_EXPR, t, arg0, arg1);
1172 todo_ |= TODO_cleanup_cfg;
1174 if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (new_stmt)))
1175 && (!ALWAYS_EXECUTED_IN (bb)
1176 || (ALWAYS_EXECUTED_IN (bb) != level
1177 && !flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1179 tree lhs = gimple_assign_lhs (new_stmt);
1180 SSA_NAME_RANGE_INFO (lhs) = NULL;
1182 gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
1183 remove_phi_node (&bsi, false);
1186 for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
1188 edge e;
1190 gimple *stmt = gsi_stmt (bsi);
1192 lim_data = get_lim_data (stmt);
1193 if (lim_data == NULL)
1195 gsi_next (&bsi);
1196 continue;
1199 cost = lim_data->cost;
1200 level = lim_data->tgt_loop;
1201 clear_lim_data (stmt);
1203 if (!level)
1205 gsi_next (&bsi);
1206 continue;
1209 /* We do not really want to move conditionals out of the loop; we just
1210 placed it here to force its operands to be moved if necessary. */
1211 if (gimple_code (stmt) == GIMPLE_COND)
1212 continue;
1214 if (dump_file && (dump_flags & TDF_DETAILS))
1216 fprintf (dump_file, "Moving statement\n");
1217 print_gimple_stmt (dump_file, stmt, 0, 0);
1218 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1219 cost, level->num);
1222 e = loop_preheader_edge (level);
1223 gcc_assert (!gimple_vdef (stmt));
1224 if (gimple_vuse (stmt))
1226 /* The new VUSE is the one from the virtual PHI in the loop
1227 header or the one already present. */
1228 gphi_iterator gsi2;
1229 for (gsi2 = gsi_start_phis (e->dest);
1230 !gsi_end_p (gsi2); gsi_next (&gsi2))
1232 gphi *phi = gsi2.phi ();
1233 if (virtual_operand_p (gimple_phi_result (phi)))
1235 gimple_set_vuse (stmt, PHI_ARG_DEF_FROM_EDGE (phi, e));
1236 break;
1240 gsi_remove (&bsi, false);
1241 if (gimple_has_lhs (stmt)
1242 && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME
1243 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_get_lhs (stmt)))
1244 && (!ALWAYS_EXECUTED_IN (bb)
1245 || !(ALWAYS_EXECUTED_IN (bb) == level
1246 || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1248 tree lhs = gimple_get_lhs (stmt);
1249 SSA_NAME_RANGE_INFO (lhs) = NULL;
1251 /* In case this is a stmt that is not unconditionally executed
1252 when the target loop header is executed and the stmt may
1253 invoke undefined integer or pointer overflow, rewrite it to
1254 unsigned arithmetic. */
1255 if (is_gimple_assign (stmt)
1256 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))
1257 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (gimple_assign_lhs (stmt)))
1258 && arith_code_with_undefined_signed_overflow
1259 (gimple_assign_rhs_code (stmt))
1260 && (!ALWAYS_EXECUTED_IN (bb)
1261 || !(ALWAYS_EXECUTED_IN (bb) == level
1262 || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1263 gsi_insert_seq_on_edge (e, rewrite_to_defined_overflow (stmt));
1264 else
1265 gsi_insert_on_edge (e, stmt);
1269 /* Hoist the statements out of the loops prescribed by data stored in
1270 LIM_DATA structures associated with each statement. */
1272 static unsigned int
1273 move_computations (void)
1275 move_computations_dom_walker walker (CDI_DOMINATORS);
1276 walker.walk (cfun->cfg->x_entry_block_ptr);
1278 gsi_commit_edge_inserts ();
1279 if (need_ssa_update_p (cfun))
1280 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
1282 return walker.todo_;
1285 /* Checks whether the statement defining variable *INDEX can be hoisted
1286 out of the loop passed in DATA. Callback for for_each_index. */
1288 static bool
1289 may_move_till (tree ref, tree *index, void *data)
1291 struct loop *loop = (struct loop *) data, *max_loop;
1293 /* If REF is an array reference, check also that the step and the lower
1294 bound are invariant in LOOP. */
1295 if (TREE_CODE (ref) == ARRAY_REF)
1297 tree step = TREE_OPERAND (ref, 3);
1298 tree lbound = TREE_OPERAND (ref, 2);
1300 max_loop = outermost_invariant_loop (step, loop);
1301 if (!max_loop)
1302 return false;
1304 max_loop = outermost_invariant_loop (lbound, loop);
1305 if (!max_loop)
1306 return false;
1309 max_loop = outermost_invariant_loop (*index, loop);
1310 if (!max_loop)
1311 return false;
1313 return true;
1316 /* If OP is an SSA name, force the statement that defines it to be
1317 moved out of the LOOP. ORIG_LOOP is the loop in which EXPR is used. */
1319 static void
1320 force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
1322 gimple *stmt;
1324 if (!op
1325 || is_gimple_min_invariant (op))
1326 return;
1328 gcc_assert (TREE_CODE (op) == SSA_NAME);
1330 stmt = SSA_NAME_DEF_STMT (op);
1331 if (gimple_nop_p (stmt))
1332 return;
1334 set_level (stmt, orig_loop, loop);
1337 /* Forces the statements defining invariants in REF (and *INDEX) to be moved out of
1338 the LOOP. The reference REF is used in the loop ORIG_LOOP. Callback for
1339 for_each_index. */
1341 struct fmt_data
1343 struct loop *loop;
1344 struct loop *orig_loop;
1347 static bool
1348 force_move_till (tree ref, tree *index, void *data)
1350 struct fmt_data *fmt_data = (struct fmt_data *) data;
1352 if (TREE_CODE (ref) == ARRAY_REF)
1354 tree step = TREE_OPERAND (ref, 3);
1355 tree lbound = TREE_OPERAND (ref, 2);
1357 force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
1358 force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
1361 force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);
1363 return true;
1366 /* A function to free the mem_ref object MEM. */
1368 static void
1369 memref_free (struct im_mem_ref *mem)
1371 mem->accesses_in_loop.release ();
1374 /* Allocates and returns a memory reference description for MEM whose hash
1375 value is HASH and id is ID. */
1377 static im_mem_ref *
1378 mem_ref_alloc (tree mem, unsigned hash, unsigned id)
1380 im_mem_ref *ref = XOBNEW (&mem_ref_obstack, struct im_mem_ref);
1381 ao_ref_init (&ref->mem, mem);
1382 ref->id = id;
1383 ref->hash = hash;
1384 ref->stored = NULL;
1385 bitmap_initialize (&ref->indep_loop, &lim_bitmap_obstack);
1386 bitmap_initialize (&ref->dep_loop, &lim_bitmap_obstack);
1387 ref->accesses_in_loop.create (1);
1389 return ref;
1392 /* Records memory reference location *LOC to the memory reference
1393 description REF. The reference occurs in statement STMT. */
1395 static void
1396 record_mem_ref_loc (im_mem_ref *ref, gimple *stmt, tree *loc)
1398 mem_ref_loc aref;
1399 aref.stmt = stmt;
1400 aref.ref = loc;
1401 ref->accesses_in_loop.safe_push (aref);
1404 /* Set the LOOP bit in REF's stored bitmap, allocating it if
1405 necessary. Return whether a bit was changed. */
1407 static bool
1408 set_ref_stored_in_loop (im_mem_ref *ref, struct loop *loop)
1410 if (!ref->stored)
1411 ref->stored = BITMAP_ALLOC (&lim_bitmap_obstack);
1412 return bitmap_set_bit (ref->stored, loop->num);
1415 /* Marks reference REF as stored in LOOP. */
1417 static void
1418 mark_ref_stored (im_mem_ref *ref, struct loop *loop)
1420 while (loop != current_loops->tree_root
1421 && set_ref_stored_in_loop (ref, loop))
1422 loop = loop_outer (loop);
1425 /* Gathers memory references in statement STMT in LOOP, storing the
1426 information about them in the memory_accesses structure. Marks
1427 the vops accessed through unrecognized statements there as
1428 well. */
1430 static void
1431 gather_mem_refs_stmt (struct loop *loop, gimple *stmt)
1433 tree *mem = NULL;
1434 hashval_t hash;
1435 im_mem_ref **slot;
1436 im_mem_ref *ref;
1437 bool is_stored;
1438 unsigned id;
1440 if (!gimple_vuse (stmt))
1441 return;
1443 mem = simple_mem_ref_in_stmt (stmt, &is_stored);
1444 if (!mem)
1446 /* We use the shared mem_ref for all unanalyzable refs. */
1447 id = UNANALYZABLE_MEM_ID;
1448 ref = memory_accesses.refs_list[id];
1449 if (dump_file && (dump_flags & TDF_DETAILS))
1451 fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
1452 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1454 is_stored = gimple_vdef (stmt);
1456 else
1458 hash = iterative_hash_expr (*mem, 0);
1459 slot = memory_accesses.refs->find_slot_with_hash (*mem, hash, INSERT);
1460 if (*slot)
1462 ref = *slot;
1463 id = ref->id;
1465 else
1467 id = memory_accesses.refs_list.length ();
1468 ref = mem_ref_alloc (*mem, hash, id);
1469 memory_accesses.refs_list.safe_push (ref);
1470 *slot = ref;
1472 if (dump_file && (dump_flags & TDF_DETAILS))
1474 fprintf (dump_file, "Memory reference %u: ", id);
1475 print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM);
1476 fprintf (dump_file, "\n");
1480 record_mem_ref_loc (ref, stmt, mem);
1482 bitmap_set_bit (&memory_accesses.refs_in_loop[loop->num], ref->id);
1483 if (is_stored)
1485 bitmap_set_bit (&memory_accesses.refs_stored_in_loop[loop->num], ref->id);
1486 mark_ref_stored (ref, loop);
1488 return;
1491 static unsigned *bb_loop_postorder;
1493 /* qsort comparison function to sort blocks by their loop father's postorder. */
1495 static int
1496 sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_)
1498 basic_block bb1 = *(basic_block *)const_cast<void *>(bb1_);
1499 basic_block bb2 = *(basic_block *)const_cast<void *>(bb2_);
1500 struct loop *loop1 = bb1->loop_father;
1501 struct loop *loop2 = bb2->loop_father;
1502 if (loop1->num == loop2->num)
1503 return 0;
1504 return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
1507 /* qsort comparison function to sort ref locs by their loop father's postorder. */
1509 static int
1510 sort_locs_in_loop_postorder_cmp (const void *loc1_, const void *loc2_)
1512 mem_ref_loc *loc1 = (mem_ref_loc *)const_cast<void *>(loc1_);
1513 mem_ref_loc *loc2 = (mem_ref_loc *)const_cast<void *>(loc2_);
1514 struct loop *loop1 = gimple_bb (loc1->stmt)->loop_father;
1515 struct loop *loop2 = gimple_bb (loc2->stmt)->loop_father;
1516 if (loop1->num == loop2->num)
1517 return 0;
1518 return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
1521 /* Gathers memory references in loops. */
1523 static void
1524 analyze_memory_references (void)
1526 gimple_stmt_iterator bsi;
1527 basic_block bb, *bbs;
1528 struct loop *loop, *outer;
1529 unsigned i, n;
1531 /* Collect all basic blocks in loops and sort them by their
1532 loop's postorder. */
1533 i = 0;
1534 bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
1535 FOR_EACH_BB_FN (bb, cfun)
1536 if (bb->loop_father != current_loops->tree_root)
1537 bbs[i++] = bb;
1538 n = i;
1539 qsort (bbs, n, sizeof (basic_block), sort_bbs_in_loop_postorder_cmp);
1541 /* Visit blocks in loop postorder and assign mem-ref IDs in that order.
1542 That results in better locality for all the bitmaps. */
1543 for (i = 0; i < n; ++i)
1545 basic_block bb = bbs[i];
1546 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1547 gather_mem_refs_stmt (bb->loop_father, gsi_stmt (bsi));
1550 /* Sort the location list of gathered memory references by their
1551 loop postorder number. */
1552 im_mem_ref *ref;
1553 FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
1554 ref->accesses_in_loop.qsort (sort_locs_in_loop_postorder_cmp);
1556 free (bbs);
1557 // free (bb_loop_postorder);
1559 /* Propagate the information about accessed memory references up
1560 the loop hierarchy. */
1561 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1563 /* Finalize the overall touched references (including subloops). */
1564 bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
1565 &memory_accesses.refs_stored_in_loop[loop->num]);
1567 /* Propagate the information about accessed memory references up
1568 the loop hierarchy. */
1569 outer = loop_outer (loop);
1570 if (outer == current_loops->tree_root)
1571 continue;
1573 bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[outer->num],
1574 &memory_accesses.all_refs_stored_in_loop[loop->num]);
1578 /* Returns true if MEM1 and MEM2 may alias. TTAE_CACHE is used as a cache in
1579 tree_to_aff_combination_expand. */
1581 static bool
1582 mem_refs_may_alias_p (im_mem_ref *mem1, im_mem_ref *mem2,
1583 hash_map<tree, name_expansion *> **ttae_cache)
1585 /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
1586 object and their offsets differ in such a way that the locations cannot
1587 overlap, then they cannot alias. */
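/* For example (informally): two accesses to adjacent elements of the same
   array expand to the same base with offsets differing by at least the
   access size, so aff_comb_cannot_overlap_p below can conclude that the
   locations do not overlap and therefore do not alias.  */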
1588 widest_int size1, size2;
1589 aff_tree off1, off2;
1591 /* Perform basic offset and type-based disambiguation. */
1592 if (!refs_may_alias_p_1 (&mem1->mem, &mem2->mem, true))
1593 return false;
1595 /* The expansion of addresses may be a bit expensive, thus we only do
1596 the check at -O2 and higher optimization levels. */
1597 if (optimize < 2)
1598 return true;
1600 get_inner_reference_aff (mem1->mem.ref, &off1, &size1);
1601 get_inner_reference_aff (mem2->mem.ref, &off2, &size2);
1602 aff_combination_expand (&off1, ttae_cache);
1603 aff_combination_expand (&off2, ttae_cache);
1604 aff_combination_scale (&off1, -1);
1605 aff_combination_add (&off2, &off1);
1607 if (aff_comb_cannot_overlap_p (&off2, size1, size2))
1608 return false;
1610 return true;
1613 /* Compare function for bsearch searching for reference locations
1614 in a loop. */
1616 static int
1617 find_ref_loc_in_loop_cmp (const void *loop_, const void *loc_)
1619 struct loop *loop = (struct loop *)const_cast<void *>(loop_);
1620 mem_ref_loc *loc = (mem_ref_loc *)const_cast<void *>(loc_);
1621 struct loop *loc_loop = gimple_bb (loc->stmt)->loop_father;
1622 if (loop->num == loc_loop->num
1623 || flow_loop_nested_p (loop, loc_loop))
1624 return 0;
1625 return (bb_loop_postorder[loop->num] < bb_loop_postorder[loc_loop->num]
1626 ? -1 : 1);
1629 /* Iterates over all locations of REF in LOOP and its subloops calling
1630 fn.operator() with the location as argument. When that operator
1631 returns true the iteration is stopped and true is returned.
1632 Otherwise false is returned. */
1634 template <typename FN>
1635 static bool
1636 for_all_locs_in_loop (struct loop *loop, im_mem_ref *ref, FN fn)
1638 unsigned i;
1639 mem_ref_loc *loc;
1641 /* Search for the cluster of locs in the accesses_in_loop vector
1642 which is sorted by the postorder index of the loop father. */
1643 loc = ref->accesses_in_loop.bsearch (loop, find_ref_loc_in_loop_cmp);
1644 if (!loc)
1645 return false;
1647 /* We have found one location inside loop or its sub-loops. Iterate
1648 both forward and backward to cover the whole cluster. */
1649 i = loc - ref->accesses_in_loop.address ();
1650 while (i > 0)
1652 --i;
1653 mem_ref_loc *l = &ref->accesses_in_loop[i];
1654 if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
1655 break;
1656 if (fn (l))
1657 return true;
1659 for (i = loc - ref->accesses_in_loop.address ();
1660 i < ref->accesses_in_loop.length (); ++i)
1662 mem_ref_loc *l = &ref->accesses_in_loop[i];
1663 if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
1664 break;
1665 if (fn (l))
1666 return true;
1669 return false;
1672 /* Rewrites location LOC by TMP_VAR. */
1674 struct rewrite_mem_ref_loc
1676 rewrite_mem_ref_loc (tree tmp_var_) : tmp_var (tmp_var_) {}
1677 bool operator () (mem_ref_loc *loc);
1678 tree tmp_var;
1681 bool
1682 rewrite_mem_ref_loc::operator () (mem_ref_loc *loc)
1684 *loc->ref = tmp_var;
1685 update_stmt (loc->stmt);
1686 return false;
1689 /* Rewrites all references to REF in LOOP by variable TMP_VAR. */
1691 static void
1692 rewrite_mem_refs (struct loop *loop, im_mem_ref *ref, tree tmp_var)
1694 for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var));
1697 /* Stores the first reference location in LOCP. */
1699 struct first_mem_ref_loc_1
1701 first_mem_ref_loc_1 (mem_ref_loc **locp_) : locp (locp_) {}
1702 bool operator () (mem_ref_loc *loc);
1703 mem_ref_loc **locp;
1706 bool
1707 first_mem_ref_loc_1::operator () (mem_ref_loc *loc)
1709 *locp = loc;
1710 return true;
1713 /* Returns the first reference location to REF in LOOP. */
1715 static mem_ref_loc *
1716 first_mem_ref_loc (struct loop *loop, im_mem_ref *ref)
1718 mem_ref_loc *locp = NULL;
1719 for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp));
1720 return locp;
1723 struct prev_flag_edges {
1724 /* Edge to insert new flag comparison code. */
1725 edge append_cond_position;
1727 /* Edge for fall through from previous flag comparison. */
1728 edge last_cond_fallthru;
1731 /* Helper function for execute_sm. Emit code to store TMP_VAR into
1732 MEM along edge EX.
1734 The store is only done if MEM has changed. We do this so no
1735 changes to MEM occur on code paths that did not originally store
1736 into it.
1738 The common case for execute_sm will transform:
1740 for (...) {
1741 if (foo)
1742 stuff;
1743 else
1744 MEM = TMP_VAR;
1747 into:
1749 lsm = MEM;
1750 for (...) {
1751 if (foo)
1752 stuff;
1753 else
1754 lsm = TMP_VAR;
1756 MEM = lsm;
1758 This function will generate:
1760 lsm = MEM;
1762 lsm_flag = false;
1764 for (...) {
1765 if (foo)
1766 stuff;
1767 else {
1768 lsm = TMP_VAR;
1769 lsm_flag = true;
1772 if (lsm_flag) <--
1773 MEM = lsm; <--
1776 static void
1777 execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag)
1779 basic_block new_bb, then_bb, old_dest;
1780 bool loop_has_only_one_exit;
1781 edge then_old_edge, orig_ex = ex;
1782 gimple_stmt_iterator gsi;
1783 gimple *stmt;
1784 struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
1785 bool irr = ex->flags & EDGE_IRREDUCIBLE_LOOP;
1787 /* ?? Insert store after previous store if applicable. See note
1788 below. */
1789 if (prev_edges)
1790 ex = prev_edges->append_cond_position;
1792 loop_has_only_one_exit = single_pred_p (ex->dest);
1794 if (loop_has_only_one_exit)
1795 ex = split_block_after_labels (ex->dest);
1796 else
1798 for (gphi_iterator gpi = gsi_start_phis (ex->dest);
1799 !gsi_end_p (gpi); gsi_next (&gpi))
1801 gphi *phi = gpi.phi ();
1802 if (virtual_operand_p (gimple_phi_result (phi)))
1803 continue;
1805 /* When the destination has a non-virtual PHI node with multiple
1806 predecessors make sure we preserve the PHI structure by
1807 forcing a forwarder block so that hoisting of that PHI will
1808 still work. */
1809 split_edge (ex);
1810 break;
1814 old_dest = ex->dest;
1815 new_bb = split_edge (ex);
1816 then_bb = create_empty_bb (new_bb);
1817 if (irr)
1818 then_bb->flags = BB_IRREDUCIBLE_LOOP;
1819 add_bb_to_loop (then_bb, new_bb->loop_father);
1821 gsi = gsi_start_bb (new_bb);
1822 stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
1823 NULL_TREE, NULL_TREE);
1824 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1826 gsi = gsi_start_bb (then_bb);
1827 /* Insert actual store. */
1828 stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
1829 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1831 make_edge (new_bb, then_bb,
1832 EDGE_TRUE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
1833 make_edge (new_bb, old_dest,
1834 EDGE_FALSE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
1835 then_old_edge = make_edge (then_bb, old_dest,
1836 EDGE_FALLTHRU | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
1838 set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);
1840 if (prev_edges)
1842 basic_block prevbb = prev_edges->last_cond_fallthru->src;
1843 redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
1844 set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
1845 set_immediate_dominator (CDI_DOMINATORS, old_dest,
1846 recompute_dominator (CDI_DOMINATORS, old_dest));
1849 /* ?? Because stores may alias, they must happen in the exact
1850 sequence they originally happened. Save the position right after
1851 the (_lsm) store we just created so we can continue appending after
1852 it and maintain the original order. */
1854 struct prev_flag_edges *p;
1856 if (orig_ex->aux)
1857 orig_ex->aux = NULL;
1858 alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
1859 p = (struct prev_flag_edges *) orig_ex->aux;
1860 p->append_cond_position = then_old_edge;
1861 p->last_cond_fallthru = find_edge (new_bb, old_dest);
1862 orig_ex->aux = (void *) p;
1865 if (!loop_has_only_one_exit)
1866 for (gphi_iterator gpi = gsi_start_phis (old_dest);
1867 !gsi_end_p (gpi); gsi_next (&gpi))
1869 gphi *phi = gpi.phi ();
1870 unsigned i;
1872 for (i = 0; i < gimple_phi_num_args (phi); i++)
1873 if (gimple_phi_arg_edge (phi, i)->src == new_bb)
1875 tree arg = gimple_phi_arg_def (phi, i);
1876 add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
1877 update_stmt (phi);
1880 /* Remove the original fall through edge. This was the
1881 single_succ_edge (new_bb). */
1882 EDGE_SUCC (new_bb, 0)->flags &= ~EDGE_FALLTHRU;
1885 /* When REF is set on the location, set flag indicating the store. */
1887 struct sm_set_flag_if_changed
1889 sm_set_flag_if_changed (tree flag_) : flag (flag_) {}
1890 bool operator () (mem_ref_loc *loc);
1891 tree flag;
1894 bool
1895 sm_set_flag_if_changed::operator () (mem_ref_loc *loc)
1897 /* Only set the flag for writes. */
1898 if (is_gimple_assign (loc->stmt)
1899 && gimple_assign_lhs_ptr (loc->stmt) == loc->ref)
1901 gimple_stmt_iterator gsi = gsi_for_stmt (loc->stmt);
1902 gimple *stmt = gimple_build_assign (flag, boolean_true_node);
1903 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1905 return false;
1908 /* Helper function for execute_sm. On every location where REF is
1909 set, set an appropriate flag indicating the store. */
1911 static tree
1912 execute_sm_if_changed_flag_set (struct loop *loop, im_mem_ref *ref)
1914 tree flag;
1915 char *str = get_lsm_tmp_name (ref->mem.ref, ~0, "_flag");
1916 flag = create_tmp_reg (boolean_type_node, str);
1917 for_all_locs_in_loop (loop, ref, sm_set_flag_if_changed (flag));
1918 return flag;
1921 /* Executes store motion of memory reference REF from LOOP.
1922 Exits from the LOOP are stored in EXITS. The initialization of the
1923 temporary variable is put in the preheader of the loop, and assignments
1924 to the reference from the temporary variable are emitted to exits. */
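/* The basic (single-threaded) transformation, illustrated:

     for (...)
       {
         ...
         MEM = x;
       }

   becomes

     lsm = MEM;
     for (...)
       {
         ...
         lsm = x;
       }
     MEM = lsm;   <-- on each exit edge

   The flag-guarded variant used under the multi-threaded memory model is
   described above execute_sm_if_changed.  */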
1926 static void
1927 execute_sm (struct loop *loop, vec<edge> exits, im_mem_ref *ref)
1929 tree tmp_var, store_flag = NULL_TREE;
1930 unsigned i;
1931 gassign *load;
1932 struct fmt_data fmt_data;
1933 edge ex;
1934 struct lim_aux_data *lim_data;
1935 bool multi_threaded_model_p = false;
1936 gimple_stmt_iterator gsi;
1938 if (dump_file && (dump_flags & TDF_DETAILS))
1940 fprintf (dump_file, "Executing store motion of ");
1941 print_generic_expr (dump_file, ref->mem.ref, 0);
1942 fprintf (dump_file, " from loop %d\n", loop->num);
1945 tmp_var = create_tmp_reg (TREE_TYPE (ref->mem.ref),
1946 get_lsm_tmp_name (ref->mem.ref, ~0));
1948 fmt_data.loop = loop;
1949 fmt_data.orig_loop = loop;
1950 for_each_index (&ref->mem.ref, force_move_till, &fmt_data);
1952 if (bb_in_transaction (loop_preheader_edge (loop)->src)
1953 || !PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES))
1954 multi_threaded_model_p = true;
1956 if (multi_threaded_model_p)
1957 store_flag = execute_sm_if_changed_flag_set (loop, ref);
1959 rewrite_mem_refs (loop, ref, tmp_var);
1961 /* Emit the load code on a random exit edge or into the latch if
1962 the loop does not exit, so that we are sure it will be processed
1963 by move_computations after all dependencies. */
1964 gsi = gsi_for_stmt (first_mem_ref_loc (loop, ref)->stmt);
1966 /* FIXME/TODO: For the multi-threaded variant, we could avoid this
1967 load altogether, since the store is predicated by a flag. We
1968 could do the load only if it was originally in the loop. */
1969 load = gimple_build_assign (tmp_var, unshare_expr (ref->mem.ref));
1970 lim_data = init_lim_data (load);
1971 lim_data->max_loop = loop;
1972 lim_data->tgt_loop = loop;
1973 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
1975 if (multi_threaded_model_p)
1977 load = gimple_build_assign (store_flag, boolean_false_node);
1978 lim_data = init_lim_data (load);
1979 lim_data->max_loop = loop;
1980 lim_data->tgt_loop = loop;
1981 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
1984 /* Sink the store to every exit from the loop. */
1985 FOR_EACH_VEC_ELT (exits, i, ex)
1986 if (!multi_threaded_model_p)
1988 gassign *store;
1989 store = gimple_build_assign (unshare_expr (ref->mem.ref), tmp_var);
1990 gsi_insert_on_edge (ex, store);
1992 else
1993 execute_sm_if_changed (ex, ref->mem.ref, tmp_var, store_flag);
1996 /* Hoists memory references MEM_REFS out of LOOP. EXITS is the list of exit
1997 edges of the LOOP. */
1999 static void
2000 hoist_memory_references (struct loop *loop, bitmap mem_refs,
2001 vec<edge> exits)
2003 im_mem_ref *ref;
2004 unsigned i;
2005 bitmap_iterator bi;
2007 EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
2009 ref = memory_accesses.refs_list[i];
2010 execute_sm (loop, exits, ref);
2014 struct ref_always_accessed
2016 ref_always_accessed (struct loop *loop_, bool stored_p_)
2017 : loop (loop_), stored_p (stored_p_) {}
2018 bool operator () (mem_ref_loc *loc);
2019 struct loop *loop;
2020 bool stored_p;
2023 bool
2024 ref_always_accessed::operator () (mem_ref_loc *loc)
2026 struct loop *must_exec;
2028 if (!get_lim_data (loc->stmt))
2029 return false;
2031 /* If we require an always executed store make sure the statement
2032 stores to the reference. */
2033 if (stored_p)
2035 tree lhs = gimple_get_lhs (loc->stmt);
2036 if (!lhs
2037 || lhs != *loc->ref)
2038 return false;
2041 must_exec = get_lim_data (loc->stmt)->always_executed_in;
2042 if (!must_exec)
2043 return false;
2045 if (must_exec == loop
2046 || flow_loop_nested_p (must_exec, loop))
2047 return true;
2049 return false;
2052 /* Returns true if REF is always accessed in LOOP. If STORED_P is true,
2053 make sure REF is always stored to in LOOP. */
2055 static bool
2056 ref_always_accessed_p (struct loop *loop, im_mem_ref *ref, bool stored_p)
2058 return for_all_locs_in_loop (loop, ref,
2059 ref_always_accessed (loop, stored_p));
2062 /* Returns true if REF1 and REF2 are independent. */
2064 static bool
2065 refs_independent_p (im_mem_ref *ref1, im_mem_ref *ref2)
2067 if (ref1 == ref2)
2068 return true;
2070 if (dump_file && (dump_flags & TDF_DETAILS))
2071 fprintf (dump_file, "Querying dependency of refs %u and %u: ",
2072 ref1->id, ref2->id);
2074 if (mem_refs_may_alias_p (ref1, ref2, &memory_accesses.ttae_cache))
2076 if (dump_file && (dump_flags & TDF_DETAILS))
2077 fprintf (dump_file, "dependent.\n");
2078 return false;
2080 else
2082 if (dump_file && (dump_flags & TDF_DETAILS))
2083 fprintf (dump_file, "independent.\n");
2084 return true;
2088 /* Mark REF dependent on stores or loads (according to STORED_P) in LOOP
2089 and its super-loops. */
2091 static void
2092 record_dep_loop (struct loop *loop, im_mem_ref *ref, bool stored_p)
2094 /* We can propagate dependent-in-loop bits up the loop
2095 hierarchy to all outer loops. */
2096 while (loop != current_loops->tree_root
2097 && bitmap_set_bit (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2098 loop = loop_outer (loop);
2101 /* Returns true if REF is independent of all other memory references in
2102 LOOP. */
2104 static bool
2105 ref_indep_loop_p_1 (struct loop *loop, im_mem_ref *ref, bool stored_p)
2107 bitmap refs_to_check;
2108 unsigned i;
2109 bitmap_iterator bi;
2110 im_mem_ref *aref;
2112 if (stored_p)
2113 refs_to_check = &memory_accesses.refs_in_loop[loop->num];
2114 else
2115 refs_to_check = &memory_accesses.refs_stored_in_loop[loop->num];
2117 if (bitmap_bit_p (refs_to_check, UNANALYZABLE_MEM_ID))
2118 return false;
2120 EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
2122 aref = memory_accesses.refs_list[i];
2123 if (!refs_independent_p (ref, aref))
2124 return false;
2127 return true;
2130 /* Returns true if REF is independent of all other memory references in
2131 LOOP. A wrapper over ref_indep_loop_p_1 that caches its results. */
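/* A note on the caching scheme used by ref_indep_loop_p_2 (this
   merely describes the code below): the result for (LOOP, STORED_P)
   is remembered in REF->indep_loop resp. REF->dep_loop under the bit
   LOOP_DEP_BIT (LOOP->num, STORED_P).  STORED_P selects what we test
   REF against: only the stores in LOOP (false) or all references in
   LOOP (true).  Independence of all references implies independence
   of the stores, and dependence on the stores implies dependence on
   all references, so the implied bit is recorded as well.  */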
2133 static bool
2134 ref_indep_loop_p_2 (struct loop *loop, im_mem_ref *ref, bool stored_p)
2136 stored_p |= (ref->stored && bitmap_bit_p (ref->stored, loop->num));
2138 if (bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2139 return true;
2140 if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2141 return false;
2143 struct loop *inner = loop->inner;
2144 while (inner)
2146 if (!ref_indep_loop_p_2 (inner, ref, stored_p))
2147 return false;
2148 inner = inner->next;
2151 bool indep_p = ref_indep_loop_p_1 (loop, ref, stored_p);
2153 if (dump_file && (dump_flags & TDF_DETAILS))
2154 fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
2155 ref->id, loop->num, indep_p ? "independent" : "dependent");
2157 /* Record the computed result in the cache. */
2158 if (indep_p)
2160 if (bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p))
2161 && stored_p)
2163 /* If it's independent of all refs then it's independent
2164 of stores, too. */
2165 bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, false));
2168 else
2170 record_dep_loop (loop, ref, stored_p);
2171 if (!stored_p)
2173 /* If it's dependent on stores it's dependent on
2174 all refs, too. */
2175 record_dep_loop (loop, ref, true);
2179 return indep_p;
2182 /* Returns true if REF is independent of all other memory references in
2183 LOOP. */
2185 static bool
2186 ref_indep_loop_p (struct loop *loop, im_mem_ref *ref)
2188 gcc_checking_assert (MEM_ANALYZABLE (ref));
2190 return ref_indep_loop_p_2 (loop, ref, false);
2193 /* Returns true if we can perform store motion of REF from LOOP. */
2195 static bool
2196 can_sm_ref_p (struct loop *loop, im_mem_ref *ref)
2198 tree base;
2200 /* Can't hoist unanalyzable refs. */
2201 if (!MEM_ANALYZABLE (ref))
2202 return false;
2204 /* It should be movable. */
2205 if (!is_gimple_reg_type (TREE_TYPE (ref->mem.ref))
2206 || TREE_THIS_VOLATILE (ref->mem.ref)
2207 || !for_each_index (&ref->mem.ref, may_move_till, loop))
2208 return false;
2210 /* If it can throw, fail; we do not properly update EH info. */
2211 if (tree_could_throw_p (ref->mem.ref))
2212 return false;
2214 /* If it can trap, it must always be executed in LOOP.
2215 Readonly memory locations may trap when storing to them, but
2216 tree_could_trap_p is a predicate for rvalues, so check that
2217 explicitly. */
2218 base = get_base_address (ref->mem.ref);
2219 if ((tree_could_trap_p (ref->mem.ref)
2220 || (DECL_P (base) && TREE_READONLY (base)))
2221 && !ref_always_accessed_p (loop, ref, true))
2222 return false;
2224 /* And it must be independent of all other memory references
2225 in LOOP. */
2226 if (!ref_indep_loop_p (loop, ref))
2227 return false;
2229 return true;
2232 /* Marks in REFS_TO_SM the references in LOOP for which store motion
2233 should be performed. SM_EXECUTED is the set of references for which
2234 store motion was already performed in one of the outer loops. */
2236 static void
2237 find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
2239 bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
2240 unsigned i;
2241 bitmap_iterator bi;
2242 im_mem_ref *ref;
2244 EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
2246 ref = memory_accesses.refs_list[i];
2247 if (can_sm_ref_p (loop, ref))
2248 bitmap_set_bit (refs_to_sm, i);
2252 /* Checks whether LOOP (with exits stored in EXITS array) is suitable
2253 for a store motion optimization (i.e. whether we can insert statements
2254 on its exits). */
2256 static bool
2257 loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
2258 vec<edge> exits)
2260 unsigned i;
2261 edge ex;
2263 FOR_EACH_VEC_ELT (exits, i, ex)
2264 if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
2265 return false;
2267 return true;
2270 /* Try to perform store motion for all memory references modified inside
2271 LOOP. SM_EXECUTED is the bitmap of the memory references for which
2272 store motion was executed in one of the outer loops. */
2274 static void
2275 store_motion_loop (struct loop *loop, bitmap sm_executed)
2277 vec<edge> exits = get_loop_exit_edges (loop);
2278 struct loop *subloop;
2279 bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);
2281 if (loop_suitable_for_sm (loop, exits))
2283 find_refs_for_sm (loop, sm_executed, sm_in_loop);
2284 hoist_memory_references (loop, sm_in_loop, exits);
2286 exits.release ();
2288 bitmap_ior_into (sm_executed, sm_in_loop);
2289 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
2290 store_motion_loop (subloop, sm_executed);
2291 bitmap_and_compl_into (sm_executed, sm_in_loop);
2292 BITMAP_FREE (sm_in_loop);
2295 /* Try to perform store motion for all memory references modified inside
2296 loops. */
2298 static void
2299 store_motion (void)
2301 struct loop *loop;
2302 bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);
2304 for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
2305 store_motion_loop (loop, sm_executed);
2307 BITMAP_FREE (sm_executed);
2308 gsi_commit_edge_inserts ();
2311 /* Fills ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e.
2312 for each such basic block bb records the outermost loop for which execution
2313 of its header implies execution of bb. CONTAINS_CALL is the bitmap of
2314 blocks that contain a nonpure call. */
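/* For example (an illustrative loop, not taken from the sources), in

	while (1)
	  {
	    i++;
	    if (i > n)
	      break;
	    a[i] = x;
	  }

   the header block containing the exit test is executed whenever the
   loop is entered, whereas the block storing to a[i] is not, since
   the exit test may leave the loop first; only the former gets LOOP
   recorded as its ALWAYS_EXECUTED_IN.  */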
2316 static void
2317 fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
2319 basic_block bb = NULL, *bbs, last = NULL;
2320 unsigned i;
2321 edge e;
2322 struct loop *inn_loop = loop;
2324 if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
2326 bbs = get_loop_body_in_dom_order (loop);
2328 for (i = 0; i < loop->num_nodes; i++)
2330 edge_iterator ei;
2331 bb = bbs[i];
2333 if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2334 last = bb;
2336 if (bitmap_bit_p (contains_call, bb->index))
2337 break;
2339 FOR_EACH_EDGE (e, ei, bb->succs)
2340 if (!flow_bb_inside_loop_p (loop, e->dest))
2341 break;
2342 if (e)
2343 break;
2345 /* A loop might be infinite (TODO use simple loop analysis
2346 to disprove this if possible). */
2347 if (bb->flags & BB_IRREDUCIBLE_LOOP)
2348 break;
2350 if (!flow_bb_inside_loop_p (inn_loop, bb))
2351 break;
2353 if (bb->loop_father->header == bb)
2355 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2356 break;
2358 /* In a loop that is always entered we may proceed anyway.
2359 But record that we entered it and stop once we leave it. */
2360 inn_loop = bb->loop_father;
2364 while (1)
2366 SET_ALWAYS_EXECUTED_IN (last, loop);
2367 if (last == loop->header)
2368 break;
2369 last = get_immediate_dominator (CDI_DOMINATORS, last);
2372 free (bbs);
2375 for (loop = loop->inner; loop; loop = loop->next)
2376 fill_always_executed_in_1 (loop, contains_call);
2379 /* Fills ALWAYS_EXECUTED_IN information for basic blocks, i.e.
2380 for each such basic block bb records the outermost loop for which execution
2381 of its header implies execution of bb. */
2383 static void
2384 fill_always_executed_in (void)
2386 sbitmap contains_call = sbitmap_alloc (last_basic_block_for_fn (cfun));
2387 basic_block bb;
2388 struct loop *loop;
2390 bitmap_clear (contains_call);
2391 FOR_EACH_BB_FN (bb, cfun)
2393 gimple_stmt_iterator gsi;
2394 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2396 if (nonpure_call_p (gsi_stmt (gsi)))
2397 break;
2400 if (!gsi_end_p (gsi))
2401 bitmap_set_bit (contains_call, bb->index);
2404 for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
2405 fill_always_executed_in_1 (loop, contains_call);
2407 sbitmap_free (contains_call);
2411 /* Compute the global information needed by the loop invariant motion pass. */
2413 static void
2414 tree_ssa_lim_initialize (void)
2416 struct loop *loop;
2417 unsigned i;
2419 bitmap_obstack_initialize (&lim_bitmap_obstack);
2420 gcc_obstack_init (&mem_ref_obstack);
2421 lim_aux_data_map = new hash_map<gimple *, lim_aux_data *>;
2423 if (flag_tm)
2424 compute_transaction_bits ();
2426 alloc_aux_for_edges (0);
2428 memory_accesses.refs = new hash_table<mem_ref_hasher> (100);
2429 memory_accesses.refs_list.create (100);
2430 /* Allocate a special, unanalyzable mem-ref with ID zero. */
2431 memory_accesses.refs_list.quick_push
2432 (mem_ref_alloc (error_mark_node, 0, UNANALYZABLE_MEM_ID));
2434 memory_accesses.refs_in_loop.create (number_of_loops (cfun));
2435 memory_accesses.refs_in_loop.quick_grow (number_of_loops (cfun));
2436 memory_accesses.refs_stored_in_loop.create (number_of_loops (cfun));
2437 memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2438 memory_accesses.all_refs_stored_in_loop.create (number_of_loops (cfun));
2439 memory_accesses.all_refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2441 for (i = 0; i < number_of_loops (cfun); i++)
2443 bitmap_initialize (&memory_accesses.refs_in_loop[i],
2444 &lim_bitmap_obstack);
2445 bitmap_initialize (&memory_accesses.refs_stored_in_loop[i],
2446 &lim_bitmap_obstack);
2447 bitmap_initialize (&memory_accesses.all_refs_stored_in_loop[i],
2448 &lim_bitmap_obstack);
2451 memory_accesses.ttae_cache = NULL;
2453 /* Initialize bb_loop_postorder with a mapping from loop->num to
2454 its postorder index. */
2455 i = 0;
2456 bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun));
2457 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
2458 bb_loop_postorder[loop->num] = i++;
2461 /* Cleans up after the invariant motion pass. */
2463 static void
2464 tree_ssa_lim_finalize (void)
2466 basic_block bb;
2467 unsigned i;
2468 im_mem_ref *ref;
2470 free_aux_for_edges ();
2472 FOR_EACH_BB_FN (bb, cfun)
2473 SET_ALWAYS_EXECUTED_IN (bb, NULL);
2475 bitmap_obstack_release (&lim_bitmap_obstack);
2476 delete lim_aux_data_map;
2478 delete memory_accesses.refs;
2479 memory_accesses.refs = NULL;
2481 FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
2482 memref_free (ref);
2483 memory_accesses.refs_list.release ();
2484 obstack_free (&mem_ref_obstack, NULL);
2486 memory_accesses.refs_in_loop.release ();
2487 memory_accesses.refs_stored_in_loop.release ();
2488 memory_accesses.all_refs_stored_in_loop.release ();
2490 if (memory_accesses.ttae_cache)
2491 free_affine_expand_cache (&memory_accesses.ttae_cache);
2493 free (bb_loop_postorder);
2496 /* Moves invariants from loops. Only "expensive" invariants are moved out --
2497 i.e. those that are likely to be a win regardless of the register pressure. */
2499 unsigned int
2500 tree_ssa_lim (void)
2502 unsigned int todo;
2504 tree_ssa_lim_initialize ();
2506 /* Gathers information about memory accesses in the loops. */
2507 analyze_memory_references ();
2509 /* Fills ALWAYS_EXECUTED_IN information for basic blocks. */
2510 fill_always_executed_in ();
2512 /* For each statement determine the outermost loop in which it is
2513 invariant and the cost of computing the invariant. */
2514 invariantness_dom_walker (CDI_DOMINATORS)
2515 .walk (cfun->cfg->x_entry_block_ptr);
2517 /* Execute store motion. Force the necessary invariants to be moved
2518 out of the loops as well. */
2519 store_motion ();
2521 /* Move the expressions that are expensive enough. */
2522 todo = move_computations ();
2524 tree_ssa_lim_finalize ();
2526 return todo;
2529 /* Loop invariant motion pass. */
2531 namespace {
2533 const pass_data pass_data_lim =
2535 GIMPLE_PASS, /* type */
2536 "lim", /* name */
2537 OPTGROUP_LOOP, /* optinfo_flags */
2538 TV_LIM, /* tv_id */
2539 PROP_cfg, /* properties_required */
2540 0, /* properties_provided */
2541 0, /* properties_destroyed */
2542 0, /* todo_flags_start */
2543 0, /* todo_flags_finish */
2546 class pass_lim : public gimple_opt_pass
2548 public:
2549 pass_lim (gcc::context *ctxt)
2550 : gimple_opt_pass (pass_data_lim, ctxt)
2553 /* opt_pass methods: */
2554 opt_pass * clone () { return new pass_lim (m_ctxt); }
2555 virtual bool gate (function *) { return flag_tree_loop_im != 0; }
2556 virtual unsigned int execute (function *);
2558 }; // class pass_lim
2560 unsigned int
2561 pass_lim::execute (function *fun)
2563 if (number_of_loops (fun) <= 1)
2564 return 0;
2566 return tree_ssa_lim ();
2569 } // anon namespace
2571 gimple_opt_pass *
2572 make_pass_lim (gcc::context *ctxt)
2574 return new pass_lim (ctxt);