gcc/tree-ssa-loop-im.c
1 /* Loop invariant motion.
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "input.h"
25 #include "alias.h"
26 #include "symtab.h"
27 #include "tree.h"
28 #include "fold-const.h"
29 #include "tm_p.h"
30 #include "predict.h"
31 #include "hard-reg-set.h"
32 #include "input.h"
33 #include "function.h"
34 #include "dominance.h"
35 #include "cfg.h"
36 #include "cfganal.h"
37 #include "basic-block.h"
38 #include "gimple-pretty-print.h"
39 #include "tree-ssa-alias.h"
40 #include "internal-fn.h"
41 #include "tree-eh.h"
42 #include "gimple-expr.h"
43 #include "is-a.h"
44 #include "gimple.h"
45 #include "gimplify.h"
46 #include "gimple-iterator.h"
47 #include "gimple-ssa.h"
48 #include "tree-cfg.h"
49 #include "tree-phinodes.h"
50 #include "ssa-iterators.h"
51 #include "stringpool.h"
52 #include "tree-ssanames.h"
53 #include "tree-ssa-loop-manip.h"
54 #include "tree-ssa-loop.h"
55 #include "tree-into-ssa.h"
56 #include "cfgloop.h"
57 #include "domwalk.h"
58 #include "params.h"
59 #include "tree-pass.h"
60 #include "flags.h"
61 #include "tree-affine.h"
62 #include "tree-ssa-propagate.h"
63 #include "trans-mem.h"
64 #include "gimple-fold.h"
66 /* TODO: Support for predicated code motion. I.e.
68 while (1)
70 if (cond)
72 a = inv;
73 something;
77 Where COND and INV are invariants, but evaluating INV may trap or be
78 invalid for some other reason if !COND. This may be transformed to
80 if (cond)
81 a = inv;
82 while (1)
84 if (cond)
85 something;
86 } */
88 /* The auxiliary data kept for each statement. */
90 struct lim_aux_data
92 struct loop *max_loop; /* The outermost loop in which the statement
93 is invariant. */
95 struct loop *tgt_loop; /* The loop out of which we want to move the
96 invariant. */
98 struct loop *always_executed_in;
99 /* The outermost loop for which we are sure
100 the statement is executed if the loop
101 is entered. */
103 unsigned cost; /* Cost of the computation performed by the
104 statement. */
106 vec<gimple> depends; /* Vector of statements that must also be
107 hoisted out of the loop when this statement
108 is hoisted; i.e. those that define the
109 operands of the statement and are inside of
110 the MAX_LOOP loop. */
113 /* Maps statements to their lim_aux_data. */
115 static hash_map<gimple, lim_aux_data *> *lim_aux_data_map;
117 /* Description of a memory reference location. */
119 typedef struct mem_ref_loc
121 tree *ref; /* The reference itself. */
122 gimple stmt; /* The statement in which it occurs. */
123 } *mem_ref_loc_p;
126 /* Description of a memory reference. */
128 typedef struct im_mem_ref
130 unsigned id; /* ID assigned to the memory reference
131 (its index in memory_accesses.refs_list) */
132 hashval_t hash; /* Its hash value. */
134 /* The memory access itself and associated caching of alias-oracle
135 query meta-data. */
136 ao_ref mem;
138 bitmap stored; /* The set of loops in which this memory location
139 is stored to. */
140 vec<mem_ref_loc> accesses_in_loop;
141 /* The locations of the accesses. Vector
142 indexed by the loop number. */
144 /* The following sets are computed on demand. We keep both set and
145 its complement, so that we know whether the information was
146 already computed or not. */
147 bitmap_head indep_loop; /* The set of loops in which the memory
148 reference is independent, meaning:
149 If it is stored in the loop, this store
150 is independent of all other loads and
151 stores.
152 If it is only loaded, then it is independent
153 of all stores in the loop. */
154 bitmap_head dep_loop; /* The complement of INDEP_LOOP. */
155 } *mem_ref_p;
157 /* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first
158 to record (in)dependence against stores in the loop and its subloops, the
159 second to record (in)dependence against all references in the loop
160 and its subloops. */
161 #define LOOP_DEP_BIT(loopnum, storedp) (2 * (loopnum) + (storedp ? 1 : 0))
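/* For example: for loop number 3, LOOP_DEP_BIT (3, false) == 6 is the bit
   recording (in)dependence against stores only, and LOOP_DEP_BIT (3, true) == 7
   is the bit recording (in)dependence against all references in the loop
   and its subloops. */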
163 /* Mem_ref hashtable helpers. */
165 struct mem_ref_hasher : typed_noop_remove <im_mem_ref>
167 typedef im_mem_ref *value_type;
168 typedef tree_node *compare_type;
169 static inline hashval_t hash (const im_mem_ref *);
170 static inline bool equal (const im_mem_ref *, const tree_node *);
173 /* A hash function for struct im_mem_ref object OBJ. */
175 inline hashval_t
176 mem_ref_hasher::hash (const im_mem_ref *mem)
178 return mem->hash;
181 /* An equality function for struct im_mem_ref object MEM1 with
182 memory reference OBJ2. */
184 inline bool
185 mem_ref_hasher::equal (const im_mem_ref *mem1, const tree_node *obj2)
187 return operand_equal_p (mem1->mem.ref, (const_tree) obj2, 0);
191 /* Description of memory accesses in loops. */
193 static struct
195 /* The hash table of memory references accessed in loops. */
196 hash_table<mem_ref_hasher> *refs;
198 /* The list of memory references. */
199 vec<mem_ref_p> refs_list;
201 /* The set of memory references accessed in each loop. */
202 vec<bitmap_head> refs_in_loop;
204 /* The set of memory references stored in each loop. */
205 vec<bitmap_head> refs_stored_in_loop;
207 /* The set of memory references stored in each loop, including subloops. */
208 vec<bitmap_head> all_refs_stored_in_loop;
210 /* Cache for expanding memory addresses. */
211 hash_map<tree, name_expansion *> *ttae_cache;
212 } memory_accesses;
214 /* Obstack for the bitmaps in the above data structures. */
215 static bitmap_obstack lim_bitmap_obstack;
216 static obstack mem_ref_obstack;
218 static bool ref_indep_loop_p (struct loop *, mem_ref_p);
220 /* Minimum cost of an expensive expression. */
221 #define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))
223 /* The outermost loop for which execution of the header guarantees that the
224 block will be executed. */
225 #define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
226 #define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))
228 /* ID of the shared unanalyzable mem. */
229 #define UNANALYZABLE_MEM_ID 0
231 /* Whether the reference was analyzable. */
232 #define MEM_ANALYZABLE(REF) ((REF)->id != UNANALYZABLE_MEM_ID)
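/* In other words, only the shared reference kept at index UNANALYZABLE_MEM_ID
   of memory_accesses.refs_list (used by gather_mem_refs_stmt below for
   statements whose memory access cannot be represented) fails this test. */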
234 static struct lim_aux_data *
235 init_lim_data (gimple stmt)
237 lim_aux_data *p = XCNEW (struct lim_aux_data);
238 lim_aux_data_map->put (stmt, p);
240 return p;
243 static struct lim_aux_data *
244 get_lim_data (gimple stmt)
246 lim_aux_data **p = lim_aux_data_map->get (stmt);
247 if (!p)
248 return NULL;
250 return *p;
253 /* Releases the memory occupied by DATA. */
255 static void
256 free_lim_aux_data (struct lim_aux_data *data)
258 data->depends.release ();
259 free (data);
262 static void
263 clear_lim_data (gimple stmt)
265 lim_aux_data **p = lim_aux_data_map->get (stmt);
266 if (!p)
267 return;
269 free_lim_aux_data (*p);
270 *p = NULL;
274 /* The possibilities of statement movement. */
275 enum move_pos
277 MOVE_IMPOSSIBLE, /* No movement -- side effect expression. */
278 MOVE_PRESERVE_EXECUTION, /* Must not cause the non-executed statement
279 to become executed -- memory accesses, ... */
280 MOVE_POSSIBLE /* Unlimited movement. */
284 /* If it is possible to hoist the statement STMT unconditionally,
285 returns MOVE_POSSIBLE.
286 If it is possible to hoist the statement STMT, but we must avoid making
287 it executed if it would not be executed in the original program (e.g.
288 because it may trap), return MOVE_PRESERVE_EXECUTION.
289 Otherwise return MOVE_IMPOSSIBLE. */
291 enum move_pos
292 movement_possibility (gimple stmt)
294 tree lhs;
295 enum move_pos ret = MOVE_POSSIBLE;
297 if (flag_unswitch_loops
298 && gimple_code (stmt) == GIMPLE_COND)
300 /* If we perform unswitching, force the operands of the invariant
301 condition to be moved out of the loop. */
302 return MOVE_POSSIBLE;
305 if (gimple_code (stmt) == GIMPLE_PHI
306 && gimple_phi_num_args (stmt) <= 2
307 && !virtual_operand_p (gimple_phi_result (stmt))
308 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
309 return MOVE_POSSIBLE;
311 if (gimple_get_lhs (stmt) == NULL_TREE)
312 return MOVE_IMPOSSIBLE;
314 if (gimple_vdef (stmt))
315 return MOVE_IMPOSSIBLE;
317 if (stmt_ends_bb_p (stmt)
318 || gimple_has_volatile_ops (stmt)
319 || gimple_has_side_effects (stmt)
320 || stmt_could_throw_p (stmt))
321 return MOVE_IMPOSSIBLE;
323 if (is_gimple_call (stmt))
325 /* While a pure or const call is guaranteed to have no side effects, we
326 cannot move it arbitrarily. Consider code like
328 char *s = something ();
330 while (1)
332 if (s)
333 t = strlen (s);
334 else
335 t = 0;
338 Here the strlen call cannot be moved out of the loop, even though
339 s is invariant. In addition to possibly creating a call with
340 invalid arguments, moving out a function call that is not executed
341 may cause performance regressions in case the call is costly and
342 not executed at all. */
343 ret = MOVE_PRESERVE_EXECUTION;
344 lhs = gimple_call_lhs (stmt);
346 else if (is_gimple_assign (stmt))
347 lhs = gimple_assign_lhs (stmt);
348 else
349 return MOVE_IMPOSSIBLE;
351 if (TREE_CODE (lhs) == SSA_NAME
352 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
353 return MOVE_IMPOSSIBLE;
355 if (TREE_CODE (lhs) != SSA_NAME
356 || gimple_could_trap_p (stmt))
357 return MOVE_PRESERVE_EXECUTION;
359 /* Non-local loads in a transaction cannot be hoisted out. Well,
360 unless the load happens on every path out of the loop, but we
361 don't take this into account yet. */
362 if (flag_tm
363 && gimple_in_transaction (stmt)
364 && gimple_assign_single_p (stmt))
366 tree rhs = gimple_assign_rhs1 (stmt);
367 if (DECL_P (rhs) && is_global_var (rhs))
369 if (dump_file)
371 fprintf (dump_file, "Cannot hoist conditional load of ");
372 print_generic_expr (dump_file, rhs, TDF_SLIM);
373 fprintf (dump_file, " because it is in a transaction.\n");
375 return MOVE_IMPOSSIBLE;
379 return ret;
382 /* Suppose that operand DEF is used inside the LOOP. Returns the outermost
383 loop to which we could move the expression using DEF if it did not have
384 other operands, i.e. the outermost loop enclosing LOOP in which the value
385 of DEF is invariant. */
387 static struct loop *
388 outermost_invariant_loop (tree def, struct loop *loop)
390 gimple def_stmt;
391 basic_block def_bb;
392 struct loop *max_loop;
393 struct lim_aux_data *lim_data;
395 if (!def)
396 return superloop_at_depth (loop, 1);
398 if (TREE_CODE (def) != SSA_NAME)
400 gcc_assert (is_gimple_min_invariant (def));
401 return superloop_at_depth (loop, 1);
404 def_stmt = SSA_NAME_DEF_STMT (def);
405 def_bb = gimple_bb (def_stmt);
406 if (!def_bb)
407 return superloop_at_depth (loop, 1);
409 max_loop = find_common_loop (loop, def_bb->loop_father);
411 lim_data = get_lim_data (def_stmt);
412 if (lim_data != NULL && lim_data->max_loop != NULL)
413 max_loop = find_common_loop (max_loop,
414 loop_outer (lim_data->max_loop));
415 if (max_loop == loop)
416 return NULL;
417 max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);
419 return max_loop;
422 /* DATA is a structure containing information associated with a statement
423 inside LOOP. DEF is one of the operands of this statement.
425 Find the outermost loop enclosing LOOP in which the value of DEF is invariant
426 and record this in the DATA->max_loop field. If DEF itself is defined inside
427 this loop as well (i.e. we need to hoist it out of the loop if we want
428 to hoist the statement represented by DATA), record the statement in which
429 DEF is defined in the DATA->depends list. Additionally, if ADD_COST is true,
430 add the cost of the computation of DEF to the DATA->cost.
432 If DEF is not invariant in LOOP, return false. Otherwise return TRUE. */
434 static bool
435 add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
436 bool add_cost)
438 gimple def_stmt = SSA_NAME_DEF_STMT (def);
439 basic_block def_bb = gimple_bb (def_stmt);
440 struct loop *max_loop;
441 struct lim_aux_data *def_data;
443 if (!def_bb)
444 return true;
446 max_loop = outermost_invariant_loop (def, loop);
447 if (!max_loop)
448 return false;
450 if (flow_loop_nested_p (data->max_loop, max_loop))
451 data->max_loop = max_loop;
453 def_data = get_lim_data (def_stmt);
454 if (!def_data)
455 return true;
457 if (add_cost
458 /* Only add the cost if the statement defining DEF is inside LOOP,
459 i.e. if it is likely that by moving the invariants dependent
460 on it, we will be able to avoid creating a new register for
461 it (since it will be only used in these dependent invariants). */
462 && def_bb->loop_father == loop)
463 data->cost += def_data->cost;
465 data->depends.safe_push (def_stmt);
467 return true;
470 /* Returns an estimate of the cost of statement STMT. The values here
471 are just ad-hoc constants, similar to costs for inlining. */
473 static unsigned
474 stmt_cost (gimple stmt)
476 /* Always try to create possibilities for unswitching. */
477 if (gimple_code (stmt) == GIMPLE_COND
478 || gimple_code (stmt) == GIMPLE_PHI)
479 return LIM_EXPENSIVE;
481 /* We should be hoisting calls if possible. */
482 if (is_gimple_call (stmt))
484 tree fndecl;
486 /* Unless the call is a builtin_constant_p; this always folds to a
487 constant, so moving it is useless. */
488 fndecl = gimple_call_fndecl (stmt);
489 if (fndecl
490 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
491 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P)
492 return 0;
494 return LIM_EXPENSIVE;
497 /* Hoisting memory references out should almost surely be a win. */
498 if (gimple_references_memory_p (stmt))
499 return LIM_EXPENSIVE;
501 if (gimple_code (stmt) != GIMPLE_ASSIGN)
502 return 1;
504 switch (gimple_assign_rhs_code (stmt))
506 case MULT_EXPR:
507 case WIDEN_MULT_EXPR:
508 case WIDEN_MULT_PLUS_EXPR:
509 case WIDEN_MULT_MINUS_EXPR:
510 case DOT_PROD_EXPR:
511 case FMA_EXPR:
512 case TRUNC_DIV_EXPR:
513 case CEIL_DIV_EXPR:
514 case FLOOR_DIV_EXPR:
515 case ROUND_DIV_EXPR:
516 case EXACT_DIV_EXPR:
517 case CEIL_MOD_EXPR:
518 case FLOOR_MOD_EXPR:
519 case ROUND_MOD_EXPR:
520 case TRUNC_MOD_EXPR:
521 case RDIV_EXPR:
522 /* Division and multiplication are usually expensive. */
523 return LIM_EXPENSIVE;
525 case LSHIFT_EXPR:
526 case RSHIFT_EXPR:
527 case WIDEN_LSHIFT_EXPR:
528 case LROTATE_EXPR:
529 case RROTATE_EXPR:
530 /* Shifts and rotates are usually expensive. */
531 return LIM_EXPENSIVE;
533 case CONSTRUCTOR:
534 /* Make vector construction cost proportional to the number
535 of elements. */
536 return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));
538 case SSA_NAME:
539 case PAREN_EXPR:
540 /* Whether or not something is wrapped inside a PAREN_EXPR
541 should not change move cost. Nor should an intermediate
542 unpropagated SSA name copy. */
543 return 0;
545 default:
546 return 1;
550 /* Finds the outermost loop between OUTER and LOOP in which the memory reference
551 REF is independent. If REF is not independent in LOOP, NULL is returned
552 instead. */
554 static struct loop *
555 outermost_indep_loop (struct loop *outer, struct loop *loop, mem_ref_p ref)
557 struct loop *aloop;
559 if (ref->stored && bitmap_bit_p (ref->stored, loop->num))
560 return NULL;
562 for (aloop = outer;
563 aloop != loop;
564 aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
565 if ((!ref->stored || !bitmap_bit_p (ref->stored, aloop->num))
566 && ref_indep_loop_p (aloop, ref))
567 return aloop;
569 if (ref_indep_loop_p (loop, ref))
570 return loop;
571 else
572 return NULL;
575 /* If there is a simple load or store to a memory reference in STMT, returns
576 the location of the memory reference, and sets IS_STORE according to whether
577 it is a store or load. Otherwise, returns NULL. */
579 static tree *
580 simple_mem_ref_in_stmt (gimple stmt, bool *is_store)
582 tree *lhs, *rhs;
584 /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns. */
585 if (!gimple_assign_single_p (stmt))
586 return NULL;
588 lhs = gimple_assign_lhs_ptr (stmt);
589 rhs = gimple_assign_rhs1_ptr (stmt);
591 if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
593 *is_store = false;
594 return rhs;
596 else if (gimple_vdef (stmt)
597 && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
599 *is_store = true;
600 return lhs;
602 else
603 return NULL;
606 /* Returns the memory reference contained in STMT. */
608 static mem_ref_p
609 mem_ref_in_stmt (gimple stmt)
611 bool store;
612 tree *mem = simple_mem_ref_in_stmt (stmt, &store);
613 hashval_t hash;
614 mem_ref_p ref;
616 if (!mem)
617 return NULL;
618 gcc_assert (!store);
620 hash = iterative_hash_expr (*mem, 0);
621 ref = memory_accesses.refs->find_with_hash (*mem, hash);
623 gcc_assert (ref != NULL);
624 return ref;
627 /* From a controlling predicate in DOM determine the arguments from
628 the PHI node PHI that are chosen if the predicate evaluates to
629 true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if
630 they are non-NULL. Returns true if the arguments can be determined,
631 else return false. */
633 static bool
634 extract_true_false_args_from_phi (basic_block dom, gphi *phi,
635 tree *true_arg_p, tree *false_arg_p)
637 basic_block bb = gimple_bb (phi);
638 edge true_edge, false_edge, tem;
639 tree arg0 = NULL_TREE, arg1 = NULL_TREE;
641 /* We have to verify that one edge into the PHI node is dominated
642 by the true edge of the predicate block and the other edge
643 dominated by the false edge. This ensures that the PHI argument
644 we are going to take is completely determined by the path we
645 take from the predicate block.
646 We can only use BB dominance checks below if the destination of
647 the true/false edges are dominated by their edge, thus only
648 have a single predecessor. */
649 extract_true_false_edges_from_block (dom, &true_edge, &false_edge);
650 tem = EDGE_PRED (bb, 0);
651 if (tem == true_edge
652 || (single_pred_p (true_edge->dest)
653 && (tem->src == true_edge->dest
654 || dominated_by_p (CDI_DOMINATORS,
655 tem->src, true_edge->dest))))
656 arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
657 else if (tem == false_edge
658 || (single_pred_p (false_edge->dest)
659 && (tem->src == false_edge->dest
660 || dominated_by_p (CDI_DOMINATORS,
661 tem->src, false_edge->dest))))
662 arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
663 else
664 return false;
665 tem = EDGE_PRED (bb, 1);
666 if (tem == true_edge
667 || (single_pred_p (true_edge->dest)
668 && (tem->src == true_edge->dest
669 || dominated_by_p (CDI_DOMINATORS,
670 tem->src, true_edge->dest))))
671 arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
672 else if (tem == false_edge
673 || (single_pred_p (false_edge->dest)
674 && (tem->src == false_edge->dest
675 || dominated_by_p (CDI_DOMINATORS,
676 tem->src, false_edge->dest))))
677 arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
678 else
679 return false;
680 if (!arg0 || !arg1)
681 return false;
683 if (true_arg_p)
684 *true_arg_p = arg0;
685 if (false_arg_p)
686 *false_arg_p = arg1;
688 return true;
691 /* Determine the outermost loop to which it is possible to hoist a statement
692 STMT and store it to LIM_DATA (STMT)->max_loop. To do this we determine
693 the outermost loop in which the value computed by STMT is invariant.
694 If MUST_PRESERVE_EXEC is true, additionally choose such a loop that
695 we preserve whether STMT is executed. It also fills in other related
696 information in LIM_DATA (STMT).
698 The function returns false if STMT cannot be hoisted outside of the loop it
699 is defined in, and true otherwise. */
701 static bool
702 determine_max_movement (gimple stmt, bool must_preserve_exec)
704 basic_block bb = gimple_bb (stmt);
705 struct loop *loop = bb->loop_father;
706 struct loop *level;
707 struct lim_aux_data *lim_data = get_lim_data (stmt);
708 tree val;
709 ssa_op_iter iter;
711 if (must_preserve_exec)
712 level = ALWAYS_EXECUTED_IN (bb);
713 else
714 level = superloop_at_depth (loop, 1);
715 lim_data->max_loop = level;
717 if (gphi *phi = dyn_cast <gphi *> (stmt))
719 use_operand_p use_p;
720 unsigned min_cost = UINT_MAX;
721 unsigned total_cost = 0;
722 struct lim_aux_data *def_data;
724 /* We will end up promoting dependencies to be unconditionally
725 evaluated. For this reason the PHI cost (and thus the
726 cost we remove from the loop by doing the invariant motion)
727 is that of the cheapest PHI argument dependency chain. */
728 FOR_EACH_PHI_ARG (use_p, phi, iter, SSA_OP_USE)
730 val = USE_FROM_PTR (use_p);
732 if (TREE_CODE (val) != SSA_NAME)
734 /* Assign cost 1 to constants. */
735 min_cost = MIN (min_cost, 1);
736 total_cost += 1;
737 continue;
739 if (!add_dependency (val, lim_data, loop, false))
740 return false;
742 gimple def_stmt = SSA_NAME_DEF_STMT (val);
743 if (gimple_bb (def_stmt)
744 && gimple_bb (def_stmt)->loop_father == loop)
746 def_data = get_lim_data (def_stmt);
747 if (def_data)
749 min_cost = MIN (min_cost, def_data->cost);
750 total_cost += def_data->cost;
755 min_cost = MIN (min_cost, total_cost);
756 lim_data->cost += min_cost;
758 if (gimple_phi_num_args (phi) > 1)
760 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
761 gimple cond;
762 if (gsi_end_p (gsi_last_bb (dom)))
763 return false;
764 cond = gsi_stmt (gsi_last_bb (dom));
765 if (gimple_code (cond) != GIMPLE_COND)
766 return false;
767 /* Verify that this is an extended form of a diamond and
768 the PHI arguments are completely controlled by the
769 predicate in DOM. */
770 if (!extract_true_false_args_from_phi (dom, phi, NULL, NULL))
771 return false;
773 /* Fold in dependencies and cost of the condition. */
774 FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
776 if (!add_dependency (val, lim_data, loop, false))
777 return false;
778 def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
779 if (def_data)
780 total_cost += def_data->cost;
783 /* We want to avoid unconditionally executing very expensive
784 operations. As costs for our dependencies cannot be
785 negative, just claim we are not invariant for this case.
786 We also are not sure whether the control-flow inside the
787 loop will vanish. */
788 if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
789 && !(min_cost != 0
790 && total_cost / min_cost <= 2))
791 return false;
793 /* Assume that the control-flow in the loop will vanish.
794 ??? We should verify this and not artificially increase
795 the cost if that is not the case. */
796 lim_data->cost += stmt_cost (stmt);
799 return true;
801 else
802 FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
803 if (!add_dependency (val, lim_data, loop, true))
804 return false;
806 if (gimple_vuse (stmt))
808 mem_ref_p ref = mem_ref_in_stmt (stmt);
810 if (ref)
812 lim_data->max_loop
813 = outermost_indep_loop (lim_data->max_loop, loop, ref);
814 if (!lim_data->max_loop)
815 return false;
817 else
819 if ((val = gimple_vuse (stmt)) != NULL_TREE)
821 if (!add_dependency (val, lim_data, loop, false))
822 return false;
827 lim_data->cost += stmt_cost (stmt);
829 return true;
832 /* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
833 and that one of the operands of this statement is computed by STMT.
834 Ensure that STMT (together with all the statements that define its
835 operands) is hoisted at least out of the loop LEVEL. */
837 static void
838 set_level (gimple stmt, struct loop *orig_loop, struct loop *level)
840 struct loop *stmt_loop = gimple_bb (stmt)->loop_father;
841 struct lim_aux_data *lim_data;
842 gimple dep_stmt;
843 unsigned i;
845 stmt_loop = find_common_loop (orig_loop, stmt_loop);
846 lim_data = get_lim_data (stmt);
847 if (lim_data != NULL && lim_data->tgt_loop != NULL)
848 stmt_loop = find_common_loop (stmt_loop,
849 loop_outer (lim_data->tgt_loop));
850 if (flow_loop_nested_p (stmt_loop, level))
851 return;
853 gcc_assert (level == lim_data->max_loop
854 || flow_loop_nested_p (lim_data->max_loop, level));
856 lim_data->tgt_loop = level;
857 FOR_EACH_VEC_ELT (lim_data->depends, i, dep_stmt)
858 set_level (dep_stmt, orig_loop, level);
861 /* Determines the outermost loop from which we want to hoist the statement STMT.
862 For now we choose the outermost possible loop. TODO -- use profiling
863 information to set it more sanely. */
865 static void
866 set_profitable_level (gimple stmt)
868 set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
871 /* Returns true if STMT is a call that has side effects. */
873 static bool
874 nonpure_call_p (gimple stmt)
876 if (gimple_code (stmt) != GIMPLE_CALL)
877 return false;
879 return gimple_has_side_effects (stmt);
882 /* Rewrite a/b to a*(1/b). Return the invariant stmt to process. */
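/* A minimal sketch of the effect (variable names are made up for
   illustration): with a loop-invariant divisor b and
   -funsafe-math-optimizations (see the caller), a loop such as
     for (i = 0; i < n; i++) x[i] = a[i] / b;
   becomes
     reciptmp = 1.0 / b;
     for (i = 0; i < n; i++) x[i] = a[i] * reciptmp;
   where the reciprocal statement is invariant and can be hoisted. */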
884 static gimple
885 rewrite_reciprocal (gimple_stmt_iterator *bsi)
887 gassign *stmt, *stmt1, *stmt2;
888 tree name, lhs, type;
889 tree real_one;
890 gimple_stmt_iterator gsi;
892 stmt = as_a <gassign *> (gsi_stmt (*bsi));
893 lhs = gimple_assign_lhs (stmt);
894 type = TREE_TYPE (lhs);
896 real_one = build_one_cst (type);
898 name = make_temp_ssa_name (type, NULL, "reciptmp");
899 stmt1 = gimple_build_assign (name, RDIV_EXPR, real_one,
900 gimple_assign_rhs2 (stmt));
901 stmt2 = gimple_build_assign (lhs, MULT_EXPR, name,
902 gimple_assign_rhs1 (stmt));
904 /* Replace division stmt with reciprocal and multiply stmts.
905 The multiply stmt is not invariant, so update iterator
906 and avoid rescanning. */
907 gsi = *bsi;
908 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
909 gsi_replace (&gsi, stmt2, true);
911 /* Continue processing with invariant reciprocal statement. */
912 return stmt1;
915 /* Check if the pattern at *BSI is a bittest of the form
916 (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0. */
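/* A minimal sketch of why this helps (illustrative names): in
     for (i = 0; i < n; i++) if ((a[i] >> b) & 1) ...
   with loop-invariant b but varying a[i], the rewritten mask (1 << b) is
   invariant and can be hoisted out of the loop, whereas the original
   shift a[i] >> b cannot. */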
918 static gimple
919 rewrite_bittest (gimple_stmt_iterator *bsi)
921 gassign *stmt;
922 gimple stmt1;
923 gassign *stmt2;
924 gimple use_stmt;
925 gcond *cond_stmt;
926 tree lhs, name, t, a, b;
927 use_operand_p use;
929 stmt = as_a <gassign *> (gsi_stmt (*bsi));
930 lhs = gimple_assign_lhs (stmt);
932 /* Verify that the single use of lhs is a comparison against zero. */
933 if (TREE_CODE (lhs) != SSA_NAME
934 || !single_imm_use (lhs, &use, &use_stmt))
935 return stmt;
936 cond_stmt = dyn_cast <gcond *> (use_stmt);
937 if (!cond_stmt)
938 return stmt;
939 if (gimple_cond_lhs (cond_stmt) != lhs
940 || (gimple_cond_code (cond_stmt) != NE_EXPR
941 && gimple_cond_code (cond_stmt) != EQ_EXPR)
942 || !integer_zerop (gimple_cond_rhs (cond_stmt)))
943 return stmt;
945 /* Get at the operands of the shift. The rhs is TMP1 & 1. */
946 stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
947 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
948 return stmt;
950 /* There is a conversion in between possibly inserted by fold. */
951 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
953 t = gimple_assign_rhs1 (stmt1);
954 if (TREE_CODE (t) != SSA_NAME
955 || !has_single_use (t))
956 return stmt;
957 stmt1 = SSA_NAME_DEF_STMT (t);
958 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
959 return stmt;
962 /* Verify that B is loop invariant but A is not. Verify that with
963 all the stmt walking we are still in the same loop. */
964 if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
965 || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
966 return stmt;
968 a = gimple_assign_rhs1 (stmt1);
969 b = gimple_assign_rhs2 (stmt1);
971 if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
972 && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
974 gimple_stmt_iterator rsi;
976 /* 1 << B */
977 t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
978 build_int_cst (TREE_TYPE (a), 1), b);
979 name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
980 stmt1 = gimple_build_assign (name, t);
982 /* A & (1 << B) */
983 t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
984 name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
985 stmt2 = gimple_build_assign (name, t);
987 /* Replace the SSA_NAME we compare against zero. Adjust
988 the type of zero accordingly. */
989 SET_USE (use, name);
990 gimple_cond_set_rhs (cond_stmt,
991 build_int_cst_type (TREE_TYPE (name),
992 0));
994 /* Don't use gsi_replace here, none of the new assignments sets
995 the variable originally set in stmt. Move bsi to stmt1, and
996 then remove the original stmt, so that we get a chance to
997 retain debug info for it. */
998 rsi = *bsi;
999 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
1000 gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
1001 gsi_remove (&rsi, true);
1003 return stmt1;
1006 return stmt;
1009 /* For each statement determines the outermost loop in which it is invariant,
1010 statements on whose motion it depends and the cost of the computation.
1011 This information is stored in the LIM_DATA structure associated with
1012 each statement. */
1013 class invariantness_dom_walker : public dom_walker
1015 public:
1016 invariantness_dom_walker (cdi_direction direction)
1017 : dom_walker (direction) {}
1019 virtual void before_dom_children (basic_block);
1022 /* Determine the outermost loops in which statements in basic block BB are
1023 invariant, and record them to the LIM_DATA associated with the statements.
1024 Callback for dom_walker. */
1026 void
1027 invariantness_dom_walker::before_dom_children (basic_block bb)
1029 enum move_pos pos;
1030 gimple_stmt_iterator bsi;
1031 gimple stmt;
1032 bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
1033 struct loop *outermost = ALWAYS_EXECUTED_IN (bb);
1034 struct lim_aux_data *lim_data;
1036 if (!loop_outer (bb->loop_father))
1037 return;
1039 if (dump_file && (dump_flags & TDF_DETAILS))
1040 fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
1041 bb->index, bb->loop_father->num, loop_depth (bb->loop_father));
1043 /* Look at PHI nodes, but only if there are at most two.
1044 ??? We could relax this further by post-processing the inserted
1045 code and transforming adjacent cond-exprs with the same predicate
1046 to control flow again. */
1047 bsi = gsi_start_phis (bb);
1048 if (!gsi_end_p (bsi)
1049 && ((gsi_next (&bsi), gsi_end_p (bsi))
1050 || (gsi_next (&bsi), gsi_end_p (bsi))))
1051 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1053 stmt = gsi_stmt (bsi);
1055 pos = movement_possibility (stmt);
1056 if (pos == MOVE_IMPOSSIBLE)
1057 continue;
1059 lim_data = init_lim_data (stmt);
1060 lim_data->always_executed_in = outermost;
1062 if (!determine_max_movement (stmt, false))
1064 lim_data->max_loop = NULL;
1065 continue;
1068 if (dump_file && (dump_flags & TDF_DETAILS))
1070 print_gimple_stmt (dump_file, stmt, 2, 0);
1071 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1072 loop_depth (lim_data->max_loop),
1073 lim_data->cost);
1076 if (lim_data->cost >= LIM_EXPENSIVE)
1077 set_profitable_level (stmt);
1080 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1082 stmt = gsi_stmt (bsi);
1084 pos = movement_possibility (stmt);
1085 if (pos == MOVE_IMPOSSIBLE)
1087 if (nonpure_call_p (stmt))
1089 maybe_never = true;
1090 outermost = NULL;
1092 /* Make sure to note always_executed_in for stores to make
1093 store-motion work. */
1094 else if (stmt_makes_single_store (stmt))
1096 struct lim_aux_data *lim_data = init_lim_data (stmt);
1097 lim_data->always_executed_in = outermost;
1099 continue;
1102 if (is_gimple_assign (stmt)
1103 && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
1104 == GIMPLE_BINARY_RHS))
1106 tree op0 = gimple_assign_rhs1 (stmt);
1107 tree op1 = gimple_assign_rhs2 (stmt);
1108 struct loop *ol1 = outermost_invariant_loop (op1,
1109 loop_containing_stmt (stmt));
1111 /* If divisor is invariant, convert a/b to a*(1/b), allowing reciprocal
1112 to be hoisted out of loop, saving expensive divide. */
1113 if (pos == MOVE_POSSIBLE
1114 && gimple_assign_rhs_code (stmt) == RDIV_EXPR
1115 && flag_unsafe_math_optimizations
1116 && !flag_trapping_math
1117 && ol1 != NULL
1118 && outermost_invariant_loop (op0, ol1) == NULL)
1119 stmt = rewrite_reciprocal (&bsi);
1121 /* If the shift count is invariant, convert (A >> B) & 1 to
1122 A & (1 << B) allowing the bit mask to be hoisted out of the loop
1123 saving an expensive shift. */
1124 if (pos == MOVE_POSSIBLE
1125 && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
1126 && integer_onep (op1)
1127 && TREE_CODE (op0) == SSA_NAME
1128 && has_single_use (op0))
1129 stmt = rewrite_bittest (&bsi);
1132 lim_data = init_lim_data (stmt);
1133 lim_data->always_executed_in = outermost;
1135 if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
1136 continue;
1138 if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
1140 lim_data->max_loop = NULL;
1141 continue;
1144 if (dump_file && (dump_flags & TDF_DETAILS))
1146 print_gimple_stmt (dump_file, stmt, 2, 0);
1147 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1148 loop_depth (lim_data->max_loop),
1149 lim_data->cost);
1152 if (lim_data->cost >= LIM_EXPENSIVE)
1153 set_profitable_level (stmt);
1157 class move_computations_dom_walker : public dom_walker
1159 public:
1160 move_computations_dom_walker (cdi_direction direction)
1161 : dom_walker (direction), todo_ (0) {}
1163 virtual void before_dom_children (basic_block);
1165 unsigned int todo_;
1168 /* Hoist the statements in basic block BB out of the loops prescribed by
1169 data stored in LIM_DATA structures associated with each statement. Callback
1170 for walk_dominator_tree. */
1172 void
1173 move_computations_dom_walker::before_dom_children (basic_block bb)
1175 struct loop *level;
1176 unsigned cost = 0;
1177 struct lim_aux_data *lim_data;
1179 if (!loop_outer (bb->loop_father))
1180 return;
1182 for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
1184 gassign *new_stmt;
1185 gphi *stmt = bsi.phi ();
1187 lim_data = get_lim_data (stmt);
1188 if (lim_data == NULL)
1190 gsi_next (&bsi);
1191 continue;
1194 cost = lim_data->cost;
1195 level = lim_data->tgt_loop;
1196 clear_lim_data (stmt);
1198 if (!level)
1200 gsi_next (&bsi);
1201 continue;
1204 if (dump_file && (dump_flags & TDF_DETAILS))
1206 fprintf (dump_file, "Moving PHI node\n");
1207 print_gimple_stmt (dump_file, stmt, 0, 0);
1208 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1209 cost, level->num);
1212 if (gimple_phi_num_args (stmt) == 1)
1214 tree arg = PHI_ARG_DEF (stmt, 0);
1215 new_stmt = gimple_build_assign (gimple_phi_result (stmt),
1216 TREE_CODE (arg), arg);
1218 else
1220 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
1221 gimple cond = gsi_stmt (gsi_last_bb (dom));
1222 tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
1223 /* Get the PHI arguments corresponding to the true and false
1224 edges of COND. */
1225 extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
1226 gcc_assert (arg0 && arg1);
1227 t = build2 (gimple_cond_code (cond), boolean_type_node,
1228 gimple_cond_lhs (cond), gimple_cond_rhs (cond));
1229 new_stmt = gimple_build_assign (gimple_phi_result (stmt),
1230 COND_EXPR, t, arg0, arg1);
1231 todo_ |= TODO_cleanup_cfg;
1233 if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (new_stmt)))
1234 && (!ALWAYS_EXECUTED_IN (bb)
1235 || (ALWAYS_EXECUTED_IN (bb) != level
1236 && !flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1238 tree lhs = gimple_assign_lhs (new_stmt);
1239 SSA_NAME_RANGE_INFO (lhs) = NULL;
1240 SSA_NAME_ANTI_RANGE_P (lhs) = 0;
1242 gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
1243 remove_phi_node (&bsi, false);
1246 for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
1248 edge e;
1250 gimple stmt = gsi_stmt (bsi);
1252 lim_data = get_lim_data (stmt);
1253 if (lim_data == NULL)
1255 gsi_next (&bsi);
1256 continue;
1259 cost = lim_data->cost;
1260 level = lim_data->tgt_loop;
1261 clear_lim_data (stmt);
1263 if (!level)
1265 gsi_next (&bsi);
1266 continue;
1269 /* We do not really want to move conditionals out of the loop; we just
1270 placed them here to force their operands to be moved if necessary. */
1271 if (gimple_code (stmt) == GIMPLE_COND)
1272 continue;
1274 if (dump_file && (dump_flags & TDF_DETAILS))
1276 fprintf (dump_file, "Moving statement\n");
1277 print_gimple_stmt (dump_file, stmt, 0, 0);
1278 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1279 cost, level->num);
1282 e = loop_preheader_edge (level);
1283 gcc_assert (!gimple_vdef (stmt));
1284 if (gimple_vuse (stmt))
1286 /* The new VUSE is the one from the virtual PHI in the loop
1287 header or the one already present. */
1288 gphi_iterator gsi2;
1289 for (gsi2 = gsi_start_phis (e->dest);
1290 !gsi_end_p (gsi2); gsi_next (&gsi2))
1292 gphi *phi = gsi2.phi ();
1293 if (virtual_operand_p (gimple_phi_result (phi)))
1295 gimple_set_vuse (stmt, PHI_ARG_DEF_FROM_EDGE (phi, e));
1296 break;
1300 gsi_remove (&bsi, false);
1301 if (gimple_has_lhs (stmt)
1302 && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME
1303 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_get_lhs (stmt)))
1304 && (!ALWAYS_EXECUTED_IN (bb)
1305 || !(ALWAYS_EXECUTED_IN (bb) == level
1306 || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1308 tree lhs = gimple_get_lhs (stmt);
1309 SSA_NAME_RANGE_INFO (lhs) = NULL;
1310 SSA_NAME_ANTI_RANGE_P (lhs) = 0;
1312 /* In case this is a stmt that is not unconditionally executed
1313 when the target loop header is executed and the stmt may
1314 invoke undefined integer or pointer overflow rewrite it to
1315 unsigned arithmetic. */
1316 if (is_gimple_assign (stmt)
1317 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))
1318 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (gimple_assign_lhs (stmt)))
1319 && arith_code_with_undefined_signed_overflow
1320 (gimple_assign_rhs_code (stmt))
1321 && (!ALWAYS_EXECUTED_IN (bb)
1322 || !(ALWAYS_EXECUTED_IN (bb) == level
1323 || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1324 gsi_insert_seq_on_edge (e, rewrite_to_defined_overflow (stmt));
1325 else
1326 gsi_insert_on_edge (e, stmt);
1330 /* Hoist the statements out of the loops prescribed by data stored in
1331 LIM_DATA structures associated with each statement.*/
1333 static unsigned int
1334 move_computations (void)
1336 move_computations_dom_walker walker (CDI_DOMINATORS);
1337 walker.walk (cfun->cfg->x_entry_block_ptr);
1339 gsi_commit_edge_inserts ();
1340 if (need_ssa_update_p (cfun))
1341 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
1343 return walker.todo_;
1346 /* Checks whether the statement defining variable *INDEX can be hoisted
1347 out of the loop passed in DATA. Callback for for_each_index. */
1349 static bool
1350 may_move_till (tree ref, tree *index, void *data)
1352 struct loop *loop = (struct loop *) data, *max_loop;
1354 /* If REF is an array reference, check also that the step and the lower
1355 bound are invariant in LOOP. */
1356 if (TREE_CODE (ref) == ARRAY_REF)
1358 tree step = TREE_OPERAND (ref, 3);
1359 tree lbound = TREE_OPERAND (ref, 2);
1361 max_loop = outermost_invariant_loop (step, loop);
1362 if (!max_loop)
1363 return false;
1365 max_loop = outermost_invariant_loop (lbound, loop);
1366 if (!max_loop)
1367 return false;
1370 max_loop = outermost_invariant_loop (*index, loop);
1371 if (!max_loop)
1372 return false;
1374 return true;
1377 /* If OP is an SSA name, force the statement that defines it to be
1378 moved out of the LOOP. ORIG_LOOP is the loop in which OP is used. */
1380 static void
1381 force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
1383 gimple stmt;
1385 if (!op
1386 || is_gimple_min_invariant (op))
1387 return;
1389 gcc_assert (TREE_CODE (op) == SSA_NAME);
1391 stmt = SSA_NAME_DEF_STMT (op);
1392 if (gimple_nop_p (stmt))
1393 return;
1395 set_level (stmt, orig_loop, loop);
1398 /* Forces the statements defining invariants in REF (and *INDEX) to be moved out of
1399 the LOOP. The reference REF is used in the loop ORIG_LOOP. Callback for
1400 for_each_index. */
1402 struct fmt_data
1404 struct loop *loop;
1405 struct loop *orig_loop;
1408 static bool
1409 force_move_till (tree ref, tree *index, void *data)
1411 struct fmt_data *fmt_data = (struct fmt_data *) data;
1413 if (TREE_CODE (ref) == ARRAY_REF)
1415 tree step = TREE_OPERAND (ref, 3);
1416 tree lbound = TREE_OPERAND (ref, 2);
1418 force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
1419 force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
1422 force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);
1424 return true;
1427 /* A function to free the mem_ref object OBJ. */
1429 static void
1430 memref_free (struct im_mem_ref *mem)
1432 mem->accesses_in_loop.release ();
1435 /* Allocates and returns a memory reference description for MEM whose hash
1436 value is HASH and id is ID. */
1438 static mem_ref_p
1439 mem_ref_alloc (tree mem, unsigned hash, unsigned id)
1441 mem_ref_p ref = XOBNEW (&mem_ref_obstack, struct im_mem_ref);
1442 ao_ref_init (&ref->mem, mem);
1443 ref->id = id;
1444 ref->hash = hash;
1445 ref->stored = NULL;
1446 bitmap_initialize (&ref->indep_loop, &lim_bitmap_obstack);
1447 bitmap_initialize (&ref->dep_loop, &lim_bitmap_obstack);
1448 ref->accesses_in_loop.create (1);
1450 return ref;
1453 /* Records memory reference location *LOC in LOOP to the memory reference
1454 description REF. The reference occurs in statement STMT. */
1456 static void
1457 record_mem_ref_loc (mem_ref_p ref, gimple stmt, tree *loc)
1459 mem_ref_loc aref;
1460 aref.stmt = stmt;
1461 aref.ref = loc;
1462 ref->accesses_in_loop.safe_push (aref);
1465 /* Set the LOOP bit in REF stored bitmap and allocate that if
1466 necessary. Return whether a bit was changed. */
1468 static bool
1469 set_ref_stored_in_loop (mem_ref_p ref, struct loop *loop)
1471 if (!ref->stored)
1472 ref->stored = BITMAP_ALLOC (&lim_bitmap_obstack);
1473 return bitmap_set_bit (ref->stored, loop->num);
1476 /* Marks reference REF as stored in LOOP. */
1478 static void
1479 mark_ref_stored (mem_ref_p ref, struct loop *loop)
1481 while (loop != current_loops->tree_root
1482 && set_ref_stored_in_loop (ref, loop))
1483 loop = loop_outer (loop);
1486 /* Gathers memory references in statement STMT in LOOP, storing the
1487 information about them in the memory_accesses structure. Marks
1488 the vops accessed through unrecognized statements there as
1489 well. */
1491 static void
1492 gather_mem_refs_stmt (struct loop *loop, gimple stmt)
1494 tree *mem = NULL;
1495 hashval_t hash;
1496 im_mem_ref **slot;
1497 mem_ref_p ref;
1498 bool is_stored;
1499 unsigned id;
1501 if (!gimple_vuse (stmt))
1502 return;
1504 mem = simple_mem_ref_in_stmt (stmt, &is_stored);
1505 if (!mem)
1507 /* We use the shared mem_ref for all unanalyzable refs. */
1508 id = UNANALYZABLE_MEM_ID;
1509 ref = memory_accesses.refs_list[id];
1510 if (dump_file && (dump_flags & TDF_DETAILS))
1512 fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
1513 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1515 is_stored = gimple_vdef (stmt);
1517 else
1519 hash = iterative_hash_expr (*mem, 0);
1520 slot = memory_accesses.refs->find_slot_with_hash (*mem, hash, INSERT);
1521 if (*slot)
1523 ref = (mem_ref_p) *slot;
1524 id = ref->id;
1526 else
1528 id = memory_accesses.refs_list.length ();
1529 ref = mem_ref_alloc (*mem, hash, id);
1530 memory_accesses.refs_list.safe_push (ref);
1531 *slot = ref;
1533 if (dump_file && (dump_flags & TDF_DETAILS))
1535 fprintf (dump_file, "Memory reference %u: ", id);
1536 print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM);
1537 fprintf (dump_file, "\n");
1541 record_mem_ref_loc (ref, stmt, mem);
1543 bitmap_set_bit (&memory_accesses.refs_in_loop[loop->num], ref->id);
1544 if (is_stored)
1546 bitmap_set_bit (&memory_accesses.refs_stored_in_loop[loop->num], ref->id);
1547 mark_ref_stored (ref, loop);
1549 return;
1552 static unsigned *bb_loop_postorder;
1554 /* qsort comparison function to sort blocks by their loop father's postorder. */
1556 static int
1557 sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_)
1559 basic_block bb1 = *(basic_block *)const_cast<void *>(bb1_);
1560 basic_block bb2 = *(basic_block *)const_cast<void *>(bb2_);
1561 struct loop *loop1 = bb1->loop_father;
1562 struct loop *loop2 = bb2->loop_father;
1563 if (loop1->num == loop2->num)
1564 return 0;
1565 return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
1568 /* qsort comparison function to sort ref locs by their loop father's postorder. */
1570 static int
1571 sort_locs_in_loop_postorder_cmp (const void *loc1_, const void *loc2_)
1573 mem_ref_loc *loc1 = (mem_ref_loc *)const_cast<void *>(loc1_);
1574 mem_ref_loc *loc2 = (mem_ref_loc *)const_cast<void *>(loc2_);
1575 struct loop *loop1 = gimple_bb (loc1->stmt)->loop_father;
1576 struct loop *loop2 = gimple_bb (loc2->stmt)->loop_father;
1577 if (loop1->num == loop2->num)
1578 return 0;
1579 return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
1582 /* Gathers memory references in loops. */
1584 static void
1585 analyze_memory_references (void)
1587 gimple_stmt_iterator bsi;
1588 basic_block bb, *bbs;
1589 struct loop *loop, *outer;
1590 unsigned i, n;
1592 /* Collect all basic-blocks in loops and sort them by their
1593 loop father's postorder. */
1594 i = 0;
1595 bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
1596 FOR_EACH_BB_FN (bb, cfun)
1597 if (bb->loop_father != current_loops->tree_root)
1598 bbs[i++] = bb;
1599 n = i;
1600 qsort (bbs, n, sizeof (basic_block), sort_bbs_in_loop_postorder_cmp);
1602 /* Visit blocks in loop postorder and assign mem-ref IDs in that order.
1603 That results in better locality for all the bitmaps. */
1604 for (i = 0; i < n; ++i)
1606 basic_block bb = bbs[i];
1607 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1608 gather_mem_refs_stmt (bb->loop_father, gsi_stmt (bsi));
1611 /* Sort the location list of gathered memory references after their
1612 loop postorder number. */
1613 im_mem_ref *ref;
1614 FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
1615 ref->accesses_in_loop.qsort (sort_locs_in_loop_postorder_cmp);
1617 free (bbs);
1618 // free (bb_loop_postorder);
1620 /* Propagate the information about accessed memory references up
1621 the loop hierarchy. */
1622 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1624 /* Finalize the overall touched references (including subloops). */
1625 bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
1626 &memory_accesses.refs_stored_in_loop[loop->num]);
1628 /* Propagate the information about accessed memory references up
1629 the loop hierarchy. */
1630 outer = loop_outer (loop);
1631 if (outer == current_loops->tree_root)
1632 continue;
1634 bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[outer->num],
1635 &memory_accesses.all_refs_stored_in_loop[loop->num]);
1639 /* Returns true if MEM1 and MEM2 may alias. TTAE_CACHE is used as a cache in
1640 tree_to_aff_combination_expand. */
1642 static bool
1643 mem_refs_may_alias_p (mem_ref_p mem1, mem_ref_p mem2,
1644 hash_map<tree, name_expansion *> **ttae_cache)
1646 /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
1647 object and their offsets differ in such a way that the locations cannot
1648 overlap, then they cannot alias. */
1649 widest_int size1, size2;
1650 aff_tree off1, off2;
1652 /* Perform basic offset and type-based disambiguation. */
1653 if (!refs_may_alias_p_1 (&mem1->mem, &mem2->mem, true))
1654 return false;
1656 /* The expansion of addresses may be a bit expensive, thus we only do
1657 the check at -O2 and higher optimization levels. */
1658 if (optimize < 2)
1659 return true;
1661 get_inner_reference_aff (mem1->mem.ref, &off1, &size1);
1662 get_inner_reference_aff (mem2->mem.ref, &off2, &size2);
1663 aff_combination_expand (&off1, ttae_cache);
1664 aff_combination_expand (&off2, ttae_cache);
1665 aff_combination_scale (&off1, -1);
1666 aff_combination_add (&off2, &off1);
1668 if (aff_comb_cannot_overlap_p (&off2, size1, size2))
1669 return false;
1671 return true;
1674 /* Compare function for bsearch searching for reference locations
1675 in a loop. */
1677 static int
1678 find_ref_loc_in_loop_cmp (const void *loop_, const void *loc_)
1680 struct loop *loop = (struct loop *)const_cast<void *>(loop_);
1681 mem_ref_loc *loc = (mem_ref_loc *)const_cast<void *>(loc_);
1682 struct loop *loc_loop = gimple_bb (loc->stmt)->loop_father;
1683 if (loop->num == loc_loop->num
1684 || flow_loop_nested_p (loop, loc_loop))
1685 return 0;
1686 return (bb_loop_postorder[loop->num] < bb_loop_postorder[loc_loop->num]
1687 ? -1 : 1);
1690 /* Iterates over all locations of REF in LOOP and its subloops calling
1691 fn.operator() with the location as argument. When that operator
1692 returns true the iteration is stopped and true is returned.
1693 Otherwise false is returned. */
1695 template <typename FN>
1696 static bool
1697 for_all_locs_in_loop (struct loop *loop, mem_ref_p ref, FN fn)
1699 unsigned i;
1700 mem_ref_loc_p loc;
1702 /* Search for the cluster of locs in the accesses_in_loop vector
1703 which is sorted after postorder index of the loop father. */
1704 loc = ref->accesses_in_loop.bsearch (loop, find_ref_loc_in_loop_cmp);
1705 if (!loc)
1706 return false;
1708 /* We have found one location inside loop or its sub-loops. Iterate
1709 both forward and backward to cover the whole cluster. */
1710 i = loc - ref->accesses_in_loop.address ();
1711 while (i > 0)
1713 --i;
1714 mem_ref_loc_p l = &ref->accesses_in_loop[i];
1715 if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
1716 break;
1717 if (fn (l))
1718 return true;
1720 for (i = loc - ref->accesses_in_loop.address ();
1721 i < ref->accesses_in_loop.length (); ++i)
1723 mem_ref_loc_p l = &ref->accesses_in_loop[i];
1724 if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
1725 break;
1726 if (fn (l))
1727 return true;
1730 return false;
1733 /* Rewrites location LOC by TMP_VAR. */
1735 struct rewrite_mem_ref_loc
1737 rewrite_mem_ref_loc (tree tmp_var_) : tmp_var (tmp_var_) {}
1738 bool operator () (mem_ref_loc_p loc);
1739 tree tmp_var;
1742 bool
1743 rewrite_mem_ref_loc::operator () (mem_ref_loc_p loc)
1745 *loc->ref = tmp_var;
1746 update_stmt (loc->stmt);
1747 return false;
1750 /* Rewrites all references to REF in LOOP by variable TMP_VAR. */
1752 static void
1753 rewrite_mem_refs (struct loop *loop, mem_ref_p ref, tree tmp_var)
1755 for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var));
1758 /* Stores the first reference location in LOCP. */
1760 struct first_mem_ref_loc_1
1762 first_mem_ref_loc_1 (mem_ref_loc_p *locp_) : locp (locp_) {}
1763 bool operator () (mem_ref_loc_p loc);
1764 mem_ref_loc_p *locp;
1767 bool
1768 first_mem_ref_loc_1::operator () (mem_ref_loc_p loc)
1770 *locp = loc;
1771 return true;
1774 /* Returns the first reference location to REF in LOOP. */
1776 static mem_ref_loc_p
1777 first_mem_ref_loc (struct loop *loop, mem_ref_p ref)
1779 mem_ref_loc_p locp = NULL;
1780 for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp));
1781 return locp;
1784 struct prev_flag_edges {
1785 /* Edge to insert new flag comparison code. */
1786 edge append_cond_position;
1788 /* Edge for fall through from previous flag comparison. */
1789 edge last_cond_fallthru;
1792 /* Helper function for execute_sm. Emit code to store TMP_VAR into
1793 MEM along edge EX.
1795 The store is only done if MEM has changed. We do this so no
1796 changes to MEM occur on code paths that did not originally store
1797 into it.
1799 The common case for execute_sm will transform:
1801 for (...) {
1802 if (foo)
1803 stuff;
1804 else
1805 MEM = TMP_VAR;
1808 into:
1810 lsm = MEM;
1811 for (...) {
1812 if (foo)
1813 stuff;
1814 else
1815 lsm = TMP_VAR;
1817 MEM = lsm;
1819 This function will generate:
1821 lsm = MEM;
1823 lsm_flag = false;
1825 for (...) {
1826 if (foo)
1827 stuff;
1828 else {
1829 lsm = TMP_VAR;
1830 lsm_flag = true;
1833 if (lsm_flag) <--
1834 MEM = lsm; <--
1837 static void
1838 execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag)
1840 basic_block new_bb, then_bb, old_dest;
1841 bool loop_has_only_one_exit;
1842 edge then_old_edge, orig_ex = ex;
1843 gimple_stmt_iterator gsi;
1844 gimple stmt;
1845 struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
1846 bool irr = ex->flags & EDGE_IRREDUCIBLE_LOOP;
1848 /* ?? Insert store after previous store if applicable. See note
1849 below. */
1850 if (prev_edges)
1851 ex = prev_edges->append_cond_position;
1853 loop_has_only_one_exit = single_pred_p (ex->dest);
1855 if (loop_has_only_one_exit)
1856 ex = split_block_after_labels (ex->dest);
1858 old_dest = ex->dest;
1859 new_bb = split_edge (ex);
1860 then_bb = create_empty_bb (new_bb);
1861 if (irr)
1862 then_bb->flags = BB_IRREDUCIBLE_LOOP;
1863 add_bb_to_loop (then_bb, new_bb->loop_father);
1865 gsi = gsi_start_bb (new_bb);
1866 stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
1867 NULL_TREE, NULL_TREE);
1868 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1870 gsi = gsi_start_bb (then_bb);
1871 /* Insert actual store. */
1872 stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
1873 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1875 make_edge (new_bb, then_bb,
1876 EDGE_TRUE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
1877 make_edge (new_bb, old_dest,
1878 EDGE_FALSE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
1879 then_old_edge = make_edge (then_bb, old_dest,
1880 EDGE_FALLTHRU | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
1882 set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);
1884 if (prev_edges)
1886 basic_block prevbb = prev_edges->last_cond_fallthru->src;
1887 redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
1888 set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
1889 set_immediate_dominator (CDI_DOMINATORS, old_dest,
1890 recompute_dominator (CDI_DOMINATORS, old_dest));
1893 /* ?? Because stores may alias, they must happen in the exact
1894 sequence they originally happened. Save the position right after
1895 the (_lsm) store we just created so we can continue appending after
1896 it and maintain the original order. */
1898 struct prev_flag_edges *p;
1900 if (orig_ex->aux)
1901 orig_ex->aux = NULL;
1902 alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
1903 p = (struct prev_flag_edges *) orig_ex->aux;
1904 p->append_cond_position = then_old_edge;
1905 p->last_cond_fallthru = find_edge (new_bb, old_dest);
1906 orig_ex->aux = (void *) p;
1909 if (!loop_has_only_one_exit)
1910 for (gphi_iterator gpi = gsi_start_phis (old_dest);
1911 !gsi_end_p (gpi); gsi_next (&gpi))
1913 gphi *phi = gpi.phi ();
1914 unsigned i;
1916 for (i = 0; i < gimple_phi_num_args (phi); i++)
1917 if (gimple_phi_arg_edge (phi, i)->src == new_bb)
1919 tree arg = gimple_phi_arg_def (phi, i);
1920 add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
1921 update_stmt (phi);
1924 /* Remove the original fall through edge. This was the
1925 single_succ_edge (new_bb). */
1926 EDGE_SUCC (new_bb, 0)->flags &= ~EDGE_FALLTHRU;
1929 /* When REF is written at the location, set the flag indicating the store. */
1931 struct sm_set_flag_if_changed
1933 sm_set_flag_if_changed (tree flag_) : flag (flag_) {}
1934 bool operator () (mem_ref_loc_p loc);
1935 tree flag;
1938 bool
1939 sm_set_flag_if_changed::operator () (mem_ref_loc_p loc)
1941 /* Only set the flag for writes. */
1942 if (is_gimple_assign (loc->stmt)
1943 && gimple_assign_lhs_ptr (loc->stmt) == loc->ref)
1945 gimple_stmt_iterator gsi = gsi_for_stmt (loc->stmt);
1946 gimple stmt = gimple_build_assign (flag, boolean_true_node);
1947 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1949 return false;
1952 /* Helper function for execute_sm. On every location where REF is
1953 set, set an appropriate flag indicating the store. */
1955 static tree
1956 execute_sm_if_changed_flag_set (struct loop *loop, mem_ref_p ref)
1958 tree flag;
1959 char *str = get_lsm_tmp_name (ref->mem.ref, ~0, "_flag");
1960 flag = create_tmp_reg (boolean_type_node, str);
1961 for_all_locs_in_loop (loop, ref, sm_set_flag_if_changed (flag));
1962 return flag;
1965 /* Executes store motion of memory reference REF from LOOP.
1966 Exits from the LOOP are stored in EXITS. The initialization of the
1967 temporary variable is put in the preheader of the loop, and assignments
1968 to the reference from the temporary variable are emitted at the exits. */
1970 static void
1971 execute_sm (struct loop *loop, vec<edge> exits, mem_ref_p ref)
1973 tree tmp_var, store_flag = NULL_TREE;
1974 unsigned i;
1975 gassign *load;
1976 struct fmt_data fmt_data;
1977 edge ex;
1978 struct lim_aux_data *lim_data;
1979 bool multi_threaded_model_p = false;
1980 gimple_stmt_iterator gsi;
1982 if (dump_file && (dump_flags & TDF_DETAILS))
1984 fprintf (dump_file, "Executing store motion of ");
1985 print_generic_expr (dump_file, ref->mem.ref, 0);
1986 fprintf (dump_file, " from loop %d\n", loop->num);
1989 tmp_var = create_tmp_reg (TREE_TYPE (ref->mem.ref),
1990 get_lsm_tmp_name (ref->mem.ref, ~0));
1992 fmt_data.loop = loop;
1993 fmt_data.orig_loop = loop;
1994 for_each_index (&ref->mem.ref, force_move_till, &fmt_data);
1996 if (bb_in_transaction (loop_preheader_edge (loop)->src)
1997 || !PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES))
1998 multi_threaded_model_p = true;
2000 if (multi_threaded_model_p)
2001 store_flag = execute_sm_if_changed_flag_set (loop, ref);
2003 rewrite_mem_refs (loop, ref, tmp_var);
2005 /* Emit the load code on a random exit edge or into the latch if
2006 the loop does not exit, so that we are sure it will be processed
2007 by move_computations after all dependencies. */
2008 gsi = gsi_for_stmt (first_mem_ref_loc (loop, ref)->stmt);
2010 /* FIXME/TODO: For the multi-threaded variant, we could avoid this
2011 load altogether, since the store is predicated by a flag. We
2012 could do the load only if it was originally in the loop. */
2013 load = gimple_build_assign (tmp_var, unshare_expr (ref->mem.ref));
2014 lim_data = init_lim_data (load);
2015 lim_data->max_loop = loop;
2016 lim_data->tgt_loop = loop;
2017 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
2019 if (multi_threaded_model_p)
2021 load = gimple_build_assign (store_flag, boolean_false_node);
2022 lim_data = init_lim_data (load);
2023 lim_data->max_loop = loop;
2024 lim_data->tgt_loop = loop;
2025 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
2028 /* Sink the store to every exit from the loop. */
2029 FOR_EACH_VEC_ELT (exits, i, ex)
2030 if (!multi_threaded_model_p)
2032 gassign *store;
2033 store = gimple_build_assign (unshare_expr (ref->mem.ref), tmp_var);
2034 gsi_insert_on_edge (ex, store);
2036 else
2037 execute_sm_if_changed (ex, ref->mem.ref, tmp_var, store_flag);
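/* Illustrative end-to-end example of execute_sm (a sketch only; the
   temporary names below are made up, the real ones are produced by
   get_lsm_tmp_name).  For a function like

     int g;

     void
     f (int n, int cond)
     {
       for (int i = 0; i < n; i++)
         if (cond)
           g = g + 1;
     }

   the single-threaded model conceptually rewrites the loop to

     void
     f (int n, int cond)
     {
       int g_lsm = g;
       for (int i = 0; i < n; i++)
         if (cond)
           g_lsm = g_lsm + 1;
       g = g_lsm;
     }

   With the multi-threaded model the store after the loop is instead emitted
   as "if (g_flag) g = g_lsm;", with g_flag set next to every store inside
   the loop, so g is not written back unless it was actually stored to and
   no data race is introduced.  */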
2040 /* Hoists memory references MEM_REFS out of LOOP. EXITS is the list of exit
2041 edges of the LOOP. */
2043 static void
2044 hoist_memory_references (struct loop *loop, bitmap mem_refs,
2045 vec<edge> exits)
2047 mem_ref_p ref;
2048 unsigned i;
2049 bitmap_iterator bi;
2051 EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
2053 ref = memory_accesses.refs_list[i];
2054 execute_sm (loop, exits, ref);
2058 struct ref_always_accessed
2060 ref_always_accessed (struct loop *loop_, bool stored_p_)
2061 : loop (loop_), stored_p (stored_p_) {}
2062 bool operator () (mem_ref_loc_p loc);
2063 struct loop *loop;
2064 bool stored_p;
2067 bool
2068 ref_always_accessed::operator () (mem_ref_loc_p loc)
2070 struct loop *must_exec;
2072 if (!get_lim_data (loc->stmt))
2073 return false;
2075 /* If we require an always executed store, make sure the statement
2076 stores to the reference. */
2077 if (stored_p)
2079 tree lhs = gimple_get_lhs (loc->stmt);
2080 if (!lhs
2081 || lhs != *loc->ref)
2082 return false;
2085 must_exec = get_lim_data (loc->stmt)->always_executed_in;
2086 if (!must_exec)
2087 return false;
2089 if (must_exec == loop
2090 || flow_loop_nested_p (must_exec, loop))
2091 return true;
2093 return false;
2096 /* Returns true if REF is always accessed in LOOP. If STORED_P is true,
2097 make sure REF is always stored to in LOOP. */
2099 static bool
2100 ref_always_accessed_p (struct loop *loop, mem_ref_p ref, bool stored_p)
2102 return for_all_locs_in_loop (loop, ref,
2103 ref_always_accessed (loop, stored_p));
2106 /* Returns true if REF1 and REF2 are independent. */
2108 static bool
2109 refs_independent_p (mem_ref_p ref1, mem_ref_p ref2)
2111 if (ref1 == ref2)
2112 return true;
2114 if (dump_file && (dump_flags & TDF_DETAILS))
2115 fprintf (dump_file, "Querying dependency of refs %u and %u: ",
2116 ref1->id, ref2->id);
2118 if (mem_refs_may_alias_p (ref1, ref2, &memory_accesses.ttae_cache))
2120 if (dump_file && (dump_flags & TDF_DETAILS))
2121 fprintf (dump_file, "dependent.\n");
2122 return false;
2124 else
2126 if (dump_file && (dump_flags & TDF_DETAILS))
2127 fprintf (dump_file, "independent.\n");
2128 return true;
2132 /* Mark REF dependent on stores or loads (according to STORED_P) in LOOP
2133 and its super-loops. */
2135 static void
2136 record_dep_loop (struct loop *loop, mem_ref_p ref, bool stored_p)
2138 /* We can propagate dependent-in-loop bits up the loop
2139 hierarchy to all outer loops. */
2140 while (loop != current_loops->tree_root
2141 && bitmap_set_bit (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2142 loop = loop_outer (loop);
2145 /* Returns true if REF is independent of all other memory references in
2146 LOOP. */
2148 static bool
2149 ref_indep_loop_p_1 (struct loop *loop, mem_ref_p ref, bool stored_p)
2151 bitmap refs_to_check;
2152 unsigned i;
2153 bitmap_iterator bi;
2154 mem_ref_p aref;
2156 if (stored_p)
2157 refs_to_check = &memory_accesses.refs_in_loop[loop->num];
2158 else
2159 refs_to_check = &memory_accesses.refs_stored_in_loop[loop->num];
2161 if (bitmap_bit_p (refs_to_check, UNANALYZABLE_MEM_ID))
2162 return false;
2164 EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
2166 aref = memory_accesses.refs_list[i];
2167 if (!refs_independent_p (ref, aref))
2168 return false;
2171 return true;
2174 /* Returns true if REF is independent of all other memory references in
2175 LOOP. Wrapper over ref_indep_loop_p_1, caching its results. */
2177 static bool
2178 ref_indep_loop_p_2 (struct loop *loop, mem_ref_p ref, bool stored_p)
2180 stored_p |= (ref->stored && bitmap_bit_p (ref->stored, loop->num));
2182 if (bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2183 return true;
2184 if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2185 return false;
2187 struct loop *inner = loop->inner;
2188 while (inner)
2190 if (!ref_indep_loop_p_2 (inner, ref, stored_p))
2191 return false;
2192 inner = inner->next;
2195 bool indep_p = ref_indep_loop_p_1 (loop, ref, stored_p);
2197 if (dump_file && (dump_flags & TDF_DETAILS))
2198 fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
2199 ref->id, loop->num, indep_p ? "independent" : "dependent");
2201 /* Record the computed result in the cache. */
2202 if (indep_p)
2204 if (bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p))
2205 && stored_p)
2207 /* If it's independent against all refs then it's independent
2208 against stores, too. */
2209 bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, false));
2212 else
2214 record_dep_loop (loop, ref, stored_p);
2215 if (!stored_p)
2217 /* If it's dependent against stores it's dependent against
2218 all refs, too. */
2219 record_dep_loop (loop, ref, true);
2223 return indep_p;
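/* Sketch of the caching scheme used by ref_indep_loop_p_2.  Each (loop,
   stored_p) pair is encoded as a single bit index via LOOP_DEP_BIT (defined
   earlier in this file), and proven results are remembered per reference:

     ref->indep_loop   bits for which independence has been established
     ref->dep_loop     bits for which a dependence has been found

   stored_p == true means the reference was checked against all other
   references in the loop; stored_p == false means it was only checked
   against the stores.  Independence proven with stored_p == true therefore
   also sets the stored_p == false bit, a dependence found with
   stored_p == false is also recorded with stored_p == true, and any
   dependence is propagated by record_dep_loop to all enclosing loops.  */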
2226 /* Returns true if REF is independent of all other memory references in
2227 LOOP. */
2229 static bool
2230 ref_indep_loop_p (struct loop *loop, mem_ref_p ref)
2232 gcc_checking_assert (MEM_ANALYZABLE (ref));
2234 return ref_indep_loop_p_2 (loop, ref, false);
2237 /* Returns true if we can perform store motion of REF from LOOP. */
2239 static bool
2240 can_sm_ref_p (struct loop *loop, mem_ref_p ref)
2242 tree base;
2244 /* Can't hoist unanalyzable refs. */
2245 if (!MEM_ANALYZABLE (ref))
2246 return false;
2248 /* It should be movable. */
2249 if (!is_gimple_reg_type (TREE_TYPE (ref->mem.ref))
2250 || TREE_THIS_VOLATILE (ref->mem.ref)
2251 || !for_each_index (&ref->mem.ref, may_move_till, loop))
2252 return false;
2254 /* If it can throw, fail; we do not properly update EH info. */
2255 if (tree_could_throw_p (ref->mem.ref))
2256 return false;
2258 /* If it can trap, it must be always executed in LOOP.
2259 Readonly memory locations may trap when storing to them, but
2260 tree_could_trap_p is a predicate for rvalues, so check that
2261 explicitly. */
2262 base = get_base_address (ref->mem.ref);
2263 if ((tree_could_trap_p (ref->mem.ref)
2264 || (DECL_P (base) && TREE_READONLY (base)))
2265 && !ref_always_accessed_p (loop, ref, true))
2266 return false;
2268 /* And it must be independent of all other memory references
2269 in LOOP. */
2270 if (!ref_indep_loop_p (loop, ref))
2271 return false;
2273 return true;
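/* Two illustrative cases rejected (or restricted) by can_sm_ref_p, as a
   sketch only:

     volatile int v;
     int *p;

     for (...)
       {
         v = 1;         // volatile reference: never store-moved
         if (cond)
           *p = i;      // *p may trap, so it is only store-moved if the
                        // store is provably executed whenever the loop
                        // is entered
       }
*/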
2276 /* Marks the references in LOOP for that store motion should be performed
2277 in REFS_TO_SM. SM_EXECUTED is the set of references for that store
2278 motion was performed in one of the outer loops. */
2280 static void
2281 find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
2283 bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
2284 unsigned i;
2285 bitmap_iterator bi;
2286 mem_ref_p ref;
2288 EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
2290 ref = memory_accesses.refs_list[i];
2291 if (can_sm_ref_p (loop, ref))
2292 bitmap_set_bit (refs_to_sm, i);
2296 /* Checks whether LOOP (with exits stored in EXITS array) is suitable
2297 for a store motion optimization (i.e. whether we can insert statements
2298 on its exits). */
2300 static bool
2301 loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
2302 vec<edge> exits)
2304 unsigned i;
2305 edge ex;
2307 FOR_EACH_VEC_ELT (exits, i, ex)
2308 if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
2309 return false;
2311 return true;
2314 /* Try to perform store motion for all memory references modified inside
2315 LOOP. SM_EXECUTED is the bitmap of the memory references for that
2316 store motion was executed in one of the outer loops. */
2318 static void
2319 store_motion_loop (struct loop *loop, bitmap sm_executed)
2321 vec<edge> exits = get_loop_exit_edges (loop);
2322 struct loop *subloop;
2323 bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);
2325 if (loop_suitable_for_sm (loop, exits))
2327 find_refs_for_sm (loop, sm_executed, sm_in_loop);
2328 hoist_memory_references (loop, sm_in_loop, exits);
2330 exits.release ();
2332 bitmap_ior_into (sm_executed, sm_in_loop);
2333 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
2334 store_motion_loop (subloop, sm_executed);
2335 bitmap_and_compl_into (sm_executed, sm_in_loop);
2336 BITMAP_FREE (sm_in_loop);
2339 /* Try to perform store motion for all memory references modified inside
2340 loops. */
2342 static void
2343 store_motion (void)
2345 struct loop *loop;
2346 bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);
2348 for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
2349 store_motion_loop (loop, sm_executed);
2351 BITMAP_FREE (sm_executed);
2352 gsi_commit_edge_inserts ();
2355 /* Fills ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e.
2356 for each such basic block bb records the outermost loop for that execution
2357 of its header implies execution of bb. CONTAINS_CALL is the bitmap of
2358 blocks that contain a nonpure call. */
2360 static void
2361 fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
2363 basic_block bb = NULL, *bbs, last = NULL;
2364 unsigned i;
2365 edge e;
2366 struct loop *inn_loop = loop;
2368 if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
2370 bbs = get_loop_body_in_dom_order (loop);
2372 for (i = 0; i < loop->num_nodes; i++)
2374 edge_iterator ei;
2375 bb = bbs[i];
2377 if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2378 last = bb;
2380 if (bitmap_bit_p (contains_call, bb->index))
2381 break;
2383 FOR_EACH_EDGE (e, ei, bb->succs)
2384 if (!flow_bb_inside_loop_p (loop, e->dest))
2385 break;
2386 if (e)
2387 break;
2389 /* A loop might be infinite (TODO use simple loop analysis
2390 to disprove this if possible). */
2391 if (bb->flags & BB_IRREDUCIBLE_LOOP)
2392 break;
2394 if (!flow_bb_inside_loop_p (inn_loop, bb))
2395 break;
2397 if (bb->loop_father->header == bb)
2399 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2400 break;
2402 /* In a loop that is always entered we may proceed anyway.
2403 But record that we entered it and stop once we leave it. */
2404 inn_loop = bb->loop_father;
2408 while (1)
2410 SET_ALWAYS_EXECUTED_IN (last, loop);
2411 if (last == loop->header)
2412 break;
2413 last = get_immediate_dominator (CDI_DOMINATORS, last);
2416 free (bbs);
2419 for (loop = loop->inner; loop; loop = loop->next)
2420 fill_always_executed_in_1 (loop, contains_call);
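/* Small illustrative example for fill_always_executed_in_1.  For a loop body
   shaped like

       header
         |
      if (cond)
       /      \
     bb2      bb3
       \      /
        latch

   with no calls and no exits in between, the header and the latch dominate
   the latch and are marked as always executed in the loop, while bb2 and bb3
   are not.  The scan over the body stops at the first block that contains a
   nonpure call or may leave the loop, so only blocks seen before that point
   (and dominating the latch) end up marked.  */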
2423 /* Fills ALWAYS_EXECUTED_IN information for basic blocks, i.e.
2424 for each such basic block bb records the outermost loop for that execution
2425 of its header implies execution of bb. */
2427 static void
2428 fill_always_executed_in (void)
2430 sbitmap contains_call = sbitmap_alloc (last_basic_block_for_fn (cfun));
2431 basic_block bb;
2432 struct loop *loop;
2434 bitmap_clear (contains_call);
2435 FOR_EACH_BB_FN (bb, cfun)
2437 gimple_stmt_iterator gsi;
2438 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2440 if (nonpure_call_p (gsi_stmt (gsi)))
2441 break;
2444 if (!gsi_end_p (gsi))
2445 bitmap_set_bit (contains_call, bb->index);
2448 for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
2449 fill_always_executed_in_1 (loop, contains_call);
2451 sbitmap_free (contains_call);
2455 /* Compute the global information needed by the loop invariant motion pass. */
2457 static void
2458 tree_ssa_lim_initialize (void)
2460 struct loop *loop;
2461 unsigned i;
2463 bitmap_obstack_initialize (&lim_bitmap_obstack);
2464 gcc_obstack_init (&mem_ref_obstack);
2465 lim_aux_data_map = new hash_map<gimple, lim_aux_data *>;
2467 if (flag_tm)
2468 compute_transaction_bits ();
2470 alloc_aux_for_edges (0);
2472 memory_accesses.refs = new hash_table<mem_ref_hasher> (100);
2473 memory_accesses.refs_list.create (100);
2474 /* Allocate a special, unanalyzable mem-ref with ID zero. */
2475 memory_accesses.refs_list.quick_push
2476 (mem_ref_alloc (error_mark_node, 0, UNANALYZABLE_MEM_ID));
2478 memory_accesses.refs_in_loop.create (number_of_loops (cfun));
2479 memory_accesses.refs_in_loop.quick_grow (number_of_loops (cfun));
2480 memory_accesses.refs_stored_in_loop.create (number_of_loops (cfun));
2481 memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2482 memory_accesses.all_refs_stored_in_loop.create (number_of_loops (cfun));
2483 memory_accesses.all_refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2485 for (i = 0; i < number_of_loops (cfun); i++)
2487 bitmap_initialize (&memory_accesses.refs_in_loop[i],
2488 &lim_bitmap_obstack);
2489 bitmap_initialize (&memory_accesses.refs_stored_in_loop[i],
2490 &lim_bitmap_obstack);
2491 bitmap_initialize (&memory_accesses.all_refs_stored_in_loop[i],
2492 &lim_bitmap_obstack);
2495 memory_accesses.ttae_cache = NULL;
2497 /* Initialize bb_loop_postorder with a mapping from loop->num to
2498 its postorder index. */
2499 i = 0;
2500 bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun));
2501 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
2502 bb_loop_postorder[loop->num] = i++;
2505 /* Cleans up after the invariant motion pass. */
2507 static void
2508 tree_ssa_lim_finalize (void)
2510 basic_block bb;
2511 unsigned i;
2512 mem_ref_p ref;
2514 free_aux_for_edges ();
2516 FOR_EACH_BB_FN (bb, cfun)
2517 SET_ALWAYS_EXECUTED_IN (bb, NULL);
2519 bitmap_obstack_release (&lim_bitmap_obstack);
2520 delete lim_aux_data_map;
2522 delete memory_accesses.refs;
2523 memory_accesses.refs = NULL;
2525 FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
2526 memref_free (ref);
2527 memory_accesses.refs_list.release ();
2528 obstack_free (&mem_ref_obstack, NULL);
2530 memory_accesses.refs_in_loop.release ();
2531 memory_accesses.refs_stored_in_loop.release ();
2532 memory_accesses.all_refs_stored_in_loop.release ();
2534 if (memory_accesses.ttae_cache)
2535 free_affine_expand_cache (&memory_accesses.ttae_cache);
2537 free (bb_loop_postorder);
2540 /* Moves invariants from loops. Only "expensive" invariants are moved out --
2541 i.e. those that are likely to be a win regardless of the register pressure. */
2543 unsigned int
2544 tree_ssa_lim (void)
2546 unsigned int todo;
2548 tree_ssa_lim_initialize ();
2550 /* Gathers information about memory accesses in the loops. */
2551 analyze_memory_references ();
2553 /* Fills ALWAYS_EXECUTED_IN information for basic blocks. */
2554 fill_always_executed_in ();
2556 /* For each statement determine the outermost loop in that it is
2557 invariant and cost for computing the invariant. */
2558 invariantness_dom_walker (CDI_DOMINATORS)
2559 .walk (cfun->cfg->x_entry_block_ptr);
2561 /* Execute store motion. Force the necessary invariants to be moved
2562 out of the loops as well. */
2563 store_motion ();
2565 /* Move the expressions that are expensive enough. */
2566 todo = move_computations ();
2568 tree_ssa_lim_finalize ();
2570 return todo;
2573 /* Loop invariant motion pass. */
2575 namespace {
2577 const pass_data pass_data_lim =
2579 GIMPLE_PASS, /* type */
2580 "lim", /* name */
2581 OPTGROUP_LOOP, /* optinfo_flags */
2582 TV_LIM, /* tv_id */
2583 PROP_cfg, /* properties_required */
2584 0, /* properties_provided */
2585 0, /* properties_destroyed */
2586 0, /* todo_flags_start */
2587 0, /* todo_flags_finish */
2590 class pass_lim : public gimple_opt_pass
2592 public:
2593 pass_lim (gcc::context *ctxt)
2594 : gimple_opt_pass (pass_data_lim, ctxt)
2597 /* opt_pass methods: */
2598 opt_pass * clone () { return new pass_lim (m_ctxt); }
2599 virtual bool gate (function *) { return flag_tree_loop_im != 0; }
2600 virtual unsigned int execute (function *);
2602 }; // class pass_lim
2604 unsigned int
2605 pass_lim::execute (function *fun)
2607 if (number_of_loops (fun) <= 1)
2608 return 0;
2610 return tree_ssa_lim ();
2613 } // anon namespace
2615 gimple_opt_pass *
2616 make_pass_lim (gcc::context *ctxt)
2618 return new pass_lim (ctxt);