[official-gcc.git] / gcc / tree-ssa-loop-im.c
1 /* Loop invariant motion.
2 Copyright (C) 2003-2013 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "tm_p.h"
26 #include "basic-block.h"
27 #include "gimple-pretty-print.h"
28 #include "gimplify.h"
29 #include "gimple-iterator.h"
30 #include "gimple-ssa.h"
31 #include "tree-cfg.h"
32 #include "tree-phinodes.h"
33 #include "ssa-iterators.h"
34 #include "tree-ssanames.h"
35 #include "tree-ssa-loop-manip.h"
36 #include "tree-ssa-loop.h"
37 #include "tree-into-ssa.h"
38 #include "cfgloop.h"
39 #include "domwalk.h"
40 #include "params.h"
41 #include "tree-pass.h"
42 #include "flags.h"
43 #include "hash-table.h"
44 #include "tree-affine.h"
45 #include "pointer-set.h"
46 #include "tree-ssa-propagate.h"
48 /* TODO: Support for predicated code motion. I.e.
50 while (1)
52 if (cond)
54 a = inv;
55 something;
59 Where COND and INV are invariants, but evaluating INV may trap or be
 60      invalid for some other reason if !COND.  This may be transformed to
62 if (cond)
63 a = inv;
64 while (1)
66 if (cond)
67 something;
68 } */
70 /* The auxiliary data kept for each statement. */
72 struct lim_aux_data
74 struct loop *max_loop; /* The outermost loop in that the statement
75 is invariant. */
77 struct loop *tgt_loop; /* The loop out of that we want to move the
78 invariant. */
80 struct loop *always_executed_in;
81 /* The outermost loop for that we are sure
82 the statement is executed if the loop
83 is entered. */
85 unsigned cost; /* Cost of the computation performed by the
86 statement. */
88 vec<gimple> depends; /* Vector of statements that must be also
89 hoisted out of the loop when this statement
90 is hoisted; i.e. those that define the
91 operands of the statement and are inside of
92 the MAX_LOOP loop. */
95 /* Maps statements to their lim_aux_data. */
97 static struct pointer_map_t *lim_aux_data_map;
99 /* Description of a memory reference location. */
101 typedef struct mem_ref_loc
103 tree *ref; /* The reference itself. */
104 gimple stmt; /* The statement in that it occurs. */
105 } *mem_ref_loc_p;
108 /* Description of a memory reference. */
110 typedef struct mem_ref
112 unsigned id; /* ID assigned to the memory reference
113 (its index in memory_accesses.refs_list) */
114 hashval_t hash; /* Its hash value. */
116 /* The memory access itself and associated caching of alias-oracle
117 query meta-data. */
118 ao_ref mem;
120 bitmap_head stored; /* The set of loops in that this memory location
121 is stored to. */
122 vec<vec<mem_ref_loc> > accesses_in_loop;
123 /* The locations of the accesses. Vector
124 indexed by the loop number. */
126 /* The following sets are computed on demand. We keep both set and
127 its complement, so that we know whether the information was
128 already computed or not. */
129 bitmap_head indep_loop; /* The set of loops in that the memory
130 reference is independent, meaning:
131 If it is stored in the loop, this store
 132                                   is independent of all other loads and
 133                                   stores.
 134                                   If it is only loaded, then it is independent
 135                                   of all stores in the loop.  */
136 bitmap_head dep_loop; /* The complement of INDEP_LOOP. */
137 } *mem_ref_p;
139 /* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first
140 to record (in)dependence against stores in the loop and its subloops, the
141 second to record (in)dependence against all references in the loop
142 and its subloops. */
143 #define LOOP_DEP_BIT(loopnum, storedp) (2 * (loopnum) + (storedp ? 1 : 0))
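/* For instance, loop number 3 owns bits 6 and 7: LOOP_DEP_BIT (3, false)
   is 6 and LOOP_DEP_BIT (3, true) is 7.  */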
145 /* Mem_ref hashtable helpers. */
147 struct mem_ref_hasher : typed_noop_remove <mem_ref>
149 typedef mem_ref value_type;
150 typedef tree_node compare_type;
151 static inline hashval_t hash (const value_type *);
152 static inline bool equal (const value_type *, const compare_type *);
155 /* A hash function for struct mem_ref object OBJ. */
157 inline hashval_t
158 mem_ref_hasher::hash (const value_type *mem)
160 return mem->hash;
163 /* An equality function for struct mem_ref object MEM1 with
164 memory reference OBJ2. */
166 inline bool
167 mem_ref_hasher::equal (const value_type *mem1, const compare_type *obj2)
169 return operand_equal_p (mem1->mem.ref, (const_tree) obj2, 0);
173 /* Description of memory accesses in loops. */
175 static struct
177 /* The hash table of memory references accessed in loops. */
178 hash_table <mem_ref_hasher> refs;
180 /* The list of memory references. */
181 vec<mem_ref_p> refs_list;
183 /* The set of memory references accessed in each loop. */
184 vec<bitmap_head> refs_in_loop;
186 /* The set of memory references stored in each loop. */
187 vec<bitmap_head> refs_stored_in_loop;
 189   /* The set of memory references stored in each loop, including subloops.  */
190 vec<bitmap_head> all_refs_stored_in_loop;
192 /* Cache for expanding memory addresses. */
193 struct pointer_map_t *ttae_cache;
194 } memory_accesses;
196 /* Obstack for the bitmaps in the above data structures. */
197 static bitmap_obstack lim_bitmap_obstack;
199 static bool ref_indep_loop_p (struct loop *, mem_ref_p);
201 /* Minimum cost of an expensive expression. */
202 #define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))
204 /* The outermost loop for which execution of the header guarantees that the
205 block will be executed. */
206 #define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
207 #define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))
209 /* ID of the shared unanalyzable mem. */
210 #define UNANALYZABLE_MEM_ID 0
212 /* Whether the reference was analyzable. */
213 #define MEM_ANALYZABLE(REF) ((REF)->id != UNANALYZABLE_MEM_ID)
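/* The single shared reference with id UNANALYZABLE_MEM_ID stands for all
   memory accessed through statements whose access could not be analyzed
   (see gather_mem_refs_stmt).  */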
215 static struct lim_aux_data *
216 init_lim_data (gimple stmt)
218 void **p = pointer_map_insert (lim_aux_data_map, stmt);
220 *p = XCNEW (struct lim_aux_data);
221 return (struct lim_aux_data *) *p;
224 static struct lim_aux_data *
225 get_lim_data (gimple stmt)
227 void **p = pointer_map_contains (lim_aux_data_map, stmt);
228 if (!p)
229 return NULL;
231 return (struct lim_aux_data *) *p;
234 /* Releases the memory occupied by DATA. */
236 static void
237 free_lim_aux_data (struct lim_aux_data *data)
239 data->depends.release ();
240 free (data);
243 static void
244 clear_lim_data (gimple stmt)
246 void **p = pointer_map_contains (lim_aux_data_map, stmt);
247 if (!p)
248 return;
250 free_lim_aux_data ((struct lim_aux_data *) *p);
251 *p = NULL;
255 /* The possibilities of statement movement. */
256 enum move_pos
258 MOVE_IMPOSSIBLE, /* No movement -- side effect expression. */
 259   MOVE_PRESERVE_EXECUTION,     /* Must not cause a non-executed statement
 260                                   to become executed -- memory accesses, ... */
261 MOVE_POSSIBLE /* Unlimited movement. */
265 /* If it is possible to hoist the statement STMT unconditionally,
266 returns MOVE_POSSIBLE.
267 If it is possible to hoist the statement STMT, but we must avoid making
268 it executed if it would not be executed in the original program (e.g.
269 because it may trap), return MOVE_PRESERVE_EXECUTION.
270 Otherwise return MOVE_IMPOSSIBLE. */
272 enum move_pos
273 movement_possibility (gimple stmt)
275 tree lhs;
276 enum move_pos ret = MOVE_POSSIBLE;
278 if (flag_unswitch_loops
279 && gimple_code (stmt) == GIMPLE_COND)
281 /* If we perform unswitching, force the operands of the invariant
282 condition to be moved out of the loop. */
283 return MOVE_POSSIBLE;
286 if (gimple_code (stmt) == GIMPLE_PHI
287 && gimple_phi_num_args (stmt) <= 2
288 && !virtual_operand_p (gimple_phi_result (stmt))
289 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
290 return MOVE_POSSIBLE;
292 if (gimple_get_lhs (stmt) == NULL_TREE)
293 return MOVE_IMPOSSIBLE;
295 if (gimple_vdef (stmt))
296 return MOVE_IMPOSSIBLE;
298 if (stmt_ends_bb_p (stmt)
299 || gimple_has_volatile_ops (stmt)
300 || gimple_has_side_effects (stmt)
301 || stmt_could_throw_p (stmt))
302 return MOVE_IMPOSSIBLE;
304 if (is_gimple_call (stmt))
 306       /* While a pure or const call is guaranteed to have no side effects, we
307 cannot move it arbitrarily. Consider code like
309 char *s = something ();
311 while (1)
313 if (s)
314 t = strlen (s);
315 else
316 t = 0;
319 Here the strlen call cannot be moved out of the loop, even though
320 s is invariant. In addition to possibly creating a call with
321 invalid arguments, moving out a function call that is not executed
322 may cause performance regressions in case the call is costly and
323 not executed at all. */
324 ret = MOVE_PRESERVE_EXECUTION;
325 lhs = gimple_call_lhs (stmt);
327 else if (is_gimple_assign (stmt))
328 lhs = gimple_assign_lhs (stmt);
329 else
330 return MOVE_IMPOSSIBLE;
332 if (TREE_CODE (lhs) == SSA_NAME
333 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
334 return MOVE_IMPOSSIBLE;
336 if (TREE_CODE (lhs) != SSA_NAME
337 || gimple_could_trap_p (stmt))
338 return MOVE_PRESERVE_EXECUTION;
340 /* Non local loads in a transaction cannot be hoisted out. Well,
341 unless the load happens on every path out of the loop, but we
342 don't take this into account yet. */
343 if (flag_tm
344 && gimple_in_transaction (stmt)
345 && gimple_assign_single_p (stmt))
347 tree rhs = gimple_assign_rhs1 (stmt);
348 if (DECL_P (rhs) && is_global_var (rhs))
350 if (dump_file)
352 fprintf (dump_file, "Cannot hoist conditional load of ");
353 print_generic_expr (dump_file, rhs, TDF_SLIM);
354 fprintf (dump_file, " because it is in a transaction.\n");
356 return MOVE_IMPOSSIBLE;
360 return ret;
363 /* Suppose that operand DEF is used inside the LOOP. Returns the outermost
364 loop to that we could move the expression using DEF if it did not have
365 other operands, i.e. the outermost loop enclosing LOOP in that the value
366 of DEF is invariant. */
368 static struct loop *
369 outermost_invariant_loop (tree def, struct loop *loop)
371 gimple def_stmt;
372 basic_block def_bb;
373 struct loop *max_loop;
374 struct lim_aux_data *lim_data;
376 if (!def)
377 return superloop_at_depth (loop, 1);
379 if (TREE_CODE (def) != SSA_NAME)
381 gcc_assert (is_gimple_min_invariant (def));
382 return superloop_at_depth (loop, 1);
385 def_stmt = SSA_NAME_DEF_STMT (def);
386 def_bb = gimple_bb (def_stmt);
387 if (!def_bb)
388 return superloop_at_depth (loop, 1);
390 max_loop = find_common_loop (loop, def_bb->loop_father);
392 lim_data = get_lim_data (def_stmt);
393 if (lim_data != NULL && lim_data->max_loop != NULL)
394 max_loop = find_common_loop (max_loop,
395 loop_outer (lim_data->max_loop));
396 if (max_loop == loop)
397 return NULL;
398 max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);
400 return max_loop;
403 /* DATA is a structure containing information associated with a statement
404 inside LOOP. DEF is one of the operands of this statement.
406 Find the outermost loop enclosing LOOP in that value of DEF is invariant
407 and record this in DATA->max_loop field. If DEF itself is defined inside
408 this loop as well (i.e. we need to hoist it out of the loop if we want
409 to hoist the statement represented by DATA), record the statement in that
410 DEF is defined to the DATA->depends list. Additionally if ADD_COST is true,
411 add the cost of the computation of DEF to the DATA->cost.
413 If DEF is not invariant in LOOP, return false. Otherwise return TRUE. */
415 static bool
416 add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
417 bool add_cost)
419 gimple def_stmt = SSA_NAME_DEF_STMT (def);
420 basic_block def_bb = gimple_bb (def_stmt);
421 struct loop *max_loop;
422 struct lim_aux_data *def_data;
424 if (!def_bb)
425 return true;
427 max_loop = outermost_invariant_loop (def, loop);
428 if (!max_loop)
429 return false;
431 if (flow_loop_nested_p (data->max_loop, max_loop))
432 data->max_loop = max_loop;
434 def_data = get_lim_data (def_stmt);
435 if (!def_data)
436 return true;
438 if (add_cost
439 /* Only add the cost if the statement defining DEF is inside LOOP,
440 i.e. if it is likely that by moving the invariants dependent
441 on it, we will be able to avoid creating a new register for
442 it (since it will be only used in these dependent invariants). */
443 && def_bb->loop_father == loop)
444 data->cost += def_data->cost;
446 data->depends.safe_push (def_stmt);
448 return true;
451 /* Returns an estimate for a cost of statement STMT. The values here
452 are just ad-hoc constants, similar to costs for inlining. */
454 static unsigned
455 stmt_cost (gimple stmt)
457 /* Always try to create possibilities for unswitching. */
458 if (gimple_code (stmt) == GIMPLE_COND
459 || gimple_code (stmt) == GIMPLE_PHI)
460 return LIM_EXPENSIVE;
462 /* We should be hoisting calls if possible. */
463 if (is_gimple_call (stmt))
465 tree fndecl;
467 /* Unless the call is a builtin_constant_p; this always folds to a
468 constant, so moving it is useless. */
469 fndecl = gimple_call_fndecl (stmt);
470 if (fndecl
471 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
472 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P)
473 return 0;
475 return LIM_EXPENSIVE;
478 /* Hoisting memory references out should almost surely be a win. */
479 if (gimple_references_memory_p (stmt))
480 return LIM_EXPENSIVE;
482 if (gimple_code (stmt) != GIMPLE_ASSIGN)
483 return 1;
485 switch (gimple_assign_rhs_code (stmt))
487 case MULT_EXPR:
488 case WIDEN_MULT_EXPR:
489 case WIDEN_MULT_PLUS_EXPR:
490 case WIDEN_MULT_MINUS_EXPR:
491 case DOT_PROD_EXPR:
492 case FMA_EXPR:
493 case TRUNC_DIV_EXPR:
494 case CEIL_DIV_EXPR:
495 case FLOOR_DIV_EXPR:
496 case ROUND_DIV_EXPR:
497 case EXACT_DIV_EXPR:
498 case CEIL_MOD_EXPR:
499 case FLOOR_MOD_EXPR:
500 case ROUND_MOD_EXPR:
501 case TRUNC_MOD_EXPR:
502 case RDIV_EXPR:
503 /* Division and multiplication are usually expensive. */
504 return LIM_EXPENSIVE;
506 case LSHIFT_EXPR:
507 case RSHIFT_EXPR:
508 case WIDEN_LSHIFT_EXPR:
509 case LROTATE_EXPR:
510 case RROTATE_EXPR:
511 /* Shifts and rotates are usually expensive. */
512 return LIM_EXPENSIVE;
514 case CONSTRUCTOR:
515 /* Make vector construction cost proportional to the number
516 of elements. */
517 return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));
519 case SSA_NAME:
520 case PAREN_EXPR:
521 /* Whether or not something is wrapped inside a PAREN_EXPR
522 should not change move cost. Nor should an intermediate
523 unpropagated SSA name copy. */
524 return 0;
526 default:
527 return 1;
531 /* Finds the outermost loop between OUTER and LOOP in that the memory reference
532 REF is independent. If REF is not independent in LOOP, NULL is returned
533 instead. */
535 static struct loop *
536 outermost_indep_loop (struct loop *outer, struct loop *loop, mem_ref_p ref)
538 struct loop *aloop;
540 if (bitmap_bit_p (&ref->stored, loop->num))
541 return NULL;
543 for (aloop = outer;
544 aloop != loop;
545 aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
546 if (!bitmap_bit_p (&ref->stored, aloop->num)
547 && ref_indep_loop_p (aloop, ref))
548 return aloop;
550 if (ref_indep_loop_p (loop, ref))
551 return loop;
552 else
553 return NULL;
556 /* If there is a simple load or store to a memory reference in STMT, returns
557 the location of the memory reference, and sets IS_STORE according to whether
558 it is a store or load. Otherwise, returns NULL. */
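/* Note that the returned pointer points into STMT itself, so the access
   can later be replaced in place (see rewrite_mem_ref_loc below).  */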
560 static tree *
561 simple_mem_ref_in_stmt (gimple stmt, bool *is_store)
563 tree *lhs, *rhs;
565 /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns. */
566 if (!gimple_assign_single_p (stmt))
567 return NULL;
569 lhs = gimple_assign_lhs_ptr (stmt);
570 rhs = gimple_assign_rhs1_ptr (stmt);
572 if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
574 *is_store = false;
575 return rhs;
577 else if (gimple_vdef (stmt)
578 && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
580 *is_store = true;
581 return lhs;
583 else
584 return NULL;
587 /* Returns the memory reference contained in STMT. */
589 static mem_ref_p
590 mem_ref_in_stmt (gimple stmt)
592 bool store;
593 tree *mem = simple_mem_ref_in_stmt (stmt, &store);
594 hashval_t hash;
595 mem_ref_p ref;
597 if (!mem)
598 return NULL;
599 gcc_assert (!store);
601 hash = iterative_hash_expr (*mem, 0);
602 ref = memory_accesses.refs.find_with_hash (*mem, hash);
604 gcc_assert (ref != NULL);
605 return ref;
608 /* From a controlling predicate in DOM determine the arguments from
609 the PHI node PHI that are chosen if the predicate evaluates to
610 true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if
611 they are non-NULL. Returns true if the arguments can be determined,
612 else return false. */
614 static bool
615 extract_true_false_args_from_phi (basic_block dom, gimple phi,
616 tree *true_arg_p, tree *false_arg_p)
618 basic_block bb = gimple_bb (phi);
619 edge true_edge, false_edge, tem;
620 tree arg0 = NULL_TREE, arg1 = NULL_TREE;
622 /* We have to verify that one edge into the PHI node is dominated
623 by the true edge of the predicate block and the other edge
624 dominated by the false edge. This ensures that the PHI argument
625 we are going to take is completely determined by the path we
626 take from the predicate block.
627 We can only use BB dominance checks below if the destination of
628 the true/false edges are dominated by their edge, thus only
629 have a single predecessor. */
630 extract_true_false_edges_from_block (dom, &true_edge, &false_edge);
631 tem = EDGE_PRED (bb, 0);
632 if (tem == true_edge
633 || (single_pred_p (true_edge->dest)
634 && (tem->src == true_edge->dest
635 || dominated_by_p (CDI_DOMINATORS,
636 tem->src, true_edge->dest))))
637 arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
638 else if (tem == false_edge
639 || (single_pred_p (false_edge->dest)
640 && (tem->src == false_edge->dest
641 || dominated_by_p (CDI_DOMINATORS,
642 tem->src, false_edge->dest))))
643 arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
644 else
645 return false;
646 tem = EDGE_PRED (bb, 1);
647 if (tem == true_edge
648 || (single_pred_p (true_edge->dest)
649 && (tem->src == true_edge->dest
650 || dominated_by_p (CDI_DOMINATORS,
651 tem->src, true_edge->dest))))
652 arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
653 else if (tem == false_edge
654 || (single_pred_p (false_edge->dest)
655 && (tem->src == false_edge->dest
656 || dominated_by_p (CDI_DOMINATORS,
657 tem->src, false_edge->dest))))
658 arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
659 else
660 return false;
661 if (!arg0 || !arg1)
662 return false;
664 if (true_arg_p)
665 *true_arg_p = arg0;
666 if (false_arg_p)
667 *false_arg_p = arg1;
669 return true;
672 /* Determine the outermost loop to that it is possible to hoist a statement
673 STMT and store it to LIM_DATA (STMT)->max_loop. To do this we determine
674 the outermost loop in that the value computed by STMT is invariant.
675 If MUST_PRESERVE_EXEC is true, additionally choose such a loop that
676 we preserve the fact whether STMT is executed. It also fills other related
677 information to LIM_DATA (STMT).
679 The function returns false if STMT cannot be hoisted outside of the loop it
680 is defined in, and true otherwise. */
682 static bool
683 determine_max_movement (gimple stmt, bool must_preserve_exec)
685 basic_block bb = gimple_bb (stmt);
686 struct loop *loop = bb->loop_father;
687 struct loop *level;
688 struct lim_aux_data *lim_data = get_lim_data (stmt);
689 tree val;
690 ssa_op_iter iter;
692 if (must_preserve_exec)
693 level = ALWAYS_EXECUTED_IN (bb);
694 else
695 level = superloop_at_depth (loop, 1);
696 lim_data->max_loop = level;
698 if (gimple_code (stmt) == GIMPLE_PHI)
700 use_operand_p use_p;
701 unsigned min_cost = UINT_MAX;
702 unsigned total_cost = 0;
703 struct lim_aux_data *def_data;
705 /* We will end up promoting dependencies to be unconditionally
706 evaluated. For this reason the PHI cost (and thus the
707 cost we remove from the loop by doing the invariant motion)
708 is that of the cheapest PHI argument dependency chain. */
709 FOR_EACH_PHI_ARG (use_p, stmt, iter, SSA_OP_USE)
711 val = USE_FROM_PTR (use_p);
712 if (TREE_CODE (val) != SSA_NAME)
713 continue;
714 if (!add_dependency (val, lim_data, loop, false))
715 return false;
716 def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
717 if (def_data)
719 min_cost = MIN (min_cost, def_data->cost);
720 total_cost += def_data->cost;
724 lim_data->cost += min_cost;
726 if (gimple_phi_num_args (stmt) > 1)
728 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
729 gimple cond;
730 if (gsi_end_p (gsi_last_bb (dom)))
731 return false;
732 cond = gsi_stmt (gsi_last_bb (dom));
733 if (gimple_code (cond) != GIMPLE_COND)
734 return false;
735 /* Verify that this is an extended form of a diamond and
736 the PHI arguments are completely controlled by the
737 predicate in DOM. */
738 if (!extract_true_false_args_from_phi (dom, stmt, NULL, NULL))
739 return false;
741 /* Fold in dependencies and cost of the condition. */
742 FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
744 if (!add_dependency (val, lim_data, loop, false))
745 return false;
746 def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
747 if (def_data)
748 total_cost += def_data->cost;
751 /* We want to avoid unconditionally executing very expensive
752 operations. As costs for our dependencies cannot be
 753             negative, just claim we are not invariant for this case.
754 We also are not sure whether the control-flow inside the
755 loop will vanish. */
756 if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
757 && !(min_cost != 0
758 && total_cost / min_cost <= 2))
759 return false;
761 /* Assume that the control-flow in the loop will vanish.
762 ??? We should verify this and not artificially increase
763 the cost if that is not the case. */
764 lim_data->cost += stmt_cost (stmt);
767 return true;
769 else
770 FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
771 if (!add_dependency (val, lim_data, loop, true))
772 return false;
774 if (gimple_vuse (stmt))
776 mem_ref_p ref = mem_ref_in_stmt (stmt);
778 if (ref)
780 lim_data->max_loop
781 = outermost_indep_loop (lim_data->max_loop, loop, ref);
782 if (!lim_data->max_loop)
783 return false;
785 else
787 if ((val = gimple_vuse (stmt)) != NULL_TREE)
789 if (!add_dependency (val, lim_data, loop, false))
790 return false;
795 lim_data->cost += stmt_cost (stmt);
797 return true;
800 /* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
801 and that one of the operands of this statement is computed by STMT.
802 Ensure that STMT (together with all the statements that define its
803 operands) is hoisted at least out of the loop LEVEL. */
805 static void
806 set_level (gimple stmt, struct loop *orig_loop, struct loop *level)
808 struct loop *stmt_loop = gimple_bb (stmt)->loop_father;
809 struct lim_aux_data *lim_data;
810 gimple dep_stmt;
811 unsigned i;
813 stmt_loop = find_common_loop (orig_loop, stmt_loop);
814 lim_data = get_lim_data (stmt);
815 if (lim_data != NULL && lim_data->tgt_loop != NULL)
816 stmt_loop = find_common_loop (stmt_loop,
817 loop_outer (lim_data->tgt_loop));
818 if (flow_loop_nested_p (stmt_loop, level))
819 return;
821 gcc_assert (level == lim_data->max_loop
822 || flow_loop_nested_p (lim_data->max_loop, level));
824 lim_data->tgt_loop = level;
825 FOR_EACH_VEC_ELT (lim_data->depends, i, dep_stmt)
826 set_level (dep_stmt, orig_loop, level);
829 /* Determines an outermost loop from that we want to hoist the statement STMT.
 830    For now we choose the outermost possible loop.  TODO -- use profiling
831 information to set it more sanely. */
833 static void
834 set_profitable_level (gimple stmt)
836 set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
839 /* Returns true if STMT is a call that has side effects. */
841 static bool
842 nonpure_call_p (gimple stmt)
844 if (gimple_code (stmt) != GIMPLE_CALL)
845 return false;
847 return gimple_has_side_effects (stmt);
850 /* Rewrite a/b to a*(1/b). Return the invariant stmt to process. */
852 static gimple
853 rewrite_reciprocal (gimple_stmt_iterator *bsi)
855 gimple stmt, stmt1, stmt2;
856 tree name, lhs, type;
857 tree real_one;
858 gimple_stmt_iterator gsi;
860 stmt = gsi_stmt (*bsi);
861 lhs = gimple_assign_lhs (stmt);
862 type = TREE_TYPE (lhs);
864 real_one = build_one_cst (type);
866 name = make_temp_ssa_name (type, NULL, "reciptmp");
867 stmt1 = gimple_build_assign_with_ops (RDIV_EXPR, name, real_one,
868 gimple_assign_rhs2 (stmt));
870 stmt2 = gimple_build_assign_with_ops (MULT_EXPR, lhs, name,
871 gimple_assign_rhs1 (stmt));
873 /* Replace division stmt with reciprocal and multiply stmts.
874 The multiply stmt is not invariant, so update iterator
875 and avoid rescanning. */
876 gsi = *bsi;
877 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
878 gsi_replace (&gsi, stmt2, true);
880 /* Continue processing with invariant reciprocal statement. */
881 return stmt1;
884 /* Check if the pattern at *BSI is a bittest of the form
885 (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0. */
887 static gimple
888 rewrite_bittest (gimple_stmt_iterator *bsi)
890 gimple stmt, use_stmt, stmt1, stmt2;
891 tree lhs, name, t, a, b;
892 use_operand_p use;
894 stmt = gsi_stmt (*bsi);
895 lhs = gimple_assign_lhs (stmt);
897 /* Verify that the single use of lhs is a comparison against zero. */
898 if (TREE_CODE (lhs) != SSA_NAME
899 || !single_imm_use (lhs, &use, &use_stmt)
900 || gimple_code (use_stmt) != GIMPLE_COND)
901 return stmt;
902 if (gimple_cond_lhs (use_stmt) != lhs
903 || (gimple_cond_code (use_stmt) != NE_EXPR
904 && gimple_cond_code (use_stmt) != EQ_EXPR)
905 || !integer_zerop (gimple_cond_rhs (use_stmt)))
906 return stmt;
908 /* Get at the operands of the shift. The rhs is TMP1 & 1. */
909 stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
910 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
911 return stmt;
913 /* There is a conversion in between possibly inserted by fold. */
914 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
916 t = gimple_assign_rhs1 (stmt1);
917 if (TREE_CODE (t) != SSA_NAME
918 || !has_single_use (t))
919 return stmt;
920 stmt1 = SSA_NAME_DEF_STMT (t);
921 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
922 return stmt;
925 /* Verify that B is loop invariant but A is not. Verify that with
926 all the stmt walking we are still in the same loop. */
927 if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
928 || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
929 return stmt;
931 a = gimple_assign_rhs1 (stmt1);
932 b = gimple_assign_rhs2 (stmt1);
934 if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
935 && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
937 gimple_stmt_iterator rsi;
939 /* 1 << B */
940 t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
941 build_int_cst (TREE_TYPE (a), 1), b);
942 name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
943 stmt1 = gimple_build_assign (name, t);
945 /* A & (1 << B) */
946 t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
947 name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
948 stmt2 = gimple_build_assign (name, t);
950 /* Replace the SSA_NAME we compare against zero. Adjust
951 the type of zero accordingly. */
952 SET_USE (use, name);
953 gimple_cond_set_rhs (use_stmt, build_int_cst_type (TREE_TYPE (name), 0));
955 /* Don't use gsi_replace here, none of the new assignments sets
956 the variable originally set in stmt. Move bsi to stmt1, and
957 then remove the original stmt, so that we get a chance to
958 retain debug info for it. */
959 rsi = *bsi;
960 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
961 gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
962 gsi_remove (&rsi, true);
964 return stmt1;
967 return stmt;
970 /* For each statement determines the outermost loop in that it is invariant,
 971    statements on whose motion it depends and the cost of the computation.
 972    This information is stored to the LIM_DATA structure associated with
 973    each statement.  */
974 class invariantness_dom_walker : public dom_walker
976 public:
977 invariantness_dom_walker (cdi_direction direction)
978 : dom_walker (direction) {}
980 virtual void before_dom_children (basic_block);
983 /* Determine the outermost loops in that statements in basic block BB are
984 invariant, and record them to the LIM_DATA associated with the statements.
985 Callback for dom_walker. */
987 void
988 invariantness_dom_walker::before_dom_children (basic_block bb)
990 enum move_pos pos;
991 gimple_stmt_iterator bsi;
992 gimple stmt;
993 bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
994 struct loop *outermost = ALWAYS_EXECUTED_IN (bb);
995 struct lim_aux_data *lim_data;
997 if (!loop_outer (bb->loop_father))
998 return;
1000 if (dump_file && (dump_flags & TDF_DETAILS))
1001 fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
1002 bb->index, bb->loop_father->num, loop_depth (bb->loop_father));
 1004   /* Look at PHI nodes, but only if there are at most two.
1005 ??? We could relax this further by post-processing the inserted
1006 code and transforming adjacent cond-exprs with the same predicate
1007 to control flow again. */
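  /* The comma expressions in the condition below advance BSI, so the test
     succeeds iff the block contains exactly one or two PHI nodes.  */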
1008 bsi = gsi_start_phis (bb);
1009 if (!gsi_end_p (bsi)
1010 && ((gsi_next (&bsi), gsi_end_p (bsi))
1011 || (gsi_next (&bsi), gsi_end_p (bsi))))
1012 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1014 stmt = gsi_stmt (bsi);
1016 pos = movement_possibility (stmt);
1017 if (pos == MOVE_IMPOSSIBLE)
1018 continue;
1020 lim_data = init_lim_data (stmt);
1021 lim_data->always_executed_in = outermost;
1023 if (!determine_max_movement (stmt, false))
1025 lim_data->max_loop = NULL;
1026 continue;
1029 if (dump_file && (dump_flags & TDF_DETAILS))
1031 print_gimple_stmt (dump_file, stmt, 2, 0);
1032 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1033 loop_depth (lim_data->max_loop),
1034 lim_data->cost);
1037 if (lim_data->cost >= LIM_EXPENSIVE)
1038 set_profitable_level (stmt);
1041 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1043 stmt = gsi_stmt (bsi);
1045 pos = movement_possibility (stmt);
1046 if (pos == MOVE_IMPOSSIBLE)
1048 if (nonpure_call_p (stmt))
1050 maybe_never = true;
1051 outermost = NULL;
1053 /* Make sure to note always_executed_in for stores to make
1054 store-motion work. */
1055 else if (stmt_makes_single_store (stmt))
1057 struct lim_aux_data *lim_data = init_lim_data (stmt);
1058 lim_data->always_executed_in = outermost;
1060 continue;
1063 if (is_gimple_assign (stmt)
1064 && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
1065 == GIMPLE_BINARY_RHS))
1067 tree op0 = gimple_assign_rhs1 (stmt);
1068 tree op1 = gimple_assign_rhs2 (stmt);
1069 struct loop *ol1 = outermost_invariant_loop (op1,
1070 loop_containing_stmt (stmt));
1072 /* If divisor is invariant, convert a/b to a*(1/b), allowing reciprocal
1073 to be hoisted out of loop, saving expensive divide. */
1074 if (pos == MOVE_POSSIBLE
1075 && gimple_assign_rhs_code (stmt) == RDIV_EXPR
1076 && flag_unsafe_math_optimizations
1077 && !flag_trapping_math
1078 && ol1 != NULL
1079 && outermost_invariant_loop (op0, ol1) == NULL)
1080 stmt = rewrite_reciprocal (&bsi);
1082 /* If the shift count is invariant, convert (A >> B) & 1 to
1083 A & (1 << B) allowing the bit mask to be hoisted out of the loop
1084 saving an expensive shift. */
1085 if (pos == MOVE_POSSIBLE
1086 && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
1087 && integer_onep (op1)
1088 && TREE_CODE (op0) == SSA_NAME
1089 && has_single_use (op0))
1090 stmt = rewrite_bittest (&bsi);
1093 lim_data = init_lim_data (stmt);
1094 lim_data->always_executed_in = outermost;
1096 if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
1097 continue;
1099 if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
1101 lim_data->max_loop = NULL;
1102 continue;
1105 if (dump_file && (dump_flags & TDF_DETAILS))
1107 print_gimple_stmt (dump_file, stmt, 2, 0);
1108 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1109 loop_depth (lim_data->max_loop),
1110 lim_data->cost);
1113 if (lim_data->cost >= LIM_EXPENSIVE)
1114 set_profitable_level (stmt);
1118 class move_computations_dom_walker : public dom_walker
1120 public:
1121 move_computations_dom_walker (cdi_direction direction)
1122 : dom_walker (direction), todo_ (0) {}
1124 virtual void before_dom_children (basic_block);
1126 unsigned int todo_;
1129 /* Return true if CODE is an operation that when operating on signed
1130 integer types involves undefined behavior on overflow and the
1131 operation can be expressed with unsigned arithmetic. */
1133 static bool
1134 arith_code_with_undefined_signed_overflow (tree_code code)
1136 switch (code)
1138 case PLUS_EXPR:
1139 case MINUS_EXPR:
1140 case MULT_EXPR:
1141 case NEGATE_EXPR:
1142 case POINTER_PLUS_EXPR:
1143 return true;
1144 default:
1145 return false;
1149 /* Rewrite STMT, an assignment with a signed integer or pointer arithmetic
1150 operation that can be transformed to unsigned arithmetic by converting
1151 its operand, carrying out the operation in the corresponding unsigned
1152 type and converting the result back to the original type.
1154 Returns a sequence of statements that replace STMT and also contain
1155 a modified form of STMT itself. */
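/* Conceptually (the SSA names below are only illustrative), a signed addition
     i_1 = i_2 + j_3;
   is replaced by
     u_4 = (unsigned int) i_2;
     u_5 = (unsigned int) j_3;
     u_6 = u_4 + u_5;
     i_1 = (int) u_6;  */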
1157 static gimple_seq
1158 rewrite_to_defined_overflow (gimple stmt)
1160 if (dump_file && (dump_flags & TDF_DETAILS))
1162 fprintf (dump_file, "rewriting stmt with undefined signed "
1163 "overflow ");
1164 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1167 tree lhs = gimple_assign_lhs (stmt);
1168 tree type = unsigned_type_for (TREE_TYPE (lhs));
1169 gimple_seq stmts = NULL;
1170 for (unsigned i = 1; i < gimple_num_ops (stmt); ++i)
1172 gimple_seq stmts2 = NULL;
1173 gimple_set_op (stmt, i,
1174 force_gimple_operand (fold_convert (type,
1175 gimple_op (stmt, i)),
1176 &stmts2, true, NULL_TREE));
1177 gimple_seq_add_seq (&stmts, stmts2);
1179 gimple_assign_set_lhs (stmt, make_ssa_name (type, stmt));
1180 if (gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR)
1181 gimple_assign_set_rhs_code (stmt, PLUS_EXPR);
1182 gimple_seq_add_stmt (&stmts, stmt);
1183 gimple cvt = gimple_build_assign_with_ops
1184 (NOP_EXPR, lhs, gimple_assign_lhs (stmt), NULL_TREE);
1185 gimple_seq_add_stmt (&stmts, cvt);
1187 return stmts;
1190 /* Hoist the statements in basic block BB out of the loops prescribed by
1191 data stored in LIM_DATA structures associated with each statement. Callback
1192 for walk_dominator_tree. */
1194 void
1195 move_computations_dom_walker::before_dom_children (basic_block bb)
1197 struct loop *level;
1198 gimple_stmt_iterator bsi;
1199 gimple stmt;
1200 unsigned cost = 0;
1201 struct lim_aux_data *lim_data;
1203 if (!loop_outer (bb->loop_father))
1204 return;
1206 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
1208 gimple new_stmt;
1209 stmt = gsi_stmt (bsi);
1211 lim_data = get_lim_data (stmt);
1212 if (lim_data == NULL)
1214 gsi_next (&bsi);
1215 continue;
1218 cost = lim_data->cost;
1219 level = lim_data->tgt_loop;
1220 clear_lim_data (stmt);
1222 if (!level)
1224 gsi_next (&bsi);
1225 continue;
1228 if (dump_file && (dump_flags & TDF_DETAILS))
1230 fprintf (dump_file, "Moving PHI node\n");
1231 print_gimple_stmt (dump_file, stmt, 0, 0);
1232 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1233 cost, level->num);
1236 if (gimple_phi_num_args (stmt) == 1)
1238 tree arg = PHI_ARG_DEF (stmt, 0);
1239 new_stmt = gimple_build_assign_with_ops (TREE_CODE (arg),
1240 gimple_phi_result (stmt),
1241 arg, NULL_TREE);
1243 else
1245 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
1246 gimple cond = gsi_stmt (gsi_last_bb (dom));
1247 tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
1248 /* Get the PHI arguments corresponding to the true and false
1249 edges of COND. */
1250 extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
1251 gcc_assert (arg0 && arg1);
1252 t = build2 (gimple_cond_code (cond), boolean_type_node,
1253 gimple_cond_lhs (cond), gimple_cond_rhs (cond));
1254 new_stmt = gimple_build_assign_with_ops (COND_EXPR,
1255 gimple_phi_result (stmt),
1256 t, arg0, arg1);
1257 todo_ |= TODO_cleanup_cfg;
1259 gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
1260 remove_phi_node (&bsi, false);
1263 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
1265 edge e;
1267 stmt = gsi_stmt (bsi);
1269 lim_data = get_lim_data (stmt);
1270 if (lim_data == NULL)
1272 gsi_next (&bsi);
1273 continue;
1276 cost = lim_data->cost;
1277 level = lim_data->tgt_loop;
1278 clear_lim_data (stmt);
1280 if (!level)
1282 gsi_next (&bsi);
1283 continue;
1286 /* We do not really want to move conditionals out of the loop; we just
1287 placed it here to force its operands to be moved if necessary. */
1288 if (gimple_code (stmt) == GIMPLE_COND)
1289 continue;
1291 if (dump_file && (dump_flags & TDF_DETAILS))
1293 fprintf (dump_file, "Moving statement\n");
1294 print_gimple_stmt (dump_file, stmt, 0, 0);
1295 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1296 cost, level->num);
1299 e = loop_preheader_edge (level);
1300 gcc_assert (!gimple_vdef (stmt));
1301 if (gimple_vuse (stmt))
1303 /* The new VUSE is the one from the virtual PHI in the loop
1304 header or the one already present. */
1305 gimple_stmt_iterator gsi2;
1306 for (gsi2 = gsi_start_phis (e->dest);
1307 !gsi_end_p (gsi2); gsi_next (&gsi2))
1309 gimple phi = gsi_stmt (gsi2);
1310 if (virtual_operand_p (gimple_phi_result (phi)))
1312 gimple_set_vuse (stmt, PHI_ARG_DEF_FROM_EDGE (phi, e));
1313 break;
1317 gsi_remove (&bsi, false);
1318 /* In case this is a stmt that is not unconditionally executed
1319 when the target loop header is executed and the stmt may
1320 invoke undefined integer or pointer overflow rewrite it to
1321 unsigned arithmetic. */
1322 if (is_gimple_assign (stmt)
1323 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))
1324 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (gimple_assign_lhs (stmt)))
1325 && arith_code_with_undefined_signed_overflow
1326 (gimple_assign_rhs_code (stmt))
1327 && (!ALWAYS_EXECUTED_IN (bb)
1328 || !(ALWAYS_EXECUTED_IN (bb) == level
1329 || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1330 gsi_insert_seq_on_edge (e, rewrite_to_defined_overflow (stmt));
1331 else
1332 gsi_insert_on_edge (e, stmt);
1336 /* Hoist the statements out of the loops prescribed by data stored in
 1337    LIM_DATA structures associated with each statement.  */
1339 static unsigned int
1340 move_computations (void)
1342 move_computations_dom_walker walker (CDI_DOMINATORS);
1343 walker.walk (cfun->cfg->x_entry_block_ptr);
1345 gsi_commit_edge_inserts ();
1346 if (need_ssa_update_p (cfun))
1347 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
1349 return walker.todo_;
1352 /* Checks whether the statement defining variable *INDEX can be hoisted
1353 out of the loop passed in DATA. Callback for for_each_index. */
1355 static bool
1356 may_move_till (tree ref, tree *index, void *data)
1358 struct loop *loop = (struct loop *) data, *max_loop;
1360 /* If REF is an array reference, check also that the step and the lower
1361 bound is invariant in LOOP. */
1362 if (TREE_CODE (ref) == ARRAY_REF)
1364 tree step = TREE_OPERAND (ref, 3);
1365 tree lbound = TREE_OPERAND (ref, 2);
1367 max_loop = outermost_invariant_loop (step, loop);
1368 if (!max_loop)
1369 return false;
1371 max_loop = outermost_invariant_loop (lbound, loop);
1372 if (!max_loop)
1373 return false;
1376 max_loop = outermost_invariant_loop (*index, loop);
1377 if (!max_loop)
1378 return false;
1380 return true;
1383 /* If OP is SSA NAME, force the statement that defines it to be
1384 moved out of the LOOP. ORIG_LOOP is the loop in that EXPR is used. */
1386 static void
1387 force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
1389 gimple stmt;
1391 if (!op
1392 || is_gimple_min_invariant (op))
1393 return;
1395 gcc_assert (TREE_CODE (op) == SSA_NAME);
1397 stmt = SSA_NAME_DEF_STMT (op);
1398 if (gimple_nop_p (stmt))
1399 return;
1401 set_level (stmt, orig_loop, loop);
1404 /* Forces statement defining invariants in REF (and *INDEX) to be moved out of
1405 the LOOP. The reference REF is used in the loop ORIG_LOOP. Callback for
1406 for_each_index. */
1408 struct fmt_data
1410 struct loop *loop;
1411 struct loop *orig_loop;
1414 static bool
1415 force_move_till (tree ref, tree *index, void *data)
1417 struct fmt_data *fmt_data = (struct fmt_data *) data;
1419 if (TREE_CODE (ref) == ARRAY_REF)
1421 tree step = TREE_OPERAND (ref, 3);
1422 tree lbound = TREE_OPERAND (ref, 2);
1424 force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
1425 force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
1428 force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);
1430 return true;
1433 /* A function to free the mem_ref object OBJ. */
1435 static void
1436 memref_free (struct mem_ref *mem)
1438 unsigned i;
1439 vec<mem_ref_loc> *accs;
1441 FOR_EACH_VEC_ELT (mem->accesses_in_loop, i, accs)
1442 accs->release ();
1443 mem->accesses_in_loop.release ();
1445 free (mem);
1448 /* Allocates and returns a memory reference description for MEM whose hash
1449 value is HASH and id is ID. */
1451 static mem_ref_p
1452 mem_ref_alloc (tree mem, unsigned hash, unsigned id)
1454 mem_ref_p ref = XNEW (struct mem_ref);
1455 ao_ref_init (&ref->mem, mem);
1456 ref->id = id;
1457 ref->hash = hash;
1458 bitmap_initialize (&ref->stored, &lim_bitmap_obstack);
1459 bitmap_initialize (&ref->indep_loop, &lim_bitmap_obstack);
1460 bitmap_initialize (&ref->dep_loop, &lim_bitmap_obstack);
1461 ref->accesses_in_loop.create (0);
1463 return ref;
1466 /* Records memory reference location *LOC in LOOP to the memory reference
1467 description REF. The reference occurs in statement STMT. */
1469 static void
1470 record_mem_ref_loc (mem_ref_p ref, struct loop *loop, gimple stmt, tree *loc)
1472 mem_ref_loc aref;
1474 if (ref->accesses_in_loop.length ()
1475 <= (unsigned) loop->num)
1476 ref->accesses_in_loop.safe_grow_cleared (loop->num + 1);
1478 aref.stmt = stmt;
1479 aref.ref = loc;
1480 ref->accesses_in_loop[loop->num].safe_push (aref);
1483 /* Marks reference REF as stored in LOOP. */
1485 static void
1486 mark_ref_stored (mem_ref_p ref, struct loop *loop)
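  /* bitmap_set_bit returns true only when the bit was not set before, so the
     walk up the loop tree stops at the first loop that already has REF
     recorded as stored.  */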
1488 while (loop != current_loops->tree_root
1489 && bitmap_set_bit (&ref->stored, loop->num))
1490 loop = loop_outer (loop);
1493 /* Gathers memory references in statement STMT in LOOP, storing the
1494 information about them in the memory_accesses structure. Marks
1495 the vops accessed through unrecognized statements there as
1496 well. */
1498 static void
1499 gather_mem_refs_stmt (struct loop *loop, gimple stmt)
1501 tree *mem = NULL;
1502 hashval_t hash;
1503 mem_ref **slot;
1504 mem_ref_p ref;
1505 bool is_stored;
1506 unsigned id;
1508 if (!gimple_vuse (stmt))
1509 return;
1511 mem = simple_mem_ref_in_stmt (stmt, &is_stored);
1512 if (!mem)
1514 /* We use the shared mem_ref for all unanalyzable refs. */
1515 id = UNANALYZABLE_MEM_ID;
1516 ref = memory_accesses.refs_list[id];
1517 if (dump_file && (dump_flags & TDF_DETAILS))
1519 fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
1520 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1522 is_stored = gimple_vdef (stmt);
1524 else
1526 hash = iterative_hash_expr (*mem, 0);
1527 slot = memory_accesses.refs.find_slot_with_hash (*mem, hash, INSERT);
1528 if (*slot)
1530 ref = (mem_ref_p) *slot;
1531 id = ref->id;
1533 else
1535 id = memory_accesses.refs_list.length ();
1536 ref = mem_ref_alloc (*mem, hash, id);
1537 memory_accesses.refs_list.safe_push (ref);
1538 *slot = ref;
1540 if (dump_file && (dump_flags & TDF_DETAILS))
1542 fprintf (dump_file, "Memory reference %u: ", id);
1543 print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM);
1544 fprintf (dump_file, "\n");
1548 record_mem_ref_loc (ref, loop, stmt, mem);
1550 bitmap_set_bit (&memory_accesses.refs_in_loop[loop->num], ref->id);
1551 if (is_stored)
1553 bitmap_set_bit (&memory_accesses.refs_stored_in_loop[loop->num], ref->id);
1554 mark_ref_stored (ref, loop);
1556 return;
1559 static unsigned *bb_loop_postorder;
 1561 /* qsort comparison function to sort blocks after their loop father's postorder.  */
1563 static int
1564 sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_)
1566 basic_block bb1 = *(basic_block *)const_cast<void *>(bb1_);
1567 basic_block bb2 = *(basic_block *)const_cast<void *>(bb2_);
1568 struct loop *loop1 = bb1->loop_father;
1569 struct loop *loop2 = bb2->loop_father;
1570 if (loop1->num == loop2->num)
1571 return 0;
1572 return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
1575 /* Gathers memory references in loops. */
1577 static void
1578 analyze_memory_references (void)
1580 gimple_stmt_iterator bsi;
1581 basic_block bb, *bbs;
1582 struct loop *loop, *outer;
1583 loop_iterator li;
1584 unsigned i, n;
1586 /* Initialize bb_loop_postorder with a mapping from loop->num to
1587 its postorder index. */
1588 i = 0;
1589 bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun));
1590 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1591 bb_loop_postorder[loop->num] = i++;
1592 /* Collect all basic-blocks in loops and sort them after their
 1593      loops' postorder.  */
1594 i = 0;
1595 bbs = XNEWVEC (basic_block, n_basic_blocks - NUM_FIXED_BLOCKS);
1596 FOR_EACH_BB (bb)
1597 if (bb->loop_father != current_loops->tree_root)
1598 bbs[i++] = bb;
1599 n = i;
1600 qsort (bbs, n, sizeof (basic_block), sort_bbs_in_loop_postorder_cmp);
1601 free (bb_loop_postorder);
1603 /* Visit blocks in loop postorder and assign mem-ref IDs in that order.
1604 That results in better locality for all the bitmaps. */
1605 for (i = 0; i < n; ++i)
1607 basic_block bb = bbs[i];
1608 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1609 gather_mem_refs_stmt (bb->loop_father, gsi_stmt (bsi));
1612 free (bbs);
1614 /* Propagate the information about accessed memory references up
1615 the loop hierarchy. */
1616 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1618 /* Finalize the overall touched references (including subloops). */
1619 bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
1620 &memory_accesses.refs_stored_in_loop[loop->num]);
1622 /* Propagate the information about accessed memory references up
1623 the loop hierarchy. */
1624 outer = loop_outer (loop);
1625 if (outer == current_loops->tree_root)
1626 continue;
1628 bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[outer->num],
1629 &memory_accesses.all_refs_stored_in_loop[loop->num]);
1633 /* Returns true if MEM1 and MEM2 may alias. TTAE_CACHE is used as a cache in
1634 tree_to_aff_combination_expand. */
1636 static bool
1637 mem_refs_may_alias_p (mem_ref_p mem1, mem_ref_p mem2,
1638 struct pointer_map_t **ttae_cache)
1640 /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
1641 object and their offset differ in such a way that the locations cannot
1642 overlap, then they cannot alias. */
1643 double_int size1, size2;
1644 aff_tree off1, off2;
1646 /* Perform basic offset and type-based disambiguation. */
1647 if (!refs_may_alias_p_1 (&mem1->mem, &mem2->mem, true))
1648 return false;
1650 /* The expansion of addresses may be a bit expensive, thus we only do
1651 the check at -O2 and higher optimization levels. */
1652 if (optimize < 2)
1653 return true;
1655 get_inner_reference_aff (mem1->mem.ref, &off1, &size1);
1656 get_inner_reference_aff (mem2->mem.ref, &off2, &size2);
1657 aff_combination_expand (&off1, ttae_cache);
1658 aff_combination_expand (&off2, ttae_cache);
1659 aff_combination_scale (&off1, double_int_minus_one);
1660 aff_combination_add (&off2, &off1);
1662 if (aff_comb_cannot_overlap_p (&off2, size1, size2))
1663 return false;
1665 return true;
1668 /* Iterates over all locations of REF in LOOP and its subloops calling
1669 fn.operator() with the location as argument. When that operator
1670 returns true the iteration is stopped and true is returned.
1671 Otherwise false is returned. */
1673 template <typename FN>
1674 static bool
1675 for_all_locs_in_loop (struct loop *loop, mem_ref_p ref, FN fn)
1677 unsigned i;
1678 mem_ref_loc_p loc;
1679 struct loop *subloop;
1681 if (ref->accesses_in_loop.length () > (unsigned) loop->num)
1682 FOR_EACH_VEC_ELT (ref->accesses_in_loop[loop->num], i, loc)
1683 if (fn (loc))
1684 return true;
1686 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
1687 if (for_all_locs_in_loop (subloop, ref, fn))
1688 return true;
1690 return false;
1693 /* Rewrites location LOC by TMP_VAR. */
1695 struct rewrite_mem_ref_loc
1697 rewrite_mem_ref_loc (tree tmp_var_) : tmp_var (tmp_var_) {}
1698 bool operator () (mem_ref_loc_p loc);
1699 tree tmp_var;
1702 bool
1703 rewrite_mem_ref_loc::operator () (mem_ref_loc_p loc)
1705 *loc->ref = tmp_var;
1706 update_stmt (loc->stmt);
1707 return false;
1710 /* Rewrites all references to REF in LOOP by variable TMP_VAR. */
1712 static void
1713 rewrite_mem_refs (struct loop *loop, mem_ref_p ref, tree tmp_var)
1715 for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var));
1718 /* Stores the first reference location in LOCP. */
1720 struct first_mem_ref_loc_1
1722 first_mem_ref_loc_1 (mem_ref_loc_p *locp_) : locp (locp_) {}
1723 bool operator () (mem_ref_loc_p loc);
1724 mem_ref_loc_p *locp;
1727 bool
1728 first_mem_ref_loc_1::operator () (mem_ref_loc_p loc)
1730 *locp = loc;
1731 return true;
1734 /* Returns the first reference location to REF in LOOP. */
1736 static mem_ref_loc_p
1737 first_mem_ref_loc (struct loop *loop, mem_ref_p ref)
1739 mem_ref_loc_p locp = NULL;
1740 for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp));
1741 return locp;
1744 struct prev_flag_edges {
1745 /* Edge to insert new flag comparison code. */
1746 edge append_cond_position;
1748 /* Edge for fall through from previous flag comparison. */
1749 edge last_cond_fallthru;
1752 /* Helper function for execute_sm. Emit code to store TMP_VAR into
1753 MEM along edge EX.
1755 The store is only done if MEM has changed. We do this so no
1756 changes to MEM occur on code paths that did not originally store
1757 into it.
1759 The common case for execute_sm will transform:
1761 for (...) {
1762 if (foo)
1763 stuff;
1764 else
1765 MEM = TMP_VAR;
1768 into:
1770 lsm = MEM;
1771 for (...) {
1772 if (foo)
1773 stuff;
1774 else
1775 lsm = TMP_VAR;
1777 MEM = lsm;
1779 This function will generate:
1781 lsm = MEM;
1783 lsm_flag = false;
1785 for (...) {
1786 if (foo)
1787 stuff;
1788 else {
1789 lsm = TMP_VAR;
1790 lsm_flag = true;
1793 if (lsm_flag) <--
1794 MEM = lsm; <--
1797 static void
1798 execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag)
1800 basic_block new_bb, then_bb, old_dest;
1801 bool loop_has_only_one_exit;
1802 edge then_old_edge, orig_ex = ex;
1803 gimple_stmt_iterator gsi;
1804 gimple stmt;
1805 struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
1807 /* ?? Insert store after previous store if applicable. See note
1808 below. */
1809 if (prev_edges)
1810 ex = prev_edges->append_cond_position;
1812 loop_has_only_one_exit = single_pred_p (ex->dest);
1814 if (loop_has_only_one_exit)
1815 ex = split_block_after_labels (ex->dest);
1817 old_dest = ex->dest;
1818 new_bb = split_edge (ex);
1819 then_bb = create_empty_bb (new_bb);
1820 if (current_loops && new_bb->loop_father)
1821 add_bb_to_loop (then_bb, new_bb->loop_father);
1823 gsi = gsi_start_bb (new_bb);
1824 stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
1825 NULL_TREE, NULL_TREE);
1826 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1828 gsi = gsi_start_bb (then_bb);
1829 /* Insert actual store. */
1830 stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
1831 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1833 make_edge (new_bb, then_bb, EDGE_TRUE_VALUE);
1834 make_edge (new_bb, old_dest, EDGE_FALSE_VALUE);
1835 then_old_edge = make_edge (then_bb, old_dest, EDGE_FALLTHRU);
1837 set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);
1839 if (prev_edges)
1841 basic_block prevbb = prev_edges->last_cond_fallthru->src;
1842 redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
1843 set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
1844 set_immediate_dominator (CDI_DOMINATORS, old_dest,
1845 recompute_dominator (CDI_DOMINATORS, old_dest));
1848 /* ?? Because stores may alias, they must happen in the exact
1849 sequence they originally happened. Save the position right after
1850 the (_lsm) store we just created so we can continue appending after
1851 it and maintain the original order. */
1853 struct prev_flag_edges *p;
1855 if (orig_ex->aux)
1856 orig_ex->aux = NULL;
1857 alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
1858 p = (struct prev_flag_edges *) orig_ex->aux;
1859 p->append_cond_position = then_old_edge;
1860 p->last_cond_fallthru = find_edge (new_bb, old_dest);
1861 orig_ex->aux = (void *) p;
1864 if (!loop_has_only_one_exit)
1865 for (gsi = gsi_start_phis (old_dest); !gsi_end_p (gsi); gsi_next (&gsi))
1867 gimple phi = gsi_stmt (gsi);
1868 unsigned i;
1870 for (i = 0; i < gimple_phi_num_args (phi); i++)
1871 if (gimple_phi_arg_edge (phi, i)->src == new_bb)
1873 tree arg = gimple_phi_arg_def (phi, i);
1874 add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
1875 update_stmt (phi);
1878 /* Remove the original fall through edge. This was the
1879 single_succ_edge (new_bb). */
1880 EDGE_SUCC (new_bb, 0)->flags &= ~EDGE_FALLTHRU;
1883 /* When REF is set on the location, set flag indicating the store. */
1885 struct sm_set_flag_if_changed
1887 sm_set_flag_if_changed (tree flag_) : flag (flag_) {}
1888 bool operator () (mem_ref_loc_p loc);
1889 tree flag;
1892 bool
1893 sm_set_flag_if_changed::operator () (mem_ref_loc_p loc)
1895 /* Only set the flag for writes. */
1896 if (is_gimple_assign (loc->stmt)
1897 && gimple_assign_lhs_ptr (loc->stmt) == loc->ref)
1899 gimple_stmt_iterator gsi = gsi_for_stmt (loc->stmt);
1900 gimple stmt = gimple_build_assign (flag, boolean_true_node);
1901 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1903 return false;
1906 /* Helper function for execute_sm. On every location where REF is
1907 set, set an appropriate flag indicating the store. */
1909 static tree
1910 execute_sm_if_changed_flag_set (struct loop *loop, mem_ref_p ref)
1912 tree flag;
1913 char *str = get_lsm_tmp_name (ref->mem.ref, ~0, "_flag");
1914 flag = create_tmp_reg (boolean_type_node, str);
1915 for_all_locs_in_loop (loop, ref, sm_set_flag_if_changed (flag));
1916 return flag;
1919 /* Executes store motion of memory reference REF from LOOP.
1920 Exits from the LOOP are stored in EXITS. The initialization of the
1921 temporary variable is put to the preheader of the loop, and assignments
1922 to the reference from the temporary variable are emitted to exits. */
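/* If the preheader block is inside a transaction, or if store data races are
   not allowed, the stores on the exit edges are emitted via the
   conditional-store scheme of execute_sm_if_changed instead of
   unconditionally.  */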
1924 static void
1925 execute_sm (struct loop *loop, vec<edge> exits, mem_ref_p ref)
1927 tree tmp_var, store_flag;
1928 unsigned i;
1929 gimple load;
1930 struct fmt_data fmt_data;
1931 edge ex;
1932 struct lim_aux_data *lim_data;
1933 bool multi_threaded_model_p = false;
1934 gimple_stmt_iterator gsi;
1936 if (dump_file && (dump_flags & TDF_DETAILS))
1938 fprintf (dump_file, "Executing store motion of ");
1939 print_generic_expr (dump_file, ref->mem.ref, 0);
1940 fprintf (dump_file, " from loop %d\n", loop->num);
1943 tmp_var = create_tmp_reg (TREE_TYPE (ref->mem.ref),
1944 get_lsm_tmp_name (ref->mem.ref, ~0));
1946 fmt_data.loop = loop;
1947 fmt_data.orig_loop = loop;
1948 for_each_index (&ref->mem.ref, force_move_till, &fmt_data);
1950 if (bb_in_transaction (loop_preheader_edge (loop)->src)
1951 || !PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES))
1952 multi_threaded_model_p = true;
1954 if (multi_threaded_model_p)
1955 store_flag = execute_sm_if_changed_flag_set (loop, ref);
1957 rewrite_mem_refs (loop, ref, tmp_var);
1959 /* Emit the load code on a random exit edge or into the latch if
1960 the loop does not exit, so that we are sure it will be processed
1961 by move_computations after all dependencies. */
1962 gsi = gsi_for_stmt (first_mem_ref_loc (loop, ref)->stmt);
1964 /* FIXME/TODO: For the multi-threaded variant, we could avoid this
1965 load altogether, since the store is predicated by a flag. We
1966 could do the load only if it was originally in the loop. */
1967 load = gimple_build_assign (tmp_var, unshare_expr (ref->mem.ref));
1968 lim_data = init_lim_data (load);
1969 lim_data->max_loop = loop;
1970 lim_data->tgt_loop = loop;
1971 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
1973 if (multi_threaded_model_p)
1975 load = gimple_build_assign (store_flag, boolean_false_node);
1976 lim_data = init_lim_data (load);
1977 lim_data->max_loop = loop;
1978 lim_data->tgt_loop = loop;
1979 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
1982 /* Sink the store to every exit from the loop. */
1983 FOR_EACH_VEC_ELT (exits, i, ex)
1984 if (!multi_threaded_model_p)
1986 gimple store;
1987 store = gimple_build_assign (unshare_expr (ref->mem.ref), tmp_var);
1988 gsi_insert_on_edge (ex, store);
1990 else
1991 execute_sm_if_changed (ex, ref->mem.ref, tmp_var, store_flag);
1994 /* Hoists memory references MEM_REFS out of LOOP. EXITS is the list of exit
1995 edges of the LOOP. */
1997 static void
1998 hoist_memory_references (struct loop *loop, bitmap mem_refs,
1999 vec<edge> exits)
2001 mem_ref_p ref;
2002 unsigned i;
2003 bitmap_iterator bi;
2005 EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
2007 ref = memory_accesses.refs_list[i];
2008 execute_sm (loop, exits, ref);
2012 struct ref_always_accessed
2014 ref_always_accessed (struct loop *loop_, tree base_, bool stored_p_)
2015 : loop (loop_), base (base_), stored_p (stored_p_) {}
2016 bool operator () (mem_ref_loc_p loc);
2017 struct loop *loop;
2018 tree base;
2019 bool stored_p;
2022 bool
2023 ref_always_accessed::operator () (mem_ref_loc_p loc)
2025 struct loop *must_exec;
2027 if (!get_lim_data (loc->stmt))
2028 return false;
2030 /* If we require an always executed store, make sure the statement
2031 stores to the reference. */
2032 if (stored_p)
2034 tree lhs;
2035 if (!gimple_get_lhs (loc->stmt))
2036 return false;
2037 lhs = get_base_address (gimple_get_lhs (loc->stmt));
2038 if (!lhs)
2039 return false;
2040 if (INDIRECT_REF_P (lhs)
2041 || TREE_CODE (lhs) == MEM_REF)
2042 lhs = TREE_OPERAND (lhs, 0);
2043 if (lhs != base)
2044 return false;
2047 must_exec = get_lim_data (loc->stmt)->always_executed_in;
2048 if (!must_exec)
2049 return false;
2051 if (must_exec == loop
2052 || flow_loop_nested_p (must_exec, loop))
2053 return true;
2055 return false;
2058 /* Returns true if REF is always accessed in LOOP. If STORED_P is true,
2059 make sure REF is always stored to in LOOP. */
2061 static bool
2062 ref_always_accessed_p (struct loop *loop, mem_ref_p ref, bool stored_p)
2064 tree base = ao_ref_base (&ref->mem);
2065 if (TREE_CODE (base) == MEM_REF)
2066 base = TREE_OPERAND (base, 0);
2068 return for_all_locs_in_loop (loop, ref,
2069 ref_always_accessed (loop, base, stored_p));
2072 /* Returns true if REF1 and REF2 are independent. */
2074 static bool
2075 refs_independent_p (mem_ref_p ref1, mem_ref_p ref2)
2077 if (ref1 == ref2)
2078 return true;
2080 if (dump_file && (dump_flags & TDF_DETAILS))
2081 fprintf (dump_file, "Querying dependency of refs %u and %u: ",
2082 ref1->id, ref2->id);
2084 if (mem_refs_may_alias_p (ref1, ref2, &memory_accesses.ttae_cache))
2086 if (dump_file && (dump_flags & TDF_DETAILS))
2087 fprintf (dump_file, "dependent.\n");
2088 return false;
2090 else
2092 if (dump_file && (dump_flags & TDF_DETAILS))
2093 fprintf (dump_file, "independent.\n");
2094 return true;
2098 /* Mark REF dependent on stores or loads (according to STORED_P) in LOOP
2099 and its super-loops. */
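/* E.g. (illustrative) if loop 5 is nested directly in loop 2, marking REF
   dependent in loop 5 also marks it dependent in loop 2 and in any further
   enclosing loops, since a dependence inside an inner loop is necessarily
   a dependence of every loop that contains it.  */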
2101 static void
2102 record_dep_loop (struct loop *loop, mem_ref_p ref, bool stored_p)
2104 /* We can propagate dependent-in-loop bits up the loop
2105 hierarchy to all outer loops. */
2106 while (loop != current_loops->tree_root
2107 && bitmap_set_bit (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2108 loop = loop_outer (loop);
2111 /* Returns true if REF is independent of all other memory references in
2112 LOOP. */
2114 static bool
2115 ref_indep_loop_p_1 (struct loop *loop, mem_ref_p ref, bool stored_p)
2117 bitmap refs_to_check;
2118 unsigned i;
2119 bitmap_iterator bi;
2120 mem_ref_p aref;
2122 if (stored_p)
2123 refs_to_check = &memory_accesses.refs_in_loop[loop->num];
2124 else
2125 refs_to_check = &memory_accesses.refs_stored_in_loop[loop->num];
2127 if (bitmap_bit_p (refs_to_check, UNANALYZABLE_MEM_ID))
2128 return false;
2130 EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
2132 aref = memory_accesses.refs_list[i];
2133 if (!refs_independent_p (ref, aref))
2134 return false;
2137 return true;
2140 /* Returns true if REF is independent of all other memory references in
2141 LOOP. Wrapper over ref_indep_loop_p_1, caching its results. */
2143 static bool
2144 ref_indep_loop_p_2 (struct loop *loop, mem_ref_p ref, bool stored_p)
2146 stored_p |= bitmap_bit_p (&ref->stored, loop->num);
2148 if (bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2149 return true;
2150 if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2151 return false;
2153 struct loop *inner = loop->inner;
2154 while (inner)
2156 if (!ref_indep_loop_p_2 (inner, ref, stored_p))
2157 return false;
2158 inner = inner->next;
2161 bool indep_p = ref_indep_loop_p_1 (loop, ref, stored_p);
2163 if (dump_file && (dump_flags & TDF_DETAILS))
2164 fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
2165 ref->id, loop->num, indep_p ? "independent" : "dependent");
2167 /* Record the computed result in the cache. */
2168 if (indep_p)
2170 if (bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p))
2171 && stored_p)
2173 /* If it's independent of all refs then it's independent
2174 of stores, too. */
2175 bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, false));
2178 else
2180 record_dep_loop (loop, ref, stored_p);
2181 if (!stored_p)
2183 /* If it's dependent on stores it's dependent on
2184 all refs, too. */
2185 record_dep_loop (loop, ref, true);
2189 return indep_p;
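/* Illustrative note on the caching scheme above (assuming LOOP_DEP_BIT
   encodes a (loop number, stored_p) pair as a single bit index, as set up
   earlier in this file): for, say, loop 3 the "independent of all refs"
   and "independent of stores only" answers occupy two distinct bits of
   ref->indep_loop / ref->dep_loop, so both variants of the query can be
   cached for every loop without extra bitmaps.  */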
2192 /* Returns true if REF is independent of all other memory references in
2193 LOOP. */
2195 static bool
2196 ref_indep_loop_p (struct loop *loop, mem_ref_p ref)
2198 gcc_checking_assert (MEM_ANALYZABLE (ref));
2200 return ref_indep_loop_p_2 (loop, ref, false);
2203 /* Returns true if we can perform store motion of REF from LOOP. */
2205 static bool
2206 can_sm_ref_p (struct loop *loop, mem_ref_p ref)
2208 tree base;
2210 /* Can't hoist unanalyzable refs. */
2211 if (!MEM_ANALYZABLE (ref))
2212 return false;
2214 /* It should be movable. */
2215 if (!is_gimple_reg_type (TREE_TYPE (ref->mem.ref))
2216 || TREE_THIS_VOLATILE (ref->mem.ref)
2217 || !for_each_index (&ref->mem.ref, may_move_till, loop))
2218 return false;
2220 /* If it can throw, fail; we do not properly update EH info. */
2221 if (tree_could_throw_p (ref->mem.ref))
2222 return false;
2224 /* If it can trap, it must be always executed in LOOP.
2225 Readonly memory locations may trap when storing to them, but
2226 tree_could_trap_p is a predicate for rvalues, so check that
2227 explicitly. */
2228 base = get_base_address (ref->mem.ref);
2229 if ((tree_could_trap_p (ref->mem.ref)
2230 || (DECL_P (base) && TREE_READONLY (base)))
2231 && !ref_always_accessed_p (loop, ref, true))
2232 return false;
2234 /* And it must be independent of all other memory references
2235 in LOOP. */
2236 if (!ref_indep_loop_p (loop, ref))
2237 return false;
2239 return true;
2242 /* Marks in REFS_TO_SM the references in LOOP for which store motion
2243 should be performed. SM_EXECUTED is the set of references for which store
2244 motion was already performed in one of the outer loops. */
2246 static void
2247 find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
2249 bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
2250 unsigned i;
2251 bitmap_iterator bi;
2252 mem_ref_p ref;
2254 EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
2256 ref = memory_accesses.refs_list[i];
2257 if (can_sm_ref_p (loop, ref))
2258 bitmap_set_bit (refs_to_sm, i);
2262 /* Checks whether LOOP (with exits stored in EXITS array) is suitable
2263 for a store motion optimization (i.e. whether we can insert statements
2264 on its exits). */
2266 static bool
2267 loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
2268 vec<edge> exits)
2270 unsigned i;
2271 edge ex;
2273 FOR_EACH_VEC_ELT (exits, i, ex)
2274 if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
2275 return false;
2277 return true;
2280 /* Try to perform store motion for all memory references modified inside
2281 LOOP. SM_EXECUTED is the bitmap of the memory references for which
2282 store motion was already executed in one of the outer loops. */
2284 static void
2285 store_motion_loop (struct loop *loop, bitmap sm_executed)
2287 vec<edge> exits = get_loop_exit_edges (loop);
2288 struct loop *subloop;
2289 bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);
2291 if (loop_suitable_for_sm (loop, exits))
2293 find_refs_for_sm (loop, sm_executed, sm_in_loop);
2294 hoist_memory_references (loop, sm_in_loop, exits);
2296 exits.release ();
2298 bitmap_ior_into (sm_executed, sm_in_loop);
2299 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
2300 store_motion_loop (subloop, sm_executed);
2301 bitmap_and_compl_into (sm_executed, sm_in_loop);
2302 BITMAP_FREE (sm_in_loop);
2305 /* Try to perform store motion for all memory references modified inside
2306 loops. */
2308 static void
2309 store_motion (void)
2311 struct loop *loop;
2312 bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);
2314 for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
2315 store_motion_loop (loop, sm_executed);
2317 BITMAP_FREE (sm_executed);
2318 gsi_commit_edge_inserts ();
2321 /* Fills ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e.
2322 for each such basic block BB records the outermost loop for which execution
2323 of the loop's header implies execution of BB. CONTAINS_CALL is the bitmap of
2324 blocks that contain a nonpure call. */
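/* Roughly (illustrative example), for

       loop header                 <- ALWAYS_EXECUTED_IN == loop
       if (c) { A } else { B }     <- the if block dominates the latch
       join; latch                 <- also always executed

   only the header, the if block and the join/latch get the property; A and
   B do not dominate the latch, and the walk below also stops early at the
   first block containing a nonpure call, a possible exit from the loop, or
   an irreducible region.  */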
2326 static void
2327 fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
2329 basic_block bb = NULL, *bbs, last = NULL;
2330 unsigned i;
2331 edge e;
2332 struct loop *inn_loop = loop;
2334 if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
2336 bbs = get_loop_body_in_dom_order (loop);
2338 for (i = 0; i < loop->num_nodes; i++)
2340 edge_iterator ei;
2341 bb = bbs[i];
2343 if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2344 last = bb;
2346 if (bitmap_bit_p (contains_call, bb->index))
2347 break;
2349 FOR_EACH_EDGE (e, ei, bb->succs)
2350 if (!flow_bb_inside_loop_p (loop, e->dest))
2351 break;
2352 if (e)
2353 break;
2355 /* A loop might be infinite (TODO use simple loop analysis
2356 to disprove this if possible). */
2357 if (bb->flags & BB_IRREDUCIBLE_LOOP)
2358 break;
2360 if (!flow_bb_inside_loop_p (inn_loop, bb))
2361 break;
2363 if (bb->loop_father->header == bb)
2365 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2366 break;
2368 /* In a loop that is always entered we may proceed anyway.
2369 But record that we entered it and stop once we leave it. */
2370 inn_loop = bb->loop_father;
2374 while (1)
2376 SET_ALWAYS_EXECUTED_IN (last, loop);
2377 if (last == loop->header)
2378 break;
2379 last = get_immediate_dominator (CDI_DOMINATORS, last);
2382 free (bbs);
2385 for (loop = loop->inner; loop; loop = loop->next)
2386 fill_always_executed_in_1 (loop, contains_call);
2389 /* Fills ALWAYS_EXECUTED_IN information for basic blocks, i.e.
2390 for each such basic block BB records the outermost loop for which execution
2391 of the loop's header implies execution of BB. */
2393 static void
2394 fill_always_executed_in (void)
2396 sbitmap contains_call = sbitmap_alloc (last_basic_block);
2397 basic_block bb;
2398 struct loop *loop;
2400 bitmap_clear (contains_call);
2401 FOR_EACH_BB (bb)
2403 gimple_stmt_iterator gsi;
2404 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2406 if (nonpure_call_p (gsi_stmt (gsi)))
2407 break;
2410 if (!gsi_end_p (gsi))
2411 bitmap_set_bit (contains_call, bb->index);
2414 for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
2415 fill_always_executed_in_1 (loop, contains_call);
2417 sbitmap_free (contains_call);
2421 /* Compute the global information needed by the loop invariant motion pass. */
2423 static void
2424 tree_ssa_lim_initialize (void)
2426 unsigned i;
2428 bitmap_obstack_initialize (&lim_bitmap_obstack);
2429 lim_aux_data_map = pointer_map_create ();
2431 if (flag_tm)
2432 compute_transaction_bits ();
2434 alloc_aux_for_edges (0);
2436 memory_accesses.refs.create (100);
2437 memory_accesses.refs_list.create (100);
2438 /* Allocate a special, unanalyzable mem-ref with ID zero. */
2439 memory_accesses.refs_list.quick_push
2440 (mem_ref_alloc (error_mark_node, 0, UNANALYZABLE_MEM_ID));
2442 memory_accesses.refs_in_loop.create (number_of_loops (cfun));
2443 memory_accesses.refs_in_loop.quick_grow (number_of_loops (cfun));
2444 memory_accesses.refs_stored_in_loop.create (number_of_loops (cfun));
2445 memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2446 memory_accesses.all_refs_stored_in_loop.create (number_of_loops (cfun));
2447 memory_accesses.all_refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2449 for (i = 0; i < number_of_loops (cfun); i++)
2451 bitmap_initialize (&memory_accesses.refs_in_loop[i],
2452 &lim_bitmap_obstack);
2453 bitmap_initialize (&memory_accesses.refs_stored_in_loop[i],
2454 &lim_bitmap_obstack);
2455 bitmap_initialize (&memory_accesses.all_refs_stored_in_loop[i],
2456 &lim_bitmap_obstack);
2459 memory_accesses.ttae_cache = NULL;
2462 /* Cleans up after the invariant motion pass. */
2464 static void
2465 tree_ssa_lim_finalize (void)
2467 basic_block bb;
2468 unsigned i;
2469 mem_ref_p ref;
2471 free_aux_for_edges ();
2473 FOR_EACH_BB (bb)
2474 SET_ALWAYS_EXECUTED_IN (bb, NULL);
2476 bitmap_obstack_release (&lim_bitmap_obstack);
2477 pointer_map_destroy (lim_aux_data_map);
2479 memory_accesses.refs.dispose ();
2481 FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
2482 memref_free (ref);
2483 memory_accesses.refs_list.release ();
2485 memory_accesses.refs_in_loop.release ();
2486 memory_accesses.refs_stored_in_loop.release ();
2487 memory_accesses.all_refs_stored_in_loop.release ();
2489 if (memory_accesses.ttae_cache)
2490 free_affine_expand_cache (&memory_accesses.ttae_cache);
2493 /* Moves invariants from loops. Only "expensive" invariants are moved out --
2494 i.e. those that are likely to be a win regardless of the register pressure. */
2496 unsigned int
2497 tree_ssa_lim (void)
2499 unsigned int todo;
2501 tree_ssa_lim_initialize ();
2503 /* Gathers information about memory accesses in the loops. */
2504 analyze_memory_references ();
2506 /* Fills ALWAYS_EXECUTED_IN information for basic blocks. */
2507 fill_always_executed_in ();
2509 /* For each statement determine the outermost loop in which it is
2510 invariant and the cost of computing the invariant. */
2511 invariantness_dom_walker (CDI_DOMINATORS)
2512 .walk (cfun->cfg->x_entry_block_ptr);
2514 /* Execute store motion. Force the necessary invariants to be moved
2515 out of the loops as well. */
2516 store_motion ();
2518 /* Move the expressions that are expensive enough. */
2519 todo = move_computations ();
2521 tree_ssa_lim_finalize ();
2523 return todo;
2526 /* Loop invariant motion pass. */
2528 static unsigned int
2529 tree_ssa_loop_im (void)
2531 if (number_of_loops (cfun) <= 1)
2532 return 0;
2534 return tree_ssa_lim ();
2537 static bool
2538 gate_tree_ssa_loop_im (void)
2540 return flag_tree_loop_im != 0;
2543 namespace {
2545 const pass_data pass_data_lim =
2547 GIMPLE_PASS, /* type */
2548 "lim", /* name */
2549 OPTGROUP_LOOP, /* optinfo_flags */
2550 true, /* has_gate */
2551 true, /* has_execute */
2552 TV_LIM, /* tv_id */
2553 PROP_cfg, /* properties_required */
2554 0, /* properties_provided */
2555 0, /* properties_destroyed */
2556 0, /* todo_flags_start */
2557 0, /* todo_flags_finish */
2560 class pass_lim : public gimple_opt_pass
2562 public:
2563 pass_lim (gcc::context *ctxt)
2564 : gimple_opt_pass (pass_data_lim, ctxt)
2567 /* opt_pass methods: */
2568 opt_pass * clone () { return new pass_lim (m_ctxt); }
2569 bool gate () { return gate_tree_ssa_loop_im (); }
2570 unsigned int execute () { return tree_ssa_loop_im (); }
2572 }; // class pass_lim
2574 } // anon namespace
2576 gimple_opt_pass *
2577 make_pass_lim (gcc::context *ctxt)
2579 return new pass_lim (ctxt);