/* Loop invariant motion.
   Copyright (C) 2003-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tm_p.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "machmode.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfganal.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "hash-map.h"
#include "hash-table.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "cfgloop.h"
#include "domwalk.h"
#include "params.h"
#include "tree-pass.h"
#include "flags.h"
#include "tree-affine.h"
#include "tree-ssa-propagate.h"
#include "trans-mem.h"
#include "gimple-fold.h"
/* TODO:  Support for predicated code motion.  I.e.

   while (1)
     {
       if (cond)
	 {
	   a = inv;
	   something;
	 }
     }

   where COND and INV are invariants, but evaluating INV may trap or be
   invalid for some other reason if !COND.  This may be transformed to

   if (cond)
     a = inv;
   while (1)
     {
       if (cond)
	 something;
     }  */
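
/* For orientation, a minimal sketch (a made-up example, not taken from any
   testcase) of the unconditional motion this pass already performs:

     for (i = 0; i < n; i++)		t = a + b;
       x[i] = a + b;		==>	for (i = 0; i < n; i++)
					  x[i] = t;

   where A and B do not change inside the loop; the invariant computation
   is moved to the loop preheader.  */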
/* The auxiliary data kept for each statement.  */

struct lim_aux_data
{
  struct loop *max_loop;	/* The outermost loop in which the statement
				   is invariant.  */

  struct loop *tgt_loop;	/* The loop out of which we want to move the
				   invariant.  */

  struct loop *always_executed_in;
				/* The outermost loop for which we are sure
				   the statement is executed if the loop
				   is entered.  */

  unsigned cost;		/* Cost of the computation performed by the
				   statement.  */

  vec<gimple> depends;		/* Vector of statements that must also be
				   hoisted out of the loop when this statement
				   is hoisted; i.e. those that define the
				   operands of the statement and are inside of
				   the MAX_LOOP loop.  */
};

/* Maps statements to their lim_aux_data.  */

static hash_map<gimple, lim_aux_data *> *lim_aux_data_map;
/* Description of a memory reference location.  */

typedef struct mem_ref_loc
{
  tree *ref;			/* The reference itself.  */
  gimple stmt;			/* The statement in which it occurs.  */
} *mem_ref_loc_p;


/* Description of a memory reference.  */

typedef struct im_mem_ref
{
  unsigned id;			/* ID assigned to the memory reference
				   (its index in memory_accesses.refs_list)  */
  hashval_t hash;		/* Its hash value.  */

  /* The memory access itself and associated caching of alias-oracle
     query meta-data.  */
  ao_ref mem;

  bitmap stored;		/* The set of loops in which this memory
				   location is stored to.  */
  vec<mem_ref_loc>		accesses_in_loop;
				/* The locations of the accesses.  Vector
				   indexed by the loop number.  */

  /* The following sets are computed on demand.  We keep both the set and
     its complement, so that we know whether the information was
     already computed or not.  */
  bitmap_head indep_loop;	/* The set of loops in which the memory
				   reference is independent, meaning:
				   If it is stored in the loop, this store
				     is independent of all other loads and
				     stores.
				   If it is only loaded, then it is
				     independent of all stores in the loop.  */
  bitmap_head dep_loop;		/* The complement of INDEP_LOOP.  */
} *mem_ref_p;
/* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first
   to record (in)dependence against stores in the loop and its subloops, the
   second to record (in)dependence against all references in the loop
   and its subloops.  */
#define LOOP_DEP_BIT(loopnum, storedp) (2 * (loopnum) + (storedp ? 1 : 0))
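
/* For example, for the loop with number 3 this gives
   LOOP_DEP_BIT (3, false) == 6 and LOOP_DEP_BIT (3, true) == 7, i.e.
   loop N always owns bits 2*N and 2*N + 1 in the bitmaps.  */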
/* Mem_ref hashtable helpers.  */

struct mem_ref_hasher : typed_noop_remove <im_mem_ref>
{
  typedef im_mem_ref value_type;
  typedef tree_node compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

/* A hash function for struct im_mem_ref object OBJ.  */

inline hashval_t
mem_ref_hasher::hash (const value_type *mem)
{
  return mem->hash;
}

/* An equality function for struct im_mem_ref object MEM1 with
   memory reference OBJ2.  */

inline bool
mem_ref_hasher::equal (const value_type *mem1, const compare_type *obj2)
{
  return operand_equal_p (mem1->mem.ref, (const_tree) obj2, 0);
}
/* Description of memory accesses in loops.  */

static struct
{
  /* The hash table of memory references accessed in loops.  */
  hash_table<mem_ref_hasher> *refs;

  /* The list of memory references.  */
  vec<mem_ref_p> refs_list;

  /* The set of memory references accessed in each loop.  */
  vec<bitmap_head> refs_in_loop;

  /* The set of memory references stored in each loop.  */
  vec<bitmap_head> refs_stored_in_loop;

  /* The set of memory references stored in each loop, including subloops.  */
  vec<bitmap_head> all_refs_stored_in_loop;

  /* Cache for expanding memory addresses.  */
  hash_map<tree, name_expansion *> *ttae_cache;
} memory_accesses;
/* Obstack for the bitmaps in the above data structures.  */
static bitmap_obstack lim_bitmap_obstack;
static obstack mem_ref_obstack;

static bool ref_indep_loop_p (struct loop *, mem_ref_p);

/* Minimum cost of an expensive expression.  */
#define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))

/* The outermost loop for which execution of the header guarantees that the
   block will be executed.  */
#define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
#define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))

/* ID of the shared unanalyzable mem.  */
#define UNANALYZABLE_MEM_ID 0

/* Whether the reference was analyzable.  */
#define MEM_ANALYZABLE(REF) ((REF)->id != UNANALYZABLE_MEM_ID)
static struct lim_aux_data *
init_lim_data (gimple stmt)
{
  lim_aux_data *p = XCNEW (struct lim_aux_data);
  lim_aux_data_map->put (stmt, p);

  return p;
}

static struct lim_aux_data *
get_lim_data (gimple stmt)
{
  lim_aux_data **p = lim_aux_data_map->get (stmt);
  if (!p)
    return NULL;

  return *p;
}

/* Releases the memory occupied by DATA.  */

static void
free_lim_aux_data (struct lim_aux_data *data)
{
  data->depends.release ();
  free (data);
}

static void
clear_lim_data (gimple stmt)
{
  lim_aux_data **p = lim_aux_data_map->get (stmt);
  if (!p)
    return;

  free_lim_aux_data (*p);
  *p = NULL;
}
/* The possibilities of statement movement.  */
enum move_pos
{
  MOVE_IMPOSSIBLE,		/* No movement -- side effect expression.  */
  MOVE_PRESERVE_EXECUTION,	/* Must not cause the non-executed statement
				   to become executed -- memory accesses,
				   ...  */
  MOVE_POSSIBLE			/* Unlimited movement.  */
};
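
/* An illustrative classification, following movement_possibility below
   (the statements are made-up examples):

     x_1 = a_2 + b_3;	MOVE_POSSIBLE: no side effects, cannot trap.
     x_1 = *p_2;	MOVE_PRESERVE_EXECUTION: the load could trap, so it
			must not become executed on paths where it was not.
     *p_1 = x_2;	MOVE_IMPOSSIBLE: the statement has a virtual
			definition (it stores to memory).  */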
/* If it is possible to hoist the statement STMT unconditionally,
   returns MOVE_POSSIBLE.
   If it is possible to hoist the statement STMT, but we must avoid making
   it executed if it would not be executed in the original program (e.g.
   because it may trap), return MOVE_PRESERVE_EXECUTION.
   Otherwise return MOVE_IMPOSSIBLE.  */

enum move_pos
movement_possibility (gimple stmt)
{
  tree lhs;
  enum move_pos ret = MOVE_POSSIBLE;

  if (flag_unswitch_loops
      && gimple_code (stmt) == GIMPLE_COND)
    {
      /* If we perform unswitching, force the operands of the invariant
	 condition to be moved out of the loop.  */
      return MOVE_POSSIBLE;
    }

  if (gimple_code (stmt) == GIMPLE_PHI
      && gimple_phi_num_args (stmt) <= 2
      && !virtual_operand_p (gimple_phi_result (stmt))
      && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
    return MOVE_POSSIBLE;

  if (gimple_get_lhs (stmt) == NULL_TREE)
    return MOVE_IMPOSSIBLE;

  if (gimple_vdef (stmt))
    return MOVE_IMPOSSIBLE;

  if (stmt_ends_bb_p (stmt)
      || gimple_has_volatile_ops (stmt)
      || gimple_has_side_effects (stmt)
      || stmt_could_throw_p (stmt))
    return MOVE_IMPOSSIBLE;

  if (is_gimple_call (stmt))
    {
      /* While pure or const call is guaranteed to have no side effects, we
	 cannot move it arbitrarily.  Consider code like

	 char *s = something ();

	 while (1)
	   {
	     if (s)
	       t = strlen (s);
	     else
	       t = 0;
	   }

	 Here the strlen call cannot be moved out of the loop, even though
	 s is invariant.  In addition to possibly creating a call with
	 invalid arguments, moving out a function call that is not executed
	 may cause performance regressions in case the call is costly and
	 not executed at all.  */
      ret = MOVE_PRESERVE_EXECUTION;
      lhs = gimple_call_lhs (stmt);
    }
  else if (is_gimple_assign (stmt))
    lhs = gimple_assign_lhs (stmt);
  else
    return MOVE_IMPOSSIBLE;

  if (TREE_CODE (lhs) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
    return MOVE_IMPOSSIBLE;

  if (TREE_CODE (lhs) != SSA_NAME
      || gimple_could_trap_p (stmt))
    return MOVE_PRESERVE_EXECUTION;

  /* Non local loads in a transaction cannot be hoisted out.  Well,
     unless the load happens on every path out of the loop, but we
     don't take this into account yet.  */
  if (flag_tm
      && gimple_in_transaction (stmt)
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      if (DECL_P (rhs) && is_global_var (rhs))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "Cannot hoist conditional load of ");
	      print_generic_expr (dump_file, rhs, TDF_SLIM);
	      fprintf (dump_file, " because it is in a transaction.\n");
	    }
	  return MOVE_IMPOSSIBLE;
	}
    }

  return ret;
}
/* Suppose that operand DEF is used inside the LOOP.  Returns the outermost
   loop to which we could move the expression using DEF if it did not have
   other operands, i.e. the outermost loop enclosing LOOP in which the value
   of DEF is invariant.  */

static struct loop *
outermost_invariant_loop (tree def, struct loop *loop)
{
  gimple def_stmt;
  basic_block def_bb;
  struct loop *max_loop;
  struct lim_aux_data *lim_data;

  if (!def)
    return superloop_at_depth (loop, 1);

  if (TREE_CODE (def) != SSA_NAME)
    {
      gcc_assert (is_gimple_min_invariant (def));
      return superloop_at_depth (loop, 1);
    }

  def_stmt = SSA_NAME_DEF_STMT (def);
  def_bb = gimple_bb (def_stmt);
  if (!def_bb)
    return superloop_at_depth (loop, 1);

  max_loop = find_common_loop (loop, def_bb->loop_father);

  lim_data = get_lim_data (def_stmt);
  if (lim_data != NULL && lim_data->max_loop != NULL)
    max_loop = find_common_loop (max_loop,
				 loop_outer (lim_data->max_loop));
  if (max_loop == loop)
    return NULL;
  max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);

  return max_loop;
}
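
/* E.g. (a made-up sketch): for a loop nest L1 containing L2 containing L3,
   with DEF defined in L1 outside of L2, outermost_invariant_loop (def, L3)
   returns L2 -- the outermost loop enclosing L3 in which the value of DEF
   does not change.  */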
/* DATA is a structure containing information associated with a statement
   inside LOOP.  DEF is one of the operands of this statement.

   Find the outermost loop enclosing LOOP in which the value of DEF is
   invariant and record this in the DATA->max_loop field.  If DEF itself is
   defined inside this loop as well (i.e. we need to hoist it out of the
   loop if we want to hoist the statement represented by DATA), record the
   statement that defines DEF in the DATA->depends list.  Additionally, if
   ADD_COST is true, add the cost of the computation of DEF to DATA->cost.

   If DEF is not invariant in LOOP, return false.  Otherwise return true.  */

static bool
add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
		bool add_cost)
{
  gimple def_stmt = SSA_NAME_DEF_STMT (def);
  basic_block def_bb = gimple_bb (def_stmt);
  struct loop *max_loop;
  struct lim_aux_data *def_data;

  if (!def_bb)
    return true;

  max_loop = outermost_invariant_loop (def, loop);
  if (!max_loop)
    return false;

  if (flow_loop_nested_p (data->max_loop, max_loop))
    data->max_loop = max_loop;

  def_data = get_lim_data (def_stmt);
  if (!def_data)
    return true;

  if (add_cost
      /* Only add the cost if the statement defining DEF is inside LOOP,
	 i.e. if it is likely that by moving the invariants dependent
	 on it, we will be able to avoid creating a new register for
	 it (since it will be only used in these dependent invariants).  */
      && def_bb->loop_father == loop)
    data->cost += def_data->cost;

  data->depends.safe_push (def_stmt);

  return true;
}
/* Returns an estimate for a cost of statement STMT.  The values here
   are just ad-hoc constants, similar to costs for inlining.  */

static unsigned
stmt_cost (gimple stmt)
{
  /* Always try to create possibilities for unswitching.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_PHI)
    return LIM_EXPENSIVE;

  /* We should be hoisting calls if possible.  */
  if (is_gimple_call (stmt))
    {
      tree fndecl;

      /* Unless the call is a builtin_constant_p; this always folds to a
	 constant, so moving it is useless.  */
      fndecl = gimple_call_fndecl (stmt);
      if (fndecl
	  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P)
	return 0;

      return LIM_EXPENSIVE;
    }

  /* Hoisting memory references out should almost surely be a win.  */
  if (gimple_references_memory_p (stmt))
    return LIM_EXPENSIVE;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return 1;

  switch (gimple_assign_rhs_code (stmt))
    {
    case MULT_EXPR:
    case WIDEN_MULT_EXPR:
    case WIDEN_MULT_PLUS_EXPR:
    case WIDEN_MULT_MINUS_EXPR:
    case DOT_PROD_EXPR:
    case FMA_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case TRUNC_MOD_EXPR:
    case RDIV_EXPR:
      /* Division and multiplication are usually expensive.  */
      return LIM_EXPENSIVE;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case WIDEN_LSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      /* Shifts and rotates are usually expensive.  */
      return LIM_EXPENSIVE;

    case CONSTRUCTOR:
      /* Make vector construction cost proportional to the number
	 of elements.  */
      return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));

    case SSA_NAME:
    case PAREN_EXPR:
      /* Whether or not something is wrapped inside a PAREN_EXPR
	 should not change move cost.  Nor should an intermediate
	 unpropagated SSA name copy.  */
      return 0;

    default:
      return 1;
    }
}
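
/* Illustrative costs produced by stmt_cost (made-up statements):

     x_1 = y_2 * z_3;	LIM_EXPENSIVE (multiplication)
     x_1 = y_2 + z_3;	1 (the default)
     x_1 = y_2;		0 (plain SSA name copy)  */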
/* Finds the outermost loop between OUTER and LOOP in which the memory
   reference REF is independent.  If REF is not independent in LOOP, NULL
   is returned instead.  */

static struct loop *
outermost_indep_loop (struct loop *outer, struct loop *loop, mem_ref_p ref)
{
  struct loop *aloop;

  if (ref->stored && bitmap_bit_p (ref->stored, loop->num))
    return NULL;

  for (aloop = outer;
       aloop != loop;
       aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
    if ((!ref->stored || !bitmap_bit_p (ref->stored, aloop->num))
	&& ref_indep_loop_p (aloop, ref))
      return aloop;

  if (ref_indep_loop_p (loop, ref))
    return loop;
  else
    return NULL;
}
/* If there is a simple load or store to a memory reference in STMT, returns
   the location of the memory reference, and sets IS_STORE according to
   whether it is a store or load.  Otherwise, returns NULL.  */

static tree *
simple_mem_ref_in_stmt (gimple stmt, bool *is_store)
{
  tree *lhs, *rhs;

  /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns.  */
  if (!gimple_assign_single_p (stmt))
    return NULL;

  lhs = gimple_assign_lhs_ptr (stmt);
  rhs = gimple_assign_rhs1_ptr (stmt);

  if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
    {
      *is_store = false;
      return rhs;
    }
  else if (gimple_vdef (stmt)
	   && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
    {
      *is_store = true;
      return lhs;
    }
  else
    return NULL;
}
/* Returns the memory reference contained in STMT.  */

static mem_ref_p
mem_ref_in_stmt (gimple stmt)
{
  bool store;
  tree *mem = simple_mem_ref_in_stmt (stmt, &store);
  hashval_t hash;
  mem_ref_p ref;

  if (!mem)
    return NULL;
  gcc_assert (!store);

  hash = iterative_hash_expr (*mem, 0);
  ref = memory_accesses.refs->find_with_hash (*mem, hash);

  gcc_assert (ref != NULL);
  return ref;
}
/* From a controlling predicate in DOM determine the arguments from
   the PHI node PHI that are chosen if the predicate evaluates to
   true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if
   they are non-NULL.  Returns true if the arguments can be determined,
   else return false.  */

static bool
extract_true_false_args_from_phi (basic_block dom, gphi *phi,
				  tree *true_arg_p, tree *false_arg_p)
{
  basic_block bb = gimple_bb (phi);
  edge true_edge, false_edge, tem;
  tree arg0 = NULL_TREE, arg1 = NULL_TREE;

  /* We have to verify that one edge into the PHI node is dominated
     by the true edge of the predicate block and the other edge
     dominated by the false edge.  This ensures that the PHI argument
     we are going to take is completely determined by the path we
     take from the predicate block.
     We can only use BB dominance checks below if the destination of
     the true/false edges are dominated by their edge, thus only
     have a single predecessor.  */
  extract_true_false_edges_from_block (dom, &true_edge, &false_edge);
  tem = EDGE_PRED (bb, 0);
  if (tem == true_edge
      || (single_pred_p (true_edge->dest)
	  && (tem->src == true_edge->dest
	      || dominated_by_p (CDI_DOMINATORS,
				 tem->src, true_edge->dest))))
    arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
  else if (tem == false_edge
	   || (single_pred_p (false_edge->dest)
	       && (tem->src == false_edge->dest
		   || dominated_by_p (CDI_DOMINATORS,
				      tem->src, false_edge->dest))))
    arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
  else
    return false;
  tem = EDGE_PRED (bb, 1);
  if (tem == true_edge
      || (single_pred_p (true_edge->dest)
	  && (tem->src == true_edge->dest
	      || dominated_by_p (CDI_DOMINATORS,
				 tem->src, true_edge->dest))))
    arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
  else if (tem == false_edge
	   || (single_pred_p (false_edge->dest)
	       && (tem->src == false_edge->dest
		   || dominated_by_p (CDI_DOMINATORS,
				      tem->src, false_edge->dest))))
    arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
  else
    return false;
  if (!arg0 || !arg1)
    return false;

  if (true_arg_p)
    *true_arg_p = arg0;
  if (false_arg_p)
    *false_arg_p = arg1;

  return true;
}
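
/* A minimal sketch of the diamond handled above (block names made up):

	DOM:  if (cond_1)
	     /		 \
	 BB_T: ...    BB_F: ...
	     \		 /
	BB:  x_1 = PHI <a_2 (BB_T), b_3 (BB_F)>

   Here *TRUE_ARG_P is set to A_2 and *FALSE_ARG_P to B_3, because each
   edge into BB is dominated by exactly one of DOM's outgoing edges.  */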
/* Determine the outermost loop to which it is possible to hoist a statement
   STMT and store it to LIM_DATA (STMT)->max_loop.  To do this we determine
   the outermost loop in which the value computed by STMT is invariant.
   If MUST_PRESERVE_EXEC is true, additionally choose a loop such that
   we preserve whether STMT is executed.  It also fills other related
   information to LIM_DATA (STMT).

   The function returns false if STMT cannot be hoisted outside of the loop
   it is defined in, and true otherwise.  */

static bool
determine_max_movement (gimple stmt, bool must_preserve_exec)
{
  basic_block bb = gimple_bb (stmt);
  struct loop *loop = bb->loop_father;
  struct loop *level;
  struct lim_aux_data *lim_data = get_lim_data (stmt);
  tree val;
  ssa_op_iter iter;

  if (must_preserve_exec)
    level = ALWAYS_EXECUTED_IN (bb);
  else
    level = superloop_at_depth (loop, 1);
  lim_data->max_loop = level;

  if (gphi *phi = dyn_cast <gphi *> (stmt))
    {
      use_operand_p use_p;
      unsigned min_cost = UINT_MAX;
      unsigned total_cost = 0;
      struct lim_aux_data *def_data;

      /* We will end up promoting dependencies to be unconditionally
	 evaluated.  For this reason the PHI cost (and thus the
	 cost we remove from the loop by doing the invariant motion)
	 is that of the cheapest PHI argument dependency chain.  */
      FOR_EACH_PHI_ARG (use_p, phi, iter, SSA_OP_USE)
	{
	  val = USE_FROM_PTR (use_p);

	  if (TREE_CODE (val) != SSA_NAME)
	    {
	      /* Assign const 1 to constants.  */
	      min_cost = MIN (min_cost, 1);
	      total_cost += 1;
	      continue;
	    }
	  if (!add_dependency (val, lim_data, loop, false))
	    return false;

	  gimple def_stmt = SSA_NAME_DEF_STMT (val);
	  if (gimple_bb (def_stmt)
	      && gimple_bb (def_stmt)->loop_father == loop)
	    {
	      def_data = get_lim_data (def_stmt);
	      if (def_data)
		{
		  min_cost = MIN (min_cost, def_data->cost);
		  total_cost += def_data->cost;
		}
	    }
	}

      min_cost = MIN (min_cost, total_cost);
      lim_data->cost += min_cost;

      if (gimple_phi_num_args (phi) > 1)
	{
	  basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
	  gimple cond;
	  if (gsi_end_p (gsi_last_bb (dom)))
	    return false;
	  cond = gsi_stmt (gsi_last_bb (dom));
	  if (gimple_code (cond) != GIMPLE_COND)
	    return false;
	  /* Verify that this is an extended form of a diamond and
	     the PHI arguments are completely controlled by the
	     predicate in DOM.  */
	  if (!extract_true_false_args_from_phi (dom, phi, NULL, NULL))
	    return false;

	  /* Fold in dependencies and cost of the condition.  */
	  FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
	    {
	      if (!add_dependency (val, lim_data, loop, false))
		return false;
	      def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
	      if (def_data)
		total_cost += def_data->cost;
	    }

	  /* We want to avoid unconditionally executing very expensive
	     operations.  As costs for our dependencies cannot be
	     negative, just claim we are not invariant for this case.
	     We also are not sure whether the control-flow inside the
	     loop will vanish.  */
	  if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
	      && !(min_cost != 0
		   && total_cost / min_cost <= 2))
	    return false;

	  /* Assume that the control-flow in the loop will vanish.
	     ???  We should verify this and not artificially increase
	     the cost if that is not the case.  */
	  lim_data->cost += stmt_cost (stmt);
	}

      return true;
    }
  else
    FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
      if (!add_dependency (val, lim_data, loop, true))
	return false;

  if (gimple_vuse (stmt))
    {
      mem_ref_p ref = mem_ref_in_stmt (stmt);

      if (ref)
	{
	  lim_data->max_loop
	    = outermost_indep_loop (lim_data->max_loop, loop, ref);
	  if (!lim_data->max_loop)
	    return false;
	}
      else
	{
	  if ((val = gimple_vuse (stmt)) != NULL_TREE)
	    {
	      if (!add_dependency (val, lim_data, loop, false))
		return false;
	    }
	}
    }

  lim_data->cost += stmt_cost (stmt);

  return true;
}
/* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
   and that one of the operands of this statement is computed by STMT.
   Ensure that STMT (together with all the statements that define its
   operands) is hoisted at least out of the loop LEVEL.  */

static void
set_level (gimple stmt, struct loop *orig_loop, struct loop *level)
{
  struct loop *stmt_loop = gimple_bb (stmt)->loop_father;
  struct lim_aux_data *lim_data;
  gimple dep_stmt;
  unsigned i;

  stmt_loop = find_common_loop (orig_loop, stmt_loop);
  lim_data = get_lim_data (stmt);
  if (lim_data != NULL && lim_data->tgt_loop != NULL)
    stmt_loop = find_common_loop (stmt_loop,
				  loop_outer (lim_data->tgt_loop));
  if (flow_loop_nested_p (stmt_loop, level))
    return;

  gcc_assert (level == lim_data->max_loop
	      || flow_loop_nested_p (lim_data->max_loop, level));

  lim_data->tgt_loop = level;
  FOR_EACH_VEC_ELT (lim_data->depends, i, dep_stmt)
    set_level (dep_stmt, orig_loop, level);
}
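
/* E.g. (a made-up sketch): if x_1 = t_2 + 1 is to be hoisted out of loop L
   and t_2 is defined inside L by t_2 = y_3 * 2, set_level recurses through
   the DEPENDS vector so that the definition of t_2 is hoisted at least as
   far as its user statement.  */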
/* Determines an outermost loop from which we want to hoist the statement
   STMT.  For now we choose the outermost possible loop.  TODO -- use
   profiling information to set it more sanely.  */

static void
set_profitable_level (gimple stmt)
{
  set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
}
/* Returns true if STMT is a call that has side effects.  */

static bool
nonpure_call_p (gimple stmt)
{
  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  return gimple_has_side_effects (stmt);
}
/* Rewrite a/b to a*(1/b).  Return the invariant stmt to process.  */

static gimple
rewrite_reciprocal (gimple_stmt_iterator *bsi)
{
  gassign *stmt, *stmt1, *stmt2;
  tree name, lhs, type;
  tree real_one;
  gimple_stmt_iterator gsi;

  stmt = as_a <gassign *> (gsi_stmt (*bsi));
  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);

  real_one = build_one_cst (type);

  name = make_temp_ssa_name (type, NULL, "reciptmp");
  stmt1 = gimple_build_assign (name, RDIV_EXPR, real_one,
			       gimple_assign_rhs2 (stmt));
  stmt2 = gimple_build_assign (lhs, MULT_EXPR, name,
			       gimple_assign_rhs1 (stmt));

  /* Replace division stmt with reciprocal and multiply stmts.
     The multiply stmt is not invariant, so update iterator
     and avoid rescanning.  */
  gsi = *bsi;
  gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
  gsi_replace (&gsi, stmt2, true);

  /* Continue processing with invariant reciprocal statement.  */
  return stmt1;
}
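
/* A sketch of the rewrite (made-up GIMPLE; B invariant in the loop, A not):

     x_1 = a_2 / b_3;

   becomes

     reciptmp_4 = 1.0 / b_3;	<- invariant, can be hoisted
     x_1 = a_2 * reciptmp_4;

   where "reciptmp" is the temporary name used above.  */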
/* Check if the pattern at *BSI is a bittest of the form
   (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0.  */

static gimple
rewrite_bittest (gimple_stmt_iterator *bsi)
{
  gassign *stmt;
  gimple stmt1;
  gassign *stmt2;
  gimple use_stmt;
  gcond *cond_stmt;
  tree lhs, name, t, a, b;
  use_operand_p use;

  stmt = as_a <gassign *> (gsi_stmt (*bsi));
  lhs = gimple_assign_lhs (stmt);

  /* Verify that the single use of lhs is a comparison against zero.  */
  if (TREE_CODE (lhs) != SSA_NAME
      || !single_imm_use (lhs, &use, &use_stmt))
    return stmt;
  cond_stmt = dyn_cast <gcond *> (use_stmt);
  if (!cond_stmt)
    return stmt;
  if (gimple_cond_lhs (cond_stmt) != lhs
      || (gimple_cond_code (cond_stmt) != NE_EXPR
	  && gimple_cond_code (cond_stmt) != EQ_EXPR)
      || !integer_zerop (gimple_cond_rhs (cond_stmt)))
    return stmt;

  /* Get at the operands of the shift.  The rhs is TMP1 & 1.  */
  stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
  if (gimple_code (stmt1) != GIMPLE_ASSIGN)
    return stmt;

  /* There is a conversion in between possibly inserted by fold.  */
  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
    {
      t = gimple_assign_rhs1 (stmt1);
      if (TREE_CODE (t) != SSA_NAME
	  || !has_single_use (t))
	return stmt;
      stmt1 = SSA_NAME_DEF_STMT (t);
      if (gimple_code (stmt1) != GIMPLE_ASSIGN)
	return stmt;
    }

  /* Verify that B is loop invariant but A is not.  Verify that with
     all the stmt walking we are still in the same loop.  */
  if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
      || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
    return stmt;

  a = gimple_assign_rhs1 (stmt1);
  b = gimple_assign_rhs2 (stmt1);

  if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
      && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
    {
      gimple_stmt_iterator rsi;

      /* 1 << B */
      t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
		       build_int_cst (TREE_TYPE (a), 1), b);
      name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
      stmt1 = gimple_build_assign (name, t);

      /* A & (1 << B) */
      t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
      name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
      stmt2 = gimple_build_assign (name, t);

      /* Replace the SSA_NAME we compare against zero.  Adjust
	 the type of zero accordingly.  */
      SET_USE (use, name);
      gimple_cond_set_rhs (cond_stmt,
			   build_int_cst_type (TREE_TYPE (name),
					       0));

      /* Don't use gsi_replace here, none of the new assignments sets
	 the variable originally set in stmt.  Move bsi to stmt1, and
	 then remove the original stmt, so that we get a chance to
	 retain debug info for it.  */
      rsi = *bsi;
      gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
      gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
      gsi_remove (&rsi, true);

      return stmt1;
    }

  return stmt;
}
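
/* A sketch of the rewrite (made-up GIMPLE; B invariant in the loop, A not):

     t_1 = a_2 >> b_3;
     t_4 = t_1 & 1;
     if (t_4 != 0) ...

   becomes

     shifttmp_5 = 1 << b_3;	<- invariant, can be hoisted
     shifttmp_6 = a_2 & shifttmp_5;
     if (shifttmp_6 != 0) ...

   replacing a shift executed on every iteration by an AND with a
   loop-invariant mask ("shifttmp" is the temporary name used above).  */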
/* For each statement determines the outermost loop in which it is
   invariant, the statements on whose motion it depends and the cost of
   the computation.  This information is stored to the LIM_DATA structure
   associated with each statement.  */
class invariantness_dom_walker : public dom_walker
{
public:
  invariantness_dom_walker (cdi_direction direction)
    : dom_walker (direction) {}

  virtual void before_dom_children (basic_block);
};
/* Determine the outermost loops in which statements in basic block BB are
   invariant, and record them to the LIM_DATA associated with the
   statements.  Callback for dom_walker.  */

void
invariantness_dom_walker::before_dom_children (basic_block bb)
{
  enum move_pos pos;
  gimple_stmt_iterator bsi;
  gimple stmt;
  bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
  struct loop *outermost = ALWAYS_EXECUTED_IN (bb);
  struct lim_aux_data *lim_data;

  if (!loop_outer (bb->loop_father))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
	     bb->index, bb->loop_father->num, loop_depth (bb->loop_father));

  /* Look at PHI nodes, but only if there are at most two.
     ???  We could relax this further by post-processing the inserted
     code and transforming adjacent cond-exprs with the same predicate
     to control flow again.  */
  bsi = gsi_start_phis (bb);
  if (!gsi_end_p (bsi)
      && ((gsi_next (&bsi), gsi_end_p (bsi))
	  || (gsi_next (&bsi), gsi_end_p (bsi))))
    for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
      {
	stmt = gsi_stmt (bsi);

	pos = movement_possibility (stmt);
	if (pos == MOVE_IMPOSSIBLE)
	  continue;

	lim_data = init_lim_data (stmt);
	lim_data->always_executed_in = outermost;

	if (!determine_max_movement (stmt, false))
	  {
	    lim_data->max_loop = NULL;
	    continue;
	  }

	if (dump_file && (dump_flags & TDF_DETAILS))
	  {
	    print_gimple_stmt (dump_file, stmt, 2, 0);
	    fprintf (dump_file, "  invariant up to level %d, cost %d.\n\n",
		     loop_depth (lim_data->max_loop),
		     lim_data->cost);
	  }

	if (lim_data->cost >= LIM_EXPENSIVE)
	  set_profitable_level (stmt);
      }

  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
    {
      stmt = gsi_stmt (bsi);

      pos = movement_possibility (stmt);
      if (pos == MOVE_IMPOSSIBLE)
	{
	  if (nonpure_call_p (stmt))
	    {
	      maybe_never = true;
	      outermost = NULL;
	    }
	  /* Make sure to note always_executed_in for stores to make
	     store-motion work.  */
	  else if (stmt_makes_single_store (stmt))
	    {
	      struct lim_aux_data *lim_data = init_lim_data (stmt);
	      lim_data->always_executed_in = outermost;
	    }
	  continue;
	}

      if (is_gimple_assign (stmt)
	  && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
	      == GIMPLE_BINARY_RHS))
	{
	  tree op0 = gimple_assign_rhs1 (stmt);
	  tree op1 = gimple_assign_rhs2 (stmt);
	  struct loop *ol1 = outermost_invariant_loop (op1,
					loop_containing_stmt (stmt));

	  /* If the divisor is invariant, convert a/b to a*(1/b), allowing
	     the reciprocal to be hoisted out of the loop, saving an
	     expensive divide.  */
	  if (pos == MOVE_POSSIBLE
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR
	      && flag_unsafe_math_optimizations
	      && !flag_trapping_math
	      && ol1 != NULL
	      && outermost_invariant_loop (op0, ol1) == NULL)
	    stmt = rewrite_reciprocal (&bsi);

	  /* If the shift count is invariant, convert (A >> B) & 1 to
	     A & (1 << B) allowing the bit mask to be hoisted out of the
	     loop, saving an expensive shift.  */
	  if (pos == MOVE_POSSIBLE
	      && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
	      && integer_onep (op1)
	      && TREE_CODE (op0) == SSA_NAME
	      && has_single_use (op0))
	    stmt = rewrite_bittest (&bsi);
	}

      lim_data = init_lim_data (stmt);
      lim_data->always_executed_in = outermost;

      if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
	continue;

      if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
	{
	  lim_data->max_loop = NULL;
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  print_gimple_stmt (dump_file, stmt, 2, 0);
	  fprintf (dump_file, "  invariant up to level %d, cost %d.\n\n",
		   loop_depth (lim_data->max_loop),
		   lim_data->cost);
	}

      if (lim_data->cost >= LIM_EXPENSIVE)
	set_profitable_level (stmt);
    }
}
class move_computations_dom_walker : public dom_walker
{
public:
  move_computations_dom_walker (cdi_direction direction)
    : dom_walker (direction), todo_ (0) {}

  virtual void before_dom_children (basic_block);

  unsigned int todo_;
};
/* Hoist the statements in basic block BB out of the loops prescribed by
   data stored in LIM_DATA structures associated with each statement.
   Callback for walk_dominator_tree.  */

void
move_computations_dom_walker::before_dom_children (basic_block bb)
{
  struct loop *level;
  unsigned cost = 0;
  struct lim_aux_data *lim_data;

  if (!loop_outer (bb->loop_father))
    return;

  for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
    {
      gassign *new_stmt;
      gphi *stmt = bsi.phi ();

      lim_data = get_lim_data (stmt);
      if (lim_data == NULL)
	{
	  gsi_next (&bsi);
	  continue;
	}

      cost = lim_data->cost;
      level = lim_data->tgt_loop;
      clear_lim_data (stmt);

      if (!level)
	{
	  gsi_next (&bsi);
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Moving PHI node\n");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	  fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
		   cost, level->num);
	}

      if (gimple_phi_num_args (stmt) == 1)
	{
	  tree arg = PHI_ARG_DEF (stmt, 0);
	  new_stmt = gimple_build_assign (gimple_phi_result (stmt),
					  TREE_CODE (arg), arg);
	}
      else
	{
	  basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
	  gimple cond = gsi_stmt (gsi_last_bb (dom));
	  tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
	  /* Get the PHI arguments corresponding to the true and false
	     edges of COND.  */
	  extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
	  gcc_assert (arg0 && arg1);
	  t = build2 (gimple_cond_code (cond), boolean_type_node,
		      gimple_cond_lhs (cond), gimple_cond_rhs (cond));
	  new_stmt = gimple_build_assign (gimple_phi_result (stmt),
					  COND_EXPR, t, arg0, arg1);
	  todo_ |= TODO_cleanup_cfg;
	}
      if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (new_stmt)))
	  && (!ALWAYS_EXECUTED_IN (bb)
	      || (ALWAYS_EXECUTED_IN (bb) != level
		  && !flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
	SSA_NAME_RANGE_INFO (gimple_assign_lhs (new_stmt)) = NULL;
      gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
      remove_phi_node (&bsi, false);
    }

  for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
    {
      edge e;

      gimple stmt = gsi_stmt (bsi);

      lim_data = get_lim_data (stmt);
      if (lim_data == NULL)
	{
	  gsi_next (&bsi);
	  continue;
	}

      cost = lim_data->cost;
      level = lim_data->tgt_loop;
      clear_lim_data (stmt);

      if (!level)
	{
	  gsi_next (&bsi);
	  continue;
	}

      /* We do not really want to move conditionals out of the loop; we just
	 placed it here to force its operands to be moved if necessary.  */
      if (gimple_code (stmt) == GIMPLE_COND)
	continue;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Moving statement\n");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	  fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
		   cost, level->num);
	}

      e = loop_preheader_edge (level);
      gcc_assert (!gimple_vdef (stmt));
      if (gimple_vuse (stmt))
	{
	  /* The new VUSE is the one from the virtual PHI in the loop
	     header or the one already present.  */
	  gphi_iterator gsi2;
	  for (gsi2 = gsi_start_phis (e->dest);
	       !gsi_end_p (gsi2); gsi_next (&gsi2))
	    {
	      gphi *phi = gsi2.phi ();
	      if (virtual_operand_p (gimple_phi_result (phi)))
		{
		  gimple_set_vuse (stmt, PHI_ARG_DEF_FROM_EDGE (phi, e));
		  break;
		}
	    }
	}
      gsi_remove (&bsi, false);
      if (gimple_has_lhs (stmt)
	  && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME
	  && INTEGRAL_TYPE_P (TREE_TYPE (gimple_get_lhs (stmt)))
	  && (!ALWAYS_EXECUTED_IN (bb)
	      || !(ALWAYS_EXECUTED_IN (bb) == level
		   || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
	SSA_NAME_RANGE_INFO (gimple_get_lhs (stmt)) = NULL;
      /* In case this is a stmt that is not unconditionally executed
	 when the target loop header is executed and the stmt may
	 invoke undefined integer or pointer overflow, rewrite it to
	 unsigned arithmetic.  */
      if (is_gimple_assign (stmt)
	  && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))
	  && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (gimple_assign_lhs (stmt)))
	  && arith_code_with_undefined_signed_overflow
	       (gimple_assign_rhs_code (stmt))
	  && (!ALWAYS_EXECUTED_IN (bb)
	      || !(ALWAYS_EXECUTED_IN (bb) == level
		   || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
	gsi_insert_seq_on_edge (e, rewrite_to_defined_overflow (stmt));
      else
	gsi_insert_on_edge (e, stmt);
    }
}
/* Hoist the statements out of the loops prescribed by data stored in
   LIM_DATA structures associated with each statement.  */

static unsigned int
move_computations (void)
{
  move_computations_dom_walker walker (CDI_DOMINATORS);
  walker.walk (cfun->cfg->x_entry_block_ptr);

  gsi_commit_edge_inserts ();
  if (need_ssa_update_p (cfun))
    rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);

  return walker.todo_;
}
/* Checks whether the statement defining variable *INDEX can be hoisted
   out of the loop passed in DATA.  Callback for for_each_index.  */

static bool
may_move_till (tree ref, tree *index, void *data)
{
  struct loop *loop = (struct loop *) data, *max_loop;

  /* If REF is an array reference, check also that the step and the lower
     bound are invariant in LOOP.  */
  if (TREE_CODE (ref) == ARRAY_REF)
    {
      tree step = TREE_OPERAND (ref, 3);
      tree lbound = TREE_OPERAND (ref, 2);

      max_loop = outermost_invariant_loop (step, loop);
      if (!max_loop)
	return false;

      max_loop = outermost_invariant_loop (lbound, loop);
      if (!max_loop)
	return false;
    }

  max_loop = outermost_invariant_loop (*index, loop);
  if (!max_loop)
    return false;

  return true;
}
/* If OP is an SSA name, force the statement that defines it to be
   moved out of the LOOP.  ORIG_LOOP is the loop in which EXPR is used.  */

static void
force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
{
  gimple stmt;

  if (!op
      || is_gimple_min_invariant (op))
    return;

  gcc_assert (TREE_CODE (op) == SSA_NAME);

  stmt = SSA_NAME_DEF_STMT (op);
  if (gimple_nop_p (stmt))
    return;

  set_level (stmt, orig_loop, loop);
}
/* Forces statements defining invariants in REF (and *INDEX) to be moved out
   of the LOOP.  The reference REF is used in the loop ORIG_LOOP.  Callback
   for for_each_index.  */

struct fmt_data
{
  struct loop *loop;
  struct loop *orig_loop;
};

static bool
force_move_till (tree ref, tree *index, void *data)
{
  struct fmt_data *fmt_data = (struct fmt_data *) data;

  if (TREE_CODE (ref) == ARRAY_REF)
    {
      tree step = TREE_OPERAND (ref, 3);
      tree lbound = TREE_OPERAND (ref, 2);

      force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
      force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
    }

  force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);

  return true;
}
/* A function to free the mem_ref object OBJ.  */

static void
memref_free (struct im_mem_ref *mem)
{
  mem->accesses_in_loop.release ();
}
/* Allocates and returns a memory reference description for MEM whose hash
   value is HASH and id is ID.  */

static mem_ref_p
mem_ref_alloc (tree mem, unsigned hash, unsigned id)
{
  mem_ref_p ref = XOBNEW (&mem_ref_obstack, struct im_mem_ref);
  ao_ref_init (&ref->mem, mem);
  ref->id = id;
  ref->hash = hash;
  ref->stored = NULL;
  bitmap_initialize (&ref->indep_loop, &lim_bitmap_obstack);
  bitmap_initialize (&ref->dep_loop, &lim_bitmap_obstack);
  ref->accesses_in_loop.create (1);

  return ref;
}
/* Records memory reference location *LOC in LOOP to the memory reference
   description REF.  The reference occurs in statement STMT.  */

static void
record_mem_ref_loc (mem_ref_p ref, gimple stmt, tree *loc)
{
  mem_ref_loc aref;
  aref.stmt = stmt;
  aref.ref = loc;
  ref->accesses_in_loop.safe_push (aref);
}
/* Set the LOOP bit in REF's stored bitmap, allocating the bitmap first if
   necessary.  Return whether a bit was changed.  */

static bool
set_ref_stored_in_loop (mem_ref_p ref, struct loop *loop)
{
  if (!ref->stored)
    ref->stored = BITMAP_ALLOC (&lim_bitmap_obstack);
  return bitmap_set_bit (ref->stored, loop->num);
}
/* Marks reference REF as stored in LOOP.  */

static void
mark_ref_stored (mem_ref_p ref, struct loop *loop)
{
  while (loop != current_loops->tree_root
	 && set_ref_stored_in_loop (ref, loop))
    loop = loop_outer (loop);
}
/* Gathers memory references in statement STMT in LOOP, storing the
   information about them in the memory_accesses structure.  Marks
   the vops accessed through unrecognized statements there as
   well.  */

static void
gather_mem_refs_stmt (struct loop *loop, gimple stmt)
{
  tree *mem = NULL;
  hashval_t hash;
  im_mem_ref **slot;
  mem_ref_p ref;
  bool is_stored;
  unsigned id;

  if (!gimple_vuse (stmt))
    return;

  mem = simple_mem_ref_in_stmt (stmt, &is_stored);
  if (!mem)
    {
      /* We use the shared mem_ref for all unanalyzable refs.  */
      id = UNANALYZABLE_MEM_ID;
      ref = memory_accesses.refs_list[id];
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	}
      is_stored = gimple_vdef (stmt);
    }
  else
    {
      hash = iterative_hash_expr (*mem, 0);
      slot = memory_accesses.refs->find_slot_with_hash (*mem, hash, INSERT);
      if (*slot)
	{
	  ref = (mem_ref_p) *slot;
	  id = ref->id;
	}
      else
	{
	  id = memory_accesses.refs_list.length ();
	  ref = mem_ref_alloc (*mem, hash, id);
	  memory_accesses.refs_list.safe_push (ref);
	  *slot = ref;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Memory reference %u: ", id);
	      print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM);
	      fprintf (dump_file, "\n");
	    }
	}

      record_mem_ref_loc (ref, stmt, mem);
    }
  bitmap_set_bit (&memory_accesses.refs_in_loop[loop->num], ref->id);
  if (is_stored)
    {
      bitmap_set_bit (&memory_accesses.refs_stored_in_loop[loop->num],
		      ref->id);
      mark_ref_stored (ref, loop);
    }
  return;
}
static unsigned *bb_loop_postorder;

/* qsort comparison function to sort blocks by their loop father's
   postorder index.  */

static int
sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_)
{
  basic_block bb1 = *(basic_block *)const_cast<void *>(bb1_);
  basic_block bb2 = *(basic_block *)const_cast<void *>(bb2_);
  struct loop *loop1 = bb1->loop_father;
  struct loop *loop2 = bb2->loop_father;
  if (loop1->num == loop2->num)
    return 0;
  return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
}

/* qsort comparison function to sort ref locs by their loop father's
   postorder index.  */

static int
sort_locs_in_loop_postorder_cmp (const void *loc1_, const void *loc2_)
{
  mem_ref_loc *loc1 = (mem_ref_loc *)const_cast<void *>(loc1_);
  mem_ref_loc *loc2 = (mem_ref_loc *)const_cast<void *>(loc2_);
  struct loop *loop1 = gimple_bb (loc1->stmt)->loop_father;
  struct loop *loop2 = gimple_bb (loc2->stmt)->loop_father;
  if (loop1->num == loop2->num)
    return 0;
  return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
}
/* Gathers memory references in loops.  */

static void
analyze_memory_references (void)
{
  gimple_stmt_iterator bsi;
  basic_block bb, *bbs;
  struct loop *loop, *outer;
  unsigned i, n;

  /* Collect all basic-blocks in loops and sort them by their loop's
     postorder index.  */
  i = 0;
  bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
  FOR_EACH_BB_FN (bb, cfun)
    if (bb->loop_father != current_loops->tree_root)
      bbs[i++] = bb;
  n = i;
  qsort (bbs, n, sizeof (basic_block), sort_bbs_in_loop_postorder_cmp);

  /* Visit blocks in loop postorder and assign mem-ref IDs in that order.
     That results in better locality for all the bitmaps.  */
  for (i = 0; i < n; ++i)
    {
      basic_block bb = bbs[i];
      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	gather_mem_refs_stmt (bb->loop_father, gsi_stmt (bsi));
    }

  /* Sort the location list of gathered memory references by their
     loop postorder number.  */
  im_mem_ref *ref;
  FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
    ref->accesses_in_loop.qsort (sort_locs_in_loop_postorder_cmp);

  free (bbs);
//  free (bb_loop_postorder);

  /* Propagate the information about accessed memory references up
     the loop hierarchy.  */
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      /* Finalize the overall touched references (including subloops).  */
      bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
		       &memory_accesses.refs_stored_in_loop[loop->num]);

      /* Propagate the information about accessed memory references up
	 the loop hierarchy.  */
      outer = loop_outer (loop);
      if (outer == current_loops->tree_root)
	continue;

      bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[outer->num],
		       &memory_accesses.all_refs_stored_in_loop[loop->num]);
    }
}
/* Returns true if MEM1 and MEM2 may alias.  TTAE_CACHE is used as a cache in
   tree_to_aff_combination_expand.  */

static bool
mem_refs_may_alias_p (mem_ref_p mem1, mem_ref_p mem2,
		      hash_map<tree, name_expansion *> **ttae_cache)
{
  /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
     object and their offsets differ in such a way that the locations cannot
     overlap, then they cannot alias.  */
  widest_int size1, size2;
  aff_tree off1, off2;

  /* Perform basic offset and type-based disambiguation.  */
  if (!refs_may_alias_p_1 (&mem1->mem, &mem2->mem, true))
    return false;

  /* The expansion of addresses may be a bit expensive, thus we only do
     the check at -O2 and higher optimization levels.  */
  if (optimize < 2)
    return true;

  get_inner_reference_aff (mem1->mem.ref, &off1, &size1);
  get_inner_reference_aff (mem2->mem.ref, &off2, &size2);
  aff_combination_expand (&off1, ttae_cache);
  aff_combination_expand (&off2, ttae_cache);
  aff_combination_scale (&off1, -1);
  aff_combination_add (&off2, &off1);

  if (aff_comb_cannot_overlap_p (&off2, size1, size2))
    return false;

  return true;
}
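
/* E.g. (a made-up sketch): MEM[p_1 + 0] of size 4 and MEM[p_1 + 4] of
   size 4 share the base P_1 but cover the disjoint intervals [0, 4)
   and [4, 8), so aff_comb_cannot_overlap_p holds and the references
   cannot alias.  */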
/* Compare function for bsearch searching for reference locations
   in a loop.  */

static int
find_ref_loc_in_loop_cmp (const void *loop_, const void *loc_)
{
  struct loop *loop = (struct loop *)const_cast<void *>(loop_);
  mem_ref_loc *loc = (mem_ref_loc *)const_cast<void *>(loc_);
  struct loop *loc_loop = gimple_bb (loc->stmt)->loop_father;
  if (loop->num == loc_loop->num
      || flow_loop_nested_p (loop, loc_loop))
    return 0;
  return (bb_loop_postorder[loop->num] < bb_loop_postorder[loc_loop->num]
	  ? -1 : 1);
}
/* Iterates over all locations of REF in LOOP and its subloops calling
   fn.operator() with the location as argument.  When that operator
   returns true the iteration is stopped and true is returned.
   Otherwise false is returned.  */

template <typename FN>
static bool
for_all_locs_in_loop (struct loop *loop, mem_ref_p ref, FN fn)
{
  unsigned i;
  mem_ref_loc_p loc;

  /* Search for the cluster of locs in the accesses_in_loop vector,
     which is sorted by postorder index of the loop father.  */
  loc = ref->accesses_in_loop.bsearch (loop, find_ref_loc_in_loop_cmp);
  if (!loc)
    return false;

  /* We have found one location inside loop or its sub-loops.  Iterate
     both forward and backward to cover the whole cluster.  */
  i = loc - ref->accesses_in_loop.address ();
  while (i > 0)
    {
      --i;
      mem_ref_loc_p l = &ref->accesses_in_loop[i];
      if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
	break;
      if (fn (l))
	return true;
    }
  for (i = loc - ref->accesses_in_loop.address ();
       i < ref->accesses_in_loop.length (); ++i)
    {
      mem_ref_loc_p l = &ref->accesses_in_loop[i];
      if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
	break;
      if (fn (l))
	return true;
    }

  return false;
}
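
/* E.g. (a made-up sketch): with accesses_in_loop sorted by loop postorder
   as [outer, inner, inner, outer], a bsearch for the inner loop may land
   on either of the two middle entries; the backward and forward scans
   above then visit the whole contiguous cluster of locations inside the
   queried loop and its subloops.  */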
/* Rewrites location LOC by TMP_VAR.  */

struct rewrite_mem_ref_loc
{
  rewrite_mem_ref_loc (tree tmp_var_) : tmp_var (tmp_var_) {}
  bool operator () (mem_ref_loc_p loc);
  tree tmp_var;
};

bool
rewrite_mem_ref_loc::operator () (mem_ref_loc_p loc)
{
  *loc->ref = tmp_var;
  update_stmt (loc->stmt);
  return false;
}

/* Rewrites all references to REF in LOOP by variable TMP_VAR.  */

static void
rewrite_mem_refs (struct loop *loop, mem_ref_p ref, tree tmp_var)
{
  for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var));
}
/* Stores the first reference location in LOCP.  */

struct first_mem_ref_loc_1
{
  first_mem_ref_loc_1 (mem_ref_loc_p *locp_) : locp (locp_) {}
  bool operator () (mem_ref_loc_p loc);
  mem_ref_loc_p *locp;
};

bool
first_mem_ref_loc_1::operator () (mem_ref_loc_p loc)
{
  *locp = loc;
  return true;
}

/* Returns the first reference location to REF in LOOP.  */

static mem_ref_loc_p
first_mem_ref_loc (struct loop *loop, mem_ref_p ref)
{
  mem_ref_loc_p locp = NULL;
  for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp));
  return locp;
}
struct prev_flag_edges {
  /* Edge to insert new flag comparison code.  */
  edge append_cond_position;

  /* Edge for fall through from previous flag comparison.  */
  edge last_cond_fallthru;
};
/* Helper function for execute_sm.  Emit code to store TMP_VAR into
   MEM along edge EX.

   The store is only done if MEM has changed.  We do this so no
   changes to MEM occur on code paths that did not originally store
   into it.

   The common case for execute_sm will transform:

     for (...) {
       if (foo)
	 stuff;
       else
	 MEM = TMP_VAR;
     }

   into:

     lsm = MEM;
     for (...) {
       if (foo)
	 stuff;
       else
	 lsm = TMP_VAR;
     }
     MEM = lsm;

   This function will generate:

     lsm = MEM;

     lsm_flag = false;

     for (...) {
       if (foo)
	 stuff;
       else {
	 lsm = TMP_VAR;
	 lsm_flag = true;
       }
     }

     if (lsm_flag)	<--
       MEM = lsm;	<--
*/

static void
execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag)
{
  basic_block new_bb, then_bb, old_dest;
  bool loop_has_only_one_exit;
  edge then_old_edge, orig_ex = ex;
  gimple_stmt_iterator gsi;
  gimple stmt;
  struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
  bool irr = ex->flags & EDGE_IRREDUCIBLE_LOOP;

  /* ?? Insert store after previous store if applicable.  See note
     below.  */
  if (prev_edges)
    ex = prev_edges->append_cond_position;

  loop_has_only_one_exit = single_pred_p (ex->dest);

  if (loop_has_only_one_exit)
    ex = split_block_after_labels (ex->dest);

  old_dest = ex->dest;
  new_bb = split_edge (ex);
  then_bb = create_empty_bb (new_bb);
  if (irr)
    then_bb->flags = BB_IRREDUCIBLE_LOOP;
  add_bb_to_loop (then_bb, new_bb->loop_father);

  gsi = gsi_start_bb (new_bb);
  stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
			    NULL_TREE, NULL_TREE);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  gsi = gsi_start_bb (then_bb);
  /* Insert actual store.  */
  stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  make_edge (new_bb, then_bb,
	     EDGE_TRUE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
  make_edge (new_bb, old_dest,
	     EDGE_FALSE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
  then_old_edge = make_edge (then_bb, old_dest,
			     EDGE_FALLTHRU | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));

  set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);

  if (prev_edges)
    {
      basic_block prevbb = prev_edges->last_cond_fallthru->src;
      redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
      set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
      set_immediate_dominator (CDI_DOMINATORS, old_dest,
			       recompute_dominator (CDI_DOMINATORS, old_dest));
    }

  /* ?? Because stores may alias, they must happen in the exact
     sequence they originally happened.  Save the position right after
     the (_lsm) store we just created so we can continue appending after
     it and maintain the original order.  */
  {
    struct prev_flag_edges *p;

    if (orig_ex->aux)
      orig_ex->aux = NULL;
    alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
    p = (struct prev_flag_edges *) orig_ex->aux;
    p->append_cond_position = then_old_edge;
    p->last_cond_fallthru = find_edge (new_bb, old_dest);
    orig_ex->aux = (void *) p;
  }

  if (!loop_has_only_one_exit)
    for (gphi_iterator gpi = gsi_start_phis (old_dest);
	 !gsi_end_p (gpi); gsi_next (&gpi))
      {
	gphi *phi = gpi.phi ();
	unsigned i;

	for (i = 0; i < gimple_phi_num_args (phi); i++)
	  if (gimple_phi_arg_edge (phi, i)->src == new_bb)
	    {
	      tree arg = gimple_phi_arg_def (phi, i);
	      add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
	      update_stmt (phi);
	    }
      }
  /* Remove the original fall through edge.  This was the
     single_succ_edge (new_bb).  */
  EDGE_SUCC (new_bb, 0)->flags &= ~EDGE_FALLTHRU;
}
/* When REF is set on the location, set the flag indicating the store.  */

struct sm_set_flag_if_changed
{
  sm_set_flag_if_changed (tree flag_) : flag (flag_) {}
  bool operator () (mem_ref_loc_p loc);
  tree flag;
};

bool
sm_set_flag_if_changed::operator () (mem_ref_loc_p loc)
{
  /* Only set the flag for writes.  */
  if (is_gimple_assign (loc->stmt)
      && gimple_assign_lhs_ptr (loc->stmt) == loc->ref)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (loc->stmt);
      gimple stmt = gimple_build_assign (flag, boolean_true_node);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }
  return false;
}
/* Helper function for execute_sm.  On every location where REF is
   set, set an appropriate flag indicating the store.  */

static tree
execute_sm_if_changed_flag_set (struct loop *loop, mem_ref_p ref)
{
  tree flag;
  char *str = get_lsm_tmp_name (ref->mem.ref, ~0, "_flag");
  flag = create_tmp_reg (boolean_type_node, str);
  for_all_locs_in_loop (loop, ref, sm_set_flag_if_changed (flag));
  return flag;
}
1959 /* Executes store motion of memory reference REF from LOOP.
1960 Exits from the LOOP are stored in EXITS. The initialization of the
1961 temporary variable is put to the preheader of the loop, and assignments
1962 to the reference from the temporary variable are emitted to exits. */
1964 static void
1965 execute_sm (struct loop *loop, vec<edge> exits, mem_ref_p ref)
1967 tree tmp_var, store_flag = NULL_TREE;
1968 unsigned i;
1969 gassign *load;
1970 struct fmt_data fmt_data;
1971 edge ex;
1972 struct lim_aux_data *lim_data;
1973 bool multi_threaded_model_p = false;
1974 gimple_stmt_iterator gsi;
1976 if (dump_file && (dump_flags & TDF_DETAILS))
1978 fprintf (dump_file, "Executing store motion of ");
1979 print_generic_expr (dump_file, ref->mem.ref, 0);
1980 fprintf (dump_file, " from loop %d\n", loop->num);
1983 tmp_var = create_tmp_reg (TREE_TYPE (ref->mem.ref),
1984 get_lsm_tmp_name (ref->mem.ref, ~0));
1986 fmt_data.loop = loop;
1987 fmt_data.orig_loop = loop;
1988 for_each_index (&ref->mem.ref, force_move_till, &fmt_data);
1990 if (bb_in_transaction (loop_preheader_edge (loop)->src)
1991 || !PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES))
1992 multi_threaded_model_p = true;
1994 if (multi_threaded_model_p)
1995 store_flag = execute_sm_if_changed_flag_set (loop, ref);
1997 rewrite_mem_refs (loop, ref, tmp_var);
1999 /* Emit the load at the position of the first reference in the loop;
2000 tagging it with lim_data below makes sure it will be hoisted out of
2001 the loop by move_computations after all its dependencies. */
2002 gsi = gsi_for_stmt (first_mem_ref_loc (loop, ref)->stmt);
2004 /* FIXME/TODO: For the multi-threaded variant, we could avoid this
2005 load altogether, since the store is predicated by a flag. We
2006 could do the load only if it was originally in the loop. */
2007 load = gimple_build_assign (tmp_var, unshare_expr (ref->mem.ref));
2008 lim_data = init_lim_data (load);
2009 lim_data->max_loop = loop;
2010 lim_data->tgt_loop = loop;
2011 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
2013 if (multi_threaded_model_p)
2015 load = gimple_build_assign (store_flag, boolean_false_node);
2016 lim_data = init_lim_data (load);
2017 lim_data->max_loop = loop;
2018 lim_data->tgt_loop = loop;
2019 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
2022 /* Sink the store to every exit from the loop. */
2023 FOR_EACH_VEC_ELT (exits, i, ex)
2024 if (!multi_threaded_model_p)
2026 gassign *store;
2027 store = gimple_build_assign (unshare_expr (ref->mem.ref), tmp_var);
2028 gsi_insert_on_edge (ex, store);
2030 else
2031 execute_sm_if_changed (ex, ref->mem.ref, tmp_var, store_flag);
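/* Putting the pieces together, a sketch of the overall transformation
   (illustration only; g_lsm and g_lsm_flag stand for the created
   temporaries):

     while (cond)
       if (c)
         g = x;

   becomes, when multi_threaded_model_p is false,

     g_lsm = g;
     while (cond)
       if (c)
         g_lsm = x;
     g = g_lsm;          <- on every exit edge

   and, when multi_threaded_model_p is set above,

     g_lsm = g;
     g_lsm_flag = false;
     while (cond)
       if (c)
         {
           g_lsm = x;
           g_lsm_flag = true;
         }
     if (g_lsm_flag)     <- execute_sm_if_changed, on every exit edge
       g = g_lsm;

   so that no store to G is introduced on paths where the original
   loop never stored. */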
2034 /* Hoists memory references MEM_REFS out of LOOP. EXITS is the list of exit
2035 edges of the LOOP. */
2037 static void
2038 hoist_memory_references (struct loop *loop, bitmap mem_refs,
2039 vec<edge> exits)
2041 mem_ref_p ref;
2042 unsigned i;
2043 bitmap_iterator bi;
2045 EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
2047 ref = memory_accesses.refs_list[i];
2048 execute_sm (loop, exits, ref);
2052 struct ref_always_accessed
2054 ref_always_accessed (struct loop *loop_, bool stored_p_)
2055 : loop (loop_), stored_p (stored_p_) {}
2056 bool operator () (mem_ref_loc_p loc);
2057 struct loop *loop;
2058 bool stored_p;
2061 bool
2062 ref_always_accessed::operator () (mem_ref_loc_p loc)
2064 struct loop *must_exec;
2066 if (!get_lim_data (loc->stmt))
2067 return false;
2069 /* If we require an always executed store, make sure the statement
2070 stores to the reference. */
2071 if (stored_p)
2073 tree lhs = gimple_get_lhs (loc->stmt);
2074 if (!lhs
2075 || lhs != *loc->ref)
2076 return false;
2079 must_exec = get_lim_data (loc->stmt)->always_executed_in;
2080 if (!must_exec)
2081 return false;
2083 if (must_exec == loop
2084 || flow_loop_nested_p (must_exec, loop))
2085 return true;
2087 return false;
2090 /* Returns true if REF is always accessed in LOOP. If STORED_P is true,
2091 make sure REF is always stored to in LOOP. */
2093 static bool
2094 ref_always_accessed_p (struct loop *loop, mem_ref_p ref, bool stored_p)
2096 return for_all_locs_in_loop (loop, ref,
2097 ref_always_accessed (loop, stored_p));
2100 /* Returns true if REF1 and REF2 are independent. */
2102 static bool
2103 refs_independent_p (mem_ref_p ref1, mem_ref_p ref2)
2105 if (ref1 == ref2)
2106 return true;
2108 if (dump_file && (dump_flags & TDF_DETAILS))
2109 fprintf (dump_file, "Querying dependency of refs %u and %u: ",
2110 ref1->id, ref2->id);
2112 if (mem_refs_may_alias_p (ref1, ref2, &memory_accesses.ttae_cache))
2114 if (dump_file && (dump_flags & TDF_DETAILS))
2115 fprintf (dump_file, "dependent.\n");
2116 return false;
2118 else
2120 if (dump_file && (dump_flags & TDF_DETAILS))
2121 fprintf (dump_file, "independent.\n");
2122 return true;
2126 /* Mark REF dependent on stores or loads (according to STORED_P) in LOOP
2127 and its super-loops. */
2129 static void
2130 record_dep_loop (struct loop *loop, mem_ref_p ref, bool stored_p)
2132 /* We can propagate dependent-in-loop bits up the loop
2133 hierarchy to all outer loops. */
2134 while (loop != current_loops->tree_root
2135 && bitmap_set_bit (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2136 loop = loop_outer (loop);
2139 /* Returns true if REF is independent of all other memory references in
2140 LOOP (if STORED_P is false, only conflicts with stores are checked). */
2142 static bool
2143 ref_indep_loop_p_1 (struct loop *loop, mem_ref_p ref, bool stored_p)
2145 bitmap refs_to_check;
2146 unsigned i;
2147 bitmap_iterator bi;
2148 mem_ref_p aref;
2150 if (stored_p)
2151 refs_to_check = &memory_accesses.refs_in_loop[loop->num];
2152 else
2153 refs_to_check = &memory_accesses.refs_stored_in_loop[loop->num];
2155 if (bitmap_bit_p (refs_to_check, UNANALYZABLE_MEM_ID))
2156 return false;
2158 EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
2160 aref = memory_accesses.refs_list[i];
2161 if (!refs_independent_p (ref, aref))
2162 return false;
2165 return true;
2168 /* Returns true if REF is independent of all other memory references in
2169 LOOP. Wrapper over ref_indep_loop_p_1, caching its results. */
2171 static bool
2172 ref_indep_loop_p_2 (struct loop *loop, mem_ref_p ref, bool stored_p)
2174 stored_p |= (ref->stored && bitmap_bit_p (ref->stored, loop->num));
2176 if (bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2177 return true;
2178 if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2179 return false;
2181 struct loop *inner = loop->inner;
2182 while (inner)
2184 if (!ref_indep_loop_p_2 (inner, ref, stored_p))
2185 return false;
2186 inner = inner->next;
2189 bool indep_p = ref_indep_loop_p_1 (loop, ref, stored_p);
2191 if (dump_file && (dump_flags & TDF_DETAILS))
2192 fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
2193 ref->id, loop->num, indep_p ? "independent" : "dependent");
2195 /* Record the computed result in the cache. */
2196 if (indep_p)
2198 if (bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p))
2199 && stored_p)
2201 /* If it's independent of all refs then it's independent
2202 of stores, too. */
2203 bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, false));
2206 else
2208 record_dep_loop (loop, ref, stored_p);
2209 if (!stored_p)
2211 /* If it's dependent on stores it's dependent on
2212 all refs, too. */
2213 record_dep_loop (loop, ref, true);
2217 return indep_p;
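/* Summary of the caching logic above (for illustration): a STORED_P
   query checks REF against every reference in the loop, a !STORED_P
   query only against the stores. Hence "independent of everything"
   also implies "independent of the stores" (the extra indep_loop bit
   set above), while "dependent on the stores" already implies
   "dependent on some reference" (the extra record_dep_loop call). */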
2220 /* Returns true if REF is independent of all other memory references in
2221 LOOP. */
2223 static bool
2224 ref_indep_loop_p (struct loop *loop, mem_ref_p ref)
2226 gcc_checking_assert (MEM_ANALYZABLE (ref));
2228 return ref_indep_loop_p_2 (loop, ref, false);
2231 /* Returns true if we can perform store motion of REF from LOOP. */
2233 static bool
2234 can_sm_ref_p (struct loop *loop, mem_ref_p ref)
2236 tree base;
2238 /* Can't hoist unanalyzable refs. */
2239 if (!MEM_ANALYZABLE (ref))
2240 return false;
2242 /* It should be movable. */
2243 if (!is_gimple_reg_type (TREE_TYPE (ref->mem.ref))
2244 || TREE_THIS_VOLATILE (ref->mem.ref)
2245 || !for_each_index (&ref->mem.ref, may_move_till, loop))
2246 return false;
2248 /* If it can throw, fail; we do not properly update EH info. */
2249 if (tree_could_throw_p (ref->mem.ref))
2250 return false;
2252 /* If it can trap, it must be always executed in LOOP.
2253 Readonly memory locations may trap when storing to them, but
2254 tree_could_trap_p is a predicate for rvalues, so check that
2255 explicitly. */
2256 base = get_base_address (ref->mem.ref);
2257 if ((tree_could_trap_p (ref->mem.ref)
2258 || (DECL_P (base) && TREE_READONLY (base)))
2259 && !ref_always_accessed_p (loop, ref, true))
2260 return false;
2262 /* And it must be independent of all other memory references
2263 in LOOP. */
2264 if (!ref_indep_loop_p (loop, ref))
2265 return false;
2267 return true;
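/* A sketch of a rejected candidate (illustration only):

     while (cond)
       if (p != NULL)
         *p = v;

   Here *p may trap while the store is not always executed in the
   loop, so emitting "*p = ..." on the exits could trap where the
   original program did not; the ref_always_accessed_p check above
   rejects it. */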
2270 /* Marks in REFS_TO_SM the references in LOOP for which store motion
2271 should be performed. SM_EXECUTED is the set of references for which
2272 store motion was already performed in one of the outer loops. */
2274 static void
2275 find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
2277 bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
2278 unsigned i;
2279 bitmap_iterator bi;
2280 mem_ref_p ref;
2282 EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
2284 ref = memory_accesses.refs_list[i];
2285 if (can_sm_ref_p (loop, ref))
2286 bitmap_set_bit (refs_to_sm, i);
2290 /* Checks whether LOOP (with exits stored in the EXITS array) is suitable
2291 for a store motion optimization (i.e. whether we can insert statements
2292 on its exits). */
2294 static bool
2295 loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
2296 vec<edge> exits)
2298 unsigned i;
2299 edge ex;
2301 FOR_EACH_VEC_ELT (exits, i, ex)
2302 if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
2303 return false;
2305 return true;
2308 /* Try to perform store motion for all memory references modified inside
2309 LOOP. SM_EXECUTED is the bitmap of the memory references for which
2310 store motion was already executed in one of the outer loops. */
2312 static void
2313 store_motion_loop (struct loop *loop, bitmap sm_executed)
2315 vec<edge> exits = get_loop_exit_edges (loop);
2316 struct loop *subloop;
2317 bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);
2319 if (loop_suitable_for_sm (loop, exits))
2321 find_refs_for_sm (loop, sm_executed, sm_in_loop);
2322 hoist_memory_references (loop, sm_in_loop, exits);
2324 exits.release ();
2326 bitmap_ior_into (sm_executed, sm_in_loop);
2327 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
2328 store_motion_loop (subloop, sm_executed);
2329 bitmap_and_compl_into (sm_executed, sm_in_loop);
2330 BITMAP_FREE (sm_in_loop);
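/* A worked scenario (illustrative): if store motion of some REF
   succeeded in this loop, its bit stays set in SM_EXECUTED while the
   subloops are processed, so find_refs_for_sm does not reconsider it
   there; bitmap_and_compl_into then clears the bit again before the
   sibling loops of this loop are visited. */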
2333 /* Try to perform store motion for all memory references modified inside
2334 loops. */
2336 static void
2337 store_motion (void)
2339 struct loop *loop;
2340 bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);
2342 for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
2343 store_motion_loop (loop, sm_executed);
2345 BITMAP_FREE (sm_executed);
2346 gsi_commit_edge_inserts ();
2349 /* Fills ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e.
2350 for each such basic block bb records the outermost loop for which
2351 execution of its header implies execution of bb. CONTAINS_CALL is the
2352 bitmap of blocks that contain a nonpure call. */
2354 static void
2355 fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
2357 basic_block bb = NULL, *bbs, last = NULL;
2358 unsigned i;
2359 edge e;
2360 struct loop *inn_loop = loop;
2362 if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
2364 bbs = get_loop_body_in_dom_order (loop);
2366 for (i = 0; i < loop->num_nodes; i++)
2368 edge_iterator ei;
2369 bb = bbs[i];
2371 if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2372 last = bb;
2374 if (bitmap_bit_p (contains_call, bb->index))
2375 break;
2377 FOR_EACH_EDGE (e, ei, bb->succs)
2378 if (!flow_bb_inside_loop_p (loop, e->dest))
2379 break;
2380 if (e)
2381 break;
2383 /* A loop might be infinite (TODO use simple loop analysis
2384 to disprove this if possible). */
2385 if (bb->flags & BB_IRREDUCIBLE_LOOP)
2386 break;
2388 if (!flow_bb_inside_loop_p (inn_loop, bb))
2389 break;
2391 if (bb->loop_father->header == bb)
2393 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2394 break;
2396 /* In a loop that is always entered we may proceed anyway.
2397 But record that we entered it and stop once we leave it. */
2398 inn_loop = bb->loop_father;
2402 while (1)
2404 SET_ALWAYS_EXECUTED_IN (last, loop);
2405 if (last == loop->header)
2406 break;
2407 last = get_immediate_dominator (CDI_DOMINATORS, last);
2410 free (bbs);
2413 for (loop = loop->inner; loop; loop = loop->next)
2414 fill_always_executed_in_1 (loop, contains_call);
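/* Illustration (not from the original sources):

     for (i = 0; i < n; i++)
       {
         a[i] = 0;      <- header block: always executed in the loop
         if (p[i])
           b[i] = 1;    <- guarded block: not always executed
       }

   Roughly, only blocks dominating the latch that are reached before
   any nonpure call or exit edge are recorded as always executed. */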
2417 /* Fills ALWAYS_EXECUTED_IN information for basic blocks, i.e.
2418 for each such basic block bb records the outermost loop for which
2419 execution of its header implies execution of bb. */
2421 static void
2422 fill_always_executed_in (void)
2424 sbitmap contains_call = sbitmap_alloc (last_basic_block_for_fn (cfun));
2425 basic_block bb;
2426 struct loop *loop;
2428 bitmap_clear (contains_call);
2429 FOR_EACH_BB_FN (bb, cfun)
2431 gimple_stmt_iterator gsi;
2432 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2434 if (nonpure_call_p (gsi_stmt (gsi)))
2435 break;
2438 if (!gsi_end_p (gsi))
2439 bitmap_set_bit (contains_call, bb->index);
2442 for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
2443 fill_always_executed_in_1 (loop, contains_call);
2445 sbitmap_free (contains_call);
2449 /* Compute the global information needed by the loop invariant motion pass. */
2451 static void
2452 tree_ssa_lim_initialize (void)
2454 struct loop *loop;
2455 unsigned i;
2457 bitmap_obstack_initialize (&lim_bitmap_obstack);
2458 gcc_obstack_init (&mem_ref_obstack);
2459 lim_aux_data_map = new hash_map<gimple, lim_aux_data *>;
2461 if (flag_tm)
2462 compute_transaction_bits ();
2464 alloc_aux_for_edges (0);
2466 memory_accesses.refs = new hash_table<mem_ref_hasher> (100);
2467 memory_accesses.refs_list.create (100);
2468 /* Allocate a special, unanalyzable mem-ref with ID zero. */
2469 memory_accesses.refs_list.quick_push
2470 (mem_ref_alloc (error_mark_node, 0, UNANALYZABLE_MEM_ID));
2472 memory_accesses.refs_in_loop.create (number_of_loops (cfun));
2473 memory_accesses.refs_in_loop.quick_grow (number_of_loops (cfun));
2474 memory_accesses.refs_stored_in_loop.create (number_of_loops (cfun));
2475 memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2476 memory_accesses.all_refs_stored_in_loop.create (number_of_loops (cfun));
2477 memory_accesses.all_refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2479 for (i = 0; i < number_of_loops (cfun); i++)
2481 bitmap_initialize (&memory_accesses.refs_in_loop[i],
2482 &lim_bitmap_obstack);
2483 bitmap_initialize (&memory_accesses.refs_stored_in_loop[i],
2484 &lim_bitmap_obstack);
2485 bitmap_initialize (&memory_accesses.all_refs_stored_in_loop[i],
2486 &lim_bitmap_obstack);
2489 memory_accesses.ttae_cache = NULL;
2491 /* Initialize bb_loop_postorder with a mapping from loop->num to
2492 its postorder index. */
2493 i = 0;
2494 bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun));
2495 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
2496 bb_loop_postorder[loop->num] = i++;
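/* E.g. (assumed loop numbers, for illustration): for a nest in which
   loop 1 contains loop 2, which contains loop 3, LI_FROM_INNERMOST
   visits 3, 2, 1, giving

     bb_loop_postorder[3] == 0, bb_loop_postorder[2] == 1,
     bb_loop_postorder[1] == 2

   i.e. inner loops receive smaller postorder indices. */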
2499 /* Cleans up after the invariant motion pass. */
2501 static void
2502 tree_ssa_lim_finalize (void)
2504 basic_block bb;
2505 unsigned i;
2506 mem_ref_p ref;
2508 free_aux_for_edges ();
2510 FOR_EACH_BB_FN (bb, cfun)
2511 SET_ALWAYS_EXECUTED_IN (bb, NULL);
2513 bitmap_obstack_release (&lim_bitmap_obstack);
2514 delete lim_aux_data_map;
2516 delete memory_accesses.refs;
2517 memory_accesses.refs = NULL;
2519 FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
2520 memref_free (ref);
2521 memory_accesses.refs_list.release ();
2522 obstack_free (&mem_ref_obstack, NULL);
2524 memory_accesses.refs_in_loop.release ();
2525 memory_accesses.refs_stored_in_loop.release ();
2526 memory_accesses.all_refs_stored_in_loop.release ();
2528 if (memory_accesses.ttae_cache)
2529 free_affine_expand_cache (&memory_accesses.ttae_cache);
2531 free (bb_loop_postorder);
2534 /* Moves invariants from loops. Only "expensive" invariants are moved out --
2535 i.e. those that are likely to be a win regardless of the register pressure. */
2537 unsigned int
2538 tree_ssa_lim (void)
2540 unsigned int todo;
2542 tree_ssa_lim_initialize ();
2544 /* Gathers information about memory accesses in the loops. */
2545 analyze_memory_references ();
2547 /* Fills ALWAYS_EXECUTED_IN information for basic blocks. */
2548 fill_always_executed_in ();
2550 /* For each statement determine the outermost loop in which it is
2551 invariant and the cost of computing the invariant. */
2552 invariantness_dom_walker (CDI_DOMINATORS)
2553 .walk (cfun->cfg->x_entry_block_ptr);
2555 /* Execute store motion. Force the necessary invariants to be moved
2556 out of the loops as well. */
2557 store_motion ();
2559 /* Move the expressions that are expensive enough. */
2560 todo = move_computations ();
2562 tree_ssa_lim_finalize ();
2564 return todo;
2567 /* Loop invariant motion pass. */
2569 namespace {
2571 const pass_data pass_data_lim =
2573 GIMPLE_PASS, /* type */
2574 "lim", /* name */
2575 OPTGROUP_LOOP, /* optinfo_flags */
2576 TV_LIM, /* tv_id */
2577 PROP_cfg, /* properties_required */
2578 0, /* properties_provided */
2579 0, /* properties_destroyed */
2580 0, /* todo_flags_start */
2581 0, /* todo_flags_finish */
2584 class pass_lim : public gimple_opt_pass
2586 public:
2587 pass_lim (gcc::context *ctxt)
2588 : gimple_opt_pass (pass_data_lim, ctxt)
2591 /* opt_pass methods: */
2592 opt_pass * clone () { return new pass_lim (m_ctxt); }
2593 virtual bool gate (function *) { return flag_tree_loop_im != 0; }
2594 virtual unsigned int execute (function *);
2596 }; // class pass_lim
2598 unsigned int
2599 pass_lim::execute (function *fun)
2601 if (number_of_loops (fun) <= 1)
2602 return 0;
2604 return tree_ssa_lim ();
2607 } // anon namespace
2609 gimple_opt_pass *
2610 make_pass_lim (gcc::context *ctxt)
2612 return new pass_lim (ctxt);