[official-gcc.git] / gcc / tree-ssa-loop-im.c
1 /* Loop invariant motion.
2 Copyright (C) 2003-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "tree.h"
25 #include "gimple.h"
26 #include "cfghooks.h"
27 #include "tree-pass.h"
28 #include "ssa.h"
29 #include "gimple-pretty-print.h"
30 #include "fold-const.h"
31 #include "cfganal.h"
32 #include "tree-eh.h"
33 #include "gimplify.h"
34 #include "gimple-iterator.h"
35 #include "tree-cfg.h"
36 #include "tree-ssa-loop-manip.h"
37 #include "tree-ssa-loop.h"
38 #include "tree-into-ssa.h"
39 #include "cfgloop.h"
40 #include "domwalk.h"
41 #include "params.h"
42 #include "tree-affine.h"
43 #include "tree-ssa-propagate.h"
44 #include "trans-mem.h"
45 #include "gimple-fold.h"
46 #include "tree-scalar-evolution.h"
47 #include "tree-ssa-loop-niter.h"
49 /* TODO: Support for predicated code motion. I.e.
51 while (1)
53 if (cond)
55 a = inv;
56 something;
60 Where COND and INV are invariants, but evaluating INV may trap or be
61 invalid for some other reason if !COND. This may be transformed to
63 if (cond)
64 a = inv;
65 while (1)
67 if (cond)
68 something;
69 } */
71 /* The auxiliary data kept for each statement. */
73 struct lim_aux_data
75 struct loop *max_loop; /* The outermost loop in which the statement
76 is invariant. */
78 struct loop *tgt_loop; /* The loop out of which we want to move the
79 invariant. */
81 struct loop *always_executed_in;
82 /* The outermost loop for which we are sure
83 the statement is executed if the loop
84 is entered. */
86 unsigned cost; /* Cost of the computation performed by the
87 statement. */
89 vec<gimple *> depends; /* Vector of statements that must be also
90 hoisted out of the loop when this statement
91 is hoisted; i.e. those that define the
92 operands of the statement and are inside of
93 the MAX_LOOP loop. */
96 /* Maps statements to their lim_aux_data. */
98 static hash_map<gimple *, lim_aux_data *> *lim_aux_data_map;
100 /* Description of a memory reference location. */
102 struct mem_ref_loc
104 tree *ref; /* The reference itself. */
105 gimple *stmt; /* The statement in which it occurs. */
109 /* Description of a memory reference. */
111 struct im_mem_ref
113 unsigned id; /* ID assigned to the memory reference
114 (its index in memory_accesses.refs_list) */
115 hashval_t hash; /* Its hash value. */
117 /* The memory access itself and associated caching of alias-oracle
118 query meta-data. */
119 ao_ref mem;
121 bitmap stored; /* The set of loops in which this memory location
122 is stored to. */
123 vec<mem_ref_loc> accesses_in_loop;
124 /* The locations of the accesses. Vector
125 indexed by the loop number. */
127 /* The following sets are computed on demand. We keep both set and
128 its complement, so that we know whether the information was
129 already computed or not. */
130 bitmap_head indep_loop; /* The set of loops in which the memory
131 reference is independent, meaning:
132 If it is stored in the loop, this store
133 is independent of all other loads and
134 stores.
135 If it is only loaded, then it is independent
136 of all stores in the loop. */
137 bitmap_head dep_loop; /* The complement of INDEP_LOOP. */
140 /* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first
141 to record (in)dependence against stores in the loop and its subloops, the
142 second to record (in)dependence against all references in the loop
143 and its subloops. */
144 #define LOOP_DEP_BIT(loopnum, storedp) (2 * (loopnum) + (storedp ? 1 : 0))
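/* Editorial illustration (not part of the original file): for loop
   number 3 the two bits used in the ref->indep_loop / ref->dep_loop
   bitmaps are LOOP_DEP_BIT (3, false) == 6 and LOOP_DEP_BIT (3, true) == 7,
   so a cached dependence query looks roughly like the hypothetical
   helper below.  */
#if 0
static bool
example_cached_indep_p (im_mem_ref *ref, struct loop *loop, bool stored_p)
{
  /* Test whether the (in)dependence bit selected by STORED_P has already
     been recorded for LOOP.  */
  return bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p));
}
#endif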
146 /* Mem_ref hashtable helpers. */
148 struct mem_ref_hasher : nofree_ptr_hash <im_mem_ref>
150 typedef tree_node *compare_type;
151 static inline hashval_t hash (const im_mem_ref *);
152 static inline bool equal (const im_mem_ref *, const tree_node *);
155 /* A hash function for struct im_mem_ref object OBJ. */
157 inline hashval_t
158 mem_ref_hasher::hash (const im_mem_ref *mem)
160 return mem->hash;
163 /* An equality function for struct im_mem_ref object MEM1 with
164 memory reference OBJ2. */
166 inline bool
167 mem_ref_hasher::equal (const im_mem_ref *mem1, const tree_node *obj2)
169 return operand_equal_p (mem1->mem.ref, (const_tree) obj2, 0);
173 /* Description of memory accesses in loops. */
175 static struct
177 /* The hash table of memory references accessed in loops. */
178 hash_table<mem_ref_hasher> *refs;
180 /* The list of memory references. */
181 vec<im_mem_ref *> refs_list;
183 /* The set of memory references accessed in each loop. */
184 vec<bitmap_head> refs_in_loop;
186 /* The set of memory references stored in each loop. */
187 vec<bitmap_head> refs_stored_in_loop;
189 /* The set of memory references stored in each loop, including subloops. */
190 vec<bitmap_head> all_refs_stored_in_loop;
192 /* Cache for expanding memory addresses. */
193 hash_map<tree, name_expansion *> *ttae_cache;
194 } memory_accesses;
196 /* Obstack for the bitmaps in the above data structures. */
197 static bitmap_obstack lim_bitmap_obstack;
198 static obstack mem_ref_obstack;
200 static bool ref_indep_loop_p (struct loop *, im_mem_ref *, struct loop *);
201 static bool ref_always_accessed_p (struct loop *, im_mem_ref *, bool);
203 /* Minimum cost of an expensive expression. */
204 #define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))
206 /* The outermost loop for which execution of the header guarantees that the
207 block will be executed. */
208 #define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
209 #define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))
211 /* ID of the shared unanalyzable mem. */
212 #define UNANALYZABLE_MEM_ID 0
214 /* Whether the reference was analyzable. */
215 #define MEM_ANALYZABLE(REF) ((REF)->id != UNANALYZABLE_MEM_ID)
217 static struct lim_aux_data *
218 init_lim_data (gimple *stmt)
220 lim_aux_data *p = XCNEW (struct lim_aux_data);
221 lim_aux_data_map->put (stmt, p);
223 return p;
226 static struct lim_aux_data *
227 get_lim_data (gimple *stmt)
229 lim_aux_data **p = lim_aux_data_map->get (stmt);
230 if (!p)
231 return NULL;
233 return *p;
236 /* Releases the memory occupied by DATA. */
238 static void
239 free_lim_aux_data (struct lim_aux_data *data)
241 data->depends.release ();
242 free (data);
245 static void
246 clear_lim_data (gimple *stmt)
248 lim_aux_data **p = lim_aux_data_map->get (stmt);
249 if (!p)
250 return;
252 free_lim_aux_data (*p);
253 *p = NULL;
257 /* The possibilities of statement movement. */
258 enum move_pos
260 MOVE_IMPOSSIBLE, /* No movement -- side effect expression. */
261 MOVE_PRESERVE_EXECUTION, /* Must not cause the non-executed statement
262 to become executed -- memory accesses, ... */
263 MOVE_POSSIBLE /* Unlimited movement. */
267 /* If it is possible to hoist the statement STMT unconditionally,
268 returns MOVE_POSSIBLE.
269 If it is possible to hoist the statement STMT, but we must avoid making
270 it executed if it would not be executed in the original program (e.g.
271 because it may trap), return MOVE_PRESERVE_EXECUTION.
272 Otherwise return MOVE_IMPOSSIBLE. */
274 enum move_pos
275 movement_possibility (gimple *stmt)
277 tree lhs;
278 enum move_pos ret = MOVE_POSSIBLE;
280 if (flag_unswitch_loops
281 && gimple_code (stmt) == GIMPLE_COND)
283 /* If we perform unswitching, force the operands of the invariant
284 condition to be moved out of the loop. */
285 return MOVE_POSSIBLE;
288 if (gimple_code (stmt) == GIMPLE_PHI
289 && gimple_phi_num_args (stmt) <= 2
290 && !virtual_operand_p (gimple_phi_result (stmt))
291 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
292 return MOVE_POSSIBLE;
294 if (gimple_get_lhs (stmt) == NULL_TREE)
295 return MOVE_IMPOSSIBLE;
297 if (gimple_vdef (stmt))
298 return MOVE_IMPOSSIBLE;
300 if (stmt_ends_bb_p (stmt)
301 || gimple_has_volatile_ops (stmt)
302 || gimple_has_side_effects (stmt)
303 || stmt_could_throw_p (stmt))
304 return MOVE_IMPOSSIBLE;
306 if (is_gimple_call (stmt))
308 /* While a pure or const call is guaranteed to have no side effects, we
309 cannot move it arbitrarily. Consider code like
311 char *s = something ();
313 while (1)
315 if (s)
316 t = strlen (s);
317 else
318 t = 0;
321 Here the strlen call cannot be moved out of the loop, even though
322 s is invariant. In addition to possibly creating a call with
323 invalid arguments, moving out a function call that is not executed
324 may cause performance regressions in case the call is costly and
325 not executed at all. */
326 ret = MOVE_PRESERVE_EXECUTION;
327 lhs = gimple_call_lhs (stmt);
329 else if (is_gimple_assign (stmt))
330 lhs = gimple_assign_lhs (stmt);
331 else
332 return MOVE_IMPOSSIBLE;
334 if (TREE_CODE (lhs) == SSA_NAME
335 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
336 return MOVE_IMPOSSIBLE;
338 if (TREE_CODE (lhs) != SSA_NAME
339 || gimple_could_trap_p (stmt))
340 return MOVE_PRESERVE_EXECUTION;
342 /* Non local loads in a transaction cannot be hoisted out. Well,
343 unless the load happens on every path out of the loop, but we
344 don't take this into account yet. */
345 if (flag_tm
346 && gimple_in_transaction (stmt)
347 && gimple_assign_single_p (stmt))
349 tree rhs = gimple_assign_rhs1 (stmt);
350 if (DECL_P (rhs) && is_global_var (rhs))
352 if (dump_file)
354 fprintf (dump_file, "Cannot hoist conditional load of ");
355 print_generic_expr (dump_file, rhs, TDF_SLIM);
356 fprintf (dump_file, " because it is in a transaction.\n");
358 return MOVE_IMPOSSIBLE;
362 return ret;
365 /* Suppose that operand DEF is used inside the LOOP. Returns the outermost
366 loop to which we could move the expression using DEF if it did not have
367 other operands, i.e. the outermost loop enclosing LOOP in which the value
368 of DEF is invariant. */
370 static struct loop *
371 outermost_invariant_loop (tree def, struct loop *loop)
373 gimple *def_stmt;
374 basic_block def_bb;
375 struct loop *max_loop;
376 struct lim_aux_data *lim_data;
378 if (!def)
379 return superloop_at_depth (loop, 1);
381 if (TREE_CODE (def) != SSA_NAME)
383 gcc_assert (is_gimple_min_invariant (def));
384 return superloop_at_depth (loop, 1);
387 def_stmt = SSA_NAME_DEF_STMT (def);
388 def_bb = gimple_bb (def_stmt);
389 if (!def_bb)
390 return superloop_at_depth (loop, 1);
392 max_loop = find_common_loop (loop, def_bb->loop_father);
394 lim_data = get_lim_data (def_stmt);
395 if (lim_data != NULL && lim_data->max_loop != NULL)
396 max_loop = find_common_loop (max_loop,
397 loop_outer (lim_data->max_loop));
398 if (max_loop == loop)
399 return NULL;
400 max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);
402 return max_loop;
405 /* DATA is a structure containing information associated with a statement
406 inside LOOP. DEF is one of the operands of this statement.
408 Find the outermost loop enclosing LOOP in which the value of DEF is invariant
409 and record this in the DATA->max_loop field. If DEF itself is defined inside
410 this loop as well (i.e. we need to hoist it out of the loop if we want
411 to hoist the statement represented by DATA), record the statement in which
412 DEF is defined in the DATA->depends list. Additionally, if ADD_COST is true,
413 add the cost of the computation of DEF to DATA->cost.
415 If DEF is not invariant in LOOP, return false. Otherwise return TRUE. */
417 static bool
418 add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
419 bool add_cost)
421 gimple *def_stmt = SSA_NAME_DEF_STMT (def);
422 basic_block def_bb = gimple_bb (def_stmt);
423 struct loop *max_loop;
424 struct lim_aux_data *def_data;
426 if (!def_bb)
427 return true;
429 max_loop = outermost_invariant_loop (def, loop);
430 if (!max_loop)
431 return false;
433 if (flow_loop_nested_p (data->max_loop, max_loop))
434 data->max_loop = max_loop;
436 def_data = get_lim_data (def_stmt);
437 if (!def_data)
438 return true;
440 if (add_cost
441 /* Only add the cost if the statement defining DEF is inside LOOP,
442 i.e. if it is likely that by moving the invariants dependent
443 on it, we will be able to avoid creating a new register for
444 it (since it will be only used in these dependent invariants). */
445 && def_bb->loop_father == loop)
446 data->cost += def_data->cost;
448 data->depends.safe_push (def_stmt);
450 return true;
453 /* Returns an estimate of the cost of statement STMT. The values here
454 are just ad-hoc constants, similar to costs for inlining. */
456 static unsigned
457 stmt_cost (gimple *stmt)
459 /* Always try to create possibilities for unswitching. */
460 if (gimple_code (stmt) == GIMPLE_COND
461 || gimple_code (stmt) == GIMPLE_PHI)
462 return LIM_EXPENSIVE;
464 /* We should be hoisting calls if possible. */
465 if (is_gimple_call (stmt))
467 tree fndecl;
469 /* Unless the call is a builtin_constant_p; this always folds to a
470 constant, so moving it is useless. */
471 fndecl = gimple_call_fndecl (stmt);
472 if (fndecl
473 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
474 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P)
475 return 0;
477 return LIM_EXPENSIVE;
480 /* Hoisting memory references out should almost surely be a win. */
481 if (gimple_references_memory_p (stmt))
482 return LIM_EXPENSIVE;
484 if (gimple_code (stmt) != GIMPLE_ASSIGN)
485 return 1;
487 switch (gimple_assign_rhs_code (stmt))
489 case MULT_EXPR:
490 case WIDEN_MULT_EXPR:
491 case WIDEN_MULT_PLUS_EXPR:
492 case WIDEN_MULT_MINUS_EXPR:
493 case DOT_PROD_EXPR:
494 case FMA_EXPR:
495 case TRUNC_DIV_EXPR:
496 case CEIL_DIV_EXPR:
497 case FLOOR_DIV_EXPR:
498 case ROUND_DIV_EXPR:
499 case EXACT_DIV_EXPR:
500 case CEIL_MOD_EXPR:
501 case FLOOR_MOD_EXPR:
502 case ROUND_MOD_EXPR:
503 case TRUNC_MOD_EXPR:
504 case RDIV_EXPR:
505 /* Division and multiplication are usually expensive. */
506 return LIM_EXPENSIVE;
508 case LSHIFT_EXPR:
509 case RSHIFT_EXPR:
510 case WIDEN_LSHIFT_EXPR:
511 case LROTATE_EXPR:
512 case RROTATE_EXPR:
513 /* Shifts and rotates are usually expensive. */
514 return LIM_EXPENSIVE;
516 case CONSTRUCTOR:
517 /* Make vector construction cost proportional to the number
518 of elements. */
519 return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));
521 case SSA_NAME:
522 case PAREN_EXPR:
523 /* Whether or not something is wrapped inside a PAREN_EXPR
524 should not change move cost. Nor should an intermediate
525 unpropagated SSA name copy. */
526 return 0;
528 default:
529 return 1;
533 /* Finds the outermost loop between OUTER and LOOP in which the memory reference
534 REF is independent. If REF is not independent in LOOP, NULL is returned
535 instead. */
537 static struct loop *
538 outermost_indep_loop (struct loop *outer, struct loop *loop, im_mem_ref *ref)
540 struct loop *aloop;
542 if (ref->stored && bitmap_bit_p (ref->stored, loop->num))
543 return NULL;
545 for (aloop = outer;
546 aloop != loop;
547 aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
548 if ((!ref->stored || !bitmap_bit_p (ref->stored, aloop->num))
549 && ref_indep_loop_p (aloop, ref, loop))
550 return aloop;
552 if (ref_indep_loop_p (loop, ref, loop))
553 return loop;
554 else
555 return NULL;
558 /* If there is a simple load or store to a memory reference in STMT, returns
559 the location of the memory reference, and sets IS_STORE according to whether
560 it is a store or load. Otherwise, returns NULL. */
562 static tree *
563 simple_mem_ref_in_stmt (gimple *stmt, bool *is_store)
565 tree *lhs, *rhs;
567 /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns. */
568 if (!gimple_assign_single_p (stmt))
569 return NULL;
571 lhs = gimple_assign_lhs_ptr (stmt);
572 rhs = gimple_assign_rhs1_ptr (stmt);
574 if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
576 *is_store = false;
577 return rhs;
579 else if (gimple_vdef (stmt)
580 && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
582 *is_store = true;
583 return lhs;
585 else
586 return NULL;
589 /* Returns the memory reference contained in STMT. */
591 static im_mem_ref *
592 mem_ref_in_stmt (gimple *stmt)
594 bool store;
595 tree *mem = simple_mem_ref_in_stmt (stmt, &store);
596 hashval_t hash;
597 im_mem_ref *ref;
599 if (!mem)
600 return NULL;
601 gcc_assert (!store);
603 hash = iterative_hash_expr (*mem, 0);
604 ref = memory_accesses.refs->find_with_hash (*mem, hash);
606 gcc_assert (ref != NULL);
607 return ref;
610 /* From a controlling predicate in DOM determine the arguments from
611 the PHI node PHI that are chosen if the predicate evaluates to
612 true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if
613 they are non-NULL. Returns true if the arguments can be determined,
614 else return false. */
616 static bool
617 extract_true_false_args_from_phi (basic_block dom, gphi *phi,
618 tree *true_arg_p, tree *false_arg_p)
620 edge te, fe;
621 if (! extract_true_false_controlled_edges (dom, gimple_bb (phi),
622 &te, &fe))
623 return false;
625 if (true_arg_p)
626 *true_arg_p = PHI_ARG_DEF (phi, te->dest_idx);
627 if (false_arg_p)
628 *false_arg_p = PHI_ARG_DEF (phi, fe->dest_idx);
630 return true;
633 /* Determine the outermost loop to which it is possible to hoist a statement
634 STMT and store it to LIM_DATA (STMT)->max_loop. To do this we determine
635 the outermost loop in which the value computed by STMT is invariant.
636 If MUST_PRESERVE_EXEC is true, additionally choose a loop such that
637 whether STMT is executed is preserved. It also fills in other related
638 information in LIM_DATA (STMT).
640 The function returns false if STMT cannot be hoisted outside of the loop it
641 is defined in, and true otherwise. */
643 static bool
644 determine_max_movement (gimple *stmt, bool must_preserve_exec)
646 basic_block bb = gimple_bb (stmt);
647 struct loop *loop = bb->loop_father;
648 struct loop *level;
649 struct lim_aux_data *lim_data = get_lim_data (stmt);
650 tree val;
651 ssa_op_iter iter;
653 if (must_preserve_exec)
654 level = ALWAYS_EXECUTED_IN (bb);
655 else
656 level = superloop_at_depth (loop, 1);
657 lim_data->max_loop = level;
659 if (gphi *phi = dyn_cast <gphi *> (stmt))
661 use_operand_p use_p;
662 unsigned min_cost = UINT_MAX;
663 unsigned total_cost = 0;
664 struct lim_aux_data *def_data;
666 /* We will end up promoting dependencies to be unconditionally
667 evaluated. For this reason the PHI cost (and thus the
668 cost we remove from the loop by doing the invariant motion)
669 is that of the cheapest PHI argument dependency chain. */
670 FOR_EACH_PHI_ARG (use_p, phi, iter, SSA_OP_USE)
672 val = USE_FROM_PTR (use_p);
674 if (TREE_CODE (val) != SSA_NAME)
676 /* Assign cost 1 to constants. */
677 min_cost = MIN (min_cost, 1);
678 total_cost += 1;
679 continue;
681 if (!add_dependency (val, lim_data, loop, false))
682 return false;
684 gimple *def_stmt = SSA_NAME_DEF_STMT (val);
685 if (gimple_bb (def_stmt)
686 && gimple_bb (def_stmt)->loop_father == loop)
688 def_data = get_lim_data (def_stmt);
689 if (def_data)
691 min_cost = MIN (min_cost, def_data->cost);
692 total_cost += def_data->cost;
697 min_cost = MIN (min_cost, total_cost);
698 lim_data->cost += min_cost;
700 if (gimple_phi_num_args (phi) > 1)
702 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
703 gimple *cond;
704 if (gsi_end_p (gsi_last_bb (dom)))
705 return false;
706 cond = gsi_stmt (gsi_last_bb (dom));
707 if (gimple_code (cond) != GIMPLE_COND)
708 return false;
709 /* Verify that this is an extended form of a diamond and
710 the PHI arguments are completely controlled by the
711 predicate in DOM. */
712 if (!extract_true_false_args_from_phi (dom, phi, NULL, NULL))
713 return false;
715 /* Fold in dependencies and cost of the condition. */
716 FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
718 if (!add_dependency (val, lim_data, loop, false))
719 return false;
720 def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
721 if (def_data)
722 lim_data->cost += def_data->cost;
725 /* We want to avoid unconditionally executing very expensive
726 operations. As costs for our dependencies cannot be
727 negative, just claim we are not invariant for this case.
728 We also are not sure whether the control-flow inside the
729 loop will vanish. */
730 if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
731 && !(min_cost != 0
732 && total_cost / min_cost <= 2))
733 return false;
735 /* Assume that the control-flow in the loop will vanish.
736 ??? We should verify this and not artificially increase
737 the cost if that is not the case. */
738 lim_data->cost += stmt_cost (stmt);
741 return true;
743 else
744 FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
745 if (!add_dependency (val, lim_data, loop, true))
746 return false;
748 if (gimple_vuse (stmt))
750 im_mem_ref *ref = mem_ref_in_stmt (stmt);
752 if (ref)
754 lim_data->max_loop
755 = outermost_indep_loop (lim_data->max_loop, loop, ref);
756 if (!lim_data->max_loop)
757 return false;
759 else
761 if ((val = gimple_vuse (stmt)) != NULL_TREE)
763 if (!add_dependency (val, lim_data, loop, false))
764 return false;
769 lim_data->cost += stmt_cost (stmt);
771 return true;
774 /* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
775 and that one of the operands of this statement is computed by STMT.
776 Ensure that STMT (together with all the statements that define its
777 operands) is hoisted at least out of the loop LEVEL. */
779 static void
780 set_level (gimple *stmt, struct loop *orig_loop, struct loop *level)
782 struct loop *stmt_loop = gimple_bb (stmt)->loop_father;
783 struct lim_aux_data *lim_data;
784 gimple *dep_stmt;
785 unsigned i;
787 stmt_loop = find_common_loop (orig_loop, stmt_loop);
788 lim_data = get_lim_data (stmt);
789 if (lim_data != NULL && lim_data->tgt_loop != NULL)
790 stmt_loop = find_common_loop (stmt_loop,
791 loop_outer (lim_data->tgt_loop));
792 if (flow_loop_nested_p (stmt_loop, level))
793 return;
795 gcc_assert (level == lim_data->max_loop
796 || flow_loop_nested_p (lim_data->max_loop, level));
798 lim_data->tgt_loop = level;
799 FOR_EACH_VEC_ELT (lim_data->depends, i, dep_stmt)
800 set_level (dep_stmt, orig_loop, level);
803 /* Determines the outermost loop from which we want to hoist the statement STMT.
804 For now we choose the outermost possible loop. TODO -- use profiling
805 information to set it more sanely. */
807 static void
808 set_profitable_level (gimple *stmt)
810 set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
813 /* Returns true if STMT is a call that has side effects. */
815 static bool
816 nonpure_call_p (gimple *stmt)
818 if (gimple_code (stmt) != GIMPLE_CALL)
819 return false;
821 return gimple_has_side_effects (stmt);
824 /* Rewrite a/b to a*(1/b). Return the invariant stmt to process. */
826 static gimple *
827 rewrite_reciprocal (gimple_stmt_iterator *bsi)
829 gassign *stmt, *stmt1, *stmt2;
830 tree name, lhs, type;
831 tree real_one;
832 gimple_stmt_iterator gsi;
834 stmt = as_a <gassign *> (gsi_stmt (*bsi));
835 lhs = gimple_assign_lhs (stmt);
836 type = TREE_TYPE (lhs);
838 real_one = build_one_cst (type);
840 name = make_temp_ssa_name (type, NULL, "reciptmp");
841 stmt1 = gimple_build_assign (name, RDIV_EXPR, real_one,
842 gimple_assign_rhs2 (stmt));
843 stmt2 = gimple_build_assign (lhs, MULT_EXPR, name,
844 gimple_assign_rhs1 (stmt));
846 /* Replace division stmt with reciprocal and multiply stmts.
847 The multiply stmt is not invariant, so update iterator
848 and avoid rescanning. */
849 gsi = *bsi;
850 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
851 gsi_replace (&gsi, stmt2, true);
853 /* Continue processing with invariant reciprocal statement. */
854 return stmt1;
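/* Editorial illustration (not part of the original file): the source-level
   effect of rewrite_reciprocal on a hypothetical loop, assuming
   -funsafe-math-optimizations as checked by the caller.  */
#if 0
static void
example_reciprocal (float *a, const float *x, float b, int n)
{
  float reciptmp = 1.0f / b;	/* invariant, hoistable to the preheader */
  for (int i = 0; i < n; i++)
    a[i] = x[i] * reciptmp;	/* was: a[i] = x[i] / b; */
}
#endif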
857 /* Check if the pattern at *BSI is a bittest of the form
858 (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0. */
860 static gimple *
861 rewrite_bittest (gimple_stmt_iterator *bsi)
863 gassign *stmt;
864 gimple *stmt1;
865 gassign *stmt2;
866 gimple *use_stmt;
867 gcond *cond_stmt;
868 tree lhs, name, t, a, b;
869 use_operand_p use;
871 stmt = as_a <gassign *> (gsi_stmt (*bsi));
872 lhs = gimple_assign_lhs (stmt);
874 /* Verify that the single use of lhs is a comparison against zero. */
875 if (TREE_CODE (lhs) != SSA_NAME
876 || !single_imm_use (lhs, &use, &use_stmt))
877 return stmt;
878 cond_stmt = dyn_cast <gcond *> (use_stmt);
879 if (!cond_stmt)
880 return stmt;
881 if (gimple_cond_lhs (cond_stmt) != lhs
882 || (gimple_cond_code (cond_stmt) != NE_EXPR
883 && gimple_cond_code (cond_stmt) != EQ_EXPR)
884 || !integer_zerop (gimple_cond_rhs (cond_stmt)))
885 return stmt;
887 /* Get at the operands of the shift. The rhs is TMP1 & 1. */
888 stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
889 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
890 return stmt;
892 /* There is a conversion in between possibly inserted by fold. */
893 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
895 t = gimple_assign_rhs1 (stmt1);
896 if (TREE_CODE (t) != SSA_NAME
897 || !has_single_use (t))
898 return stmt;
899 stmt1 = SSA_NAME_DEF_STMT (t);
900 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
901 return stmt;
904 /* Verify that B is loop invariant but A is not. Verify that with
905 all the stmt walking we are still in the same loop. */
906 if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
907 || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
908 return stmt;
910 a = gimple_assign_rhs1 (stmt1);
911 b = gimple_assign_rhs2 (stmt1);
913 if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
914 && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
916 gimple_stmt_iterator rsi;
918 /* 1 << B */
919 t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
920 build_int_cst (TREE_TYPE (a), 1), b);
921 name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
922 stmt1 = gimple_build_assign (name, t);
924 /* A & (1 << B) */
925 t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
926 name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
927 stmt2 = gimple_build_assign (name, t);
929 /* Replace the SSA_NAME we compare against zero. Adjust
930 the type of zero accordingly. */
931 SET_USE (use, name);
932 gimple_cond_set_rhs (cond_stmt,
933 build_int_cst_type (TREE_TYPE (name),
934 0));
936 /* Don't use gsi_replace here, none of the new assignments sets
937 the variable originally set in stmt. Move bsi to stmt1, and
938 then remove the original stmt, so that we get a chance to
939 retain debug info for it. */
940 rsi = *bsi;
941 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
942 gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
943 gimple *to_release = gsi_stmt (rsi);
944 gsi_remove (&rsi, true);
945 release_defs (to_release);
947 return stmt1;
950 return stmt;
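/* Editorial illustration (not part of the original file): the source-level
   effect of rewrite_bittest on a hypothetical loop whose shift count B is
   loop invariant.  */
#if 0
static int
example_bittest (const unsigned *a, unsigned b, int n)
{
  unsigned mask = 1u << b;	/* invariant, hoistable out of the loop */
  int cnt = 0;
  for (int i = 0; i < n; i++)
    if (a[i] & mask)		/* was: if ((a[i] >> b) & 1) */
      cnt++;
  return cnt;
}
#endif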
953 /* For each statement determines the outermost loop in which it is invariant,
954 the statements on whose motion it depends, and the cost of the computation.
955 This information is stored in the LIM_DATA structure associated with
956 each statement. */
957 class invariantness_dom_walker : public dom_walker
959 public:
960 invariantness_dom_walker (cdi_direction direction)
961 : dom_walker (direction) {}
963 virtual edge before_dom_children (basic_block);
966 /* Determine the outermost loops in which statements in basic block BB are
967 invariant, and record them in the LIM_DATA associated with the statements.
968 Callback for dom_walker. */
970 edge
971 invariantness_dom_walker::before_dom_children (basic_block bb)
973 enum move_pos pos;
974 gimple_stmt_iterator bsi;
975 gimple *stmt;
976 bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
977 struct loop *outermost = ALWAYS_EXECUTED_IN (bb);
978 struct lim_aux_data *lim_data;
980 if (!loop_outer (bb->loop_father))
981 return NULL;
983 if (dump_file && (dump_flags & TDF_DETAILS))
984 fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
985 bb->index, bb->loop_father->num, loop_depth (bb->loop_father));
987 /* Look at PHI nodes, but only if there are at most two.
988 ??? We could relax this further by post-processing the inserted
989 code and transforming adjacent cond-exprs with the same predicate
990 to control flow again. */
991 bsi = gsi_start_phis (bb);
992 if (!gsi_end_p (bsi)
993 && ((gsi_next (&bsi), gsi_end_p (bsi))
994 || (gsi_next (&bsi), gsi_end_p (bsi))))
995 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
997 stmt = gsi_stmt (bsi);
999 pos = movement_possibility (stmt);
1000 if (pos == MOVE_IMPOSSIBLE)
1001 continue;
1003 lim_data = init_lim_data (stmt);
1004 lim_data->always_executed_in = outermost;
1006 if (!determine_max_movement (stmt, false))
1008 lim_data->max_loop = NULL;
1009 continue;
1012 if (dump_file && (dump_flags & TDF_DETAILS))
1014 print_gimple_stmt (dump_file, stmt, 2);
1015 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1016 loop_depth (lim_data->max_loop),
1017 lim_data->cost);
1020 if (lim_data->cost >= LIM_EXPENSIVE)
1021 set_profitable_level (stmt);
1024 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1026 stmt = gsi_stmt (bsi);
1028 pos = movement_possibility (stmt);
1029 if (pos == MOVE_IMPOSSIBLE)
1031 if (nonpure_call_p (stmt))
1033 maybe_never = true;
1034 outermost = NULL;
1036 /* Make sure to note always_executed_in for stores to make
1037 store-motion work. */
1038 else if (stmt_makes_single_store (stmt))
1040 struct lim_aux_data *lim_data = init_lim_data (stmt);
1041 lim_data->always_executed_in = outermost;
1043 continue;
1046 if (is_gimple_assign (stmt)
1047 && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
1048 == GIMPLE_BINARY_RHS))
1050 tree op0 = gimple_assign_rhs1 (stmt);
1051 tree op1 = gimple_assign_rhs2 (stmt);
1052 struct loop *ol1 = outermost_invariant_loop (op1,
1053 loop_containing_stmt (stmt));
1055 /* If the divisor is invariant, convert a/b to a*(1/b), allowing the reciprocal
1056 to be hoisted out of the loop, saving an expensive divide. */
1057 if (pos == MOVE_POSSIBLE
1058 && gimple_assign_rhs_code (stmt) == RDIV_EXPR
1059 && flag_unsafe_math_optimizations
1060 && !flag_trapping_math
1061 && ol1 != NULL
1062 && outermost_invariant_loop (op0, ol1) == NULL)
1063 stmt = rewrite_reciprocal (&bsi);
1065 /* If the shift count is invariant, convert (A >> B) & 1 to
1066 A & (1 << B), allowing the bit mask to be hoisted out of the loop,
1067 saving an expensive shift. */
1068 if (pos == MOVE_POSSIBLE
1069 && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
1070 && integer_onep (op1)
1071 && TREE_CODE (op0) == SSA_NAME
1072 && has_single_use (op0))
1073 stmt = rewrite_bittest (&bsi);
1076 lim_data = init_lim_data (stmt);
1077 lim_data->always_executed_in = outermost;
1079 if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
1080 continue;
1082 if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
1084 lim_data->max_loop = NULL;
1085 continue;
1088 if (dump_file && (dump_flags & TDF_DETAILS))
1090 print_gimple_stmt (dump_file, stmt, 2);
1091 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1092 loop_depth (lim_data->max_loop),
1093 lim_data->cost);
1096 if (lim_data->cost >= LIM_EXPENSIVE)
1097 set_profitable_level (stmt);
1099 return NULL;
1102 class move_computations_dom_walker : public dom_walker
1104 public:
1105 move_computations_dom_walker (cdi_direction direction)
1106 : dom_walker (direction), todo_ (0) {}
1108 virtual edge before_dom_children (basic_block);
1110 unsigned int todo_;
1113 /* Hoist the statements in basic block BB out of the loops prescribed by
1114 data stored in LIM_DATA structures associated with each statement. Callback
1115 for walk_dominator_tree. */
1117 unsigned int
1118 move_computations_worker (basic_block bb)
1120 struct loop *level;
1121 unsigned cost = 0;
1122 struct lim_aux_data *lim_data;
1123 unsigned int todo = 0;
1125 if (!loop_outer (bb->loop_father))
1126 return todo;
1128 for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
1130 gassign *new_stmt;
1131 gphi *stmt = bsi.phi ();
1133 lim_data = get_lim_data (stmt);
1134 if (lim_data == NULL)
1136 gsi_next (&bsi);
1137 continue;
1140 cost = lim_data->cost;
1141 level = lim_data->tgt_loop;
1142 clear_lim_data (stmt);
1144 if (!level)
1146 gsi_next (&bsi);
1147 continue;
1150 if (dump_file && (dump_flags & TDF_DETAILS))
1152 fprintf (dump_file, "Moving PHI node\n");
1153 print_gimple_stmt (dump_file, stmt, 0);
1154 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1155 cost, level->num);
1158 if (gimple_phi_num_args (stmt) == 1)
1160 tree arg = PHI_ARG_DEF (stmt, 0);
1161 new_stmt = gimple_build_assign (gimple_phi_result (stmt),
1162 TREE_CODE (arg), arg);
1164 else
1166 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
1167 gimple *cond = gsi_stmt (gsi_last_bb (dom));
1168 tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
1169 /* Get the PHI arguments corresponding to the true and false
1170 edges of COND. */
1171 extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
1172 gcc_assert (arg0 && arg1);
1173 t = build2 (gimple_cond_code (cond), boolean_type_node,
1174 gimple_cond_lhs (cond), gimple_cond_rhs (cond));
1175 new_stmt = gimple_build_assign (gimple_phi_result (stmt),
1176 COND_EXPR, t, arg0, arg1);
1177 todo |= TODO_cleanup_cfg;
1179 if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (new_stmt)))
1180 && (!ALWAYS_EXECUTED_IN (bb)
1181 || (ALWAYS_EXECUTED_IN (bb) != level
1182 && !flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1184 tree lhs = gimple_assign_lhs (new_stmt);
1185 SSA_NAME_RANGE_INFO (lhs) = NULL;
1187 gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
1188 remove_phi_node (&bsi, false);
1191 for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
1193 edge e;
1195 gimple *stmt = gsi_stmt (bsi);
1197 lim_data = get_lim_data (stmt);
1198 if (lim_data == NULL)
1200 gsi_next (&bsi);
1201 continue;
1204 cost = lim_data->cost;
1205 level = lim_data->tgt_loop;
1206 clear_lim_data (stmt);
1208 if (!level)
1210 gsi_next (&bsi);
1211 continue;
1214 /* We do not really want to move conditionals out of the loop; we just
1215 placed it here to force its operands to be moved if necessary. */
1216 if (gimple_code (stmt) == GIMPLE_COND)
1217 continue;
1219 if (dump_file && (dump_flags & TDF_DETAILS))
1221 fprintf (dump_file, "Moving statement\n");
1222 print_gimple_stmt (dump_file, stmt, 0);
1223 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1224 cost, level->num);
1227 e = loop_preheader_edge (level);
1228 gcc_assert (!gimple_vdef (stmt));
1229 if (gimple_vuse (stmt))
1231 /* The new VUSE is the one from the virtual PHI in the loop
1232 header or the one already present. */
1233 gphi_iterator gsi2;
1234 for (gsi2 = gsi_start_phis (e->dest);
1235 !gsi_end_p (gsi2); gsi_next (&gsi2))
1237 gphi *phi = gsi2.phi ();
1238 if (virtual_operand_p (gimple_phi_result (phi)))
1240 gimple_set_vuse (stmt, PHI_ARG_DEF_FROM_EDGE (phi, e));
1241 break;
1245 gsi_remove (&bsi, false);
1246 if (gimple_has_lhs (stmt)
1247 && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME
1248 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_get_lhs (stmt)))
1249 && (!ALWAYS_EXECUTED_IN (bb)
1250 || !(ALWAYS_EXECUTED_IN (bb) == level
1251 || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1253 tree lhs = gimple_get_lhs (stmt);
1254 SSA_NAME_RANGE_INFO (lhs) = NULL;
1256 /* In case this is a stmt that is not unconditionally executed
1257 when the target loop header is executed and the stmt may
1258 invoke undefined integer or pointer overflow, rewrite it to
1259 unsigned arithmetic. */
1260 if (is_gimple_assign (stmt)
1261 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))
1262 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (gimple_assign_lhs (stmt)))
1263 && arith_code_with_undefined_signed_overflow
1264 (gimple_assign_rhs_code (stmt))
1265 && (!ALWAYS_EXECUTED_IN (bb)
1266 || !(ALWAYS_EXECUTED_IN (bb) == level
1267 || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1268 gsi_insert_seq_on_edge (e, rewrite_to_defined_overflow (stmt));
1269 else
1270 gsi_insert_on_edge (e, stmt);
1273 return todo;
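/* Editorial illustration (not part of the original file): the effect of
   rewriting a conditionally executed signed addition to unsigned
   arithmetic before hoisting it to the preheader, as done above via
   rewrite_to_defined_overflow.  */
#if 0
static int
example_defined_overflow (int x, int y)
{
  /* was: x + y, whose signed overflow would be undefined if evaluated
     unconditionally in the preheader.  */
  return (int) ((unsigned int) x + (unsigned int) y);
}
#endif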
1276 /* Hoist the statements out of the loops prescribed by data stored in
1277 LIM_DATA structures associated with each statement. */
1279 static unsigned int
1280 move_computations (void)
1282 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
1283 int n = pre_and_rev_post_order_compute_fn (cfun, NULL, rpo, false);
1284 unsigned todo = 0;
1286 for (int i = 0; i < n; ++i)
1287 todo |= move_computations_worker (BASIC_BLOCK_FOR_FN (cfun, rpo[i]));
1289 free (rpo);
1291 gsi_commit_edge_inserts ();
1292 if (need_ssa_update_p (cfun))
1293 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
1295 return todo;
1298 /* Checks whether the statement defining variable *INDEX can be hoisted
1299 out of the loop passed in DATA. Callback for for_each_index. */
1301 static bool
1302 may_move_till (tree ref, tree *index, void *data)
1304 struct loop *loop = (struct loop *) data, *max_loop;
1306 /* If REF is an array reference, check also that the step and the lower
1307 bound is invariant in LOOP. */
1308 if (TREE_CODE (ref) == ARRAY_REF)
1310 tree step = TREE_OPERAND (ref, 3);
1311 tree lbound = TREE_OPERAND (ref, 2);
1313 max_loop = outermost_invariant_loop (step, loop);
1314 if (!max_loop)
1315 return false;
1317 max_loop = outermost_invariant_loop (lbound, loop);
1318 if (!max_loop)
1319 return false;
1322 max_loop = outermost_invariant_loop (*index, loop);
1323 if (!max_loop)
1324 return false;
1326 return true;
1329 /* If OP is an SSA_NAME, force the statement that defines it to be
1330 moved out of the LOOP. ORIG_LOOP is the loop in which EXPR is used. */
1332 static void
1333 force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
1335 gimple *stmt;
1337 if (!op
1338 || is_gimple_min_invariant (op))
1339 return;
1341 gcc_assert (TREE_CODE (op) == SSA_NAME);
1343 stmt = SSA_NAME_DEF_STMT (op);
1344 if (gimple_nop_p (stmt))
1345 return;
1347 set_level (stmt, orig_loop, loop);
1350 /* Forces statement defining invariants in REF (and *INDEX) to be moved out of
1351 the LOOP. The reference REF is used in the loop ORIG_LOOP. Callback for
1352 for_each_index. */
1354 struct fmt_data
1356 struct loop *loop;
1357 struct loop *orig_loop;
1360 static bool
1361 force_move_till (tree ref, tree *index, void *data)
1363 struct fmt_data *fmt_data = (struct fmt_data *) data;
1365 if (TREE_CODE (ref) == ARRAY_REF)
1367 tree step = TREE_OPERAND (ref, 3);
1368 tree lbound = TREE_OPERAND (ref, 2);
1370 force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
1371 force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
1374 force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);
1376 return true;
1379 /* A function to free the mem_ref object OBJ. */
1381 static void
1382 memref_free (struct im_mem_ref *mem)
1384 mem->accesses_in_loop.release ();
1387 /* Allocates and returns a memory reference description for MEM whose hash
1388 value is HASH and id is ID. */
1390 static im_mem_ref *
1391 mem_ref_alloc (tree mem, unsigned hash, unsigned id)
1393 im_mem_ref *ref = XOBNEW (&mem_ref_obstack, struct im_mem_ref);
1394 ao_ref_init (&ref->mem, mem);
1395 ref->id = id;
1396 ref->hash = hash;
1397 ref->stored = NULL;
1398 bitmap_initialize (&ref->indep_loop, &lim_bitmap_obstack);
1399 bitmap_initialize (&ref->dep_loop, &lim_bitmap_obstack);
1400 ref->accesses_in_loop.create (1);
1402 return ref;
1405 /* Records memory reference location *LOC in LOOP to the memory reference
1406 description REF. The reference occurs in statement STMT. */
1408 static void
1409 record_mem_ref_loc (im_mem_ref *ref, gimple *stmt, tree *loc)
1411 mem_ref_loc aref;
1412 aref.stmt = stmt;
1413 aref.ref = loc;
1414 ref->accesses_in_loop.safe_push (aref);
1417 /* Set the bit for LOOP in REF's stored bitmap, allocating the bitmap if
1418 necessary. Return whether the bit was changed. */
1420 static bool
1421 set_ref_stored_in_loop (im_mem_ref *ref, struct loop *loop)
1423 if (!ref->stored)
1424 ref->stored = BITMAP_ALLOC (&lim_bitmap_obstack);
1425 return bitmap_set_bit (ref->stored, loop->num);
1428 /* Marks reference REF as stored in LOOP. */
1430 static void
1431 mark_ref_stored (im_mem_ref *ref, struct loop *loop)
1433 while (loop != current_loops->tree_root
1434 && set_ref_stored_in_loop (ref, loop))
1435 loop = loop_outer (loop);
1438 /* Gathers memory references in statement STMT in LOOP, storing the
1439 information about them in the memory_accesses structure. Marks
1440 the vops accessed through unrecognized statements there as
1441 well. */
1443 static void
1444 gather_mem_refs_stmt (struct loop *loop, gimple *stmt)
1446 tree *mem = NULL;
1447 hashval_t hash;
1448 im_mem_ref **slot;
1449 im_mem_ref *ref;
1450 bool is_stored;
1451 unsigned id;
1453 if (!gimple_vuse (stmt))
1454 return;
1456 mem = simple_mem_ref_in_stmt (stmt, &is_stored);
1457 if (!mem)
1459 /* We use the shared mem_ref for all unanalyzable refs. */
1460 id = UNANALYZABLE_MEM_ID;
1461 ref = memory_accesses.refs_list[id];
1462 if (dump_file && (dump_flags & TDF_DETAILS))
1464 fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
1465 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1467 is_stored = gimple_vdef (stmt);
1469 else
1471 hash = iterative_hash_expr (*mem, 0);
1472 slot = memory_accesses.refs->find_slot_with_hash (*mem, hash, INSERT);
1473 if (*slot)
1475 ref = *slot;
1476 id = ref->id;
1478 else
1480 id = memory_accesses.refs_list.length ();
1481 ref = mem_ref_alloc (*mem, hash, id);
1482 memory_accesses.refs_list.safe_push (ref);
1483 *slot = ref;
1485 if (dump_file && (dump_flags & TDF_DETAILS))
1487 fprintf (dump_file, "Memory reference %u: ", id);
1488 print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM);
1489 fprintf (dump_file, "\n");
1493 record_mem_ref_loc (ref, stmt, mem);
1495 bitmap_set_bit (&memory_accesses.refs_in_loop[loop->num], ref->id);
1496 if (is_stored)
1498 bitmap_set_bit (&memory_accesses.refs_stored_in_loop[loop->num], ref->id);
1499 mark_ref_stored (ref, loop);
1501 return;
1504 static unsigned *bb_loop_postorder;
1506 /* qsort comparison function to sort blocks by their loop father's postorder. */
1508 static int
1509 sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_)
1511 basic_block bb1 = *(basic_block *)const_cast<void *>(bb1_);
1512 basic_block bb2 = *(basic_block *)const_cast<void *>(bb2_);
1513 struct loop *loop1 = bb1->loop_father;
1514 struct loop *loop2 = bb2->loop_father;
1515 if (loop1->num == loop2->num)
1516 return 0;
1517 return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
1520 /* qsort comparison function to sort ref locs by their loop father's postorder. */
1522 static int
1523 sort_locs_in_loop_postorder_cmp (const void *loc1_, const void *loc2_)
1525 mem_ref_loc *loc1 = (mem_ref_loc *)const_cast<void *>(loc1_);
1526 mem_ref_loc *loc2 = (mem_ref_loc *)const_cast<void *>(loc2_);
1527 struct loop *loop1 = gimple_bb (loc1->stmt)->loop_father;
1528 struct loop *loop2 = gimple_bb (loc2->stmt)->loop_father;
1529 if (loop1->num == loop2->num)
1530 return 0;
1531 return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
1534 /* Gathers memory references in loops. */
1536 static void
1537 analyze_memory_references (void)
1539 gimple_stmt_iterator bsi;
1540 basic_block bb, *bbs;
1541 struct loop *loop, *outer;
1542 unsigned i, n;
1544 /* Collect all basic blocks in loops and sort them by their
1545 loop's postorder. */
1546 i = 0;
1547 bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
1548 FOR_EACH_BB_FN (bb, cfun)
1549 if (bb->loop_father != current_loops->tree_root)
1550 bbs[i++] = bb;
1551 n = i;
1552 qsort (bbs, n, sizeof (basic_block), sort_bbs_in_loop_postorder_cmp);
1554 /* Visit blocks in loop postorder and assign mem-ref IDs in that order.
1555 That results in better locality for all the bitmaps. */
1556 for (i = 0; i < n; ++i)
1558 basic_block bb = bbs[i];
1559 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1560 gather_mem_refs_stmt (bb->loop_father, gsi_stmt (bsi));
1563 /* Sort the location list of gathered memory references by their
1564 loop postorder number. */
1565 im_mem_ref *ref;
1566 FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
1567 ref->accesses_in_loop.qsort (sort_locs_in_loop_postorder_cmp);
1569 free (bbs);
1570 // free (bb_loop_postorder);
1572 /* Propagate the information about accessed memory references up
1573 the loop hierarchy. */
1574 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1576 /* Finalize the overall touched references (including subloops). */
1577 bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
1578 &memory_accesses.refs_stored_in_loop[loop->num]);
1580 /* Propagate the information about accessed memory references up
1581 the loop hierarchy. */
1582 outer = loop_outer (loop);
1583 if (outer == current_loops->tree_root)
1584 continue;
1586 bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[outer->num],
1587 &memory_accesses.all_refs_stored_in_loop[loop->num]);
1591 /* Returns true if MEM1 and MEM2 may alias. TTAE_CACHE is used as a cache in
1592 tree_to_aff_combination_expand. */
1594 static bool
1595 mem_refs_may_alias_p (im_mem_ref *mem1, im_mem_ref *mem2,
1596 hash_map<tree, name_expansion *> **ttae_cache)
1598 /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
1599 object and their offset differ in such a way that the locations cannot
1600 overlap, then they cannot alias. */
1601 widest_int size1, size2;
1602 aff_tree off1, off2;
1604 /* Perform basic offset and type-based disambiguation. */
1605 if (!refs_may_alias_p_1 (&mem1->mem, &mem2->mem, true))
1606 return false;
1608 /* The expansion of addresses may be a bit expensive, thus we only do
1609 the check at -O2 and higher optimization levels. */
1610 if (optimize < 2)
1611 return true;
1613 get_inner_reference_aff (mem1->mem.ref, &off1, &size1);
1614 get_inner_reference_aff (mem2->mem.ref, &off2, &size2);
1615 aff_combination_expand (&off1, ttae_cache);
1616 aff_combination_expand (&off2, ttae_cache);
1617 aff_combination_scale (&off1, -1);
1618 aff_combination_add (&off2, &off1);
1620 if (aff_comb_cannot_overlap_p (&off2, size1, size2))
1621 return false;
1623 return true;
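/* Editorial illustration (not part of the original file): for two
   references based on the same object, the affine check above amounts to
   an interval-disjointness test on the byte offsets, e.g. a[2] (offset 8,
   size 4) and a[5] (offset 20, size 4) cannot overlap.  */
#if 0
static bool
example_offsets_cannot_overlap_p (HOST_WIDE_INT off1, HOST_WIDE_INT size1,
				  HOST_WIDE_INT off2, HOST_WIDE_INT size2)
{
  return off1 + size1 <= off2 || off2 + size2 <= off1;
}
#endif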
1626 /* Compare function for bsearch searching for reference locations
1627 in a loop. */
1629 static int
1630 find_ref_loc_in_loop_cmp (const void *loop_, const void *loc_)
1632 struct loop *loop = (struct loop *)const_cast<void *>(loop_);
1633 mem_ref_loc *loc = (mem_ref_loc *)const_cast<void *>(loc_);
1634 struct loop *loc_loop = gimple_bb (loc->stmt)->loop_father;
1635 if (loop->num == loc_loop->num
1636 || flow_loop_nested_p (loop, loc_loop))
1637 return 0;
1638 return (bb_loop_postorder[loop->num] < bb_loop_postorder[loc_loop->num]
1639 ? -1 : 1);
1642 /* Iterates over all locations of REF in LOOP and its subloops calling
1643 fn.operator() with the location as argument. When that operator
1644 returns true the iteration is stopped and true is returned.
1645 Otherwise false is returned. */
1647 template <typename FN>
1648 static bool
1649 for_all_locs_in_loop (struct loop *loop, im_mem_ref *ref, FN fn)
1651 unsigned i;
1652 mem_ref_loc *loc;
1654 /* Search for the cluster of locs in the accesses_in_loop vector,
1655 which is sorted by the postorder index of the loop father. */
1656 loc = ref->accesses_in_loop.bsearch (loop, find_ref_loc_in_loop_cmp);
1657 if (!loc)
1658 return false;
1660 /* We have found one location inside loop or its sub-loops. Iterate
1661 both forward and backward to cover the whole cluster. */
1662 i = loc - ref->accesses_in_loop.address ();
1663 while (i > 0)
1665 --i;
1666 mem_ref_loc *l = &ref->accesses_in_loop[i];
1667 if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
1668 break;
1669 if (fn (l))
1670 return true;
1672 for (i = loc - ref->accesses_in_loop.address ();
1673 i < ref->accesses_in_loop.length (); ++i)
1675 mem_ref_loc *l = &ref->accesses_in_loop[i];
1676 if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
1677 break;
1678 if (fn (l))
1679 return true;
1682 return false;
1685 /* Rewrites location LOC by TMP_VAR. */
1687 struct rewrite_mem_ref_loc
1689 rewrite_mem_ref_loc (tree tmp_var_) : tmp_var (tmp_var_) {}
1690 bool operator () (mem_ref_loc *loc);
1691 tree tmp_var;
1694 bool
1695 rewrite_mem_ref_loc::operator () (mem_ref_loc *loc)
1697 *loc->ref = tmp_var;
1698 update_stmt (loc->stmt);
1699 return false;
1702 /* Rewrites all references to REF in LOOP by variable TMP_VAR. */
1704 static void
1705 rewrite_mem_refs (struct loop *loop, im_mem_ref *ref, tree tmp_var)
1707 for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var));
1710 /* Stores the first reference location in LOCP. */
1712 struct first_mem_ref_loc_1
1714 first_mem_ref_loc_1 (mem_ref_loc **locp_) : locp (locp_) {}
1715 bool operator () (mem_ref_loc *loc);
1716 mem_ref_loc **locp;
1719 bool
1720 first_mem_ref_loc_1::operator () (mem_ref_loc *loc)
1722 *locp = loc;
1723 return true;
1726 /* Returns the first reference location to REF in LOOP. */
1728 static mem_ref_loc *
1729 first_mem_ref_loc (struct loop *loop, im_mem_ref *ref)
1731 mem_ref_loc *locp = NULL;
1732 for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp));
1733 return locp;
1736 struct prev_flag_edges {
1737 /* Edge to insert new flag comparison code. */
1738 edge append_cond_position;
1740 /* Edge for fall through from previous flag comparison. */
1741 edge last_cond_fallthru;
1744 /* Helper function for execute_sm. Emit code to store TMP_VAR into
1745 MEM along edge EX.
1747 The store is only done if MEM has changed. We do this so no
1748 changes to MEM occur on code paths that did not originally store
1749 into it.
1751 The common case for execute_sm will transform:
1753 for (...) {
1754 if (foo)
1755 stuff;
1756 else
1757 MEM = TMP_VAR;
1760 into:
1762 lsm = MEM;
1763 for (...) {
1764 if (foo)
1765 stuff;
1766 else
1767 lsm = TMP_VAR;
1769 MEM = lsm;
1771 This function will generate:
1773 lsm = MEM;
1775 lsm_flag = false;
1777 for (...) {
1778 if (foo)
1779 stuff;
1780 else {
1781 lsm = TMP_VAR;
1782 lsm_flag = true;
1785 if (lsm_flag) <--
1786 MEM = lsm; <--
1789 static void
1790 execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag,
1791 edge preheader, hash_set <basic_block> *flag_bbs)
1793 basic_block new_bb, then_bb, old_dest;
1794 bool loop_has_only_one_exit;
1795 edge then_old_edge, orig_ex = ex;
1796 gimple_stmt_iterator gsi;
1797 gimple *stmt;
1798 struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
1799 bool irr = ex->flags & EDGE_IRREDUCIBLE_LOOP;
1801 int freq_sum = 0;
1802 profile_count count_sum = profile_count::zero ();
1803 int nbbs = 0, ncount = 0;
1804 int flag_probability = -1;
1806 /* Flag is set in FLAG_BBS. Determine probability that flag will be true
1807 at loop exit.
1809 This code may look fancy, but it cannot update the profile very realistically
1810 because we do not know the probability that the flag will be true at a given
1811 loop exit.
1813 We look for two interesting extremes
1814 - when the exit is dominated by a block setting the flag, we know it will
1815 always be true. This is a common case.
1816 - when all blocks setting the flag have a very low frequency, we know
1817 it will likely be false.
1818 In all other cases we default to 2/3 for the flag being true. */
1820 for (hash_set<basic_block>::iterator it = flag_bbs->begin ();
1821 it != flag_bbs->end (); ++it)
1823 freq_sum += (*it)->frequency;
1824 if ((*it)->count.initialized_p ())
1825 count_sum += (*it)->count, ncount ++;
1826 if (dominated_by_p (CDI_DOMINATORS, ex->src, *it))
1827 flag_probability = REG_BR_PROB_BASE;
1828 nbbs++;
1831 if (flag_probability != -1)
1833 else if (ncount == nbbs && count_sum > 0 && preheader->count >= count_sum)
1835 flag_probability = count_sum.probability_in (preheader->count);
1836 if (flag_probability > REG_BR_PROB_BASE * 2 / 3)
1837 flag_probability = REG_BR_PROB_BASE * 2 / 3;
1839 else if (freq_sum > 0 && EDGE_FREQUENCY (preheader) >= freq_sum)
1841 flag_probability = GCOV_COMPUTE_SCALE (freq_sum,
1842 EDGE_FREQUENCY (preheader));
1843 if (flag_probability > REG_BR_PROB_BASE * 2 / 3)
1844 flag_probability = REG_BR_PROB_BASE * 2 / 3;
1846 else
1847 flag_probability = REG_BR_PROB_BASE * 2 / 3;
1849 /* ?? Insert store after previous store if applicable. See note
1850 below. */
1851 if (prev_edges)
1852 ex = prev_edges->append_cond_position;
1854 loop_has_only_one_exit = single_pred_p (ex->dest);
1856 if (loop_has_only_one_exit)
1857 ex = split_block_after_labels (ex->dest);
1858 else
1860 for (gphi_iterator gpi = gsi_start_phis (ex->dest);
1861 !gsi_end_p (gpi); gsi_next (&gpi))
1863 gphi *phi = gpi.phi ();
1864 if (virtual_operand_p (gimple_phi_result (phi)))
1865 continue;
1867 /* When the destination has a non-virtual PHI node with multiple
1868 predecessors make sure we preserve the PHI structure by
1869 forcing a forwarder block so that hoisting of that PHI will
1870 still work. */
1871 split_edge (ex);
1872 break;
1876 old_dest = ex->dest;
1877 new_bb = split_edge (ex);
1878 then_bb = create_empty_bb (new_bb);
1879 then_bb->frequency = apply_probability (new_bb->frequency, flag_probability);
1880 then_bb->count = new_bb->count.apply_probability (flag_probability);
1881 if (irr)
1882 then_bb->flags = BB_IRREDUCIBLE_LOOP;
1883 add_bb_to_loop (then_bb, new_bb->loop_father);
1885 gsi = gsi_start_bb (new_bb);
1886 stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
1887 NULL_TREE, NULL_TREE);
1888 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1890 gsi = gsi_start_bb (then_bb);
1891 /* Insert actual store. */
1892 stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
1893 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1895 edge e1 = single_succ_edge (new_bb);
1896 edge e2 = make_edge (new_bb, then_bb,
1897 EDGE_TRUE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
1898 e2->probability = flag_probability;
1899 e2->count = then_bb->count;
1901 e1->flags |= EDGE_FALSE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0);
1902 e1->flags &= ~EDGE_FALLTHRU;
1904 e1->probability = REG_BR_PROB_BASE - flag_probability;
1905 e1->count = new_bb->count - then_bb->count;
1907 then_old_edge = make_edge (then_bb, old_dest,
1908 EDGE_FALLTHRU | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
1909 then_old_edge->probability = REG_BR_PROB_BASE;
1910 then_old_edge->count = then_bb->count;
1912 set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);
1914 if (prev_edges)
1916 basic_block prevbb = prev_edges->last_cond_fallthru->src;
1917 redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
1918 set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
1919 set_immediate_dominator (CDI_DOMINATORS, old_dest,
1920 recompute_dominator (CDI_DOMINATORS, old_dest));
1923 /* ?? Because stores may alias, they must happen in the exact
1924 sequence they originally happened. Save the position right after
1925 the (_lsm) store we just created so we can continue appending after
1926 it and maintain the original order. */
1928 struct prev_flag_edges *p;
1930 if (orig_ex->aux)
1931 orig_ex->aux = NULL;
1932 alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
1933 p = (struct prev_flag_edges *) orig_ex->aux;
1934 p->append_cond_position = then_old_edge;
1935 p->last_cond_fallthru = find_edge (new_bb, old_dest);
1936 orig_ex->aux = (void *) p;
1939 if (!loop_has_only_one_exit)
1940 for (gphi_iterator gpi = gsi_start_phis (old_dest);
1941 !gsi_end_p (gpi); gsi_next (&gpi))
1943 gphi *phi = gpi.phi ();
1944 unsigned i;
1946 for (i = 0; i < gimple_phi_num_args (phi); i++)
1947 if (gimple_phi_arg_edge (phi, i)->src == new_bb)
1949 tree arg = gimple_phi_arg_def (phi, i);
1950 add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
1951 update_stmt (phi);
1956 /* When REF is set on the location, set flag indicating the store. */
1958 struct sm_set_flag_if_changed
1960 sm_set_flag_if_changed (tree flag_, hash_set <basic_block> *bbs_)
1961 : flag (flag_), bbs (bbs_) {}
1962 bool operator () (mem_ref_loc *loc);
1963 tree flag;
1964 hash_set <basic_block> *bbs;
1967 bool
1968 sm_set_flag_if_changed::operator () (mem_ref_loc *loc)
1970 /* Only set the flag for writes. */
1971 if (is_gimple_assign (loc->stmt)
1972 && gimple_assign_lhs_ptr (loc->stmt) == loc->ref)
1974 gimple_stmt_iterator gsi = gsi_for_stmt (loc->stmt);
1975 gimple *stmt = gimple_build_assign (flag, boolean_true_node);
1976 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1977 bbs->add (gimple_bb (stmt));
1979 return false;
1982 /* Helper function for execute_sm. At every location in LOOP where REF is
1983 stored to, insert code setting an appropriate flag to indicate the store. */
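/* Illustrative sketch only (names made up): a store in the loop body

     *p = val;

   ends up, once REF has also been rewritten to its _lsm temporary by
   rewrite_mem_refs, followed by an update of the flag:

     p_lsm = val;
     p_flag = 1;  */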
1985 static tree
1986 execute_sm_if_changed_flag_set (struct loop *loop, im_mem_ref *ref,
1987 hash_set <basic_block> *bbs)
1989 tree flag;
1990 char *str = get_lsm_tmp_name (ref->mem.ref, ~0, "_flag");
1991 flag = create_tmp_reg (boolean_type_node, str);
1992 for_all_locs_in_loop (loop, ref, sm_set_flag_if_changed (flag, bbs));
1993 return flag;
1996 /* Executes store motion of memory reference REF from LOOP.
1997 Exits from the LOOP are stored in EXITS. The initialization of the
1998 temporary variable is placed in the preheader of the loop, and assignments
1999 to the reference from the temporary variable are emitted on the exits. */
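/* Illustrative sketch (single-threaded model; MEM, tmp and x are made up).
   A loop

     loop:
       ...
       MEM = x;
       ...

   is transformed into

     tmp = MEM;
     loop:
       ...
       tmp = x;
       ...
     on each exit:
       MEM = tmp;

   In the multi-threaded model the store on each exit is instead guarded
   by a flag that is set whenever MEM was stored to inside the loop
   (see execute_sm_if_changed).  */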
2001 static void
2002 execute_sm (struct loop *loop, vec<edge> exits, im_mem_ref *ref)
2004 tree tmp_var, store_flag = NULL_TREE;
2005 unsigned i;
2006 gassign *load;
2007 struct fmt_data fmt_data;
2008 edge ex;
2009 struct lim_aux_data *lim_data;
2010 bool multi_threaded_model_p = false;
2011 gimple_stmt_iterator gsi;
2012 hash_set<basic_block> flag_bbs;
2014 if (dump_file && (dump_flags & TDF_DETAILS))
2016 fprintf (dump_file, "Executing store motion of ");
2017 print_generic_expr (dump_file, ref->mem.ref);
2018 fprintf (dump_file, " from loop %d\n", loop->num);
2021 tmp_var = create_tmp_reg (TREE_TYPE (ref->mem.ref),
2022 get_lsm_tmp_name (ref->mem.ref, ~0));
2024 fmt_data.loop = loop;
2025 fmt_data.orig_loop = loop;
2026 for_each_index (&ref->mem.ref, force_move_till, &fmt_data);
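  /* Use the flag-guarded (multi-threaded) store model if the preheader is
     inside a transaction, or if emitting unconditional stores on the exits
     could introduce a store data race because the reference is not always
     stored to in the original loop.  */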
2028 if (bb_in_transaction (loop_preheader_edge (loop)->src)
2029 || (! PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES)
2030 && ! ref_always_accessed_p (loop, ref, true)))
2031 multi_threaded_model_p = true;
2033 if (multi_threaded_model_p)
2034 store_flag = execute_sm_if_changed_flag_set (loop, ref, &flag_bbs);
2036 rewrite_mem_refs (loop, ref, tmp_var);
2038 /* Emit the load code at the position of the first memory reference
2039 in the loop; move_computations will hoist it out of the loop after
2040 all of its dependencies have been processed. */
2041 gsi = gsi_for_stmt (first_mem_ref_loc (loop, ref)->stmt);
2043 /* FIXME/TODO: For the multi-threaded variant, we could avoid this
2044 load altogether, since the store is predicated by a flag. We
2045 could do the load only if it was originally in the loop. */
2046 load = gimple_build_assign (tmp_var, unshare_expr (ref->mem.ref));
2047 lim_data = init_lim_data (load);
2048 lim_data->max_loop = loop;
2049 lim_data->tgt_loop = loop;
2050 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
2052 if (multi_threaded_model_p)
2054 load = gimple_build_assign (store_flag, boolean_false_node);
2055 lim_data = init_lim_data (load);
2056 lim_data->max_loop = loop;
2057 lim_data->tgt_loop = loop;
2058 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
2061 /* Sink the store to every exit from the loop. */
2062 FOR_EACH_VEC_ELT (exits, i, ex)
2063 if (!multi_threaded_model_p)
2065 gassign *store;
2066 store = gimple_build_assign (unshare_expr (ref->mem.ref), tmp_var);
2067 gsi_insert_on_edge (ex, store);
2069 else
2070 execute_sm_if_changed (ex, ref->mem.ref, tmp_var, store_flag,
2071 loop_preheader_edge (loop), &flag_bbs);
2074 /* Hoists memory references MEM_REFS out of LOOP. EXITS is the list of exit
2075 edges of the LOOP. */
2077 static void
2078 hoist_memory_references (struct loop *loop, bitmap mem_refs,
2079 vec<edge> exits)
2081 im_mem_ref *ref;
2082 unsigned i;
2083 bitmap_iterator bi;
2085 EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
2087 ref = memory_accesses.refs_list[i];
2088 execute_sm (loop, exits, ref);
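/* Functor for for_all_locs_in_loop: returns true for a location that is
   always executed in LOOP (and, if STORED_P, is a store to the reference).  */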
2092 struct ref_always_accessed
2094 ref_always_accessed (struct loop *loop_, bool stored_p_)
2095 : loop (loop_), stored_p (stored_p_) {}
2096 bool operator () (mem_ref_loc *loc);
2097 struct loop *loop;
2098 bool stored_p;
2101 bool
2102 ref_always_accessed::operator () (mem_ref_loc *loc)
2104 struct loop *must_exec;
2106 if (!get_lim_data (loc->stmt))
2107 return false;
2109 /* If we require an always executed store, make sure the statement
2110 stores to the reference. */
2111 if (stored_p)
2113 tree lhs = gimple_get_lhs (loc->stmt);
2114 if (!lhs
2115 || lhs != *loc->ref)
2116 return false;
2119 must_exec = get_lim_data (loc->stmt)->always_executed_in;
2120 if (!must_exec)
2121 return false;
2123 if (must_exec == loop
2124 || flow_loop_nested_p (must_exec, loop))
2125 return true;
2127 return false;
2130 /* Returns true if REF is always accessed in LOOP. If STORED_P is true,
2131 check that REF is always stored to in LOOP. */
2133 static bool
2134 ref_always_accessed_p (struct loop *loop, im_mem_ref *ref, bool stored_p)
2136 return for_all_locs_in_loop (loop, ref,
2137 ref_always_accessed (loop, stored_p));
2140 /* Returns true if REF1 and REF2 are independent. */
2142 static bool
2143 refs_independent_p (im_mem_ref *ref1, im_mem_ref *ref2)
2145 if (ref1 == ref2)
2146 return true;
2148 if (dump_file && (dump_flags & TDF_DETAILS))
2149 fprintf (dump_file, "Querying dependency of refs %u and %u: ",
2150 ref1->id, ref2->id);
2152 if (mem_refs_may_alias_p (ref1, ref2, &memory_accesses.ttae_cache))
2154 if (dump_file && (dump_flags & TDF_DETAILS))
2155 fprintf (dump_file, "dependent.\n");
2156 return false;
2158 else
2160 if (dump_file && (dump_flags & TDF_DETAILS))
2161 fprintf (dump_file, "independent.\n");
2162 return true;
2166 /* Mark REF dependent on stores or loads (according to STORED_P) in LOOP
2167 and its super-loops. */
2169 static void
2170 record_dep_loop (struct loop *loop, im_mem_ref *ref, bool stored_p)
2172 /* We can propagate dependent-in-loop bits up the loop
2173 hierarchy to all outer loops. */
2174 while (loop != current_loops->tree_root
2175 && bitmap_set_bit (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2176 loop = loop_outer (loop);
2179 /* Returns true if REF is independent of all other memory
2180 references in LOOP. REF_LOOP is the loop where REF is accessed, SAFELEN
2181 is the safelen value to apply. */
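/* Note (informal): LOOP->safelen is set e.g. from #pragma omp simd
   safelen(N) or #pragma GCC ivdep; a value greater than 1 lets us treat
   references occurring inside such a loop as independent of each other
   without querying the alias oracle.  */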
2183 static bool
2184 ref_indep_loop_p_1 (int safelen, struct loop *loop, im_mem_ref *ref,
2185 bool stored_p, struct loop *ref_loop)
2187 stored_p |= (ref->stored && bitmap_bit_p (ref->stored, loop->num));
2189 if (loop->safelen > safelen
2190 /* Check that REF is accessed inside LOOP. */
2191 && (loop == ref_loop || flow_loop_nested_p (loop, ref_loop)))
2192 safelen = loop->safelen;
2194 bool indep_p = true;
2195 bitmap refs_to_check;
2197 if (stored_p)
2198 refs_to_check = &memory_accesses.refs_in_loop[loop->num];
2199 else
2200 refs_to_check = &memory_accesses.refs_stored_in_loop[loop->num];
2202 if (bitmap_bit_p (refs_to_check, UNANALYZABLE_MEM_ID))
2203 indep_p = false;
2204 else if (safelen > 1)
2206 if (dump_file && (dump_flags & TDF_DETAILS))
2208 fprintf (dump_file, "REF is independent due to safelen %d\n",
2209 safelen);
2210 print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM);
2211 fprintf (dump_file, "\n");
2214 /* We need to recurse to properly handle UNANALYZABLE_MEM_ID. */
2215 struct loop *inner = loop->inner;
2216 while (inner)
2218 if (!ref_indep_loop_p_1 (safelen, inner, ref, stored_p, ref_loop))
2220 indep_p = false;
2221 break;
2223 inner = inner->next;
2226 /* Avoid caching here as safelen depends on context and refs
2227 are shared between different contexts. */
2228 return indep_p;
2230 else
2232 if (bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2233 return true;
2234 if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2235 return false;
2237 struct loop *inner = loop->inner;
2238 while (inner)
2240 if (!ref_indep_loop_p_1 (safelen, inner, ref, stored_p, ref_loop))
2242 indep_p = false;
2243 break;
2245 inner = inner->next;
2248 if (indep_p)
2250 unsigned i;
2251 bitmap_iterator bi;
2252 EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
2254 im_mem_ref *aref = memory_accesses.refs_list[i];
2255 if (!refs_independent_p (ref, aref))
2257 indep_p = false;
2258 break;
2264 if (dump_file && (dump_flags & TDF_DETAILS))
2265 fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
2266 ref->id, loop->num, indep_p ? "independent" : "dependent");
2268 /* Record the computed result in the cache. */
2269 if (indep_p)
2271 if (bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p))
2272 && stored_p)
2274 /* If it's independent of all refs then it's independent
2275 of stores, too. */
2276 bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, false));
2279 else
2281 record_dep_loop (loop, ref, stored_p);
2282 if (!stored_p)
2284 /* If it's dependent on stores it's dependent on
2285 all refs, too. */
2286 record_dep_loop (loop, ref, true);
2290 return indep_p;
2293 /* Returns true if REF is independent of all other memory references in
2294 LOOP. REF_LOOP is the loop where REF is accessed. */
2296 static bool
2297 ref_indep_loop_p (struct loop *loop, im_mem_ref *ref, struct loop *ref_loop)
2299 gcc_checking_assert (MEM_ANALYZABLE (ref));
2301 return ref_indep_loop_p_1 (0, loop, ref, false, ref_loop);
2304 /* Returns true if we can perform store motion of REF from LOOP. */
2306 static bool
2307 can_sm_ref_p (struct loop *loop, im_mem_ref *ref)
2309 tree base;
2311 /* Can't hoist unanalyzable refs. */
2312 if (!MEM_ANALYZABLE (ref))
2313 return false;
2315 /* It should be movable. */
2316 if (!is_gimple_reg_type (TREE_TYPE (ref->mem.ref))
2317 || TREE_THIS_VOLATILE (ref->mem.ref)
2318 || !for_each_index (&ref->mem.ref, may_move_till, loop))
2319 return false;
2321 /* If it can throw, fail; we do not properly update EH info. */
2322 if (tree_could_throw_p (ref->mem.ref))
2323 return false;
2325 /* If it can trap, it must always be executed in LOOP.
2326 Readonly memory locations may trap when storing to them, but
2327 tree_could_trap_p is a predicate for rvalues, so check that
2328 explicitly. */
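  /* For example (hypothetical), in

       for (...)
	 if (cond)
	   *p = ...;

     the store is conditional; emitting it unconditionally on the loop
     exits could introduce a trap (or a fault on a read-only location)
     that the original program never executed.  */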
2329 base = get_base_address (ref->mem.ref);
2330 if ((tree_could_trap_p (ref->mem.ref)
2331 || (DECL_P (base) && TREE_READONLY (base)))
2332 && !ref_always_accessed_p (loop, ref, true))
2333 return false;
2335 /* And it must be independent of all other memory references
2336 in LOOP. */
2337 if (!ref_indep_loop_p (loop, ref, loop))
2338 return false;
2340 return true;
2343 /* Marks in REFS_TO_SM the references in LOOP for which store motion should
2344 be performed. SM_EXECUTED is the set of references for which store
2345 motion was already performed in one of the outer loops. */
2347 static void
2348 find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
2350 bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
2351 unsigned i;
2352 bitmap_iterator bi;
2353 im_mem_ref *ref;
2355 EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
2357 ref = memory_accesses.refs_list[i];
2358 if (can_sm_ref_p (loop, ref))
2359 bitmap_set_bit (refs_to_sm, i);
2363 /* Checks whether LOOP (with exits stored in the EXITS array) is suitable
2364 for a store motion optimization (i.e. whether we can insert statements
2365 on its exits). */
2367 static bool
2368 loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
2369 vec<edge> exits)
2371 unsigned i;
2372 edge ex;
2374 FOR_EACH_VEC_ELT (exits, i, ex)
2375 if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
2376 return false;
2378 return true;
2381 /* Try to perform store motion for all memory references modified inside
2382 LOOP. SM_EXECUTED is the bitmap of the memory references for which
2383 store motion was already executed in one of the outer loops. */
2385 static void
2386 store_motion_loop (struct loop *loop, bitmap sm_executed)
2388 vec<edge> exits = get_loop_exit_edges (loop);
2389 struct loop *subloop;
2390 bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);
2392 if (loop_suitable_for_sm (loop, exits))
2394 find_refs_for_sm (loop, sm_executed, sm_in_loop);
2395 hoist_memory_references (loop, sm_in_loop, exits);
2397 exits.release ();
2399 bitmap_ior_into (sm_executed, sm_in_loop);
2400 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
2401 store_motion_loop (subloop, sm_executed);
2402 bitmap_and_compl_into (sm_executed, sm_in_loop);
2403 BITMAP_FREE (sm_in_loop);
2406 /* Try to perform store motion for all memory references modified inside
2407 loops. */
2409 static void
2410 store_motion (void)
2412 struct loop *loop;
2413 bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);
2415 for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
2416 store_motion_loop (loop, sm_executed);
2418 BITMAP_FREE (sm_executed);
2419 gsi_commit_edge_inserts ();
2422 /* Fills ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e.
2423 for each such basic block bb records the outermost loop for which execution
2424 of its header implies execution of bb. CONTAINS_CALL is the bitmap of
2425 blocks that contain a nonpure call. */
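/* Illustrative example (made up): in

     while (1)
       {
	 a;		<- always executed once the loop is entered
	 if (c)
	   break;
	 b;		<- not always executed
       }

   the block containing A and the exit test gets ALWAYS_EXECUTED_IN set
   (here to this loop, or possibly to an enclosing one), while the block
   containing B does not, because the exit may be taken before it.  */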
2427 static void
2428 fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
2430 basic_block bb = NULL, *bbs, last = NULL;
2431 unsigned i;
2432 edge e;
2433 struct loop *inn_loop = loop;
2435 if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
2437 bbs = get_loop_body_in_dom_order (loop);
2439 for (i = 0; i < loop->num_nodes; i++)
2441 edge_iterator ei;
2442 bb = bbs[i];
2444 if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2445 last = bb;
2447 if (bitmap_bit_p (contains_call, bb->index))
2448 break;
2450 FOR_EACH_EDGE (e, ei, bb->succs)
2452 /* If there is an exit from this BB. */
2453 if (!flow_bb_inside_loop_p (loop, e->dest))
2454 break;
2455 /* Or we enter a possibly non-finite loop. */
2456 if (flow_loop_nested_p (bb->loop_father,
2457 e->dest->loop_father)
2458 && ! finite_loop_p (e->dest->loop_father))
2459 break;
2461 if (e)
2462 break;
2464 /* A loop might be infinite (TODO use simple loop analysis
2465 to disprove this if possible). */
2466 if (bb->flags & BB_IRREDUCIBLE_LOOP)
2467 break;
2469 if (!flow_bb_inside_loop_p (inn_loop, bb))
2470 break;
2472 if (bb->loop_father->header == bb)
2474 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2475 break;
2477 /* In a loop that is always entered we may proceed anyway.
2478 But record that we entered it and stop once we leave it. */
2479 inn_loop = bb->loop_father;
2483 while (1)
2485 SET_ALWAYS_EXECUTED_IN (last, loop);
2486 if (last == loop->header)
2487 break;
2488 last = get_immediate_dominator (CDI_DOMINATORS, last);
2491 free (bbs);
2494 for (loop = loop->inner; loop; loop = loop->next)
2495 fill_always_executed_in_1 (loop, contains_call);
2498 /* Fills ALWAYS_EXECUTED_IN information for basic blocks, i.e.
2499 for each such basic block bb records the outermost loop for which execution
2500 of its header implies execution of bb. */
2502 static void
2503 fill_always_executed_in (void)
2505 basic_block bb;
2506 struct loop *loop;
2508 auto_sbitmap contains_call (last_basic_block_for_fn (cfun));
2509 bitmap_clear (contains_call);
2510 FOR_EACH_BB_FN (bb, cfun)
2512 gimple_stmt_iterator gsi;
2513 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2515 if (nonpure_call_p (gsi_stmt (gsi)))
2516 break;
2519 if (!gsi_end_p (gsi))
2520 bitmap_set_bit (contains_call, bb->index);
2523 for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
2524 fill_always_executed_in_1 (loop, contains_call);
2528 /* Compute the global information needed by the loop invariant motion pass. */
2530 static void
2531 tree_ssa_lim_initialize (void)
2533 struct loop *loop;
2534 unsigned i;
2536 bitmap_obstack_initialize (&lim_bitmap_obstack);
2537 gcc_obstack_init (&mem_ref_obstack);
2538 lim_aux_data_map = new hash_map<gimple *, lim_aux_data *>;
2540 if (flag_tm)
2541 compute_transaction_bits ();
2543 alloc_aux_for_edges (0);
2545 memory_accesses.refs = new hash_table<mem_ref_hasher> (100);
2546 memory_accesses.refs_list.create (100);
2547 /* Allocate a special, unanalyzable mem-ref with ID zero. */
2548 memory_accesses.refs_list.quick_push
2549 (mem_ref_alloc (error_mark_node, 0, UNANALYZABLE_MEM_ID));
2551 memory_accesses.refs_in_loop.create (number_of_loops (cfun));
2552 memory_accesses.refs_in_loop.quick_grow (number_of_loops (cfun));
2553 memory_accesses.refs_stored_in_loop.create (number_of_loops (cfun));
2554 memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2555 memory_accesses.all_refs_stored_in_loop.create (number_of_loops (cfun));
2556 memory_accesses.all_refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2558 for (i = 0; i < number_of_loops (cfun); i++)
2560 bitmap_initialize (&memory_accesses.refs_in_loop[i],
2561 &lim_bitmap_obstack);
2562 bitmap_initialize (&memory_accesses.refs_stored_in_loop[i],
2563 &lim_bitmap_obstack);
2564 bitmap_initialize (&memory_accesses.all_refs_stored_in_loop[i],
2565 &lim_bitmap_obstack);
2568 memory_accesses.ttae_cache = NULL;
2570 /* Initialize bb_loop_postorder with a mapping from loop->num to
2571 its postorder index. */
2572 i = 0;
2573 bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun));
2574 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
2575 bb_loop_postorder[loop->num] = i++;
2578 /* Cleans up after the invariant motion pass. */
2580 static void
2581 tree_ssa_lim_finalize (void)
2583 basic_block bb;
2584 unsigned i;
2585 im_mem_ref *ref;
2587 free_aux_for_edges ();
2589 FOR_EACH_BB_FN (bb, cfun)
2590 SET_ALWAYS_EXECUTED_IN (bb, NULL);
2592 bitmap_obstack_release (&lim_bitmap_obstack);
2593 delete lim_aux_data_map;
2595 delete memory_accesses.refs;
2596 memory_accesses.refs = NULL;
2598 FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
2599 memref_free (ref);
2600 memory_accesses.refs_list.release ();
2601 obstack_free (&mem_ref_obstack, NULL);
2603 memory_accesses.refs_in_loop.release ();
2604 memory_accesses.refs_stored_in_loop.release ();
2605 memory_accesses.all_refs_stored_in_loop.release ();
2607 if (memory_accesses.ttae_cache)
2608 free_affine_expand_cache (&memory_accesses.ttae_cache);
2610 free (bb_loop_postorder);
2613 /* Moves invariants from loops. Only "expensive" invariants are moved out --
2614 i.e. those that are likely to be a win regardless of the register pressure. */
2616 static unsigned int
2617 tree_ssa_lim (void)
2619 unsigned int todo;
2621 tree_ssa_lim_initialize ();
2623 /* Gathers information about memory accesses in the loops. */
2624 analyze_memory_references ();
2626 /* Fills ALWAYS_EXECUTED_IN information for basic blocks. */
2627 fill_always_executed_in ();
2629 /* For each statement determine the outermost loop in which it is
2630 invariant and the cost of computing the invariant. */
2631 invariantness_dom_walker (CDI_DOMINATORS)
2632 .walk (cfun->cfg->x_entry_block_ptr);
2634 /* Execute store motion. Force the necessary invariants to be moved
2635 out of the loops as well. */
2636 store_motion ();
2638 /* Move the expressions that are expensive enough. */
2639 todo = move_computations ();
2641 tree_ssa_lim_finalize ();
2643 return todo;
2646 /* Loop invariant motion pass. */
2648 namespace {
2650 const pass_data pass_data_lim =
2652 GIMPLE_PASS, /* type */
2653 "lim", /* name */
2654 OPTGROUP_LOOP, /* optinfo_flags */
2655 TV_LIM, /* tv_id */
2656 PROP_cfg, /* properties_required */
2657 0, /* properties_provided */
2658 0, /* properties_destroyed */
2659 0, /* todo_flags_start */
2660 0, /* todo_flags_finish */
2663 class pass_lim : public gimple_opt_pass
2665 public:
2666 pass_lim (gcc::context *ctxt)
2667 : gimple_opt_pass (pass_data_lim, ctxt)
2670 /* opt_pass methods: */
2671 opt_pass * clone () { return new pass_lim (m_ctxt); }
2672 virtual bool gate (function *) { return flag_tree_loop_im != 0; }
2673 virtual unsigned int execute (function *);
2675 }; // class pass_lim
2677 unsigned int
2678 pass_lim::execute (function *fun)
2680 bool in_loop_pipeline = scev_initialized_p ();
2681 if (!in_loop_pipeline)
2682 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
2684 if (number_of_loops (fun) <= 1)
2685 return 0;
2686 unsigned int todo = tree_ssa_lim ();
2688 if (!in_loop_pipeline)
2689 loop_optimizer_finalize ();
2690 return todo;
2693 } // anon namespace
2695 gimple_opt_pass *
2696 make_pass_lim (gcc::context *ctxt)
2698 return new pass_lim (ctxt);