/* Loop invariant motion.
   Copyright (C) 2003-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "cfganal.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "cfgloop.h"
#include "domwalk.h"
#include "tree-affine.h"
#include "tree-ssa-propagate.h"
#include "trans-mem.h"
#include "gimple-fold.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-loop-niter.h"
#include "alias.h"
#include "builtins.h"
#include "tree-dfa.h"
/* TODO:  Support for predicated code motion.  I.e.

   while (1)
     {
       if (cond)
	 {
	   a = inv;
	   something;
	 }
     }

   Where COND and INV are invariants, but evaluating INV may trap or be
   invalid from some other reason if !COND.  This may be transformed to

   if (cond)
     a = inv;
   while (1)
     {
       if (cond)
	 something;
     }  */
/* The auxiliary data kept for each statement.  */

struct lim_aux_data
{
  class loop *max_loop;		/* The outermost loop in that the statement
				   is invariant.  */

  class loop *tgt_loop;		/* The loop out of that we want to move the
				   invariant.  */

  class loop *always_executed_in;
				/* The outermost loop for that we are sure
				   the statement is executed if the loop
				   is entered.  */

  unsigned cost;		/* Cost of the computation performed by the
				   statement.  */

  unsigned ref;			/* The simple_mem_ref in this stmt or 0.  */

  vec<gimple *> depends;	/* Vector of statements that must be also
				   hoisted out of the loop when this statement
				   is hoisted; i.e. those that define the
				   operands of the statement and are inside of
				   the MAX_LOOP loop.  */
};
/* Maps statements to their lim_aux_data.  */

static hash_map<gimple *, lim_aux_data *> *lim_aux_data_map;

/* Description of a memory reference location.  */

struct mem_ref_loc
{
  tree *ref;			/* The reference itself.  */
  gimple *stmt;			/* The statement in that it occurs.  */
};

/* Description of a memory reference.  */

class im_mem_ref
{
public:
  unsigned id : 30;		/* ID assigned to the memory reference
				   (its index in memory_accesses.refs_list)  */
  unsigned ref_canonical : 1;   /* Whether mem.ref was canonicalized.  */
  unsigned ref_decomposed : 1;  /* Whether the ref was hashed from mem.  */
  hashval_t hash;		/* Its hash value.  */

  /* The memory access itself and associated caching of alias-oracle
     query meta-data.  */
  ao_ref mem;

  bitmap stored;		/* The set of loops in that this memory location
				   is stored to.  */
  vec<mem_ref_loc> accesses_in_loop;
				/* The locations of the accesses.  Vector
				   indexed by the loop number.  */

  /* The following sets are computed on demand.  We keep both set and
     its complement, so that we know whether the information was
     already computed or not.  */
  bitmap_head indep_loop;	/* The set of loops in that the memory
				   reference is independent, meaning:
				   If it is stored in the loop, this store
				     is independent on all other loads and
				     stores.
				   If it is only loaded, then it is independent
				     on all stores in the loop.  */
  bitmap_head dep_loop;		/* The complement of INDEP_LOOP.  */
};
/* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first
   to record (in)dependence against stores in the loop and its subloops, the
   second to record (in)dependence against all references in the loop
   and its subloops.  */
#define LOOP_DEP_BIT(loopnum, storedp) (2 * (loopnum) + (storedp ? 1 : 0))
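
/* For illustration (an editorial example, not from the original sources):
   for loop number 3, LOOP_DEP_BIT (3, 1) == 7 is the bit recording
   (in)dependence against stores, and LOOP_DEP_BIT (3, 0) == 6 the bit
   recording (in)dependence against all references, so each loop owns the
   adjacent bit pair { 2 * num, 2 * num + 1 } in indep_loop/dep_loop.  */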
/* Mem_ref hashtable helpers.  */

struct mem_ref_hasher : nofree_ptr_hash <im_mem_ref>
{
  typedef ao_ref *compare_type;
  static inline hashval_t hash (const im_mem_ref *);
  static inline bool equal (const im_mem_ref *, const ao_ref *);
};

/* A hash function for class im_mem_ref object OBJ.  */

inline hashval_t
mem_ref_hasher::hash (const im_mem_ref *mem)
{
  return mem->hash;
}

/* An equality function for class im_mem_ref object MEM1 with
   memory reference OBJ2.  */

inline bool
mem_ref_hasher::equal (const im_mem_ref *mem1, const ao_ref *obj2)
{
  if (obj2->max_size_known_p ())
    return (mem1->ref_decomposed
	    && operand_equal_p (mem1->mem.base, obj2->base, 0)
	    && known_eq (mem1->mem.offset, obj2->offset)
	    && known_eq (mem1->mem.size, obj2->size)
	    && known_eq (mem1->mem.max_size, obj2->max_size)
	    && mem1->mem.volatile_p == obj2->volatile_p
	    && (mem1->mem.ref_alias_set == obj2->ref_alias_set
		/* We are not canonicalizing alias-sets but for the
		   special-case we didn't canonicalize yet and the
		   incoming ref is an alias-set zero MEM we pick
		   the correct one already.  */
		|| (!mem1->ref_canonical
		    && (TREE_CODE (obj2->ref) == MEM_REF
			|| TREE_CODE (obj2->ref) == TARGET_MEM_REF)
		    && obj2->ref_alias_set == 0)
		/* Likewise if there's a canonical ref with alias-set zero.  */
		|| (mem1->ref_canonical && mem1->mem.ref_alias_set == 0))
	    && types_compatible_p (TREE_TYPE (mem1->mem.ref),
				   TREE_TYPE (obj2->ref)));
  else
    return operand_equal_p (mem1->mem.ref, obj2->ref, 0);
}
/* Description of memory accesses in loops.  */

static struct
{
  /* The hash table of memory references accessed in loops.  */
  hash_table<mem_ref_hasher> *refs;

  /* The list of memory references.  */
  vec<im_mem_ref *> refs_list;

  /* The set of memory references accessed in each loop.  */
  vec<bitmap_head> refs_in_loop;

  /* The set of memory references stored in each loop.  */
  vec<bitmap_head> refs_stored_in_loop;

  /* The set of memory references stored in each loop, including subloops.  */
  vec<bitmap_head> all_refs_stored_in_loop;

  /* Cache for expanding memory addresses.  */
  hash_map<tree, name_expansion *> *ttae_cache;
} memory_accesses;
/* Obstack for the bitmaps in the above data structures.  */
static bitmap_obstack lim_bitmap_obstack;
static obstack mem_ref_obstack;

static bool ref_indep_loop_p (class loop *, im_mem_ref *);
static bool ref_always_accessed_p (class loop *, im_mem_ref *, bool);

/* Minimum cost of an expensive expression.  */
#define LIM_EXPENSIVE ((unsigned) param_lim_expensive)

/* The outermost loop for which execution of the header guarantees that the
   block will be executed.  */
#define ALWAYS_EXECUTED_IN(BB) ((class loop *) (BB)->aux)
#define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))

/* ID of the shared unanalyzable mem.  */
#define UNANALYZABLE_MEM_ID 0

/* Whether the reference was analyzable.  */
#define MEM_ANALYZABLE(REF) ((REF)->id != UNANALYZABLE_MEM_ID)
static struct lim_aux_data *
init_lim_data (gimple *stmt)
{
  lim_aux_data *p = XCNEW (struct lim_aux_data);
  lim_aux_data_map->put (stmt, p);

  return p;
}

static struct lim_aux_data *
get_lim_data (gimple *stmt)
{
  lim_aux_data **p = lim_aux_data_map->get (stmt);
  if (!p)
    return NULL;

  return *p;
}

/* Releases the memory occupied by DATA.  */

static void
free_lim_aux_data (struct lim_aux_data *data)
{
  data->depends.release ();
  free (data);
}

static void
clear_lim_data (gimple *stmt)
{
  lim_aux_data **p = lim_aux_data_map->get (stmt);
  if (!p)
    return;

  free_lim_aux_data (*p);
  *p = NULL;
}
/* The possibilities of statement movement.  */
enum move_pos
  {
    MOVE_IMPOSSIBLE,		/* No movement -- side effect expression.  */
    MOVE_PRESERVE_EXECUTION,	/* Must not cause the non-executed statement
				   become executed -- memory accesses, ... */
    MOVE_POSSIBLE		/* Unlimited movement.  */
  };
/* If it is possible to hoist the statement STMT unconditionally,
   returns MOVE_POSSIBLE.
   If it is possible to hoist the statement STMT, but we must avoid making
   it executed if it would not be executed in the original program (e.g.
   because it may trap), return MOVE_PRESERVE_EXECUTION.
   Otherwise return MOVE_IMPOSSIBLE.  */

enum move_pos
movement_possibility (gimple *stmt)
{
  tree lhs;
  enum move_pos ret = MOVE_POSSIBLE;

  if (flag_unswitch_loops
      && gimple_code (stmt) == GIMPLE_COND)
    {
      /* If we perform unswitching, force the operands of the invariant
	 condition to be moved out of the loop.  */
      return MOVE_POSSIBLE;
    }

  if (gimple_code (stmt) == GIMPLE_PHI
      && gimple_phi_num_args (stmt) <= 2
      && !virtual_operand_p (gimple_phi_result (stmt))
      && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
    return MOVE_POSSIBLE;

  if (gimple_get_lhs (stmt) == NULL_TREE)
    return MOVE_IMPOSSIBLE;

  if (gimple_vdef (stmt))
    return MOVE_IMPOSSIBLE;

  if (stmt_ends_bb_p (stmt)
      || gimple_has_volatile_ops (stmt)
      || gimple_has_side_effects (stmt)
      || stmt_could_throw_p (cfun, stmt))
    return MOVE_IMPOSSIBLE;

  if (is_gimple_call (stmt))
    {
      /* While pure or const call is guaranteed to have no side effects, we
	 cannot move it arbitrarily.  Consider code like

	 char *s = something ();

	 while (1)
	   {
	     if (s)
	       t = strlen (s);
	     else
	       t = 0;
	   }

	 Here the strlen call cannot be moved out of the loop, even though
	 s is invariant.  In addition to possibly creating a call with
	 invalid arguments, moving out a function call that is not executed
	 may cause performance regressions in case the call is costly and
	 not executed at all.  */
      ret = MOVE_PRESERVE_EXECUTION;
      lhs = gimple_call_lhs (stmt);
    }
  else if (is_gimple_assign (stmt))
    lhs = gimple_assign_lhs (stmt);
  else
    return MOVE_IMPOSSIBLE;

  if (TREE_CODE (lhs) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
    return MOVE_IMPOSSIBLE;

  if (TREE_CODE (lhs) != SSA_NAME
      || gimple_could_trap_p (stmt))
    return MOVE_PRESERVE_EXECUTION;

  /* Non local loads in a transaction cannot be hoisted out.  Well,
     unless the load happens on every path out of the loop, but we
     don't take this into account yet.  */
  if (flag_tm
      && gimple_in_transaction (stmt)
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      if (DECL_P (rhs) && is_global_var (rhs))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "Cannot hoist conditional load of ");
	      print_generic_expr (dump_file, rhs, TDF_SLIM);
	      fprintf (dump_file, " because it is in a transaction.\n");
	    }
	  return MOVE_IMPOSSIBLE;
	}
    }

  return ret;
}
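
/* An illustrative summary of the classification above (hypothetical
   statements, editorial note rather than original sources):

     tmp_1 = a_2 + b_3;     MOVE_POSSIBLE           (pure arithmetic)
     tmp_1 = a_2 / b_3;     MOVE_PRESERVE_EXECUTION (may trap if b_3 == 0)
     tmp_1 = strlen (s_2);  MOVE_PRESERVE_EXECUTION (pure/const call)
     *p_1 = tmp_2;          MOVE_IMPOSSIBLE         (creates a virtual def)
     tmp_1 = volatile_var;  MOVE_IMPOSSIBLE         (volatile operand)  */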
/* Suppose that operand DEF is used inside the LOOP.  Returns the outermost
   loop to that we could move the expression using DEF if it did not have
   other operands, i.e. the outermost loop enclosing LOOP in that the value
   of DEF is invariant.  */

static class loop *
outermost_invariant_loop (tree def, class loop *loop)
{
  gimple *def_stmt;
  basic_block def_bb;
  class loop *max_loop;
  struct lim_aux_data *lim_data;

  if (!def)
    return superloop_at_depth (loop, 1);

  if (TREE_CODE (def) != SSA_NAME)
    {
      gcc_assert (is_gimple_min_invariant (def));
      return superloop_at_depth (loop, 1);
    }

  def_stmt = SSA_NAME_DEF_STMT (def);
  def_bb = gimple_bb (def_stmt);
  if (!def_bb)
    return superloop_at_depth (loop, 1);

  max_loop = find_common_loop (loop, def_bb->loop_father);

  lim_data = get_lim_data (def_stmt);
  if (lim_data != NULL && lim_data->max_loop != NULL)
    max_loop = find_common_loop (max_loop,
				 loop_outer (lim_data->max_loop));
  if (max_loop == loop)
    return NULL;
  max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);

  return max_loop;
}
/* DATA is a structure containing information associated with a statement
   inside LOOP.  DEF is one of the operands of this statement.

   Find the outermost loop enclosing LOOP in that value of DEF is invariant
   and record this in DATA->max_loop field.  If DEF itself is defined inside
   this loop as well (i.e. we need to hoist it out of the loop if we want
   to hoist the statement represented by DATA), record the statement in that
   DEF is defined to the DATA->depends list.  Additionally if ADD_COST is true,
   add the cost of the computation of DEF to the DATA->cost.

   If DEF is not invariant in LOOP, return false.  Otherwise return TRUE.  */

static bool
add_dependency (tree def, struct lim_aux_data *data, class loop *loop,
		bool add_cost)
{
  gimple *def_stmt = SSA_NAME_DEF_STMT (def);
  basic_block def_bb = gimple_bb (def_stmt);
  class loop *max_loop;
  struct lim_aux_data *def_data;

  if (!def_bb)
    return true;

  max_loop = outermost_invariant_loop (def, loop);
  if (!max_loop)
    return false;

  if (flow_loop_nested_p (data->max_loop, max_loop))
    data->max_loop = max_loop;

  def_data = get_lim_data (def_stmt);
  if (!def_data)
    return true;

  if (add_cost
      /* Only add the cost if the statement defining DEF is inside LOOP,
	 i.e. if it is likely that by moving the invariants dependent
	 on it, we will be able to avoid creating a new register for
	 it (since it will be only used in these dependent invariants).  */
      && def_bb->loop_father == loop)
    data->cost += def_data->cost;

  data->depends.safe_push (def_stmt);

  return true;
}
/* Returns an estimate for a cost of statement STMT.  The values here
   are just ad-hoc constants, similar to costs for inlining.  */

static unsigned
stmt_cost (gimple *stmt)
{
  /* Always try to create possibilities for unswitching.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_PHI)
    return LIM_EXPENSIVE;

  /* We should be hoisting calls if possible.  */
  if (is_gimple_call (stmt))
    {
      tree fndecl;

      /* Unless the call is a builtin_constant_p; this always folds to a
	 constant, so moving it is useless.  */
      fndecl = gimple_call_fndecl (stmt);
      if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_CONSTANT_P))
	return 0;

      return LIM_EXPENSIVE;
    }

  /* Hoisting memory references out should almost surely be a win.  */
  if (gimple_references_memory_p (stmt))
    return LIM_EXPENSIVE;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return 1;

  switch (gimple_assign_rhs_code (stmt))
    {
    case MULT_EXPR:
    case WIDEN_MULT_EXPR:
    case WIDEN_MULT_PLUS_EXPR:
    case WIDEN_MULT_MINUS_EXPR:
    case DOT_PROD_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case TRUNC_MOD_EXPR:
    case RDIV_EXPR:
      /* Division and multiplication are usually expensive.  */
      return LIM_EXPENSIVE;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case WIDEN_LSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      /* Shifts and rotates are usually expensive.  */
      return LIM_EXPENSIVE;

    case CONSTRUCTOR:
      /* Make vector construction cost proportional to the number
	 of elements.  */
      return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));

    case SSA_NAME:
    case PAREN_EXPR:
      /* Whether or not something is wrapped inside a PAREN_EXPR
	 should not change move cost.  Nor should an intermediate
	 unpropagated SSA name copy.  */
      return 0;

    default:
      return 1;
    }
}
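
/* For example (editorial note, assuming the default --param lim-expensive=20):
   a multiplication or division costs LIM_EXPENSIVE == 20 and is always
   considered worth hoisting, a 4-element vector CONSTRUCTOR costs 4, and a
   plain SSA copy or PAREN_EXPR costs 0 since moving it saves nothing.  */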
/* Finds the outermost loop between OUTER and LOOP in that the memory reference
   REF is independent.  If REF is not independent in LOOP, NULL is returned
   instead.  */

static class loop *
outermost_indep_loop (class loop *outer, class loop *loop, im_mem_ref *ref)
{
  class loop *aloop;

  if (ref->stored && bitmap_bit_p (ref->stored, loop->num))
    return NULL;

  for (aloop = outer;
       aloop != loop;
       aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
    if ((!ref->stored || !bitmap_bit_p (ref->stored, aloop->num))
	&& ref_indep_loop_p (aloop, ref))
      return aloop;

  if (ref_indep_loop_p (loop, ref))
    return loop;
  else
    return NULL;
}
/* If there is a simple load or store to a memory reference in STMT, returns
   the location of the memory reference, and sets IS_STORE according to whether
   it is a store or load.  Otherwise, returns NULL.  */

static tree *
simple_mem_ref_in_stmt (gimple *stmt, bool *is_store)
{
  tree *lhs, *rhs;

  /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns.  */
  if (!gimple_assign_single_p (stmt))
    return NULL;

  lhs = gimple_assign_lhs_ptr (stmt);
  rhs = gimple_assign_rhs1_ptr (stmt);

  if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
    {
      *is_store = false;
      return rhs;
    }
  else if (gimple_vdef (stmt)
	   && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
    {
      *is_store = true;
      return lhs;
    }
  else
    return NULL;
}
/* From a controlling predicate in DOM determine the arguments from
   the PHI node PHI that are chosen if the predicate evaluates to
   true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if
   they are non-NULL.  Returns true if the arguments can be determined,
   else return false.  */

static bool
extract_true_false_args_from_phi (basic_block dom, gphi *phi,
				  tree *true_arg_p, tree *false_arg_p)
{
  edge te, fe;
  if (! extract_true_false_controlled_edges (dom, gimple_bb (phi),
					     &te, &fe))
    return false;

  if (true_arg_p)
    *true_arg_p = PHI_ARG_DEF (phi, te->dest_idx);
  if (false_arg_p)
    *false_arg_p = PHI_ARG_DEF (phi, fe->dest_idx);

  return true;
}
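
/* A sketch of the CFG shape this recognizes (hypothetical GIMPLE,
   editorial note):

     <dom>:  if (x_1 > 0) goto <then>; else goto <else>;
     <then>: ...
     <else>: ...
     <join>: res_2 = PHI <a_3(then), b_4(else)>

   Here *TRUE_ARG_P would be set to a_3 and *FALSE_ARG_P to b_4.  */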
/* Determine the outermost loop to that it is possible to hoist a statement
   STMT and store it to LIM_DATA (STMT)->max_loop.  To do this we determine
   the outermost loop in that the value computed by STMT is invariant.
   If MUST_PRESERVE_EXEC is true, additionally choose such a loop that
   we preserve the fact whether STMT is executed.  It also fills other related
   information to LIM_DATA (STMT).

   The function returns false if STMT cannot be hoisted outside of the loop it
   is defined in, and true otherwise.  */

static bool
determine_max_movement (gimple *stmt, bool must_preserve_exec)
{
  basic_block bb = gimple_bb (stmt);
  class loop *loop = bb->loop_father;
  class loop *level;
  struct lim_aux_data *lim_data = get_lim_data (stmt);
  tree val;
  ssa_op_iter iter;

  if (must_preserve_exec)
    level = ALWAYS_EXECUTED_IN (bb);
  else
    level = superloop_at_depth (loop, 1);
  lim_data->max_loop = level;

  if (gphi *phi = dyn_cast <gphi *> (stmt))
    {
      use_operand_p use_p;
      unsigned min_cost = UINT_MAX;
      unsigned total_cost = 0;
      struct lim_aux_data *def_data;

      /* We will end up promoting dependencies to be unconditionally
	 evaluated.  For this reason the PHI cost (and thus the
	 cost we remove from the loop by doing the invariant motion)
	 is that of the cheapest PHI argument dependency chain.  */
      FOR_EACH_PHI_ARG (use_p, phi, iter, SSA_OP_USE)
	{
	  val = USE_FROM_PTR (use_p);

	  if (TREE_CODE (val) != SSA_NAME)
	    {
	      /* Assign const 1 to constants.  */
	      min_cost = MIN (min_cost, 1);
	      total_cost += 1;
	      continue;
	    }
	  if (!add_dependency (val, lim_data, loop, false))
	    return false;

	  gimple *def_stmt = SSA_NAME_DEF_STMT (val);
	  if (gimple_bb (def_stmt)
	      && gimple_bb (def_stmt)->loop_father == loop)
	    {
	      def_data = get_lim_data (def_stmt);
	      if (def_data)
		{
		  min_cost = MIN (min_cost, def_data->cost);
		  total_cost += def_data->cost;
		}
	    }
	}

      min_cost = MIN (min_cost, total_cost);
      lim_data->cost += min_cost;

      if (gimple_phi_num_args (phi) > 1)
	{
	  basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
	  gimple *cond;
	  if (gsi_end_p (gsi_last_bb (dom)))
	    return false;
	  cond = gsi_stmt (gsi_last_bb (dom));
	  if (gimple_code (cond) != GIMPLE_COND)
	    return false;
	  /* Verify that this is an extended form of a diamond and
	     the PHI arguments are completely controlled by the
	     predicate in DOM.  */
	  if (!extract_true_false_args_from_phi (dom, phi, NULL, NULL))
	    return false;

	  /* Fold in dependencies and cost of the condition.  */
	  FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
	    {
	      if (!add_dependency (val, lim_data, loop, false))
		return false;
	      def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
	      if (def_data)
		lim_data->cost += def_data->cost;
	    }

	  /* We want to avoid unconditionally executing very expensive
	     operations.  As costs for our dependencies cannot be
	     negative just claim we are not invariant for this case.
	     We also are not sure whether the control-flow inside the
	     loop will vanish.  */
	  if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
	      && !(min_cost != 0
		   && total_cost / min_cost <= 2))
	    return false;

	  /* Assume that the control-flow in the loop will vanish.
	     ??? We should verify this and not artificially increase
	     the cost if that is not the case.  */
	  lim_data->cost += stmt_cost (stmt);
	}

      return true;
    }
  else
    FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
      if (!add_dependency (val, lim_data, loop, true))
	return false;

  if (gimple_vuse (stmt))
    {
      im_mem_ref *ref
	= lim_data ? memory_accesses.refs_list[lim_data->ref] : NULL;
      if (ref
	  && MEM_ANALYZABLE (ref))
	{
	  lim_data->max_loop = outermost_indep_loop (lim_data->max_loop,
						     loop, ref);
	  if (!lim_data->max_loop)
	    return false;
	}
      else if (! add_dependency (gimple_vuse (stmt), lim_data, loop, false))
	return false;
    }

  lim_data->cost += stmt_cost (stmt);

  return true;
}
/* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
   and that one of the operands of this statement is computed by STMT.
   Ensure that STMT (together with all the statements that define its
   operands) is hoisted at least out of the loop LEVEL.  */

static void
set_level (gimple *stmt, class loop *orig_loop, class loop *level)
{
  class loop *stmt_loop = gimple_bb (stmt)->loop_father;
  struct lim_aux_data *lim_data;
  gimple *dep_stmt;
  unsigned i;

  stmt_loop = find_common_loop (orig_loop, stmt_loop);
  lim_data = get_lim_data (stmt);
  if (lim_data != NULL && lim_data->tgt_loop != NULL)
    stmt_loop = find_common_loop (stmt_loop,
				  loop_outer (lim_data->tgt_loop));
  if (flow_loop_nested_p (stmt_loop, level))
    return;

  gcc_assert (level == lim_data->max_loop
	      || flow_loop_nested_p (lim_data->max_loop, level));

  lim_data->tgt_loop = level;
  FOR_EACH_VEC_ELT (lim_data->depends, i, dep_stmt)
    set_level (dep_stmt, orig_loop, level);
}
/* Determines an outermost loop from that we want to hoist the statement STMT.
   For now we choose the outermost possible loop.  TODO -- use profiling
   information to set it more sanely.  */

static void
set_profitable_level (gimple *stmt)
{
  set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
}
/* Returns true if STMT is a call that has side effects.  */

static bool
nonpure_call_p (gimple *stmt)
{
  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  return gimple_has_side_effects (stmt);
}
/* Rewrite a/b to a*(1/b).  Return the invariant stmt to process.  */

static gimple *
rewrite_reciprocal (gimple_stmt_iterator *bsi)
{
  gassign *stmt, *stmt1, *stmt2;
  tree name, lhs, type;
  tree real_one;
  gimple_stmt_iterator gsi;

  stmt = as_a <gassign *> (gsi_stmt (*bsi));
  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);

  real_one = build_one_cst (type);

  name = make_temp_ssa_name (type, NULL, "reciptmp");
  stmt1 = gimple_build_assign (name, RDIV_EXPR, real_one,
			       gimple_assign_rhs2 (stmt));
  stmt2 = gimple_build_assign (lhs, MULT_EXPR, name,
			       gimple_assign_rhs1 (stmt));

  /* Replace division stmt with reciprocal and multiply stmts.
     The multiply stmt is not invariant, so update iterator
     and avoid rescanning.  */
  gsi = *bsi;
  gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
  gsi_replace (&gsi, stmt2, true);

  /* Continue processing with invariant reciprocal statement.  */
  return stmt1;
}
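
/* For illustration (editorial note, assuming -funsafe-math-optimizations
   and an invariant divisor b), the rewrite turns

     for (...)  x = a / b;
   into
     reciptmp_1 = 1.0 / b;     <-- invariant, can be hoisted
     for (...)  x = a * reciptmp_1;

   where "reciptmp" is the temporary created above.  */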
/* Check if the pattern at *BSI is a bittest of the form
   (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0.  */

static gimple *
rewrite_bittest (gimple_stmt_iterator *bsi)
{
  gassign *stmt;
  gimple *stmt1;
  gassign *stmt2;
  gimple *use_stmt;
  gcond *cond_stmt;
  tree lhs, name, t, a, b;
  use_operand_p use;

  stmt = as_a <gassign *> (gsi_stmt (*bsi));
  lhs = gimple_assign_lhs (stmt);

  /* Verify that the single use of lhs is a comparison against zero.  */
  if (TREE_CODE (lhs) != SSA_NAME
      || !single_imm_use (lhs, &use, &use_stmt))
    return stmt;
  cond_stmt = dyn_cast <gcond *> (use_stmt);
  if (!cond_stmt)
    return stmt;
  if (gimple_cond_lhs (cond_stmt) != lhs
      || (gimple_cond_code (cond_stmt) != NE_EXPR
	  && gimple_cond_code (cond_stmt) != EQ_EXPR)
      || !integer_zerop (gimple_cond_rhs (cond_stmt)))
    return stmt;

  /* Get at the operands of the shift.  The rhs is TMP1 & 1.  */
  stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
  if (gimple_code (stmt1) != GIMPLE_ASSIGN)
    return stmt;

  /* There is a conversion in between possibly inserted by fold.  */
  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
    {
      t = gimple_assign_rhs1 (stmt1);
      if (TREE_CODE (t) != SSA_NAME
	  || !has_single_use (t))
	return stmt;
      stmt1 = SSA_NAME_DEF_STMT (t);
      if (gimple_code (stmt1) != GIMPLE_ASSIGN)
	return stmt;
    }

  /* Verify that B is loop invariant but A is not.  Verify that with
     all the stmt walking we are still in the same loop.  */
  if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
      || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
    return stmt;

  a = gimple_assign_rhs1 (stmt1);
  b = gimple_assign_rhs2 (stmt1);

  if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
      && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
    {
      gimple_stmt_iterator rsi;

      /* 1 << B */
      t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
		       build_int_cst (TREE_TYPE (a), 1), b);
      name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
      stmt1 = gimple_build_assign (name, t);

      /* A & (1 << B) */
      t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
      name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
      stmt2 = gimple_build_assign (name, t);

      /* Replace the SSA_NAME we compare against zero.  Adjust
	 the type of zero accordingly.  */
      SET_USE (use, name);
      gimple_cond_set_rhs (cond_stmt,
			   build_int_cst_type (TREE_TYPE (name),
					       0));

      /* Don't use gsi_replace here, none of the new assignments sets
	 the variable originally set in stmt.  Move bsi to stmt1, and
	 then remove the original stmt, so that we get a chance to
	 retain debug info for it.  */
      rsi = *bsi;
      gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
      gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
      gimple *to_release = gsi_stmt (rsi);
      gsi_remove (&rsi, true);
      release_defs (to_release);

      return stmt1;
    }

  return stmt;
}
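
/* For illustration (editorial note), with invariant B this turns the
   hypothetical source

     if ((a >> b) & 1) ...
   into
     shifttmp_1 = 1 << b;      <-- invariant, can be hoisted
     shifttmp_2 = a & shifttmp_1;
     if (shifttmp_2 != 0) ...

   leaving only the AND inside the loop.  */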
/* For each statement determines the outermost loop in that it is invariant,
   statements on whose motion it depends and the cost of the computation.
   This information is stored to the LIM_DATA structure associated with
   each statement.  */
class invariantness_dom_walker : public dom_walker
{
public:
  invariantness_dom_walker (cdi_direction direction)
    : dom_walker (direction) {}

  virtual edge before_dom_children (basic_block);
};
/* Determine the outermost loops in that statements in basic block BB are
   invariant, and record them to the LIM_DATA associated with the statements.
   Callback for dom_walker.  */

edge
invariantness_dom_walker::before_dom_children (basic_block bb)
{
  enum move_pos pos;
  gimple_stmt_iterator bsi;
  gimple *stmt;
  bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
  class loop *outermost = ALWAYS_EXECUTED_IN (bb);
  struct lim_aux_data *lim_data;

  if (!loop_outer (bb->loop_father))
    return NULL;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
	     bb->index, bb->loop_father->num, loop_depth (bb->loop_father));

  /* Look at PHI nodes, but only if there are at most two.
     ??? We could relax this further by post-processing the inserted
     code and transforming adjacent cond-exprs with the same predicate
     to control flow again.  */
  bsi = gsi_start_phis (bb);
  if (!gsi_end_p (bsi)
      && ((gsi_next (&bsi), gsi_end_p (bsi))
	  || (gsi_next (&bsi), gsi_end_p (bsi))))
    for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
      {
	stmt = gsi_stmt (bsi);

	pos = movement_possibility (stmt);
	if (pos == MOVE_IMPOSSIBLE)
	  continue;

	lim_data = get_lim_data (stmt);
	if (! lim_data)
	  lim_data = init_lim_data (stmt);
	lim_data->always_executed_in = outermost;

	if (!determine_max_movement (stmt, false))
	  {
	    lim_data->max_loop = NULL;
	    continue;
	  }

	if (dump_file && (dump_flags & TDF_DETAILS))
	  {
	    print_gimple_stmt (dump_file, stmt, 2);
	    fprintf (dump_file, "  invariant up to level %d, cost %d.\n\n",
		     loop_depth (lim_data->max_loop),
		     lim_data->cost);
	  }

	if (lim_data->cost >= LIM_EXPENSIVE)
	  set_profitable_level (stmt);
      }

  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
    {
      stmt = gsi_stmt (bsi);

      pos = movement_possibility (stmt);
      if (pos == MOVE_IMPOSSIBLE)
	{
	  if (nonpure_call_p (stmt))
	    {
	      maybe_never = true;
	      outermost = NULL;
	    }
	  /* Make sure to note always_executed_in for stores to make
	     store-motion work.  */
	  else if (stmt_makes_single_store (stmt))
	    {
	      struct lim_aux_data *lim_data = get_lim_data (stmt);
	      if (! lim_data)
		lim_data = init_lim_data (stmt);
	      lim_data->always_executed_in = outermost;
	    }
	  continue;
	}

      if (is_gimple_assign (stmt)
	  && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
	      == GIMPLE_BINARY_RHS))
	{
	  tree op0 = gimple_assign_rhs1 (stmt);
	  tree op1 = gimple_assign_rhs2 (stmt);
	  class loop *ol1 = outermost_invariant_loop (op1,
					loop_containing_stmt (stmt));

	  /* If divisor is invariant, convert a/b to a*(1/b), allowing
	     reciprocal to be hoisted out of loop, saving expensive divide.  */
	  if (pos == MOVE_POSSIBLE
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR
	      && flag_unsafe_math_optimizations
	      && !flag_trapping_math
	      && ol1 != NULL
	      && outermost_invariant_loop (op0, ol1) == NULL)
	    stmt = rewrite_reciprocal (&bsi);

	  /* If the shift count is invariant, convert (A >> B) & 1 to
	     A & (1 << B) allowing the bit mask to be hoisted out of the loop
	     saving an expensive shift.  */
	  if (pos == MOVE_POSSIBLE
	      && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
	      && integer_onep (op1)
	      && TREE_CODE (op0) == SSA_NAME
	      && has_single_use (op0))
	    stmt = rewrite_bittest (&bsi);
	}

      lim_data = get_lim_data (stmt);
      if (! lim_data)
	lim_data = init_lim_data (stmt);
      lim_data->always_executed_in = outermost;

      if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
	continue;

      if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
	{
	  lim_data->max_loop = NULL;
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  print_gimple_stmt (dump_file, stmt, 2);
	  fprintf (dump_file, "  invariant up to level %d, cost %d.\n\n",
		   loop_depth (lim_data->max_loop),
		   lim_data->cost);
	}

      if (lim_data->cost >= LIM_EXPENSIVE)
	set_profitable_level (stmt);
    }
  return NULL;
}
/* Hoist the statements in basic block BB out of the loops prescribed by
   data stored in LIM_DATA structures associated with each statement.  Callback
   for walk_dominator_tree.  */

unsigned int
move_computations_worker (basic_block bb)
{
  class loop *level;
  unsigned cost = 0;
  struct lim_aux_data *lim_data;
  unsigned int todo = 0;

  if (!loop_outer (bb->loop_father))
    return todo;

  for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
    {
      gassign *new_stmt;
      gphi *stmt = bsi.phi ();

      lim_data = get_lim_data (stmt);
      if (lim_data == NULL)
	{
	  gsi_next (&bsi);
	  continue;
	}

      cost = lim_data->cost;
      level = lim_data->tgt_loop;
      clear_lim_data (stmt);

      if (!level)
	{
	  gsi_next (&bsi);
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Moving PHI node\n");
	  print_gimple_stmt (dump_file, stmt, 0);
	  fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
		   cost, level->num);
	}

      if (gimple_phi_num_args (stmt) == 1)
	{
	  tree arg = PHI_ARG_DEF (stmt, 0);
	  new_stmt = gimple_build_assign (gimple_phi_result (stmt),
					  TREE_CODE (arg), arg);
	}
      else
	{
	  basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
	  gimple *cond = gsi_stmt (gsi_last_bb (dom));
	  tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
	  /* Get the PHI arguments corresponding to the true and false
	     edges of COND.  */
	  extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
	  gcc_assert (arg0 && arg1);
	  t = build2 (gimple_cond_code (cond), boolean_type_node,
		      gimple_cond_lhs (cond), gimple_cond_rhs (cond));
	  new_stmt = gimple_build_assign (gimple_phi_result (stmt),
					  COND_EXPR, t, arg0, arg1);
	  todo |= TODO_cleanup_cfg;
	}
      if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (new_stmt)))
	  && (!ALWAYS_EXECUTED_IN (bb)
	      || (ALWAYS_EXECUTED_IN (bb) != level
		  && !flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
	{
	  tree lhs = gimple_assign_lhs (new_stmt);
	  SSA_NAME_RANGE_INFO (lhs) = NULL;
	}
      gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
      remove_phi_node (&bsi, false);
    }

  for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
    {
      edge e;

      gimple *stmt = gsi_stmt (bsi);

      lim_data = get_lim_data (stmt);
      if (lim_data == NULL)
	{
	  gsi_next (&bsi);
	  continue;
	}

      cost = lim_data->cost;
      level = lim_data->tgt_loop;
      clear_lim_data (stmt);

      if (!level)
	{
	  gsi_next (&bsi);
	  continue;
	}

      /* We do not really want to move conditionals out of the loop; we just
	 placed it here to force its operands to be moved if necessary.  */
      if (gimple_code (stmt) == GIMPLE_COND)
	continue;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Moving statement\n");
	  print_gimple_stmt (dump_file, stmt, 0);
	  fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
		   cost, level->num);
	}

      e = loop_preheader_edge (level);
      gcc_assert (!gimple_vdef (stmt));
      if (gimple_vuse (stmt))
	{
	  /* The new VUSE is the one from the virtual PHI in the loop
	     header or the one already present.  */
	  gphi_iterator gsi2;
	  for (gsi2 = gsi_start_phis (e->dest);
	       !gsi_end_p (gsi2); gsi_next (&gsi2))
	    {
	      gphi *phi = gsi2.phi ();
	      if (virtual_operand_p (gimple_phi_result (phi)))
		{
		  SET_USE (gimple_vuse_op (stmt),
			   PHI_ARG_DEF_FROM_EDGE (phi, e));
		  break;
		}
	    }
	}
      gsi_remove (&bsi, false);
      if (gimple_has_lhs (stmt)
	  && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME
	  && INTEGRAL_TYPE_P (TREE_TYPE (gimple_get_lhs (stmt)))
	  && (!ALWAYS_EXECUTED_IN (bb)
	      || !(ALWAYS_EXECUTED_IN (bb) == level
		   || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
	{
	  tree lhs = gimple_get_lhs (stmt);
	  SSA_NAME_RANGE_INFO (lhs) = NULL;
	}
      /* In case this is a stmt that is not unconditionally executed
	 when the target loop header is executed and the stmt may
	 invoke undefined integer or pointer overflow rewrite it to
	 unsigned arithmetic.  */
      if (is_gimple_assign (stmt)
	  && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))
	  && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (gimple_assign_lhs (stmt)))
	  && arith_code_with_undefined_signed_overflow
	       (gimple_assign_rhs_code (stmt))
	  && (!ALWAYS_EXECUTED_IN (bb)
	      || !(ALWAYS_EXECUTED_IN (bb) == level
		   || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
	gsi_insert_seq_on_edge (e, rewrite_to_defined_overflow (stmt));
      else
	gsi_insert_on_edge (e, stmt);
    }

  return todo;
}
/* Hoist the statements out of the loops prescribed by data stored in
   LIM_DATA structures associated with each statement.  */

static unsigned int
move_computations (void)
{
  int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
  int n = pre_and_rev_post_order_compute_fn (cfun, NULL, rpo, false);
  unsigned todo = 0;

  for (int i = 0; i < n; ++i)
    todo |= move_computations_worker (BASIC_BLOCK_FOR_FN (cfun, rpo[i]));

  free (rpo);

  gsi_commit_edge_inserts ();
  if (need_ssa_update_p (cfun))
    rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);

  return todo;
}
/* Checks whether the statement defining variable *INDEX can be hoisted
   out of the loop passed in DATA.  Callback for for_each_index.  */

static bool
may_move_till (tree ref, tree *index, void *data)
{
  class loop *loop = (class loop *) data, *max_loop;

  /* If REF is an array reference, check also that the step and the lower
     bound is invariant in LOOP.  */
  if (TREE_CODE (ref) == ARRAY_REF)
    {
      tree step = TREE_OPERAND (ref, 3);
      tree lbound = TREE_OPERAND (ref, 2);

      max_loop = outermost_invariant_loop (step, loop);
      if (!max_loop)
	return false;

      max_loop = outermost_invariant_loop (lbound, loop);
      if (!max_loop)
	return false;
    }

  max_loop = outermost_invariant_loop (*index, loop);
  if (!max_loop)
    return false;

  return true;
}
/* If OP is SSA NAME, force the statement that defines it to be
   moved out of the LOOP.  ORIG_LOOP is the loop in that EXPR is used.  */

static void
force_move_till_op (tree op, class loop *orig_loop, class loop *loop)
{
  gimple *stmt;

  if (!op
      || is_gimple_min_invariant (op))
    return;

  gcc_assert (TREE_CODE (op) == SSA_NAME);

  stmt = SSA_NAME_DEF_STMT (op);
  if (gimple_nop_p (stmt))
    return;

  set_level (stmt, orig_loop, loop);
}
/* Forces statement defining invariants in REF (and *INDEX) to be moved out of
   the LOOP.  The reference REF is used in the loop ORIG_LOOP.  Callback for
   for_each_index.  */

struct fmt_data
{
  class loop *loop;
  class loop *orig_loop;
};

static bool
force_move_till (tree ref, tree *index, void *data)
{
  struct fmt_data *fmt_data = (struct fmt_data *) data;

  if (TREE_CODE (ref) == ARRAY_REF)
    {
      tree step = TREE_OPERAND (ref, 3);
      tree lbound = TREE_OPERAND (ref, 2);

      force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
      force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
    }

  force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);

  return true;
}
/* A function to free the mem_ref object OBJ.  */

static void
memref_free (class im_mem_ref *mem)
{
  mem->accesses_in_loop.release ();
}

/* Allocates and returns a memory reference description for MEM whose hash
   value is HASH and id is ID.  */

static im_mem_ref *
mem_ref_alloc (ao_ref *mem, unsigned hash, unsigned id)
{
  im_mem_ref *ref = XOBNEW (&mem_ref_obstack, class im_mem_ref);
  if (mem)
    ref->mem = *mem;
  else
    ao_ref_init (&ref->mem, error_mark_node);
  ref->id = id;
  ref->ref_canonical = false;
  ref->ref_decomposed = false;
  ref->hash = hash;
  ref->stored = NULL;
  bitmap_initialize (&ref->indep_loop, &lim_bitmap_obstack);
  bitmap_initialize (&ref->dep_loop, &lim_bitmap_obstack);
  ref->accesses_in_loop.create (1);

  return ref;
}
/* Records memory reference location *LOC in LOOP to the memory reference
   description REF.  The reference occurs in statement STMT.  */

static void
record_mem_ref_loc (im_mem_ref *ref, gimple *stmt, tree *loc)
{
  mem_ref_loc aref;
  aref.stmt = stmt;
  aref.ref = loc;
  ref->accesses_in_loop.safe_push (aref);
}

/* Set the LOOP bit in REF stored bitmap and allocate that if
   necessary.  Return whether a bit was changed.  */

static bool
set_ref_stored_in_loop (im_mem_ref *ref, class loop *loop)
{
  if (!ref->stored)
    ref->stored = BITMAP_ALLOC (&lim_bitmap_obstack);
  return bitmap_set_bit (ref->stored, loop->num);
}
/* Marks reference REF as stored in LOOP.  */

static void
mark_ref_stored (im_mem_ref *ref, class loop *loop)
{
  while (loop != current_loops->tree_root
	 && set_ref_stored_in_loop (ref, loop))
    loop = loop_outer (loop);
}
/* Gathers memory references in statement STMT in LOOP, storing the
   information about them in the memory_accesses structure.  Marks
   the vops accessed through unrecognized statements there as
   well.  */

static void
gather_mem_refs_stmt (class loop *loop, gimple *stmt)
{
  tree *mem = NULL;
  hashval_t hash;
  im_mem_ref **slot;
  im_mem_ref *ref;
  bool is_stored;
  unsigned id;

  if (!gimple_vuse (stmt))
    return;

  mem = simple_mem_ref_in_stmt (stmt, &is_stored);
  if (!mem)
    {
      /* We use the shared mem_ref for all unanalyzable refs.  */
      id = UNANALYZABLE_MEM_ID;
      ref = memory_accesses.refs_list[id];
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	}
      is_stored = gimple_vdef (stmt);
    }
  else
    {
      /* We are looking for equal refs that might differ in structure
	 such as a.b vs. MEM[&a + 4].  So we key off the ao_ref but
	 make sure we can canonicalize the ref in the hashtable if
	 non-operand_equal_p refs are found.  For the lookup we mark
	 the case we want strict equality with aor.max_size == -1.  */
      ao_ref aor;
      ao_ref_init (&aor, *mem);
      ao_ref_base (&aor);
      ao_ref_alias_set (&aor);
      HOST_WIDE_INT offset, size, max_size;
      poly_int64 saved_maxsize = aor.max_size, mem_off;
      tree mem_base;
      bool ref_decomposed;
      if (aor.max_size_known_p ()
	  && aor.offset.is_constant (&offset)
	  && aor.size.is_constant (&size)
	  && aor.max_size.is_constant (&max_size)
	  && size == max_size
	  && (size % BITS_PER_UNIT) == 0
	  /* We're canonicalizing to a MEM where TYPE_SIZE specifies the
	     size.  Make sure this is consistent with the extraction.  */
	  && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (*mem)))
	  && known_eq (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (*mem))),
		       aor.size)
	  && (mem_base = get_addr_base_and_unit_offset (aor.ref, &mem_off)))
	{
	  ref_decomposed = true;
	  hash = iterative_hash_expr (ao_ref_base (&aor), 0);
	  hash = iterative_hash_host_wide_int (offset, hash);
	  hash = iterative_hash_host_wide_int (size, hash);
	}
      else
	{
	  ref_decomposed = false;
	  hash = iterative_hash_expr (aor.ref, 0);
	  aor.max_size = -1;
	}
      slot = memory_accesses.refs->find_slot_with_hash (&aor, hash, INSERT);
      aor.max_size = saved_maxsize;
      if (*slot)
	{
	  if (!(*slot)->ref_canonical
	      && !operand_equal_p (*mem, (*slot)->mem.ref, 0))
	    {
	      /* If we didn't yet canonicalize the hashtable ref (which
		 we'll end up using for code insertion) and hit a second
		 equal ref that is not structurally equivalent create
		 a canonical ref which is a bare MEM_REF.  */
	      if (TREE_CODE (*mem) == MEM_REF
		  || TREE_CODE (*mem) == TARGET_MEM_REF)
		{
		  (*slot)->mem.ref = *mem;
		  (*slot)->mem.base_alias_set = ao_ref_base_alias_set (&aor);
		}
	      else
		{
		  tree ref_alias_type = reference_alias_ptr_type (*mem);
		  unsigned int ref_align = get_object_alignment (*mem);
		  tree ref_type = TREE_TYPE (*mem);
		  tree tmp = build_fold_addr_expr (unshare_expr (mem_base));
		  if (TYPE_ALIGN (ref_type) != ref_align)
		    ref_type = build_aligned_type (ref_type, ref_align);
		  (*slot)->mem.ref
		    = fold_build2 (MEM_REF, ref_type, tmp,
				   build_int_cst (ref_alias_type, mem_off));
		  if ((*slot)->mem.volatile_p)
		    TREE_THIS_VOLATILE ((*slot)->mem.ref) = 1;
		  gcc_checking_assert (TREE_CODE ((*slot)->mem.ref) == MEM_REF
				       && is_gimple_mem_ref_addr
					    (TREE_OPERAND ((*slot)->mem.ref,
							   0)));
		  (*slot)->mem.base_alias_set = (*slot)->mem.ref_alias_set;
		}
	      (*slot)->ref_canonical = true;
	    }
	  ref = *slot;
	  id = ref->id;
	}
      else
	{
	  id = memory_accesses.refs_list.length ();
	  ref = mem_ref_alloc (&aor, hash, id);
	  ref->ref_decomposed = ref_decomposed;
	  memory_accesses.refs_list.safe_push (ref);
	  *slot = ref;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Memory reference %u: ", id);
	      print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM);
	      fprintf (dump_file, "\n");
	    }
	}

      record_mem_ref_loc (ref, stmt, mem);
    }
  bitmap_set_bit (&memory_accesses.refs_in_loop[loop->num], ref->id);
  if (is_stored)
    {
      bitmap_set_bit (&memory_accesses.refs_stored_in_loop[loop->num], ref->id);
      mark_ref_stored (ref, loop);
    }
  init_lim_data (stmt)->ref = ref->id;
  return;
}
static unsigned *bb_loop_postorder;

/* qsort sort function to sort blocks after their loop fathers postorder.  */

static int
sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_,
				void *bb_loop_postorder_)
{
  unsigned *bb_loop_postorder = (unsigned *)bb_loop_postorder_;
  basic_block bb1 = *(const basic_block *)bb1_;
  basic_block bb2 = *(const basic_block *)bb2_;
  class loop *loop1 = bb1->loop_father;
  class loop *loop2 = bb2->loop_father;
  if (loop1->num == loop2->num)
    return bb1->index - bb2->index;
  return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
}

/* qsort sort function to sort ref locs after their loop fathers postorder.  */

static int
sort_locs_in_loop_postorder_cmp (const void *loc1_, const void *loc2_,
				 void *bb_loop_postorder_)
{
  unsigned *bb_loop_postorder = (unsigned *)bb_loop_postorder_;
  const mem_ref_loc *loc1 = (const mem_ref_loc *)loc1_;
  const mem_ref_loc *loc2 = (const mem_ref_loc *)loc2_;
  class loop *loop1 = gimple_bb (loc1->stmt)->loop_father;
  class loop *loop2 = gimple_bb (loc2->stmt)->loop_father;
  if (loop1->num == loop2->num)
    return 0;
  return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
}
/* Gathers memory references in loops.  */

static void
analyze_memory_references (void)
{
  gimple_stmt_iterator bsi;
  basic_block bb, *bbs;
  class loop *loop, *outer;
  unsigned i, n;

  /* Collect all basic-blocks in loops and sort them after their
     loops postorder.  */
  i = 0;
  bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
  FOR_EACH_BB_FN (bb, cfun)
    if (bb->loop_father != current_loops->tree_root)
      bbs[i++] = bb;
  n = i;
  gcc_sort_r (bbs, n, sizeof (basic_block), sort_bbs_in_loop_postorder_cmp,
	      bb_loop_postorder);

  /* Visit blocks in loop postorder and assign mem-ref IDs in that order.
     That results in better locality for all the bitmaps.  */
  for (i = 0; i < n; ++i)
    {
      basic_block bb = bbs[i];
      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	gather_mem_refs_stmt (bb->loop_father, gsi_stmt (bsi));
    }

  /* Sort the location list of gathered memory references after their
     loop postorder number.  */
  im_mem_ref *ref;
  FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
    ref->accesses_in_loop.sort (sort_locs_in_loop_postorder_cmp,
				bb_loop_postorder);

  free (bbs);

  /* Propagate the information about accessed memory references up
     the loop hierarchy.  */
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      /* Finalize the overall touched references (including subloops).  */
      bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
		       &memory_accesses.refs_stored_in_loop[loop->num]);

      /* Propagate the information about accessed memory references up
	 the loop hierarchy.  */
      outer = loop_outer (loop);
      if (outer == current_loops->tree_root)
	continue;

      bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[outer->num],
		       &memory_accesses.all_refs_stored_in_loop[loop->num]);
    }
}
/* Returns true if MEM1 and MEM2 may alias.  TTAE_CACHE is used as a cache in
   tree_to_aff_combination_expand.  */

static bool
mem_refs_may_alias_p (im_mem_ref *mem1, im_mem_ref *mem2,
		      hash_map<tree, name_expansion *> **ttae_cache)
{
  /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
     object and their offset differ in such a way that the locations cannot
     overlap, then they cannot alias.  */
  poly_widest_int size1, size2;
  aff_tree off1, off2;

  /* Perform basic offset and type-based disambiguation.  */
  if (!refs_may_alias_p_1 (&mem1->mem, &mem2->mem, true))
    return false;

  /* The expansion of addresses may be a bit expensive, thus we only do
     the check at -O2 and higher optimization levels.  */
  if (optimize < 2)
    return true;

  get_inner_reference_aff (mem1->mem.ref, &off1, &size1);
  get_inner_reference_aff (mem2->mem.ref, &off2, &size2);
  aff_combination_expand (&off1, ttae_cache);
  aff_combination_expand (&off2, ttae_cache);
  aff_combination_scale (&off1, -1);
  aff_combination_add (&off2, &off1);

  if (aff_comb_cannot_overlap_p (&off2, size1, size2))
    return false;

  return true;
}
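
/* For example (editorial note): for the refs a[i] and a[i + 1] with
   4-byte elements, the expanded affine offsets differ by the constant 4
   while each access spans 4 bytes, so aff_comb_cannot_overlap_p proves
   the locations disjoint even though both refs share the base a.  */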
/* Compare function for bsearch searching for reference locations
   in a loop.  */

static int
find_ref_loc_in_loop_cmp (const void *loop_, const void *loc_,
			  void *bb_loop_postorder_)
{
  unsigned *bb_loop_postorder = (unsigned *)bb_loop_postorder_;
  class loop *loop = (class loop *)const_cast<void *>(loop_);
  mem_ref_loc *loc = (mem_ref_loc *)const_cast<void *>(loc_);
  class loop *loc_loop = gimple_bb (loc->stmt)->loop_father;
  if (loop->num == loc_loop->num
      || flow_loop_nested_p (loop, loc_loop))
    return 0;
  return (bb_loop_postorder[loop->num] < bb_loop_postorder[loc_loop->num]
	  ? -1 : 1);
}
/* Iterates over all locations of REF in LOOP and its subloops calling
   fn.operator() with the location as argument.  When that operator
   returns true the iteration is stopped and true is returned.
   Otherwise false is returned.  */

template <typename FN>
static bool
for_all_locs_in_loop (class loop *loop, im_mem_ref *ref, FN fn)
{
  unsigned i;
  mem_ref_loc *loc;

  /* Search for the cluster of locs in the accesses_in_loop vector
     which is sorted after postorder index of the loop father.  */
  loc = ref->accesses_in_loop.bsearch (loop, find_ref_loc_in_loop_cmp,
				       bb_loop_postorder);
  if (!loc)
    return false;

  /* We have found one location inside loop or its sub-loops.  Iterate
     both forward and backward to cover the whole cluster.  */
  i = loc - ref->accesses_in_loop.address ();
  while (i > 0)
    {
      --i;
      mem_ref_loc *l = &ref->accesses_in_loop[i];
      if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
	break;
      if (fn (l))
	return true;
    }
  for (i = loc - ref->accesses_in_loop.address ();
       i < ref->accesses_in_loop.length (); ++i)
    {
      mem_ref_loc *l = &ref->accesses_in_loop[i];
      if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
	break;
      if (fn (l))
	return true;
    }

  return false;
}
/* Rewrites location LOC by TMP_VAR.  */

class rewrite_mem_ref_loc
{
public:
  rewrite_mem_ref_loc (tree tmp_var_) : tmp_var (tmp_var_) {}
  bool operator () (mem_ref_loc *loc);
  tree tmp_var;
};

bool
rewrite_mem_ref_loc::operator () (mem_ref_loc *loc)
{
  *loc->ref = tmp_var;
  update_stmt (loc->stmt);
  return false;
}

/* Rewrites all references to REF in LOOP by variable TMP_VAR.  */

static void
rewrite_mem_refs (class loop *loop, im_mem_ref *ref, tree tmp_var)
{
  for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var));
}

/* Stores the first reference location in LOCP.  */

class first_mem_ref_loc_1
{
public:
  first_mem_ref_loc_1 (mem_ref_loc **locp_) : locp (locp_) {}
  bool operator () (mem_ref_loc *loc);
  mem_ref_loc **locp;
};

bool
first_mem_ref_loc_1::operator () (mem_ref_loc *loc)
{
  *locp = loc;
  return true;
}

/* Returns the first reference location to REF in LOOP.  */

static mem_ref_loc *
first_mem_ref_loc (class loop *loop, im_mem_ref *ref)
{
  mem_ref_loc *locp = NULL;
  for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp));
  return locp;
}
struct prev_flag_edges {
  /* Edge to insert new flag comparison code.  */
  edge append_cond_position;

  /* Edge for fall through from previous flag comparison.  */
  edge last_cond_fallthru;
};
/* Helper function for execute_sm.  Emit code to store TMP_VAR into
   MEM along edge EX.

   The store is only done if MEM has changed.  We do this so no
   changes to MEM occur on code paths that did not originally store
   into it.

   The common case for execute_sm will transform:

     for (...) {
       if (foo)
	 stuff;
       else
	 MEM = TMP_VAR;
     }

   into:

     lsm = MEM;
     for (...) {
       if (foo)
	 stuff;
       else
	 lsm = TMP_VAR;
     }
     MEM = lsm;

   This function will generate:

     lsm = MEM;

     lsm_flag = false;
     for (...) {
       if (foo)
	 stuff;
       else {
	 lsm = TMP_VAR;
	 lsm_flag = true;
       }
     }
     if (lsm_flag)	      <--
       MEM = lsm;	      <--
*/
static void
execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag,
		       edge preheader, hash_set <basic_block> *flag_bbs)
{
  basic_block new_bb, then_bb, old_dest;
  bool loop_has_only_one_exit;
  edge then_old_edge, orig_ex = ex;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
  bool irr = ex->flags & EDGE_IRREDUCIBLE_LOOP;

  profile_count count_sum = profile_count::zero ();
  int nbbs = 0, ncount = 0;
  profile_probability flag_probability = profile_probability::uninitialized ();

  /* Flag is set in FLAG_BBS.  Determine probability that flag will be true
     at loop exit.

     This code may look fancy, but it cannot update profile very realistically
     because we do not know the probability that flag will be true at given
     loop exit.

     We look for two interesting extremes
       - when exit is dominated by block setting the flag, we know it will
	 always be true.  This is a common case.
       - when all blocks setting the flag have very low frequency we know
	 it will likely be false.
     In all other cases we default to 2/3 for flag being true.  */

  for (hash_set<basic_block>::iterator it = flag_bbs->begin ();
       it != flag_bbs->end (); ++it)
    {
      if ((*it)->count.initialized_p ())
	count_sum += (*it)->count, ncount ++;
      if (dominated_by_p (CDI_DOMINATORS, ex->src, *it))
	flag_probability = profile_probability::always ();
      nbbs++;
    }

  profile_probability cap = profile_probability::always ().apply_scale (2, 3);

  if (flag_probability.initialized_p ())
    ;
  else if (ncount == nbbs
	   && preheader->count () >= count_sum && preheader->count ().nonzero_p ())
    {
      flag_probability = count_sum.probability_in (preheader->count ());
      if (flag_probability > cap)
	flag_probability = cap;
    }

  if (!flag_probability.initialized_p ())
    flag_probability = cap;

  /* ?? Insert store after previous store if applicable.  See note
     below.  */
  if (prev_edges)
    ex = prev_edges->append_cond_position;

  loop_has_only_one_exit = single_pred_p (ex->dest);

  if (loop_has_only_one_exit)
    ex = split_block_after_labels (ex->dest);
  else
    {
      for (gphi_iterator gpi = gsi_start_phis (ex->dest);
	   !gsi_end_p (gpi); gsi_next (&gpi))
	{
	  gphi *phi = gpi.phi ();
	  if (virtual_operand_p (gimple_phi_result (phi)))
	    continue;

	  /* When the destination has a non-virtual PHI node with multiple
	     predecessors make sure we preserve the PHI structure by
	     forcing a forwarder block so that hoisting of that PHI will
	     still work.  */
	  split_edge (ex);
	  break;
	}
    }

  old_dest = ex->dest;
  new_bb = split_edge (ex);
  then_bb = create_empty_bb (new_bb);
  then_bb->count = new_bb->count.apply_probability (flag_probability);
  if (irr)
    then_bb->flags = BB_IRREDUCIBLE_LOOP;
  add_bb_to_loop (then_bb, new_bb->loop_father);

  gsi = gsi_start_bb (new_bb);
  stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
			    NULL_TREE, NULL_TREE);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  gsi = gsi_start_bb (then_bb);
  /* Insert actual store.  */
  stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  edge e1 = single_succ_edge (new_bb);
  edge e2 = make_edge (new_bb, then_bb,
		       EDGE_TRUE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
  e2->probability = flag_probability;

  e1->flags |= EDGE_FALSE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0);
  e1->flags &= ~EDGE_FALLTHRU;

  e1->probability = flag_probability.invert ();

  then_old_edge = make_single_succ_edge (then_bb, old_dest,
			     EDGE_FALLTHRU | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));

  set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);

  if (prev_edges)
    {
      basic_block prevbb = prev_edges->last_cond_fallthru->src;
      redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
      set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
      set_immediate_dominator (CDI_DOMINATORS, old_dest,
			       recompute_dominator (CDI_DOMINATORS, old_dest));
    }

  /* ?? Because stores may alias, they must happen in the exact
     sequence they originally happened.  Save the position right after
     the (_lsm) store we just created so we can continue appending after
     it and maintain the original order.  */
  {
    struct prev_flag_edges *p;

    if (orig_ex->aux)
      orig_ex->aux = NULL;
    alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
    p = (struct prev_flag_edges *) orig_ex->aux;
    p->append_cond_position = then_old_edge;
    p->last_cond_fallthru = find_edge (new_bb, old_dest);
    orig_ex->aux = (void *) p;
  }

  if (!loop_has_only_one_exit)
    for (gphi_iterator gpi = gsi_start_phis (old_dest);
	 !gsi_end_p (gpi); gsi_next (&gpi))
      {
	gphi *phi = gpi.phi ();
	unsigned i;

	for (i = 0; i < gimple_phi_num_args (phi); i++)
	  if (gimple_phi_arg_edge (phi, i)->src == new_bb)
	    {
	      tree arg = gimple_phi_arg_def (phi, i);
	      add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
	      update_stmt (phi);
	    }
      }
}
/* When the reference is stored to at the location, insert code setting
   the flag just after the store.  */

class sm_set_flag_if_changed
{
public:
  sm_set_flag_if_changed (tree flag_, hash_set <basic_block> *bbs_)
	 : flag (flag_), bbs (bbs_) {}
  bool operator () (mem_ref_loc *loc);
  tree flag;
  hash_set <basic_block> *bbs;
};

bool
sm_set_flag_if_changed::operator () (mem_ref_loc *loc)
{
  /* Only set the flag for writes.  */
  if (is_gimple_assign (loc->stmt)
      && gimple_assign_lhs_ptr (loc->stmt) == loc->ref)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (loc->stmt);
      gimple *stmt = gimple_build_assign (flag, boolean_true_node);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
      bbs->add (gimple_bb (stmt));
    }
  return false;
}
/* Helper function for execute_sm.  On every location where REF is
   set, set an appropriate flag indicating the store.  */
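/* For instance (a sketch, reusing the lsm_flag naming from the comment
   above execute_sm_if_changed; the real name is derived from the
   reference by get_lsm_tmp_name with a "_flag" suffix): a store

     MEM = x;

   inside the loop becomes

     MEM = x;
     lsm_flag = true;

   so that the loop exits write MEM back only when lsm_flag is set.  */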
static tree
execute_sm_if_changed_flag_set (class loop *loop, im_mem_ref *ref,
				hash_set <basic_block> *bbs)
{
  tree flag;
  char *str = get_lsm_tmp_name (ref->mem.ref, ~0, "_flag");
  flag = create_tmp_reg (boolean_type_node, str);
  for_all_locs_in_loop (loop, ref, sm_set_flag_if_changed (flag, bbs));
  return flag;
}
/* Executes store motion of memory reference REF from LOOP.
   Exits from the LOOP are stored in EXITS.  The initialization of the
   temporary variable is put in the preheader of the loop, and assignments
   to the reference from the temporary variable are emitted on the exits.  */
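/* A sketch of the single-threaded case (the multi-threaded variant
   additionally guards the exit store with a flag; see
   execute_sm_if_changed above):

     for (...)
       MEM = x;

   becomes

     lsm = MEM;
     for (...)
       lsm = x;
     MEM = lsm;
*/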
static void
execute_sm (class loop *loop, vec<edge> exits, im_mem_ref *ref)
{
  tree tmp_var, store_flag = NULL_TREE;
  unsigned i;
  gassign *load;
  struct fmt_data fmt_data;
  edge ex;
  struct lim_aux_data *lim_data;
  bool multi_threaded_model_p = false;
  gimple_stmt_iterator gsi;
  hash_set<basic_block> flag_bbs;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Executing store motion of ");
      print_generic_expr (dump_file, ref->mem.ref);
      fprintf (dump_file, " from loop %d\n", loop->num);
    }

  tmp_var = create_tmp_reg (TREE_TYPE (ref->mem.ref),
			    get_lsm_tmp_name (ref->mem.ref, ~0));

  fmt_data.loop = loop;
  fmt_data.orig_loop = loop;
  for_each_index (&ref->mem.ref, force_move_till, &fmt_data);

  /* Use the flag-guarded variant when the preheader is inside a
     transaction, or when we may not introduce new data races and the
     reference is not stored to on every path through the loop.  */
  if (bb_in_transaction (loop_preheader_edge (loop)->src)
      || (! flag_store_data_races
	  && ! ref_always_accessed_p (loop, ref, true)))
    multi_threaded_model_p = true;

  if (multi_threaded_model_p)
    store_flag = execute_sm_if_changed_flag_set (loop, ref, &flag_bbs);

  rewrite_mem_refs (loop, ref, tmp_var);

  /* Emit the load code on a random exit edge or into the latch if
     the loop does not exit, so that we are sure it will be processed
     by move_computations after all dependencies.  */
  gsi = gsi_for_stmt (first_mem_ref_loc (loop, ref)->stmt);

  /* FIXME/TODO: For the multi-threaded variant, we could avoid this
     load altogether, since the store is predicated by a flag.  We
     could do the load only if it was originally in the loop.  */
  load = gimple_build_assign (tmp_var, unshare_expr (ref->mem.ref));
  lim_data = init_lim_data (load);
  lim_data->max_loop = loop;
  lim_data->tgt_loop = loop;
  gsi_insert_before (&gsi, load, GSI_SAME_STMT);

  if (multi_threaded_model_p)
    {
      load = gimple_build_assign (store_flag, boolean_false_node);
      lim_data = init_lim_data (load);
      lim_data->max_loop = loop;
      lim_data->tgt_loop = loop;
      gsi_insert_before (&gsi, load, GSI_SAME_STMT);
    }

  /* Sink the store to every exit from the loop.  */
  FOR_EACH_VEC_ELT (exits, i, ex)
    if (!multi_threaded_model_p)
      {
	gassign *store;
	store = gimple_build_assign (unshare_expr (ref->mem.ref), tmp_var);
	gsi_insert_on_edge (ex, store);
      }
    else
      execute_sm_if_changed (ex, ref->mem.ref, tmp_var, store_flag,
			     loop_preheader_edge (loop), &flag_bbs);
}
/* Hoists memory references MEM_REFS out of LOOP.  EXITS is the list of exit
   edges of the LOOP.  */

static void
hoist_memory_references (class loop *loop, bitmap mem_refs,
			 vec<edge> exits)
{
  im_mem_ref *ref;
  unsigned i;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
    {
      ref = memory_accesses.refs_list[i];
      execute_sm (loop, exits, ref);
    }
}

class ref_always_accessed
{
public:
  ref_always_accessed (class loop *loop_, bool stored_p_)
      : loop (loop_), stored_p (stored_p_) {}
  bool operator () (mem_ref_loc *loc);
  class loop *loop;
  bool stored_p;
};

bool
ref_always_accessed::operator () (mem_ref_loc *loc)
{
  class loop *must_exec;

  if (!get_lim_data (loc->stmt))
    return false;

  /* If we require an always executed store make sure the statement
     stores to the reference.  */
  if (stored_p)
    {
      tree lhs = gimple_get_lhs (loc->stmt);
      if (!lhs
	  || lhs != *loc->ref)
	return false;
    }

  must_exec = get_lim_data (loc->stmt)->always_executed_in;
  if (!must_exec)
    return false;

  if (must_exec == loop
      || flow_loop_nested_p (must_exec, loop))
    return true;

  return false;
}

/* Returns true if REF is always accessed in LOOP.  If STORED_P is true
   make sure REF is always stored to in LOOP.  */

static bool
ref_always_accessed_p (class loop *loop, im_mem_ref *ref, bool stored_p)
{
  return for_all_locs_in_loop (loop, ref,
			       ref_always_accessed (loop, stored_p));
}

/* Returns true if REF1 and REF2 are independent.  */

static bool
refs_independent_p (im_mem_ref *ref1, im_mem_ref *ref2)
{
  if (ref1 == ref2)
    return true;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Querying dependency of refs %u and %u: ",
	     ref1->id, ref2->id);

  if (mem_refs_may_alias_p (ref1, ref2, &memory_accesses.ttae_cache))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "dependent.\n");
      return false;
    }
  else
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "independent.\n");
      return true;
    }
}
/* Mark REF dependent on stores or loads (according to STORED_P) in LOOP
   and its super-loops.  */

static void
record_dep_loop (class loop *loop, im_mem_ref *ref, bool stored_p)
{
  /* We can propagate dependent-in-loop bits up the loop
     hierarchy to all outer loops.  */
  while (loop != current_loops->tree_root
	 && bitmap_set_bit (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
    loop = loop_outer (loop);
}
/* Returns true if REF is independent of all other memory
   references in LOOP.  */
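/* The result is cached per (loop, stored_p) pair in REF->indep_loop and
   REF->dep_loop, indexed via LOOP_DEP_BIT (defined earlier in this file).
   For example, once a ref is found dependent in an inner loop,
   record_dep_loop above also sets the corresponding dep_loop bits for
   every enclosing loop, so later queries for those loops return
   immediately from the cache.  */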
static bool
ref_indep_loop_p_1 (class loop *loop, im_mem_ref *ref, bool stored_p)
{
  stored_p |= (ref->stored && bitmap_bit_p (ref->stored, loop->num));

  bool indep_p = true;
  bitmap refs_to_check;

  if (stored_p)
    refs_to_check = &memory_accesses.refs_in_loop[loop->num];
  else
    refs_to_check = &memory_accesses.refs_stored_in_loop[loop->num];

  if (bitmap_bit_p (refs_to_check, UNANALYZABLE_MEM_ID))
    indep_p = false;
  else
    {
      if (bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
	return true;
      if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
	return false;

      class loop *inner = loop->inner;
      while (inner)
	{
	  if (!ref_indep_loop_p_1 (inner, ref, stored_p))
	    {
	      indep_p = false;
	      break;
	    }
	  inner = inner->next;
	}

      if (indep_p)
	{
	  unsigned i;
	  bitmap_iterator bi;
	  EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
	    {
	      im_mem_ref *aref = memory_accesses.refs_list[i];
	      if (!refs_independent_p (ref, aref))
		{
		  indep_p = false;
		  break;
		}
	    }
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
	     ref->id, loop->num, indep_p ? "independent" : "dependent");

  /* Record the computed result in the cache.  */
  if (indep_p)
    {
      if (bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p))
	  && stored_p)
	{
	  /* If it's independent against all refs then it's independent
	     against stores, too.  */
	  bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, false));
	}
    }
  else
    {
      record_dep_loop (loop, ref, stored_p);
      if (!stored_p)
	{
	  /* If it's dependent against stores it's dependent against
	     all refs, too.  */
	  record_dep_loop (loop, ref, true);
	}
    }

  return indep_p;
}
/* Returns true if REF is independent of all other memory references in
   LOOP.  */

static bool
ref_indep_loop_p (class loop *loop, im_mem_ref *ref)
{
  gcc_checking_assert (MEM_ANALYZABLE (ref));

  return ref_indep_loop_p_1 (loop, ref, false);
}
/* Returns true if we can perform store motion of REF from LOOP.  */

static bool
can_sm_ref_p (class loop *loop, im_mem_ref *ref)
{
  tree base;

  /* Can't hoist unanalyzable refs.  */
  if (!MEM_ANALYZABLE (ref))
    return false;

  /* It should be movable.  */
  if (!is_gimple_reg_type (TREE_TYPE (ref->mem.ref))
      || TREE_THIS_VOLATILE (ref->mem.ref)
      || !for_each_index (&ref->mem.ref, may_move_till, loop))
    return false;

  /* If it can throw, fail; we do not properly update EH info.  */
  if (tree_could_throw_p (ref->mem.ref))
    return false;

  /* If it can trap, it must be always executed in LOOP.
     Readonly memory locations may trap when storing to them, but
     tree_could_trap_p is a predicate for rvalues, so check that
     explicitly.  */
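  /* For instance (illustrative): if the only store to REF in the loop is

       if (cond)
	 REF = x;

     and REF's base is a readonly declaration, the store may never execute
     at runtime; sinking an unconditional "REF = lsm;" to the loop exit
     could then introduce a trap the original program did not have.  */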
  base = get_base_address (ref->mem.ref);
  if ((tree_could_trap_p (ref->mem.ref)
       || (DECL_P (base) && TREE_READONLY (base)))
      && !ref_always_accessed_p (loop, ref, true))
    return false;

  /* And it must be independent of all other memory references
     in LOOP.  */
  if (!ref_indep_loop_p (loop, ref))
    return false;

  return true;
}
/* Marks in REFS_TO_SM the references in LOOP for which store motion
   should be performed.  SM_EXECUTED is the set of references for which
   store motion was already performed in one of the outer loops.  */

static void
find_refs_for_sm (class loop *loop, bitmap sm_executed, bitmap refs_to_sm)
{
  bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
  unsigned i;
  bitmap_iterator bi;
  im_mem_ref *ref;

  EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
    {
      ref = memory_accesses.refs_list[i];
      if (can_sm_ref_p (loop, ref))
	bitmap_set_bit (refs_to_sm, i);
    }
}

/* Checks whether LOOP (with exits stored in the EXITS array) is suitable
   for a store motion optimization (i.e. whether we can insert statements
   on its exits).  */

static bool
loop_suitable_for_sm (class loop *loop ATTRIBUTE_UNUSED,
		      vec<edge> exits)
{
  unsigned i;
  edge ex;

  FOR_EACH_VEC_ELT (exits, i, ex)
    if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
      return false;

  return true;
}
/* Try to perform store motion for all memory references modified inside
   LOOP.  SM_EXECUTED is the bitmap of the memory references for which
   store motion was executed in one of the outer loops.  */

static void
store_motion_loop (class loop *loop, bitmap sm_executed)
{
  vec<edge> exits = get_loop_exit_edges (loop);
  class loop *subloop;
  bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);

  if (loop_suitable_for_sm (loop, exits))
    {
      find_refs_for_sm (loop, sm_executed, sm_in_loop);
      hoist_memory_references (loop, sm_in_loop, exits);
    }
  exits.release ();

  bitmap_ior_into (sm_executed, sm_in_loop);
  for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
    store_motion_loop (subloop, sm_executed);
  bitmap_and_compl_into (sm_executed, sm_in_loop);
  BITMAP_FREE (sm_in_loop);
}

/* Try to perform store motion for all memory references modified inside
   loops.  */

static void
store_motion (void)
{
  class loop *loop;
  bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);

  for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
    store_motion_loop (loop, sm_executed);

  BITMAP_FREE (sm_executed);
  gsi_commit_edge_inserts ();
}
/* Fills the ALWAYS_EXECUTED_IN information for the basic blocks of LOOP,
   i.e. for each such basic block bb records the outermost loop for which
   execution of its header implies execution of bb.  CONTAINS_CALL is the
   bitmap of blocks that contain a nonpure call.  */
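/* For example (a sketch):

     for (;;)          loop header
       {
	 a;            always executed when the header runs
	 if (cond)
	   break;
	 b;            not always executed: cond may exit first
       }

   Here the block containing A gets ALWAYS_EXECUTED_IN set to the loop,
   while the block containing B does not.  */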
static void
fill_always_executed_in_1 (class loop *loop, sbitmap contains_call)
{
  basic_block bb = NULL, *bbs, last = NULL;
  unsigned i;
  edge e;
  class loop *inn_loop = loop;

  if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
    {
      bbs = get_loop_body_in_dom_order (loop);

      for (i = 0; i < loop->num_nodes; i++)
	{
	  edge_iterator ei;
	  bb = bbs[i];

	  if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
	    last = bb;

	  if (bitmap_bit_p (contains_call, bb->index))
	    break;

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      /* If there is an exit from this BB.  */
	      if (!flow_bb_inside_loop_p (loop, e->dest))
		break;
	      /* Or we enter a possibly non-finite loop.  */
	      if (flow_loop_nested_p (bb->loop_father,
				      e->dest->loop_father)
		  && ! finite_loop_p (e->dest->loop_father))
		break;
	    }
	  if (e)
	    break;

	  /* A loop might be infinite (TODO use simple loop analysis
	     to disprove this if possible).  */
	  if (bb->flags & BB_IRREDUCIBLE_LOOP)
	    break;

	  if (!flow_bb_inside_loop_p (inn_loop, bb))
	    break;

	  if (bb->loop_father->header == bb)
	    {
	      if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
		break;

	      /* In a loop that is always entered we may proceed anyway.
		 But record that we entered it and stop once we leave it.  */
	      inn_loop = bb->loop_father;
	    }
	}

      while (1)
	{
	  SET_ALWAYS_EXECUTED_IN (last, loop);
	  if (last == loop->header)
	    break;
	  last = get_immediate_dominator (CDI_DOMINATORS, last);
	}

      free (bbs);
    }

  for (loop = loop->inner; loop; loop = loop->next)
    fill_always_executed_in_1 (loop, contains_call);
}
/* Fills the ALWAYS_EXECUTED_IN information for all basic blocks, i.e.
   for each such basic block bb records the outermost loop for which
   execution of its header implies execution of bb.  */

static void
fill_always_executed_in (void)
{
  basic_block bb;
  class loop *loop;

  auto_sbitmap contains_call (last_basic_block_for_fn (cfun));
  bitmap_clear (contains_call);
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  if (nonpure_call_p (gsi_stmt (gsi)))
	    break;
	}

      if (!gsi_end_p (gsi))
	bitmap_set_bit (contains_call, bb->index);
    }

  for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
    fill_always_executed_in_1 (loop, contains_call);
}
/* Compute the global information needed by the loop invariant motion pass.  */

static void
tree_ssa_lim_initialize (void)
{
  class loop *loop;
  unsigned i;

  bitmap_obstack_initialize (&lim_bitmap_obstack);
  gcc_obstack_init (&mem_ref_obstack);
  lim_aux_data_map = new hash_map<gimple *, lim_aux_data *>;

  if (flag_tm)
    compute_transaction_bits ();

  alloc_aux_for_edges (0);

  memory_accesses.refs = new hash_table<mem_ref_hasher> (100);
  memory_accesses.refs_list.create (100);
  /* Allocate a special, unanalyzable mem-ref with ID zero.  */
  memory_accesses.refs_list.quick_push
    (mem_ref_alloc (NULL, 0, UNANALYZABLE_MEM_ID));

  memory_accesses.refs_in_loop.create (number_of_loops (cfun));
  memory_accesses.refs_in_loop.quick_grow (number_of_loops (cfun));
  memory_accesses.refs_stored_in_loop.create (number_of_loops (cfun));
  memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops (cfun));
  memory_accesses.all_refs_stored_in_loop.create (number_of_loops (cfun));
  memory_accesses.all_refs_stored_in_loop.quick_grow (number_of_loops (cfun));

  for (i = 0; i < number_of_loops (cfun); i++)
    {
      bitmap_initialize (&memory_accesses.refs_in_loop[i],
			 &lim_bitmap_obstack);
      bitmap_initialize (&memory_accesses.refs_stored_in_loop[i],
			 &lim_bitmap_obstack);
      bitmap_initialize (&memory_accesses.all_refs_stored_in_loop[i],
			 &lim_bitmap_obstack);
    }

  memory_accesses.ttae_cache = NULL;

  /* Initialize bb_loop_postorder with a mapping from loop->num to
     its postorder index.  */
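  /* Since loops are visited from the innermost outwards, inner loops get
     smaller indices; e.g. for a loop 2 nested inside a loop 1,
     bb_loop_postorder[2] < bb_loop_postorder[1].  */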
  i = 0;
  bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun));
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    bb_loop_postorder[loop->num] = i++;
}

/* Cleans up after the invariant motion pass.  */

static void
tree_ssa_lim_finalize (void)
{
  basic_block bb;
  unsigned i;
  im_mem_ref *ref;

  free_aux_for_edges ();

  FOR_EACH_BB_FN (bb, cfun)
    SET_ALWAYS_EXECUTED_IN (bb, NULL);

  bitmap_obstack_release (&lim_bitmap_obstack);
  delete lim_aux_data_map;

  delete memory_accesses.refs;
  memory_accesses.refs = NULL;

  FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
    memref_free (ref);
  memory_accesses.refs_list.release ();
  obstack_free (&mem_ref_obstack, NULL);

  memory_accesses.refs_in_loop.release ();
  memory_accesses.refs_stored_in_loop.release ();
  memory_accesses.all_refs_stored_in_loop.release ();

  if (memory_accesses.ttae_cache)
    free_affine_expand_cache (&memory_accesses.ttae_cache);

  free (bb_loop_postorder);
}
/* Moves invariants from loops.  Only "expensive" invariants are moved out --
   i.e. those that are likely to be a win regardless of the register
   pressure.  */

static unsigned int
tree_ssa_lim (void)
{
  unsigned int todo;

  tree_ssa_lim_initialize ();

  /* Gathers information about memory accesses in the loops.  */
  analyze_memory_references ();

  /* Fills ALWAYS_EXECUTED_IN information for basic blocks.  */
  fill_always_executed_in ();

  /* For each statement determine the outermost loop in which it is
     invariant and the cost of computing the invariant.  */
  invariantness_dom_walker (CDI_DOMINATORS)
    .walk (cfun->cfg->x_entry_block_ptr);

  /* Execute store motion.  Force the necessary invariants to be moved
     out of the loops as well.  */
  store_motion ();

  /* Move the expressions that are expensive enough.  */
  todo = move_computations ();

  tree_ssa_lim_finalize ();

  return todo;
}
/* Loop invariant motion pass.  */

namespace {

const pass_data pass_data_lim =
{
  GIMPLE_PASS, /* type */
  "lim", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_LIM, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lim : public gimple_opt_pass
{
public:
  pass_lim (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lim, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_lim (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_loop_im != 0; }
  virtual unsigned int execute (function *);

}; // class pass_lim

unsigned int
pass_lim::execute (function *fun)
{
  bool in_loop_pipeline = scev_initialized_p ();
  if (!in_loop_pipeline)
    loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);

  if (number_of_loops (fun) <= 1)
    return 0;
  unsigned int todo = tree_ssa_lim ();

  if (!in_loop_pipeline)
    loop_optimizer_finalize ();
  else
    scev_reset ();
  return todo;
}

} // anon namespace

gimple_opt_pass *
make_pass_lim (gcc::context *ctxt)
{
  return new pass_lim (ctxt);
}