gcc/tree-ssa-loop-im.c
1 /* Loop invariant motion.
2 Copyright (C) 2003-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "tm_p.h"
26 #include "basic-block.h"
27 #include "gimple-pretty-print.h"
28 #include "hash-map.h"
29 #include "hash-table.h"
30 #include "tree-ssa-alias.h"
31 #include "internal-fn.h"
32 #include "tree-eh.h"
33 #include "gimple-expr.h"
34 #include "is-a.h"
35 #include "gimple.h"
36 #include "gimplify.h"
37 #include "gimple-iterator.h"
38 #include "gimple-ssa.h"
39 #include "tree-cfg.h"
40 #include "tree-phinodes.h"
41 #include "ssa-iterators.h"
42 #include "stringpool.h"
43 #include "tree-ssanames.h"
44 #include "tree-ssa-loop-manip.h"
45 #include "tree-ssa-loop.h"
46 #include "tree-into-ssa.h"
47 #include "cfgloop.h"
48 #include "domwalk.h"
49 #include "params.h"
50 #include "tree-pass.h"
51 #include "flags.h"
52 #include "tree-affine.h"
53 #include "tree-ssa-propagate.h"
54 #include "trans-mem.h"
55 #include "gimple-fold.h"
57 /* TODO: Support for predicated code motion. I.e.
59 while (1)
61 if (cond)
63 a = inv;
64 something;
68 Where COND and INV are invariants, but evaluating INV may trap or be
69 invalid for some other reason if !COND. This may be transformed to
71 if (cond)
72 a = inv;
73 while (1)
75 if (cond)
76 something;
77 } */
79 /* The auxiliary data kept for each statement. */
81 struct lim_aux_data
83 struct loop *max_loop; /* The outermost loop in which the statement
84 is invariant. */
86 struct loop *tgt_loop; /* The loop out of which we want to move the
87 invariant. */
89 struct loop *always_executed_in;
90 /* The outermost loop for which we are sure
91 the statement is executed if the loop
92 is entered. */
94 unsigned cost; /* Cost of the computation performed by the
95 statement. */
97 vec<gimple> depends; /* Vector of statements that must be also
98 hoisted out of the loop when this statement
99 is hoisted; i.e. those that define the
100 operands of the statement and are inside of
101 the MAX_LOOP loop. */
104 /* Maps statements to their lim_aux_data. */
106 static hash_map<gimple, lim_aux_data *> *lim_aux_data_map;
108 /* Description of a memory reference location. */
110 typedef struct mem_ref_loc
112 tree *ref; /* The reference itself. */
113 gimple stmt; /* The statement in which it occurs. */
114 } *mem_ref_loc_p;
117 /* Description of a memory reference. */
119 typedef struct im_mem_ref
121 unsigned id; /* ID assigned to the memory reference
122 (its index in memory_accesses.refs_list) */
123 hashval_t hash; /* Its hash value. */
125 /* The memory access itself and associated caching of alias-oracle
126 query meta-data. */
127 ao_ref mem;
129 bitmap stored; /* The set of loops in which this memory location
130 is stored to. */
131 vec<mem_ref_loc> accesses_in_loop;
132 /* The locations of the accesses. Vector
133 sorted by the containing loop's postorder number. */
135 /* The following sets are computed on demand. We keep both set and
136 its complement, so that we know whether the information was
137 already computed or not. */
138 bitmap_head indep_loop; /* The set of loops in which the memory
139 reference is independent, meaning:
140 If it is stored in the loop, this store
141 is independent of all other loads and
142 stores.
143 If it is only loaded, then it is independent
144 of all stores in the loop. */
145 bitmap_head dep_loop; /* The complement of INDEP_LOOP. */
146 } *mem_ref_p;
148 /* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first
149 to record (in)dependence against stores in the loop and its subloops, the
150 second to record (in)dependence against all references in the loop
151 and its subloops. */
152 #define LOOP_DEP_BIT(loopnum, storedp) (2 * (loopnum) + (storedp ? 1 : 0))
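/* For example, for the loop with number 3, LOOP_DEP_BIT (3, false) == 6
   is the bit recording (in)dependence against stores in the loop and
   its subloops, and LOOP_DEP_BIT (3, true) == 7 the bit recording
   (in)dependence against all references in the loop and its
   subloops.  */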
154 /* Mem_ref hashtable helpers. */
156 struct mem_ref_hasher : typed_noop_remove <im_mem_ref>
158 typedef im_mem_ref value_type;
159 typedef tree_node compare_type;
160 static inline hashval_t hash (const value_type *);
161 static inline bool equal (const value_type *, const compare_type *);
164 /* A hash function for struct im_mem_ref object OBJ. */
166 inline hashval_t
167 mem_ref_hasher::hash (const value_type *mem)
169 return mem->hash;
172 /* An equality function for struct im_mem_ref object MEM1 with
173 memory reference OBJ2. */
175 inline bool
176 mem_ref_hasher::equal (const value_type *mem1, const compare_type *obj2)
178 return operand_equal_p (mem1->mem.ref, (const_tree) obj2, 0);
182 /* Description of memory accesses in loops. */
184 static struct
186 /* The hash table of memory references accessed in loops. */
187 hash_table<mem_ref_hasher> *refs;
189 /* The list of memory references. */
190 vec<mem_ref_p> refs_list;
192 /* The set of memory references accessed in each loop. */
193 vec<bitmap_head> refs_in_loop;
195 /* The set of memory references stored in each loop. */
196 vec<bitmap_head> refs_stored_in_loop;
198 /* The set of memory references stored in each loop, including subloops. */
199 vec<bitmap_head> all_refs_stored_in_loop;
201 /* Cache for expanding memory addresses. */
202 hash_map<tree, name_expansion *> *ttae_cache;
203 } memory_accesses;
205 /* Obstack for the bitmaps in the above data structures. */
206 static bitmap_obstack lim_bitmap_obstack;
207 static obstack mem_ref_obstack;
209 static bool ref_indep_loop_p (struct loop *, mem_ref_p);
211 /* Minimum cost of an expensive expression. */
212 #define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))
214 /* The outermost loop for which execution of the header guarantees that the
215 block will be executed. */
216 #define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
217 #define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))
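/* For instance, the header of a loop is executed whenever the loop is
   entered, so ALWAYS_EXECUTED_IN for the header is at least that loop;
   a block guarded by a conditional inside the loop has no such
   guarantee and keeps NULL.  */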
219 /* ID of the shared unanalyzable mem. */
220 #define UNANALYZABLE_MEM_ID 0
222 /* Whether the reference was analyzable. */
223 #define MEM_ANALYZABLE(REF) ((REF)->id != UNANALYZABLE_MEM_ID)
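/* The single reference with id UNANALYZABLE_MEM_ID is shared by all
   statements whose memory access could not be recognized; a check like

     if (!MEM_ANALYZABLE (ref))
       return false;

   is how clients give up on such a reference early.  */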
225 static struct lim_aux_data *
226 init_lim_data (gimple stmt)
228 lim_aux_data *p = XCNEW (struct lim_aux_data);
229 lim_aux_data_map->put (stmt, p);
231 return p;
234 static struct lim_aux_data *
235 get_lim_data (gimple stmt)
237 lim_aux_data **p = lim_aux_data_map->get (stmt);
238 if (!p)
239 return NULL;
241 return *p;
244 /* Releases the memory occupied by DATA. */
246 static void
247 free_lim_aux_data (struct lim_aux_data *data)
249 data->depends.release ();
250 free (data);
253 static void
254 clear_lim_data (gimple stmt)
256 lim_aux_data **p = lim_aux_data_map->get (stmt);
257 if (!p)
258 return;
260 free_lim_aux_data (*p);
261 *p = NULL;
265 /* The possibilities of statement movement. */
266 enum move_pos
268 MOVE_IMPOSSIBLE, /* No movement -- side effect expression. */
269 MOVE_PRESERVE_EXECUTION, /* Must not cause a statement that was not executed
270 to become executed -- memory accesses, ... */
271 MOVE_POSSIBLE /* Unlimited movement. */
275 /* If it is possible to hoist the statement STMT unconditionally,
276 returns MOVE_POSSIBLE.
277 If it is possible to hoist the statement STMT, but we must avoid making
278 it executed if it would not be executed in the original program (e.g.
279 because it may trap), return MOVE_PRESERVE_EXECUTION.
280 Otherwise return MOVE_IMPOSSIBLE. */
282 enum move_pos
283 movement_possibility (gimple stmt)
285 tree lhs;
286 enum move_pos ret = MOVE_POSSIBLE;
288 if (flag_unswitch_loops
289 && gimple_code (stmt) == GIMPLE_COND)
291 /* If we perform unswitching, force the operands of the invariant
292 condition to be moved out of the loop. */
293 return MOVE_POSSIBLE;
296 if (gimple_code (stmt) == GIMPLE_PHI
297 && gimple_phi_num_args (stmt) <= 2
298 && !virtual_operand_p (gimple_phi_result (stmt))
299 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
300 return MOVE_POSSIBLE;
302 if (gimple_get_lhs (stmt) == NULL_TREE)
303 return MOVE_IMPOSSIBLE;
305 if (gimple_vdef (stmt))
306 return MOVE_IMPOSSIBLE;
308 if (stmt_ends_bb_p (stmt)
309 || gimple_has_volatile_ops (stmt)
310 || gimple_has_side_effects (stmt)
311 || stmt_could_throw_p (stmt))
312 return MOVE_IMPOSSIBLE;
314 if (is_gimple_call (stmt))
316 /* While pure or const call is guaranteed to have no side effects, we
317 cannot move it arbitrarily. Consider code like
319 char *s = something ();
321 while (1)
323 if (s)
324 t = strlen (s);
325 else
326 t = 0;
329 Here the strlen call cannot be moved out of the loop, even though
330 s is invariant. In addition to possibly creating a call with
331 invalid arguments, moving out a function call that is not executed
332 may cause performance regressions in case the call is costly and
333 not executed at all. */
334 ret = MOVE_PRESERVE_EXECUTION;
335 lhs = gimple_call_lhs (stmt);
337 else if (is_gimple_assign (stmt))
338 lhs = gimple_assign_lhs (stmt);
339 else
340 return MOVE_IMPOSSIBLE;
342 if (TREE_CODE (lhs) == SSA_NAME
343 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
344 return MOVE_IMPOSSIBLE;
346 if (TREE_CODE (lhs) != SSA_NAME
347 || gimple_could_trap_p (stmt))
348 return MOVE_PRESERVE_EXECUTION;
350 /* Non local loads in a transaction cannot be hoisted out. Well,
351 unless the load happens on every path out of the loop, but we
352 don't take this into account yet. */
353 if (flag_tm
354 && gimple_in_transaction (stmt)
355 && gimple_assign_single_p (stmt))
357 tree rhs = gimple_assign_rhs1 (stmt);
358 if (DECL_P (rhs) && is_global_var (rhs))
360 if (dump_file)
362 fprintf (dump_file, "Cannot hoist conditional load of ");
363 print_generic_expr (dump_file, rhs, TDF_SLIM);
364 fprintf (dump_file, " because it is in a transaction.\n");
366 return MOVE_IMPOSSIBLE;
370 return ret;
373 /* Suppose that operand DEF is used inside the LOOP. Returns the outermost
374 loop to which we could move the expression using DEF if it did not have
375 other operands, i.e. the outermost loop enclosing LOOP in which the value
376 of DEF is invariant. */
378 static struct loop *
379 outermost_invariant_loop (tree def, struct loop *loop)
381 gimple def_stmt;
382 basic_block def_bb;
383 struct loop *max_loop;
384 struct lim_aux_data *lim_data;
386 if (!def)
387 return superloop_at_depth (loop, 1);
389 if (TREE_CODE (def) != SSA_NAME)
391 gcc_assert (is_gimple_min_invariant (def));
392 return superloop_at_depth (loop, 1);
395 def_stmt = SSA_NAME_DEF_STMT (def);
396 def_bb = gimple_bb (def_stmt);
397 if (!def_bb)
398 return superloop_at_depth (loop, 1);
400 max_loop = find_common_loop (loop, def_bb->loop_father);
402 lim_data = get_lim_data (def_stmt);
403 if (lim_data != NULL && lim_data->max_loop != NULL)
404 max_loop = find_common_loop (max_loop,
405 loop_outer (lim_data->max_loop));
406 if (max_loop == loop)
407 return NULL;
408 max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);
410 return max_loop;
413 /* DATA is a structure containing information associated with a statement
414 inside LOOP. DEF is one of the operands of this statement.
416 Find the outermost loop enclosing LOOP in which the value of DEF is invariant
417 and record this in the DATA->max_loop field. If DEF itself is defined inside
418 this loop as well (i.e. we need to hoist it out of the loop if we want
419 to hoist the statement represented by DATA), record the statement in which
420 DEF is defined in the DATA->depends list. Additionally, if ADD_COST is true,
421 add the cost of the computation of DEF to DATA->cost.
423 If DEF is not invariant in LOOP, return false. Otherwise return true. */
425 static bool
426 add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
427 bool add_cost)
429 gimple def_stmt = SSA_NAME_DEF_STMT (def);
430 basic_block def_bb = gimple_bb (def_stmt);
431 struct loop *max_loop;
432 struct lim_aux_data *def_data;
434 if (!def_bb)
435 return true;
437 max_loop = outermost_invariant_loop (def, loop);
438 if (!max_loop)
439 return false;
441 if (flow_loop_nested_p (data->max_loop, max_loop))
442 data->max_loop = max_loop;
444 def_data = get_lim_data (def_stmt);
445 if (!def_data)
446 return true;
448 if (add_cost
449 /* Only add the cost if the statement defining DEF is inside LOOP,
450 i.e. if it is likely that by moving the invariants dependent
451 on it, we will be able to avoid creating a new register for
452 it (since it will be only used in these dependent invariants). */
453 && def_bb->loop_father == loop)
454 data->cost += def_data->cost;
456 data->depends.safe_push (def_stmt);
458 return true;
461 /* Returns an estimate for a cost of statement STMT. The values here
462 are just ad-hoc constants, similar to costs for inlining. */
464 static unsigned
465 stmt_cost (gimple stmt)
467 /* Always try to create possibilities for unswitching. */
468 if (gimple_code (stmt) == GIMPLE_COND
469 || gimple_code (stmt) == GIMPLE_PHI)
470 return LIM_EXPENSIVE;
472 /* We should be hoisting calls if possible. */
473 if (is_gimple_call (stmt))
475 tree fndecl;
477 /* Unless the call is a builtin_constant_p; this always folds to a
478 constant, so moving it is useless. */
479 fndecl = gimple_call_fndecl (stmt);
480 if (fndecl
481 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
482 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P)
483 return 0;
485 return LIM_EXPENSIVE;
488 /* Hoisting memory references out should almost surely be a win. */
489 if (gimple_references_memory_p (stmt))
490 return LIM_EXPENSIVE;
492 if (gimple_code (stmt) != GIMPLE_ASSIGN)
493 return 1;
495 switch (gimple_assign_rhs_code (stmt))
497 case MULT_EXPR:
498 case WIDEN_MULT_EXPR:
499 case WIDEN_MULT_PLUS_EXPR:
500 case WIDEN_MULT_MINUS_EXPR:
501 case DOT_PROD_EXPR:
502 case FMA_EXPR:
503 case TRUNC_DIV_EXPR:
504 case CEIL_DIV_EXPR:
505 case FLOOR_DIV_EXPR:
506 case ROUND_DIV_EXPR:
507 case EXACT_DIV_EXPR:
508 case CEIL_MOD_EXPR:
509 case FLOOR_MOD_EXPR:
510 case ROUND_MOD_EXPR:
511 case TRUNC_MOD_EXPR:
512 case RDIV_EXPR:
513 /* Division and multiplication are usually expensive. */
514 return LIM_EXPENSIVE;
516 case LSHIFT_EXPR:
517 case RSHIFT_EXPR:
518 case WIDEN_LSHIFT_EXPR:
519 case LROTATE_EXPR:
520 case RROTATE_EXPR:
521 /* Shifts and rotates are usually expensive. */
522 return LIM_EXPENSIVE;
524 case CONSTRUCTOR:
525 /* Make vector construction cost proportional to the number
526 of elements. */
527 return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));
529 case SSA_NAME:
530 case PAREN_EXPR:
531 /* Whether or not something is wrapped inside a PAREN_EXPR
532 should not change move cost. Nor should an intermediate
533 unpropagated SSA name copy. */
534 return 0;
536 default:
537 return 1;
541 /* Finds the outermost loop between OUTER and LOOP in which the memory reference
542 REF is independent. If REF is not independent in LOOP, NULL is returned
543 instead. */
545 static struct loop *
546 outermost_indep_loop (struct loop *outer, struct loop *loop, mem_ref_p ref)
548 struct loop *aloop;
550 if (ref->stored && bitmap_bit_p (ref->stored, loop->num))
551 return NULL;
553 for (aloop = outer;
554 aloop != loop;
555 aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
556 if ((!ref->stored || !bitmap_bit_p (ref->stored, aloop->num))
557 && ref_indep_loop_p (aloop, ref))
558 return aloop;
560 if (ref_indep_loop_p (loop, ref))
561 return loop;
562 else
563 return NULL;
566 /* If there is a simple load or store to a memory reference in STMT, returns
567 the location of the memory reference, and sets IS_STORE according to whether
568 it is a store or load. Otherwise, returns NULL. */
570 static tree *
571 simple_mem_ref_in_stmt (gimple stmt, bool *is_store)
573 tree *lhs, *rhs;
575 /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns. */
576 if (!gimple_assign_single_p (stmt))
577 return NULL;
579 lhs = gimple_assign_lhs_ptr (stmt);
580 rhs = gimple_assign_rhs1_ptr (stmt);
582 if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
584 *is_store = false;
585 return rhs;
587 else if (gimple_vdef (stmt)
588 && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
590 *is_store = true;
591 return lhs;
593 else
594 return NULL;
597 /* Returns the memory reference contained in STMT. */
599 static mem_ref_p
600 mem_ref_in_stmt (gimple stmt)
602 bool store;
603 tree *mem = simple_mem_ref_in_stmt (stmt, &store);
604 hashval_t hash;
605 mem_ref_p ref;
607 if (!mem)
608 return NULL;
609 gcc_assert (!store);
611 hash = iterative_hash_expr (*mem, 0);
612 ref = memory_accesses.refs->find_with_hash (*mem, hash);
614 gcc_assert (ref != NULL);
615 return ref;
618 /* From a controlling predicate in DOM determine the arguments from
619 the PHI node PHI that are chosen if the predicate evaluates to
620 true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if
621 they are non-NULL. Returns true if the arguments can be determined,
622 else return false. */
624 static bool
625 extract_true_false_args_from_phi (basic_block dom, gimple phi,
626 tree *true_arg_p, tree *false_arg_p)
628 basic_block bb = gimple_bb (phi);
629 edge true_edge, false_edge, tem;
630 tree arg0 = NULL_TREE, arg1 = NULL_TREE;
632 /* We have to verify that one edge into the PHI node is dominated
633 by the true edge of the predicate block and the other edge
634 dominated by the false edge. This ensures that the PHI argument
635 we are going to take is completely determined by the path we
636 take from the predicate block.
637 We can only use BB dominance checks below if the destination of
638 the true/false edges are dominated by their edge, thus only
639 have a single predecessor. */
640 extract_true_false_edges_from_block (dom, &true_edge, &false_edge);
641 tem = EDGE_PRED (bb, 0);
642 if (tem == true_edge
643 || (single_pred_p (true_edge->dest)
644 && (tem->src == true_edge->dest
645 || dominated_by_p (CDI_DOMINATORS,
646 tem->src, true_edge->dest))))
647 arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
648 else if (tem == false_edge
649 || (single_pred_p (false_edge->dest)
650 && (tem->src == false_edge->dest
651 || dominated_by_p (CDI_DOMINATORS,
652 tem->src, false_edge->dest))))
653 arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
654 else
655 return false;
656 tem = EDGE_PRED (bb, 1);
657 if (tem == true_edge
658 || (single_pred_p (true_edge->dest)
659 && (tem->src == true_edge->dest
660 || dominated_by_p (CDI_DOMINATORS,
661 tem->src, true_edge->dest))))
662 arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
663 else if (tem == false_edge
664 || (single_pred_p (false_edge->dest)
665 && (tem->src == false_edge->dest
666 || dominated_by_p (CDI_DOMINATORS,
667 tem->src, false_edge->dest))))
668 arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
669 else
670 return false;
671 if (!arg0 || !arg1)
672 return false;
674 if (true_arg_p)
675 *true_arg_p = arg0;
676 if (false_arg_p)
677 *false_arg_p = arg1;
679 return true;
682 /* Determine the outermost loop to which it is possible to hoist a statement
683 STMT and store it to LIM_DATA (STMT)->max_loop. To do this we determine
684 the outermost loop in which the value computed by STMT is invariant.
685 If MUST_PRESERVE_EXEC is true, additionally choose the loop so that
686 we preserve whether STMT is executed. Other related information is
687 also filled into LIM_DATA (STMT).
689 The function returns false if STMT cannot be hoisted outside of the loop it
690 is defined in, and true otherwise. */
692 static bool
693 determine_max_movement (gimple stmt, bool must_preserve_exec)
695 basic_block bb = gimple_bb (stmt);
696 struct loop *loop = bb->loop_father;
697 struct loop *level;
698 struct lim_aux_data *lim_data = get_lim_data (stmt);
699 tree val;
700 ssa_op_iter iter;
702 if (must_preserve_exec)
703 level = ALWAYS_EXECUTED_IN (bb);
704 else
705 level = superloop_at_depth (loop, 1);
706 lim_data->max_loop = level;
708 if (gimple_code (stmt) == GIMPLE_PHI)
710 use_operand_p use_p;
711 unsigned min_cost = UINT_MAX;
712 unsigned total_cost = 0;
713 struct lim_aux_data *def_data;
715 /* We will end up promoting dependencies to be unconditionally
716 evaluated. For this reason the PHI cost (and thus the
717 cost we remove from the loop by doing the invariant motion)
718 is that of the cheapest PHI argument dependency chain. */
719 FOR_EACH_PHI_ARG (use_p, stmt, iter, SSA_OP_USE)
721 val = USE_FROM_PTR (use_p);
723 if (TREE_CODE (val) != SSA_NAME)
725 /* Assign cost 1 to constants. */
726 min_cost = MIN (min_cost, 1);
727 total_cost += 1;
728 continue;
730 if (!add_dependency (val, lim_data, loop, false))
731 return false;
733 gimple def_stmt = SSA_NAME_DEF_STMT (val);
734 if (gimple_bb (def_stmt)
735 && gimple_bb (def_stmt)->loop_father == loop)
737 def_data = get_lim_data (def_stmt);
738 if (def_data)
740 min_cost = MIN (min_cost, def_data->cost);
741 total_cost += def_data->cost;
746 min_cost = MIN (min_cost, total_cost);
747 lim_data->cost += min_cost;
749 if (gimple_phi_num_args (stmt) > 1)
751 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
752 gimple cond;
753 if (gsi_end_p (gsi_last_bb (dom)))
754 return false;
755 cond = gsi_stmt (gsi_last_bb (dom));
756 if (gimple_code (cond) != GIMPLE_COND)
757 return false;
758 /* Verify that this is an extended form of a diamond and
759 the PHI arguments are completely controlled by the
760 predicate in DOM. */
761 if (!extract_true_false_args_from_phi (dom, stmt, NULL, NULL))
762 return false;
764 /* Fold in dependencies and cost of the condition. */
765 FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
767 if (!add_dependency (val, lim_data, loop, false))
768 return false;
769 def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
770 if (def_data)
771 total_cost += def_data->cost;
774 /* We want to avoid unconditionally executing very expensive
775 operations. As costs for our dependencies cannot be
776 negative, just claim we are not invariant in this case.
777 We also are not sure whether the control-flow inside the
778 loop will vanish. */
779 if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
780 && !(min_cost != 0
781 && total_cost / min_cost <= 2))
782 return false;
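/* E.g. with LIM_EXPENSIVE == 20, a PHI with argument chains costing
   50 and 2 has total_cost 52 and min_cost 2; 52 - 2 >= 40 and
   52 / 2 > 2, so the check above refuses to hoist rather than
   unconditionally evaluate the expensive chain that was conditional
   before.  */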
784 /* Assume that the control-flow in the loop will vanish.
785 ??? We should verify this and not artificially increase
786 the cost if that is not the case. */
787 lim_data->cost += stmt_cost (stmt);
790 return true;
792 else
793 FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
794 if (!add_dependency (val, lim_data, loop, true))
795 return false;
797 if (gimple_vuse (stmt))
799 mem_ref_p ref = mem_ref_in_stmt (stmt);
801 if (ref)
803 lim_data->max_loop
804 = outermost_indep_loop (lim_data->max_loop, loop, ref);
805 if (!lim_data->max_loop)
806 return false;
808 else
810 if ((val = gimple_vuse (stmt)) != NULL_TREE)
812 if (!add_dependency (val, lim_data, loop, false))
813 return false;
818 lim_data->cost += stmt_cost (stmt);
820 return true;
823 /* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
824 and that one of the operands of this statement is computed by STMT.
825 Ensure that STMT (together with all the statements that define its
826 operands) is hoisted at least out of the loop LEVEL. */
828 static void
829 set_level (gimple stmt, struct loop *orig_loop, struct loop *level)
831 struct loop *stmt_loop = gimple_bb (stmt)->loop_father;
832 struct lim_aux_data *lim_data;
833 gimple dep_stmt;
834 unsigned i;
836 stmt_loop = find_common_loop (orig_loop, stmt_loop);
837 lim_data = get_lim_data (stmt);
838 if (lim_data != NULL && lim_data->tgt_loop != NULL)
839 stmt_loop = find_common_loop (stmt_loop,
840 loop_outer (lim_data->tgt_loop));
841 if (flow_loop_nested_p (stmt_loop, level))
842 return;
844 gcc_assert (level == lim_data->max_loop
845 || flow_loop_nested_p (lim_data->max_loop, level));
847 lim_data->tgt_loop = level;
848 FOR_EACH_VEC_ELT (lim_data->depends, i, dep_stmt)
849 set_level (dep_stmt, orig_loop, level);
852 /* Determines an outermost loop from which we want to hoist the statement STMT.
853 For now we choose the outermost possible loop. TODO -- use profiling
854 information to set it more sanely. */
856 static void
857 set_profitable_level (gimple stmt)
859 set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
862 /* Returns true if STMT is a call that has side effects. */
864 static bool
865 nonpure_call_p (gimple stmt)
867 if (gimple_code (stmt) != GIMPLE_CALL)
868 return false;
870 return gimple_has_side_effects (stmt);
873 /* Rewrite a/b to a*(1/b). Return the invariant stmt to process. */
875 static gimple
876 rewrite_reciprocal (gimple_stmt_iterator *bsi)
878 gimple_assign stmt, stmt1, stmt2;
879 tree name, lhs, type;
880 tree real_one;
881 gimple_stmt_iterator gsi;
883 stmt = as_a <gimple_assign> (gsi_stmt (*bsi));
884 lhs = gimple_assign_lhs (stmt);
885 type = TREE_TYPE (lhs);
887 real_one = build_one_cst (type);
889 name = make_temp_ssa_name (type, NULL, "reciptmp");
890 stmt1 = gimple_build_assign_with_ops (RDIV_EXPR, name, real_one,
891 gimple_assign_rhs2 (stmt));
893 stmt2 = gimple_build_assign_with_ops (MULT_EXPR, lhs, name,
894 gimple_assign_rhs1 (stmt));
896 /* Replace division stmt with reciprocal and multiply stmts.
897 The multiply stmt is not invariant, so update iterator
898 and avoid rescanning. */
899 gsi = *bsi;
900 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
901 gsi_replace (&gsi, stmt2, true);
903 /* Continue processing with invariant reciprocal statement. */
904 return stmt1;
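/* Roughly, at the source level the transformation above turns

     for (...) x = a / b;      // b loop invariant

   into

     reciptmp = 1.0 / b;       // invariant, hoisted later
     for (...) x = a * reciptmp;

   which is only valid under -funsafe-math-optimizations, as the
   caller checks.  */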
907 /* Check if the pattern at *BSI is a bittest of the form
908 (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0. */
910 static gimple
911 rewrite_bittest (gimple_stmt_iterator *bsi)
913 gimple_assign stmt;
914 gimple stmt1;
915 gimple_assign stmt2;
916 gimple use_stmt;
917 gimple_cond cond_stmt;
918 tree lhs, name, t, a, b;
919 use_operand_p use;
921 stmt = as_a <gimple_assign> (gsi_stmt (*bsi));
922 lhs = gimple_assign_lhs (stmt);
924 /* Verify that the single use of lhs is a comparison against zero. */
925 if (TREE_CODE (lhs) != SSA_NAME
926 || !single_imm_use (lhs, &use, &use_stmt))
927 return stmt;
928 cond_stmt = dyn_cast <gimple_cond> (use_stmt);
929 if (!cond_stmt)
930 return stmt;
931 if (gimple_cond_lhs (cond_stmt) != lhs
932 || (gimple_cond_code (cond_stmt) != NE_EXPR
933 && gimple_cond_code (cond_stmt) != EQ_EXPR)
934 || !integer_zerop (gimple_cond_rhs (cond_stmt)))
935 return stmt;
937 /* Get at the operands of the shift. The rhs is TMP1 & 1. */
938 stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
939 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
940 return stmt;
942 /* There is a conversion in between possibly inserted by fold. */
943 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
945 t = gimple_assign_rhs1 (stmt1);
946 if (TREE_CODE (t) != SSA_NAME
947 || !has_single_use (t))
948 return stmt;
949 stmt1 = SSA_NAME_DEF_STMT (t);
950 if (gimple_code (stmt1) != GIMPLE_ASSIGN)
951 return stmt;
954 /* Verify that B is loop invariant but A is not. Verify that with
955 all the stmt walking we are still in the same loop. */
956 if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
957 || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
958 return stmt;
960 a = gimple_assign_rhs1 (stmt1);
961 b = gimple_assign_rhs2 (stmt1);
963 if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
964 && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
966 gimple_stmt_iterator rsi;
968 /* 1 << B */
969 t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
970 build_int_cst (TREE_TYPE (a), 1), b);
971 name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
972 stmt1 = gimple_build_assign (name, t);
974 /* A & (1 << B) */
975 t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
976 name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
977 stmt2 = gimple_build_assign (name, t);
979 /* Replace the SSA_NAME we compare against zero. Adjust
980 the type of zero accordingly. */
981 SET_USE (use, name);
982 gimple_cond_set_rhs (cond_stmt,
983 build_int_cst_type (TREE_TYPE (name),
984 0));
986 /* Don't use gsi_replace here, none of the new assignments sets
987 the variable originally set in stmt. Move bsi to stmt1, and
988 then remove the original stmt, so that we get a chance to
989 retain debug info for it. */
990 rsi = *bsi;
991 gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
992 gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
993 gsi_remove (&rsi, true);
995 return stmt1;
998 return stmt;
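/* Roughly, at the source level the transformation above turns

     while (...) if ((a >> b) & 1) ...   // b loop invariant, a not

   into

     shifttmp = 1 << b;                  // invariant, hoisted later
     while (...) if (a & shifttmp) ...

   saving the per-iteration shift.  */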
1001 /* For each statement determines the outermost loop in which it is invariant,
1002 the statements on whose motion it depends, and the cost of the computation.
1003 This information is stored in the LIM_DATA structure associated with
1004 each statement. */
1005 class invariantness_dom_walker : public dom_walker
1007 public:
1008 invariantness_dom_walker (cdi_direction direction)
1009 : dom_walker (direction) {}
1011 virtual void before_dom_children (basic_block);
1014 /* Determine the outermost loops in which statements in basic block BB are
1015 invariant, and record them in the LIM_DATA associated with the statements.
1016 Callback for dom_walker. */
1018 void
1019 invariantness_dom_walker::before_dom_children (basic_block bb)
1021 enum move_pos pos;
1022 gimple_stmt_iterator bsi;
1023 gimple stmt;
1024 bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
1025 struct loop *outermost = ALWAYS_EXECUTED_IN (bb);
1026 struct lim_aux_data *lim_data;
1028 if (!loop_outer (bb->loop_father))
1029 return;
1031 if (dump_file && (dump_flags & TDF_DETAILS))
1032 fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
1033 bb->index, bb->loop_father->num, loop_depth (bb->loop_father));
1035 /* Look at PHI nodes, but only if there are at most two.
1036 ??? We could relax this further by post-processing the inserted
1037 code and transforming adjacent cond-exprs with the same predicate
1038 to control flow again. */
1039 bsi = gsi_start_phis (bb);
1040 if (!gsi_end_p (bsi)
1041 && ((gsi_next (&bsi), gsi_end_p (bsi))
1042 || (gsi_next (&bsi), gsi_end_p (bsi))))
1043 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1045 stmt = gsi_stmt (bsi);
1047 pos = movement_possibility (stmt);
1048 if (pos == MOVE_IMPOSSIBLE)
1049 continue;
1051 lim_data = init_lim_data (stmt);
1052 lim_data->always_executed_in = outermost;
1054 if (!determine_max_movement (stmt, false))
1056 lim_data->max_loop = NULL;
1057 continue;
1060 if (dump_file && (dump_flags & TDF_DETAILS))
1062 print_gimple_stmt (dump_file, stmt, 2, 0);
1063 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1064 loop_depth (lim_data->max_loop),
1065 lim_data->cost);
1068 if (lim_data->cost >= LIM_EXPENSIVE)
1069 set_profitable_level (stmt);
1072 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1074 stmt = gsi_stmt (bsi);
1076 pos = movement_possibility (stmt);
1077 if (pos == MOVE_IMPOSSIBLE)
1079 if (nonpure_call_p (stmt))
1081 maybe_never = true;
1082 outermost = NULL;
1084 /* Make sure to note always_executed_in for stores to make
1085 store-motion work. */
1086 else if (stmt_makes_single_store (stmt))
1088 struct lim_aux_data *lim_data = init_lim_data (stmt);
1089 lim_data->always_executed_in = outermost;
1091 continue;
1094 if (is_gimple_assign (stmt)
1095 && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
1096 == GIMPLE_BINARY_RHS))
1098 tree op0 = gimple_assign_rhs1 (stmt);
1099 tree op1 = gimple_assign_rhs2 (stmt);
1100 struct loop *ol1 = outermost_invariant_loop (op1,
1101 loop_containing_stmt (stmt));
1103 /* If divisor is invariant, convert a/b to a*(1/b), allowing reciprocal
1104 to be hoisted out of loop, saving expensive divide. */
1105 if (pos == MOVE_POSSIBLE
1106 && gimple_assign_rhs_code (stmt) == RDIV_EXPR
1107 && flag_unsafe_math_optimizations
1108 && !flag_trapping_math
1109 && ol1 != NULL
1110 && outermost_invariant_loop (op0, ol1) == NULL)
1111 stmt = rewrite_reciprocal (&bsi);
1113 /* If the shift count is invariant, convert (A >> B) & 1 to
1114 A & (1 << B) allowing the bit mask to be hoisted out of the loop
1115 saving an expensive shift. */
1116 if (pos == MOVE_POSSIBLE
1117 && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
1118 && integer_onep (op1)
1119 && TREE_CODE (op0) == SSA_NAME
1120 && has_single_use (op0))
1121 stmt = rewrite_bittest (&bsi);
1124 lim_data = init_lim_data (stmt);
1125 lim_data->always_executed_in = outermost;
1127 if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
1128 continue;
1130 if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
1132 lim_data->max_loop = NULL;
1133 continue;
1136 if (dump_file && (dump_flags & TDF_DETAILS))
1138 print_gimple_stmt (dump_file, stmt, 2, 0);
1139 fprintf (dump_file, " invariant up to level %d, cost %d.\n\n",
1140 loop_depth (lim_data->max_loop),
1141 lim_data->cost);
1144 if (lim_data->cost >= LIM_EXPENSIVE)
1145 set_profitable_level (stmt);
1149 class move_computations_dom_walker : public dom_walker
1151 public:
1152 move_computations_dom_walker (cdi_direction direction)
1153 : dom_walker (direction), todo_ (0) {}
1155 virtual void before_dom_children (basic_block);
1157 unsigned int todo_;
1160 /* Hoist the statements in basic block BB out of the loops prescribed by
1161 data stored in LIM_DATA structures associated with each statement. Callback
1162 for walk_dominator_tree. */
1164 void
1165 move_computations_dom_walker::before_dom_children (basic_block bb)
1167 struct loop *level;
1168 gimple_stmt_iterator bsi;
1169 gimple stmt;
1170 unsigned cost = 0;
1171 struct lim_aux_data *lim_data;
1173 if (!loop_outer (bb->loop_father))
1174 return;
1176 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
1178 gimple_assign new_stmt;
1179 stmt = gsi_stmt (bsi);
1181 lim_data = get_lim_data (stmt);
1182 if (lim_data == NULL)
1184 gsi_next (&bsi);
1185 continue;
1188 cost = lim_data->cost;
1189 level = lim_data->tgt_loop;
1190 clear_lim_data (stmt);
1192 if (!level)
1194 gsi_next (&bsi);
1195 continue;
1198 if (dump_file && (dump_flags & TDF_DETAILS))
1200 fprintf (dump_file, "Moving PHI node\n");
1201 print_gimple_stmt (dump_file, stmt, 0, 0);
1202 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1203 cost, level->num);
1206 if (gimple_phi_num_args (stmt) == 1)
1208 tree arg = PHI_ARG_DEF (stmt, 0);
1209 new_stmt = gimple_build_assign_with_ops (TREE_CODE (arg),
1210 gimple_phi_result (stmt),
1211 arg, NULL_TREE);
1213 else
1215 basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
1216 gimple cond = gsi_stmt (gsi_last_bb (dom));
1217 tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
1218 /* Get the PHI arguments corresponding to the true and false
1219 edges of COND. */
1220 extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
1221 gcc_assert (arg0 && arg1);
1222 t = build2 (gimple_cond_code (cond), boolean_type_node,
1223 gimple_cond_lhs (cond), gimple_cond_rhs (cond));
1224 new_stmt = gimple_build_assign_with_ops (COND_EXPR,
1225 gimple_phi_result (stmt),
1226 t, arg0, arg1);
1227 todo_ |= TODO_cleanup_cfg;
1229 gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
1230 remove_phi_node (&bsi, false);
1233 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
1235 edge e;
1237 stmt = gsi_stmt (bsi);
1239 lim_data = get_lim_data (stmt);
1240 if (lim_data == NULL)
1242 gsi_next (&bsi);
1243 continue;
1246 cost = lim_data->cost;
1247 level = lim_data->tgt_loop;
1248 clear_lim_data (stmt);
1250 if (!level)
1252 gsi_next (&bsi);
1253 continue;
1256 /* We do not really want to move conditionals out of the loop; we just
1257 placed it here to force its operands to be moved if necessary. */
1258 if (gimple_code (stmt) == GIMPLE_COND)
1259 continue;
1261 if (dump_file && (dump_flags & TDF_DETAILS))
1263 fprintf (dump_file, "Moving statement\n");
1264 print_gimple_stmt (dump_file, stmt, 0, 0);
1265 fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1266 cost, level->num);
1269 e = loop_preheader_edge (level);
1270 gcc_assert (!gimple_vdef (stmt));
1271 if (gimple_vuse (stmt))
1273 /* The new VUSE is the one from the virtual PHI in the loop
1274 header or the one already present. */
1275 gimple_phi_iterator gsi2;
1276 for (gsi2 = gsi_start_phis (e->dest);
1277 !gsi_end_p (gsi2); gsi_next (&gsi2))
1279 gimple_phi phi = gsi2.phi ();
1280 if (virtual_operand_p (gimple_phi_result (phi)))
1282 gimple_set_vuse (stmt, PHI_ARG_DEF_FROM_EDGE (phi, e));
1283 break;
1287 gsi_remove (&bsi, false);
1288 /* In case this is a stmt that is not unconditionally executed
1289 when the target loop header is executed and the stmt may
1290 invoke undefined integer or pointer overflow rewrite it to
1291 unsigned arithmetic. */
1292 if (is_gimple_assign (stmt)
1293 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))
1294 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (gimple_assign_lhs (stmt)))
1295 && arith_code_with_undefined_signed_overflow
1296 (gimple_assign_rhs_code (stmt))
1297 && (!ALWAYS_EXECUTED_IN (bb)
1298 || !(ALWAYS_EXECUTED_IN (bb) == level
1299 || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1300 gsi_insert_seq_on_edge (e, rewrite_to_defined_overflow (stmt));
1301 else
1302 gsi_insert_on_edge (e, stmt);
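/* E.g. a signed increment tmp_1 = i_2 + 1 moved out of its guarding
   condition would be emitted on the preheader edge roughly as

     tem_3 = (unsigned int) i_2;
     tem_4 = tem_3 + 1;
     tmp_1 = (int) tem_4;

   (names illustrative) so that executing it unconditionally cannot
   invoke undefined signed overflow.  */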
1306 /* Hoist the statements out of the loops prescribed by data stored in
1307 LIM_DATA structures associated with each statement. */
1309 static unsigned int
1310 move_computations (void)
1312 move_computations_dom_walker walker (CDI_DOMINATORS);
1313 walker.walk (cfun->cfg->x_entry_block_ptr);
1315 gsi_commit_edge_inserts ();
1316 if (need_ssa_update_p (cfun))
1317 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
1319 return walker.todo_;
1322 /* Checks whether the statement defining variable *INDEX can be hoisted
1323 out of the loop passed in DATA. Callback for for_each_index. */
1325 static bool
1326 may_move_till (tree ref, tree *index, void *data)
1328 struct loop *loop = (struct loop *) data, *max_loop;
1330 /* If REF is an array reference, check also that the step and the lower
1331 bound are invariant in LOOP. */
1332 if (TREE_CODE (ref) == ARRAY_REF)
1334 tree step = TREE_OPERAND (ref, 3);
1335 tree lbound = TREE_OPERAND (ref, 2);
1337 max_loop = outermost_invariant_loop (step, loop);
1338 if (!max_loop)
1339 return false;
1341 max_loop = outermost_invariant_loop (lbound, loop);
1342 if (!max_loop)
1343 return false;
1346 max_loop = outermost_invariant_loop (*index, loop);
1347 if (!max_loop)
1348 return false;
1350 return true;
1353 /* If OP is an SSA_NAME, force the statement that defines it to be
1354 moved out of the LOOP. ORIG_LOOP is the loop in which OP is used. */
1356 static void
1357 force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
1359 gimple stmt;
1361 if (!op
1362 || is_gimple_min_invariant (op))
1363 return;
1365 gcc_assert (TREE_CODE (op) == SSA_NAME);
1367 stmt = SSA_NAME_DEF_STMT (op);
1368 if (gimple_nop_p (stmt))
1369 return;
1371 set_level (stmt, orig_loop, loop);
1374 /* Forces statements defining invariants in REF (and *INDEX) to be moved out of
1375 the LOOP. The reference REF is used in the loop ORIG_LOOP. Callback for
1376 for_each_index. */
1378 struct fmt_data
1380 struct loop *loop;
1381 struct loop *orig_loop;
1384 static bool
1385 force_move_till (tree ref, tree *index, void *data)
1387 struct fmt_data *fmt_data = (struct fmt_data *) data;
1389 if (TREE_CODE (ref) == ARRAY_REF)
1391 tree step = TREE_OPERAND (ref, 3);
1392 tree lbound = TREE_OPERAND (ref, 2);
1394 force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
1395 force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
1398 force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);
1400 return true;
1403 /* A function to free the mem_ref object OBJ. */
1405 static void
1406 memref_free (struct im_mem_ref *mem)
1408 mem->accesses_in_loop.release ();
1411 /* Allocates and returns a memory reference description for MEM whose hash
1412 value is HASH and id is ID. */
1414 static mem_ref_p
1415 mem_ref_alloc (tree mem, unsigned hash, unsigned id)
1417 mem_ref_p ref = XOBNEW (&mem_ref_obstack, struct im_mem_ref);
1418 ao_ref_init (&ref->mem, mem);
1419 ref->id = id;
1420 ref->hash = hash;
1421 ref->stored = NULL;
1422 bitmap_initialize (&ref->indep_loop, &lim_bitmap_obstack);
1423 bitmap_initialize (&ref->dep_loop, &lim_bitmap_obstack);
1424 ref->accesses_in_loop.create (1);
1426 return ref;
1429 /* Records memory reference location *LOC to the memory reference
1430 description REF. The reference occurs in statement STMT. */
1432 static void
1433 record_mem_ref_loc (mem_ref_p ref, gimple stmt, tree *loc)
1435 mem_ref_loc aref;
1436 aref.stmt = stmt;
1437 aref.ref = loc;
1438 ref->accesses_in_loop.safe_push (aref);
1441 /* Set the LOOP bit in REF's stored bitmap, allocating the bitmap if
1442 necessary. Return whether the bit changed. */
1444 static bool
1445 set_ref_stored_in_loop (mem_ref_p ref, struct loop *loop)
1447 if (!ref->stored)
1448 ref->stored = BITMAP_ALLOC (&lim_bitmap_obstack);
1449 return bitmap_set_bit (ref->stored, loop->num);
1452 /* Marks reference REF as stored in LOOP. */
1454 static void
1455 mark_ref_stored (mem_ref_p ref, struct loop *loop)
1457 while (loop != current_loops->tree_root
1458 && set_ref_stored_in_loop (ref, loop))
1459 loop = loop_outer (loop);
1462 /* Gathers memory references in statement STMT in LOOP, storing the
1463 information about them in the memory_accesses structure. Marks
1464 the vops accessed through unrecognized statements there as
1465 well. */
1467 static void
1468 gather_mem_refs_stmt (struct loop *loop, gimple stmt)
1470 tree *mem = NULL;
1471 hashval_t hash;
1472 im_mem_ref **slot;
1473 mem_ref_p ref;
1474 bool is_stored;
1475 unsigned id;
1477 if (!gimple_vuse (stmt))
1478 return;
1480 mem = simple_mem_ref_in_stmt (stmt, &is_stored);
1481 if (!mem)
1483 /* We use the shared mem_ref for all unanalyzable refs. */
1484 id = UNANALYZABLE_MEM_ID;
1485 ref = memory_accesses.refs_list[id];
1486 if (dump_file && (dump_flags & TDF_DETAILS))
1488 fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
1489 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1491 is_stored = gimple_vdef (stmt);
1493 else
1495 hash = iterative_hash_expr (*mem, 0);
1496 slot = memory_accesses.refs->find_slot_with_hash (*mem, hash, INSERT);
1497 if (*slot)
1499 ref = (mem_ref_p) *slot;
1500 id = ref->id;
1502 else
1504 id = memory_accesses.refs_list.length ();
1505 ref = mem_ref_alloc (*mem, hash, id);
1506 memory_accesses.refs_list.safe_push (ref);
1507 *slot = ref;
1509 if (dump_file && (dump_flags & TDF_DETAILS))
1511 fprintf (dump_file, "Memory reference %u: ", id);
1512 print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM);
1513 fprintf (dump_file, "\n");
1517 record_mem_ref_loc (ref, stmt, mem);
1519 bitmap_set_bit (&memory_accesses.refs_in_loop[loop->num], ref->id);
1520 if (is_stored)
1522 bitmap_set_bit (&memory_accesses.refs_stored_in_loop[loop->num], ref->id);
1523 mark_ref_stored (ref, loop);
1525 return;
1528 static unsigned *bb_loop_postorder;
1530 /* qsort comparison function to sort blocks by their loop father's postorder number. */
1532 static int
1533 sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_)
1535 basic_block bb1 = *(basic_block *)const_cast<void *>(bb1_);
1536 basic_block bb2 = *(basic_block *)const_cast<void *>(bb2_);
1537 struct loop *loop1 = bb1->loop_father;
1538 struct loop *loop2 = bb2->loop_father;
1539 if (loop1->num == loop2->num)
1540 return 0;
1541 return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
1544 /* qsort comparison function to sort ref locs by their loop father's postorder number. */
1546 static int
1547 sort_locs_in_loop_postorder_cmp (const void *loc1_, const void *loc2_)
1549 mem_ref_loc *loc1 = (mem_ref_loc *)const_cast<void *>(loc1_);
1550 mem_ref_loc *loc2 = (mem_ref_loc *)const_cast<void *>(loc2_);
1551 struct loop *loop1 = gimple_bb (loc1->stmt)->loop_father;
1552 struct loop *loop2 = gimple_bb (loc2->stmt)->loop_father;
1553 if (loop1->num == loop2->num)
1554 return 0;
1555 return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
1558 /* Gathers memory references in loops. */
1560 static void
1561 analyze_memory_references (void)
1563 gimple_stmt_iterator bsi;
1564 basic_block bb, *bbs;
1565 struct loop *loop, *outer;
1566 unsigned i, n;
1568 /* Collect all basic-blocks in loops and sort them by their
1569 loop's postorder number. */
1570 i = 0;
1571 bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
1572 FOR_EACH_BB_FN (bb, cfun)
1573 if (bb->loop_father != current_loops->tree_root)
1574 bbs[i++] = bb;
1575 n = i;
1576 qsort (bbs, n, sizeof (basic_block), sort_bbs_in_loop_postorder_cmp);
1578 /* Visit blocks in loop postorder and assign mem-ref IDs in that order.
1579 That results in better locality for all the bitmaps. */
1580 for (i = 0; i < n; ++i)
1582 basic_block bb = bbs[i];
1583 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1584 gather_mem_refs_stmt (bb->loop_father, gsi_stmt (bsi));
1587 /* Sort the location list of gathered memory references by their
1588 loop postorder number. */
1589 im_mem_ref *ref;
1590 FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
1591 ref->accesses_in_loop.qsort (sort_locs_in_loop_postorder_cmp);
1593 free (bbs);
1594 // free (bb_loop_postorder);
1596 /* Propagate the information about accessed memory references up
1597 the loop hierarchy. */
1598 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1600 /* Finalize the overall touched references (including subloops). */
1601 bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
1602 &memory_accesses.refs_stored_in_loop[loop->num]);
1604 /* Propagate the information about accessed memory references up
1605 the loop hierarchy. */
1606 outer = loop_outer (loop);
1607 if (outer == current_loops->tree_root)
1608 continue;
1610 bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[outer->num],
1611 &memory_accesses.all_refs_stored_in_loop[loop->num]);
1615 /* Returns true if MEM1 and MEM2 may alias. TTAE_CACHE is used as a cache in
1616 tree_to_aff_combination_expand. */
1618 static bool
1619 mem_refs_may_alias_p (mem_ref_p mem1, mem_ref_p mem2,
1620 hash_map<tree, name_expansion *> **ttae_cache)
1622 /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
1623 object and their offsets differ in such a way that the locations cannot
1624 overlap, then they cannot alias. */
1625 widest_int size1, size2;
1626 aff_tree off1, off2;
1628 /* Perform basic offset and type-based disambiguation. */
1629 if (!refs_may_alias_p_1 (&mem1->mem, &mem2->mem, true))
1630 return false;
1632 /* The expansion of addresses may be a bit expensive, thus we only do
1633 the check at -O2 and higher optimization levels. */
1634 if (optimize < 2)
1635 return true;
1637 get_inner_reference_aff (mem1->mem.ref, &off1, &size1);
1638 get_inner_reference_aff (mem2->mem.ref, &off2, &size2);
1639 aff_combination_expand (&off1, ttae_cache);
1640 aff_combination_expand (&off2, ttae_cache);
1641 aff_combination_scale (&off1, -1);
1642 aff_combination_add (&off2, &off1);
1644 if (aff_comb_cannot_overlap_p (&off2, size1, size2))
1645 return false;
1647 return true;
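/* E.g. accesses a[i] and a[i + 1] expand to a common base with affine
   offsets differing by the element size; since both accesses have that
   same size, aff_comb_cannot_overlap_p can prove they never overlap
   even though the alias oracle alone cannot tell them apart.  */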
1650 /* Compare function for bsearch searching for reference locations
1651 in a loop. */
1653 static int
1654 find_ref_loc_in_loop_cmp (const void *loop_, const void *loc_)
1656 struct loop *loop = (struct loop *)const_cast<void *>(loop_);
1657 mem_ref_loc *loc = (mem_ref_loc *)const_cast<void *>(loc_);
1658 struct loop *loc_loop = gimple_bb (loc->stmt)->loop_father;
1659 if (loop->num == loc_loop->num
1660 || flow_loop_nested_p (loop, loc_loop))
1661 return 0;
1662 return (bb_loop_postorder[loop->num] < bb_loop_postorder[loc_loop->num]
1663 ? -1 : 1);
1666 /* Iterates over all locations of REF in LOOP and its subloops calling
1667 fn.operator() with the location as argument. When that operator
1668 returns true the iteration is stopped and true is returned.
1669 Otherwise false is returned. */
1671 template <typename FN>
1672 static bool
1673 for_all_locs_in_loop (struct loop *loop, mem_ref_p ref, FN fn)
1675 unsigned i;
1676 mem_ref_loc_p loc;
1678 /* Search for the cluster of locs in the accesses_in_loop vector,
1679 which is sorted by the postorder index of the loop father. */
1680 loc = ref->accesses_in_loop.bsearch (loop, find_ref_loc_in_loop_cmp);
1681 if (!loc)
1682 return false;
1684 /* We have found one location inside loop or its sub-loops. Iterate
1685 both forward and backward to cover the whole cluster. */
1686 i = loc - ref->accesses_in_loop.address ();
1687 while (i > 0)
1689 --i;
1690 mem_ref_loc_p l = &ref->accesses_in_loop[i];
1691 if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
1692 break;
1693 if (fn (l))
1694 return true;
1696 for (i = loc - ref->accesses_in_loop.address ();
1697 i < ref->accesses_in_loop.length (); ++i)
1699 mem_ref_loc_p l = &ref->accesses_in_loop[i];
1700 if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
1701 break;
1702 if (fn (l))
1703 return true;
1706 return false;
1709 /* Rewrites location LOC by TMP_VAR. */
1711 struct rewrite_mem_ref_loc
1713 rewrite_mem_ref_loc (tree tmp_var_) : tmp_var (tmp_var_) {}
1714 bool operator () (mem_ref_loc_p loc);
1715 tree tmp_var;
1718 bool
1719 rewrite_mem_ref_loc::operator () (mem_ref_loc_p loc)
1721 *loc->ref = tmp_var;
1722 update_stmt (loc->stmt);
1723 return false;
1726 /* Rewrites all references to REF in LOOP by variable TMP_VAR. */
1728 static void
1729 rewrite_mem_refs (struct loop *loop, mem_ref_p ref, tree tmp_var)
1731 for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var));
1734 /* Stores the first reference location in LOCP. */
1736 struct first_mem_ref_loc_1
1738 first_mem_ref_loc_1 (mem_ref_loc_p *locp_) : locp (locp_) {}
1739 bool operator () (mem_ref_loc_p loc);
1740 mem_ref_loc_p *locp;
1743 bool
1744 first_mem_ref_loc_1::operator () (mem_ref_loc_p loc)
1746 *locp = loc;
1747 return true;
1750 /* Returns the first reference location to REF in LOOP. */
1752 static mem_ref_loc_p
1753 first_mem_ref_loc (struct loop *loop, mem_ref_p ref)
1755 mem_ref_loc_p locp = NULL;
1756 for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp));
1757 return locp;
1760 struct prev_flag_edges {
1761 /* Edge to insert new flag comparison code. */
1762 edge append_cond_position;
1764 /* Edge for fall through from previous flag comparison. */
1765 edge last_cond_fallthru;
1768 /* Helper function for execute_sm. Emit code to store TMP_VAR into
1769 MEM along edge EX.
1771 The store is only done if MEM has changed. We do this so no
1772 changes to MEM occur on code paths that did not originally store
1773 into it.
1775 The common case for execute_sm will transform:
1777 for (...) {
1778 if (foo)
1779 stuff;
1780 else
1781 MEM = TMP_VAR;
1784 into:
1786 lsm = MEM;
1787 for (...) {
1788 if (foo)
1789 stuff;
1790 else
1791 lsm = TMP_VAR;
1793 MEM = lsm;
1795 This function will generate:
1797 lsm = MEM;
1799 lsm_flag = false;
1801 for (...) {
1802 if (foo)
1803 stuff;
1804 else {
1805 lsm = TMP_VAR;
1806 lsm_flag = true;
1809 if (lsm_flag) <--
1810 MEM = lsm; <--
1813 static void
1814 execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag)
1816 basic_block new_bb, then_bb, old_dest;
1817 bool loop_has_only_one_exit;
1818 edge then_old_edge, orig_ex = ex;
1819 gimple_stmt_iterator gsi;
1820 gimple stmt;
1821 struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
1822 bool irr = ex->flags & EDGE_IRREDUCIBLE_LOOP;
1824 /* ?? Insert store after previous store if applicable. See note
1825 below. */
1826 if (prev_edges)
1827 ex = prev_edges->append_cond_position;
1829 loop_has_only_one_exit = single_pred_p (ex->dest);
1831 if (loop_has_only_one_exit)
1832 ex = split_block_after_labels (ex->dest);
1834 old_dest = ex->dest;
1835 new_bb = split_edge (ex);
1836 then_bb = create_empty_bb (new_bb);
1837 if (irr)
1838 then_bb->flags = BB_IRREDUCIBLE_LOOP;
1839 add_bb_to_loop (then_bb, new_bb->loop_father);
1841 gsi = gsi_start_bb (new_bb);
1842 stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
1843 NULL_TREE, NULL_TREE);
1844 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1846 gsi = gsi_start_bb (then_bb);
1847 /* Insert actual store. */
1848 stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
1849 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1851 make_edge (new_bb, then_bb,
1852 EDGE_TRUE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
1853 make_edge (new_bb, old_dest,
1854 EDGE_FALSE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
1855 then_old_edge = make_edge (then_bb, old_dest,
1856 EDGE_FALLTHRU | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
1858 set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);
1860 if (prev_edges)
1862 basic_block prevbb = prev_edges->last_cond_fallthru->src;
1863 redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
1864 set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
1865 set_immediate_dominator (CDI_DOMINATORS, old_dest,
1866 recompute_dominator (CDI_DOMINATORS, old_dest));
1869 /* ?? Because stores may alias, they must happen in the exact
1870 sequence they originally happened. Save the position right after
1871 the (_lsm) store we just created so we can continue appending after
1872 it and maintain the original order. */
1874 struct prev_flag_edges *p;
1876 if (orig_ex->aux)
1877 orig_ex->aux = NULL;
1878 alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
1879 p = (struct prev_flag_edges *) orig_ex->aux;
1880 p->append_cond_position = then_old_edge;
1881 p->last_cond_fallthru = find_edge (new_bb, old_dest);
1882 orig_ex->aux = (void *) p;
1885 if (!loop_has_only_one_exit)
1886 for (gimple_phi_iterator gpi = gsi_start_phis (old_dest);
1887 !gsi_end_p (gpi); gsi_next (&gpi))
1889 gimple_phi phi = gpi.phi ();
1890 unsigned i;
1892 for (i = 0; i < gimple_phi_num_args (phi); i++)
1893 if (gimple_phi_arg_edge (phi, i)->src == new_bb)
1895 tree arg = gimple_phi_arg_def (phi, i);
1896 add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
1897 update_stmt (phi);
1900 /* Remove the original fall through edge. This was the
1901 single_succ_edge (new_bb). */
1902 EDGE_SUCC (new_bb, 0)->flags &= ~EDGE_FALLTHRU;
1905 /* When REF is stored at the location, set a flag indicating the store. */
1907 struct sm_set_flag_if_changed
1909 sm_set_flag_if_changed (tree flag_) : flag (flag_) {}
1910 bool operator () (mem_ref_loc_p loc);
1911 tree flag;
1914 bool
1915 sm_set_flag_if_changed::operator () (mem_ref_loc_p loc)
1917 /* Only set the flag for writes. */
1918 if (is_gimple_assign (loc->stmt)
1919 && gimple_assign_lhs_ptr (loc->stmt) == loc->ref)
1921 gimple_stmt_iterator gsi = gsi_for_stmt (loc->stmt);
1922 gimple stmt = gimple_build_assign (flag, boolean_true_node);
1923 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1925 return false;
1928 /* Helper function for execute_sm. On every location where REF is
1929 set, set an appropriate flag indicating the store. */
1931 static tree
1932 execute_sm_if_changed_flag_set (struct loop *loop, mem_ref_p ref)
1934 tree flag;
1935 char *str = get_lsm_tmp_name (ref->mem.ref, ~0, "_flag");
1936 flag = create_tmp_reg (boolean_type_node, str);
1937 for_all_locs_in_loop (loop, ref, sm_set_flag_if_changed (flag));
1938 return flag;
1941 /* Executes store motion of memory reference REF from LOOP.
1942 Exits from the LOOP are stored in EXITS. The initialization of the
1943 temporary variable is placed in the preheader of the loop, and assignments
1944 to the reference from the temporary variable are emitted on the exits. */
1946 static void
1947 execute_sm (struct loop *loop, vec<edge> exits, mem_ref_p ref)
1949 tree tmp_var, store_flag = NULL_TREE;
1950 unsigned i;
1951 gimple_assign load;
1952 struct fmt_data fmt_data;
1953 edge ex;
1954 struct lim_aux_data *lim_data;
1955 bool multi_threaded_model_p = false;
1956 gimple_stmt_iterator gsi;
1958 if (dump_file && (dump_flags & TDF_DETAILS))
1960 fprintf (dump_file, "Executing store motion of ");
1961 print_generic_expr (dump_file, ref->mem.ref, 0);
1962 fprintf (dump_file, " from loop %d\n", loop->num);
1965 tmp_var = create_tmp_reg (TREE_TYPE (ref->mem.ref),
1966 get_lsm_tmp_name (ref->mem.ref, ~0));
1968 fmt_data.loop = loop;
1969 fmt_data.orig_loop = loop;
1970 for_each_index (&ref->mem.ref, force_move_till, &fmt_data);
1972 if (bb_in_transaction (loop_preheader_edge (loop)->src)
1973 || !PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES))
1974 multi_threaded_model_p = true;
1976 if (multi_threaded_model_p)
1977 store_flag = execute_sm_if_changed_flag_set (loop, ref);
1979 rewrite_mem_refs (loop, ref, tmp_var);
1981 /* Emit the load code at the position of the first occurrence of REF
1982 in the loop; the lim_data set up below makes move_computations hoist
1983 it out of the loop after all of its dependencies. */
1984 gsi = gsi_for_stmt (first_mem_ref_loc (loop, ref)->stmt);
1986 /* FIXME/TODO: For the multi-threaded variant, we could avoid this
1987 load altogether, since the store is predicated by a flag. We
1988 could do the load only if it was originally in the loop. */
1989 load = gimple_build_assign (tmp_var, unshare_expr (ref->mem.ref));
1990 lim_data = init_lim_data (load);
1991 lim_data->max_loop = loop;
1992 lim_data->tgt_loop = loop;
1993 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
1995 if (multi_threaded_model_p)
1997 load = gimple_build_assign (store_flag, boolean_false_node);
1998 lim_data = init_lim_data (load);
1999 lim_data->max_loop = loop;
2000 lim_data->tgt_loop = loop;
2001 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
2004 /* Sink the store to every exit from the loop. */
2005 FOR_EACH_VEC_ELT (exits, i, ex)
2006 if (!multi_threaded_model_p)
2008 gimple_assign store;
2009 store = gimple_build_assign (unshare_expr (ref->mem.ref), tmp_var);
2010 gsi_insert_on_edge (ex, store);
2012 else
2013 execute_sm_if_changed (ex, ref->mem.ref, tmp_var, store_flag);
2016 /* Hoists memory references MEM_REFS out of LOOP. EXITS is the list of exit
2017 edges of the LOOP. */
2019 static void
2020 hoist_memory_references (struct loop *loop, bitmap mem_refs,
2021 vec<edge> exits)
2023 mem_ref_p ref;
2024 unsigned i;
2025 bitmap_iterator bi;
2027 EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
2029 ref = memory_accesses.refs_list[i];
2030 execute_sm (loop, exits, ref);
2034 struct ref_always_accessed
2036 ref_always_accessed (struct loop *loop_, bool stored_p_)
2037 : loop (loop_), stored_p (stored_p_) {}
2038 bool operator () (mem_ref_loc_p loc);
2039 struct loop *loop;
2040 bool stored_p;
2043 bool
2044 ref_always_accessed::operator () (mem_ref_loc_p loc)
2046 struct loop *must_exec;
2048 if (!get_lim_data (loc->stmt))
2049 return false;
2051 /* If we require an always executed store, make sure the statement
2052 stores to the reference. */
2053 if (stored_p)
2055 tree lhs = gimple_get_lhs (loc->stmt);
2056 if (!lhs
2057 || lhs != *loc->ref)
2058 return false;
2061 must_exec = get_lim_data (loc->stmt)->always_executed_in;
2062 if (!must_exec)
2063 return false;
2065 if (must_exec == loop
2066 || flow_loop_nested_p (must_exec, loop))
2067 return true;
2069 return false;
2072 /* Returns true if REF is always accessed in LOOP. If STORED_P is true,
2073 make sure REF is always stored to in LOOP. */
2075 static bool
2076 ref_always_accessed_p (struct loop *loop, mem_ref_p ref, bool stored_p)
2078 return for_all_locs_in_loop (loop, ref,
2079 ref_always_accessed (loop, stored_p));
2082 /* Returns true if REF1 and REF2 are independent. */
2084 static bool
2085 refs_independent_p (mem_ref_p ref1, mem_ref_p ref2)
2087 if (ref1 == ref2)
2088 return true;
2090 if (dump_file && (dump_flags & TDF_DETAILS))
2091 fprintf (dump_file, "Querying dependency of refs %u and %u: ",
2092 ref1->id, ref2->id);
2094 if (mem_refs_may_alias_p (ref1, ref2, &memory_accesses.ttae_cache))
2096 if (dump_file && (dump_flags & TDF_DETAILS))
2097 fprintf (dump_file, "dependent.\n");
2098 return false;
2100 else
2102 if (dump_file && (dump_flags & TDF_DETAILS))
2103 fprintf (dump_file, "independent.\n");
2104 return true;
2108 /* Mark REF dependent on stores or loads (according to STORED_P) in LOOP
2109 and its super-loops. */
2111 static void
2112 record_dep_loop (struct loop *loop, mem_ref_p ref, bool stored_p)
2114 /* We can propagate dependent-in-loop bits up the loop
2115 hierarchy to all outer loops. */
2116 while (loop != current_loops->tree_root
2117 && bitmap_set_bit (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2118 loop = loop_outer (loop);
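/* For example, if REF is dependent in a loop nested three deep, the
   bit is set for that loop and for each of its outer loops as well: a
   reference dependent in an inner loop is necessarily dependent in
   every enclosing loop.  */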
2121 /* Returns true if REF is independent of all other memory references in
2122 LOOP. */
2124 static bool
2125 ref_indep_loop_p_1 (struct loop *loop, mem_ref_p ref, bool stored_p)
2127 bitmap refs_to_check;
2128 unsigned i;
2129 bitmap_iterator bi;
2130 mem_ref_p aref;
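  /* If REF is itself stored in LOOP it may conflict with any other
     access, so check it against all refs; a reference that is only
     read can only conflict with the stores.  */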
2132 if (stored_p)
2133 refs_to_check = &memory_accesses.refs_in_loop[loop->num];
2134 else
2135 refs_to_check = &memory_accesses.refs_stored_in_loop[loop->num];
2137 if (bitmap_bit_p (refs_to_check, UNANALYZABLE_MEM_ID))
2138 return false;
2140 EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
2142 aref = memory_accesses.refs_list[i];
2143 if (!refs_independent_p (ref, aref))
2144 return false;
2147 return true;
2150 /* Returns true if REF is independent of all other memory references in
2151 LOOP. Wrapper over ref_indep_loop_p_1, caching its results. */
2153 static bool
2154 ref_indep_loop_p_2 (struct loop *loop, mem_ref_p ref, bool stored_p)
2156 stored_p |= (ref->stored && bitmap_bit_p (ref->stored, loop->num));
2158 if (bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2159 return true;
2160 if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2161 return false;
2163 struct loop *inner = loop->inner;
2164 while (inner)
2166 if (!ref_indep_loop_p_2 (inner, ref, stored_p))
2167 return false;
2168 inner = inner->next;
2171 bool indep_p = ref_indep_loop_p_1 (loop, ref, stored_p);
2173 if (dump_file && (dump_flags & TDF_DETAILS))
2174 fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
2175 ref->id, loop->num, indep_p ? "independent" : "dependent");
2177 /* Record the computed result in the cache. */
2178 if (indep_p)
2180 if (bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p))
2181 && stored_p)
2183 /* If it's independent of all refs then it's independent of
2184 stores, too. */
2185 bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, false));
2188 else
2190 record_dep_loop (loop, ref, stored_p);
2191 if (!stored_p)
2193 /* If it's dependent on stores it's dependent on
2194 all refs, too. */
2195 record_dep_loop (loop, ref, true);
2199 return indep_p;
2202 /* Returns true if REF is independent of all other memory references in
2203 LOOP. */
2205 static bool
2206 ref_indep_loop_p (struct loop *loop, mem_ref_p ref)
2208 gcc_checking_assert (MEM_ANALYZABLE (ref));
2210 return ref_indep_loop_p_2 (loop, ref, false);
2213 /* Returns true if we can perform store motion of REF from LOOP. */
2215 static bool
2216 can_sm_ref_p (struct loop *loop, mem_ref_p ref)
2218 tree base;
2220 /* Can't hoist unanalyzable refs. */
2221 if (!MEM_ANALYZABLE (ref))
2222 return false;
2224 /* It should be movable. */
2225 if (!is_gimple_reg_type (TREE_TYPE (ref->mem.ref))
2226 || TREE_THIS_VOLATILE (ref->mem.ref)
2227 || !for_each_index (&ref->mem.ref, may_move_till, loop))
2228 return false;
2230 /* If it can throw, fail; we do not properly update EH info. */
2231 if (tree_could_throw_p (ref->mem.ref))
2232 return false;
2234 /* If it can trap, it must always be executed in LOOP.
2235 Readonly memory locations may trap when storing to them, but
2236 tree_could_trap_p is a predicate for rvalues, so check that
2237 explicitly. */
2238 base = get_base_address (ref->mem.ref);
2239 if ((tree_could_trap_p (ref->mem.ref)
2240 || (DECL_P (base) && TREE_READONLY (base)))
2241 && !ref_always_accessed_p (loop, ref, true))
2242 return false;
2244 /* And it must be independent of all other memory references
2245 in LOOP. */
2246 if (!ref_indep_loop_p (loop, ref))
2247 return false;
2249 return true;
2252 /* Marks in REFS_TO_SM the references in LOOP for which store motion
2253 should be performed. SM_EXECUTED is the set of references for which
2254 store motion was already performed in one of the outer loops. */
2256 static void
2257 find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
2259 bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
2260 unsigned i;
2261 bitmap_iterator bi;
2262 mem_ref_p ref;
2264 EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
2266 ref = memory_accesses.refs_list[i];
2267 if (can_sm_ref_p (loop, ref))
2268 bitmap_set_bit (refs_to_sm, i);
2272 /* Checks whether LOOP (with exits stored in the EXITS array) is suitable
2273 for a store motion optimization (i.e. whether we can insert statements
2274 on its exits). */
2276 static bool
2277 loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
2278 vec<edge> exits)
2280 unsigned i;
2281 edge ex;
2283 FOR_EACH_VEC_ELT (exits, i, ex)
2284 if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
2285 return false;
2287 return true;
2290 /* Try to perform store motion for all memory references modified inside
2291 LOOP. SM_EXECUTED is the bitmap of the memory references for which
2292 store motion was executed in one of the outer loops. */
2294 static void
2295 store_motion_loop (struct loop *loop, bitmap sm_executed)
2297 vec<edge> exits = get_loop_exit_edges (loop);
2298 struct loop *subloop;
2299 bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);
2301 if (loop_suitable_for_sm (loop, exits))
2303 find_refs_for_sm (loop, sm_executed, sm_in_loop);
2304 hoist_memory_references (loop, sm_in_loop, exits);
2306 exits.release ();
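  /* References handled in this loop must not be handled again in the
     subloops, so pass them down via SM_EXECUTED; restore the set
     afterwards so the siblings of LOOP are not affected.  */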
2308 bitmap_ior_into (sm_executed, sm_in_loop);
2309 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
2310 store_motion_loop (subloop, sm_executed);
2311 bitmap_and_compl_into (sm_executed, sm_in_loop);
2312 BITMAP_FREE (sm_in_loop);
2315 /* Try to perform store motion for all memory references modified inside
2316 loops. */
2318 static void
2319 store_motion (void)
2321 struct loop *loop;
2322 bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);
2324 for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
2325 store_motion_loop (loop, sm_executed);
2327 BITMAP_FREE (sm_executed);
2328 gsi_commit_edge_inserts ();
2331 /* Fills ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e.
2332 for each such basic block bb records the outermost loop for which
2333 execution of its header implies execution of bb. CONTAINS_CALL is the
2334 bitmap of blocks that contain a nonpure call. */
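/* An editorial sketch:

	while (...)
	  {
	    a = ...;	<- in the header: ALWAYS_EXECUTED_IN == loop
	    if (cond)
	      b = ...;	<- conditional: ALWAYS_EXECUTED_IN stays NULL
	  }  */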
2336 static void
2337 fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
2339 basic_block bb = NULL, *bbs, last = NULL;
2340 unsigned i;
2341 edge e;
2342 struct loop *inn_loop = loop;
2344 if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
2346 bbs = get_loop_body_in_dom_order (loop);
2348 for (i = 0; i < loop->num_nodes; i++)
2350 edge_iterator ei;
2351 bb = bbs[i];
2353 if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2354 last = bb;
2356 if (bitmap_bit_p (contains_call, bb->index))
2357 break;
2359 FOR_EACH_EDGE (e, ei, bb->succs)
2360 if (!flow_bb_inside_loop_p (loop, e->dest))
2361 break;
2362 if (e)
2363 break;
2365 /* A loop might be infinite (TODO: use simple loop analysis
2366 to disprove this if possible). */
2367 if (bb->flags & BB_IRREDUCIBLE_LOOP)
2368 break;
2370 if (!flow_bb_inside_loop_p (inn_loop, bb))
2371 break;
2373 if (bb->loop_father->header == bb)
2375 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2376 break;
2378 /* In a loop that is always entered we may proceed anyway.
2379 But record that we entered it and stop once we leave it. */
2380 inn_loop = bb->loop_father;
2384 while (1)
2386 SET_ALWAYS_EXECUTED_IN (last, loop);
2387 if (last == loop->header)
2388 break;
2389 last = get_immediate_dominator (CDI_DOMINATORS, last);
2392 free (bbs);
2395 for (loop = loop->inner; loop; loop = loop->next)
2396 fill_always_executed_in_1 (loop, contains_call);
2399 /* Fills ALWAYS_EXECUTED_IN information for basic blocks, i.e.
2400 for each such basic block bb records the outermost loop for which
2401 execution of its header implies execution of bb. */
2403 static void
2404 fill_always_executed_in (void)
2406 sbitmap contains_call = sbitmap_alloc (last_basic_block_for_fn (cfun));
2407 basic_block bb;
2408 struct loop *loop;
2410 bitmap_clear (contains_call);
2411 FOR_EACH_BB_FN (bb, cfun)
2413 gimple_stmt_iterator gsi;
2414 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2416 if (nonpure_call_p (gsi_stmt (gsi)))
2417 break;
2420 if (!gsi_end_p (gsi))
2421 bitmap_set_bit (contains_call, bb->index);
2424 for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
2425 fill_always_executed_in_1 (loop, contains_call);
2427 sbitmap_free (contains_call);
2431 /* Compute the global information needed by the loop invariant motion pass. */
2433 static void
2434 tree_ssa_lim_initialize (void)
2436 struct loop *loop;
2437 unsigned i;
2439 bitmap_obstack_initialize (&lim_bitmap_obstack);
2440 gcc_obstack_init (&mem_ref_obstack);
2441 lim_aux_data_map = new hash_map<gimple, lim_aux_data *>;
2443 if (flag_tm)
2444 compute_transaction_bits ();
2446 alloc_aux_for_edges (0);
2448 memory_accesses.refs = new hash_table<mem_ref_hasher> (100);
2449 memory_accesses.refs_list.create (100);
2450 /* Allocate a special, unanalyzable mem-ref with ID zero. */
2451 memory_accesses.refs_list.quick_push
2452 (mem_ref_alloc (error_mark_node, 0, UNANALYZABLE_MEM_ID));
2454 memory_accesses.refs_in_loop.create (number_of_loops (cfun));
2455 memory_accesses.refs_in_loop.quick_grow (number_of_loops (cfun));
2456 memory_accesses.refs_stored_in_loop.create (number_of_loops (cfun));
2457 memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2458 memory_accesses.all_refs_stored_in_loop.create (number_of_loops (cfun));
2459 memory_accesses.all_refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2461 for (i = 0; i < number_of_loops (cfun); i++)
2463 bitmap_initialize (&memory_accesses.refs_in_loop[i],
2464 &lim_bitmap_obstack);
2465 bitmap_initialize (&memory_accesses.refs_stored_in_loop[i],
2466 &lim_bitmap_obstack);
2467 bitmap_initialize (&memory_accesses.all_refs_stored_in_loop[i],
2468 &lim_bitmap_obstack);
2471 memory_accesses.ttae_cache = NULL;
2473 /* Initialize bb_loop_postorder with a mapping from loop->num to
2474 its postorder index. */
2475 i = 0;
2476 bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun));
2477 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
2478 bb_loop_postorder[loop->num] = i++;
2481 /* Cleans up after the invariant motion pass. */
2483 static void
2484 tree_ssa_lim_finalize (void)
2486 basic_block bb;
2487 unsigned i;
2488 mem_ref_p ref;
2490 free_aux_for_edges ();
2492 FOR_EACH_BB_FN (bb, cfun)
2493 SET_ALWAYS_EXECUTED_IN (bb, NULL);
2495 bitmap_obstack_release (&lim_bitmap_obstack);
2496 delete lim_aux_data_map;
2498 delete memory_accesses.refs;
2499 memory_accesses.refs = NULL;
2501 FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
2502 memref_free (ref);
2503 memory_accesses.refs_list.release ();
2504 obstack_free (&mem_ref_obstack, NULL);
2506 memory_accesses.refs_in_loop.release ();
2507 memory_accesses.refs_stored_in_loop.release ();
2508 memory_accesses.all_refs_stored_in_loop.release ();
2510 if (memory_accesses.ttae_cache)
2511 free_affine_expand_cache (&memory_accesses.ttae_cache);
2513 free (bb_loop_postorder);
2516 /* Moves invariants from loops. Only "expensive" invariants are moved out --
2517 i.e. those that are likely to be a win regardless of register pressure. */
2519 unsigned int
2520 tree_ssa_lim (void)
2522 unsigned int todo;
2524 tree_ssa_lim_initialize ();
2526 /* Gathers information about memory accesses in the loops. */
2527 analyze_memory_references ();
2529 /* Fills ALWAYS_EXECUTED_IN information for basic blocks. */
2530 fill_always_executed_in ();
2532 /* For each statement determine the outermost loop in which it is
2533 invariant and the cost of computing the invariant. */
2534 invariantness_dom_walker (CDI_DOMINATORS)
2535 .walk (cfun->cfg->x_entry_block_ptr);
2537 /* Execute store motion. Force the necessary invariants to be moved
2538 out of the loops as well. */
2539 store_motion ();
2541 /* Move the expressions that are expensive enough. */
2542 todo = move_computations ();
2544 tree_ssa_lim_finalize ();
2546 return todo;
2549 /* Loop invariant motion pass. */
2551 namespace {
2553 const pass_data pass_data_lim =
2555 GIMPLE_PASS, /* type */
2556 "lim", /* name */
2557 OPTGROUP_LOOP, /* optinfo_flags */
2558 TV_LIM, /* tv_id */
2559 PROP_cfg, /* properties_required */
2560 0, /* properties_provided */
2561 0, /* properties_destroyed */
2562 0, /* todo_flags_start */
2563 0, /* todo_flags_finish */
2566 class pass_lim : public gimple_opt_pass
2568 public:
2569 pass_lim (gcc::context *ctxt)
2570 : gimple_opt_pass (pass_data_lim, ctxt)
2573 /* opt_pass methods: */
2574 opt_pass * clone () { return new pass_lim (m_ctxt); }
2575 virtual bool gate (function *) { return flag_tree_loop_im != 0; }
2576 virtual unsigned int execute (function *);
2578 }; // class pass_lim
2580 unsigned int
2581 pass_lim::execute (function *fun)
2583 if (number_of_loops (fun) <= 1)
2584 return 0;
2586 return tree_ssa_lim ();
2589 } // anon namespace
2591 gimple_opt_pass *
2592 make_pass_lim (gcc::context *ctxt)
2594 return new pass_lim (ctxt);