gcc/tree-ssa-loop-im.c
/* Loop invariant motion.
   Copyright (C) 2003-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tm_p.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "cfgloop.h"
#include "domwalk.h"
#include "params.h"
#include "tree-pass.h"
#include "flags.h"
#include "hash-table.h"
#include "tree-affine.h"
#include "pointer-set.h"
#include "tree-ssa-propagate.h"
/* TODO:  Support for predicated code motion.  I.e.

   while (1)
     {
       if (cond)
	 {
	   a = inv;
	   something;
	 }
     }

   Where COND and INV are invariants, but evaluating INV may trap or be
   invalid for some other reason if !COND.  This may be transformed to

   if (cond)
     a = inv;
   while (1)
     {
       if (cond)
	 something;
     }  */
/* The auxiliary data kept for each statement.  */

struct lim_aux_data
{
  struct loop *max_loop;	/* The outermost loop in which the statement
				   is invariant.  */

  struct loop *tgt_loop;	/* The loop out of which we want to move the
				   invariant.  */

  struct loop *always_executed_in;
				/* The outermost loop for which we are sure
				   the statement is executed if the loop
				   is entered.  */

  unsigned cost;		/* Cost of the computation performed by the
				   statement.  */

  vec<gimple> depends;		/* Vector of statements that must also be
				   hoisted out of the loop when this statement
				   is hoisted; i.e. those that define the
				   operands of the statement and are inside of
				   the MAX_LOOP loop.  */
};

/* Maps statements to their lim_aux_data.  */

static struct pointer_map_t *lim_aux_data_map;

/* Description of a memory reference location.  */

typedef struct mem_ref_loc
{
  tree *ref;			/* The reference itself.  */
  gimple stmt;			/* The statement in which it occurs.  */
} *mem_ref_loc_p;


/* Description of a memory reference.  */

typedef struct mem_ref
{
  unsigned id;			/* ID assigned to the memory reference
				   (its index in memory_accesses.refs_list)  */
  hashval_t hash;		/* Its hash value.  */

  /* The memory access itself and associated caching of alias-oracle
     query meta-data.  */
  ao_ref mem;

  bitmap_head stored;		/* The set of loops in which this memory
				   location is stored to.  */
  vec<vec<mem_ref_loc> > accesses_in_loop;
				/* The locations of the accesses.  Vector
				   indexed by the loop number.  */

  /* The following sets are computed on demand.  We keep both the set and
     its complement, so that we know whether the information was
     already computed or not.  */
  bitmap_head indep_loop;	/* The set of loops in which the memory
				   reference is independent, meaning:
				   If it is stored in the loop, this store
				   is independent of all other loads and
				   stores.
				   If it is only loaded, then it is independent
				   of all stores in the loop.  */
  bitmap_head dep_loop;		/* The complement of INDEP_LOOP.  */
} *mem_ref_p;

/* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first
   to record (in)dependence against stores in the loop and its subloops, the
   second to record (in)dependence against all references in the loop
   and its subloops.  */
#define LOOP_DEP_BIT(loopnum, storedp) (2 * (loopnum) + (storedp ? 1 : 0))
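
/* For example, loop number 3 owns bits 6 and 7 in these bitmaps:
   LOOP_DEP_BIT (3, false) == 6 and LOOP_DEP_BIT (3, true) == 7.  */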
/* Mem_ref hashtable helpers.  */

struct mem_ref_hasher : typed_noop_remove <mem_ref>
{
  typedef mem_ref value_type;
  typedef tree_node compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

/* A hash function for struct mem_ref object OBJ.  */

inline hashval_t
mem_ref_hasher::hash (const value_type *mem)
{
  return mem->hash;
}

/* An equality function for struct mem_ref object MEM1 with
   memory reference OBJ2.  */

inline bool
mem_ref_hasher::equal (const value_type *mem1, const compare_type *obj2)
{
  return operand_equal_p (mem1->mem.ref, (const_tree) obj2, 0);
}


/* Description of memory accesses in loops.  */

static struct
{
  /* The hash table of memory references accessed in loops.  */
  hash_table <mem_ref_hasher> refs;

  /* The list of memory references.  */
  vec<mem_ref_p> refs_list;

  /* The set of memory references accessed in each loop.  */
  vec<bitmap_head> refs_in_loop;

  /* The set of memory references stored in each loop.  */
  vec<bitmap_head> refs_stored_in_loop;

  /* The set of memory references stored in each loop, including subloops.  */
  vec<bitmap_head> all_refs_stored_in_loop;

  /* Cache for expanding memory addresses.  */
  struct pointer_map_t *ttae_cache;
} memory_accesses;

/* Obstack for the bitmaps in the above data structures.  */
static bitmap_obstack lim_bitmap_obstack;

static bool ref_indep_loop_p (struct loop *, mem_ref_p);

/* Minimum cost of an expensive expression.  */
#define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))

/* The outermost loop for which execution of the header guarantees that the
   block will be executed.  */
#define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
#define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))

/* ID of the shared unanalyzable mem.  */
#define UNANALYZABLE_MEM_ID 0

/* Whether the reference was analyzable.  */
#define MEM_ANALYZABLE(REF) ((REF)->id != UNANALYZABLE_MEM_ID)
static struct lim_aux_data *
init_lim_data (gimple stmt)
{
  void **p = pointer_map_insert (lim_aux_data_map, stmt);

  *p = XCNEW (struct lim_aux_data);
  return (struct lim_aux_data *) *p;
}

static struct lim_aux_data *
get_lim_data (gimple stmt)
{
  void **p = pointer_map_contains (lim_aux_data_map, stmt);
  if (!p)
    return NULL;

  return (struct lim_aux_data *) *p;
}

/* Releases the memory occupied by DATA.  */

static void
free_lim_aux_data (struct lim_aux_data *data)
{
  data->depends.release ();
  free (data);
}

static void
clear_lim_data (gimple stmt)
{
  void **p = pointer_map_contains (lim_aux_data_map, stmt);
  if (!p)
    return;

  free_lim_aux_data ((struct lim_aux_data *) *p);
  *p = NULL;
}
/* Calls CBCK for each index in memory reference ADDR_P.  There are two
   kinds of situations handled; in each of these cases, the memory reference
   and DATA are passed to the callback:

   Access to an array: ARRAY_{RANGE_}REF (base, index).  In this case we also
   pass the pointer to the index to the callback.

   Pointer dereference: INDIRECT_REF (addr).  In this case we also pass the
   pointer to addr to the callback.

   If the callback returns false, the whole search stops and false is
   returned.  Otherwise the function returns true after traversing through
   the whole reference *ADDR_P.  */

bool
for_each_index (tree *addr_p, bool (*cbck) (tree, tree *, void *), void *data)
{
  tree *nxt, *idx;

  for (; ; addr_p = nxt)
    {
      switch (TREE_CODE (*addr_p))
	{
	case SSA_NAME:
	  return cbck (*addr_p, addr_p, data);

	case MEM_REF:
	  nxt = &TREE_OPERAND (*addr_p, 0);
	  return cbck (*addr_p, nxt, data);

	case BIT_FIELD_REF:
	case VIEW_CONVERT_EXPR:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	  nxt = &TREE_OPERAND (*addr_p, 0);
	  break;

	case COMPONENT_REF:
	  /* If the component has varying offset, it behaves like an index
	     as well.  */
	  idx = &TREE_OPERAND (*addr_p, 2);
	  if (*idx
	      && !cbck (*addr_p, idx, data))
	    return false;

	  nxt = &TREE_OPERAND (*addr_p, 0);
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	  nxt = &TREE_OPERAND (*addr_p, 0);
	  if (!cbck (*addr_p, &TREE_OPERAND (*addr_p, 1), data))
	    return false;
	  break;

	case VAR_DECL:
	case PARM_DECL:
	case CONST_DECL:
	case STRING_CST:
	case RESULT_DECL:
	case VECTOR_CST:
	case COMPLEX_CST:
	case INTEGER_CST:
	case REAL_CST:
	case FIXED_CST:
	case CONSTRUCTOR:
	  return true;

	case ADDR_EXPR:
	  gcc_assert (is_gimple_min_invariant (*addr_p));
	  return true;

	case TARGET_MEM_REF:
	  idx = &TMR_BASE (*addr_p);
	  if (*idx
	      && !cbck (*addr_p, idx, data))
	    return false;
	  idx = &TMR_INDEX (*addr_p);
	  if (*idx
	      && !cbck (*addr_p, idx, data))
	    return false;
	  idx = &TMR_INDEX2 (*addr_p);
	  if (*idx
	      && !cbck (*addr_p, idx, data))
	    return false;
	  return true;

	default:
	  gcc_unreachable ();
	}
    }
}
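
/* A minimal usage sketch (IDX_INVARIANT_IN_LOOP_P is hypothetical, shown
   only to illustrate the callback protocol):

     static bool
     idx_invariant_in_loop_p (tree ref ATTRIBUTE_UNUSED, tree *index,
			      void *data)
     {
       struct loop *loop = (struct loop *) data;
       return outermost_invariant_loop (*index, loop) != NULL;
     }

     ... for_each_index (&ref, idx_invariant_in_loop_p, loop) ...

   would return true only if every index in the reference is invariant in
   LOOP; may_move_till and force_move_till below follow this pattern.  */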
/* If it is possible to hoist the statement STMT unconditionally,
   returns MOVE_POSSIBLE.
   If it is possible to hoist the statement STMT, but we must avoid making
   it executed if it would not be executed in the original program (e.g.
   because it may trap), return MOVE_PRESERVE_EXECUTION.
   Otherwise return MOVE_IMPOSSIBLE.  */

enum move_pos
movement_possibility (gimple stmt)
{
  tree lhs;
  enum move_pos ret = MOVE_POSSIBLE;

  if (flag_unswitch_loops
      && gimple_code (stmt) == GIMPLE_COND)
    {
      /* If we perform unswitching, force the operands of the invariant
	 condition to be moved out of the loop.  */
      return MOVE_POSSIBLE;
    }

  if (gimple_code (stmt) == GIMPLE_PHI
      && gimple_phi_num_args (stmt) <= 2
      && !virtual_operand_p (gimple_phi_result (stmt))
      && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
    return MOVE_POSSIBLE;

  if (gimple_get_lhs (stmt) == NULL_TREE)
    return MOVE_IMPOSSIBLE;

  if (gimple_vdef (stmt))
    return MOVE_IMPOSSIBLE;

  if (stmt_ends_bb_p (stmt)
      || gimple_has_volatile_ops (stmt)
      || gimple_has_side_effects (stmt)
      || stmt_could_throw_p (stmt))
    return MOVE_IMPOSSIBLE;

  if (is_gimple_call (stmt))
    {
      /* While a pure or const call is guaranteed to have no side effects, we
	 cannot move it arbitrarily.  Consider code like

	 char *s = something ();

	 while (1)
	   {
	     if (s)
	       t = strlen (s);
	     else
	       t = 0;
	   }

	 Here the strlen call cannot be moved out of the loop, even though
	 s is invariant.  In addition to possibly creating a call with
	 invalid arguments, moving out a function call that is not executed
	 may cause performance regressions in case the call is costly and
	 not executed at all.  */
      ret = MOVE_PRESERVE_EXECUTION;
      lhs = gimple_call_lhs (stmt);
    }
  else if (is_gimple_assign (stmt))
    lhs = gimple_assign_lhs (stmt);
  else
    return MOVE_IMPOSSIBLE;

  if (TREE_CODE (lhs) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
    return MOVE_IMPOSSIBLE;

  if (TREE_CODE (lhs) != SSA_NAME
      || gimple_could_trap_p (stmt))
    return MOVE_PRESERVE_EXECUTION;

  /* Non local loads in a transaction cannot be hoisted out.  Well,
     unless the load happens on every path out of the loop, but we
     don't take this into account yet.  */
  if (flag_tm
      && gimple_in_transaction (stmt)
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      if (DECL_P (rhs) && is_global_var (rhs))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "Cannot hoist conditional load of ");
	      print_generic_expr (dump_file, rhs, TDF_SLIM);
	      fprintf (dump_file, " because it is in a transaction.\n");
	    }
	  return MOVE_IMPOSSIBLE;
	}
    }

  return ret;
}
/* Suppose that operand DEF is used inside the LOOP.  Returns the outermost
   loop to which we could move the expression using DEF if it did not have
   other operands, i.e. the outermost loop enclosing LOOP in which the value
   of DEF is invariant.  */

static struct loop *
outermost_invariant_loop (tree def, struct loop *loop)
{
  gimple def_stmt;
  basic_block def_bb;
  struct loop *max_loop;
  struct lim_aux_data *lim_data;

  if (!def)
    return superloop_at_depth (loop, 1);

  if (TREE_CODE (def) != SSA_NAME)
    {
      gcc_assert (is_gimple_min_invariant (def));
      return superloop_at_depth (loop, 1);
    }

  def_stmt = SSA_NAME_DEF_STMT (def);
  def_bb = gimple_bb (def_stmt);
  if (!def_bb)
    return superloop_at_depth (loop, 1);

  max_loop = find_common_loop (loop, def_bb->loop_father);

  lim_data = get_lim_data (def_stmt);
  if (lim_data != NULL && lim_data->max_loop != NULL)
    max_loop = find_common_loop (max_loop,
				 loop_outer (lim_data->max_loop));
  if (max_loop == loop)
    return NULL;
  max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);

  return max_loop;
}
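
/* For example (ignoring the LIM_DATA refinement above), take a loop nest
   where L1 contains L2 and the use is in LOOP == L2: a DEF defined in L2
   yields NULL (DEF is not invariant in L2); a DEF defined in L1 yields the
   superloop of L2 immediately inside L1 (L2 itself when L2 is directly
   nested in L1); a DEF defined outside all loops yields
   superloop_at_depth (L2, 1), the outermost loop enclosing L2.  */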
/* DATA is a structure containing information associated with a statement
   inside LOOP.  DEF is one of the operands of this statement.

   Find the outermost loop enclosing LOOP in which the value of DEF is
   invariant and record this in the DATA->max_loop field.  If DEF itself is
   defined inside this loop as well (i.e. we need to hoist it out of the
   loop if we want to hoist the statement represented by DATA), record the
   statement in which DEF is defined to the DATA->depends list.
   Additionally if ADD_COST is true, add the cost of the computation of DEF
   to the DATA->cost.

   If DEF is not invariant in LOOP, return false.  Otherwise return true.  */

static bool
add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
		bool add_cost)
{
  gimple def_stmt = SSA_NAME_DEF_STMT (def);
  basic_block def_bb = gimple_bb (def_stmt);
  struct loop *max_loop;
  struct lim_aux_data *def_data;

  if (!def_bb)
    return true;

  max_loop = outermost_invariant_loop (def, loop);
  if (!max_loop)
    return false;

  if (flow_loop_nested_p (data->max_loop, max_loop))
    data->max_loop = max_loop;

  def_data = get_lim_data (def_stmt);
  if (!def_data)
    return true;

  if (add_cost
      /* Only add the cost if the statement defining DEF is inside LOOP,
	 i.e. if it is likely that by moving the invariants dependent
	 on it, we will be able to avoid creating a new register for
	 it (since it will be only used in these dependent invariants).  */
      && def_bb->loop_father == loop)
    data->cost += def_data->cost;

  data->depends.safe_push (def_stmt);

  return true;
}
/* Returns an estimate for the cost of statement STMT.  The values here
   are just ad-hoc constants, similar to costs for inlining.  */

static unsigned
stmt_cost (gimple stmt)
{
  /* Always try to create possibilities for unswitching.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_PHI)
    return LIM_EXPENSIVE;

  /* We should be hoisting calls if possible.  */
  if (is_gimple_call (stmt))
    {
      tree fndecl;

      /* Unless the call is a builtin_constant_p; this always folds to a
	 constant, so moving it is useless.  */
      fndecl = gimple_call_fndecl (stmt);
      if (fndecl
	  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P)
	return 0;

      return LIM_EXPENSIVE;
    }

  /* Hoisting memory references out should almost surely be a win.  */
  if (gimple_references_memory_p (stmt))
    return LIM_EXPENSIVE;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return 1;

  switch (gimple_assign_rhs_code (stmt))
    {
    case MULT_EXPR:
    case WIDEN_MULT_EXPR:
    case WIDEN_MULT_PLUS_EXPR:
    case WIDEN_MULT_MINUS_EXPR:
    case DOT_PROD_EXPR:
    case FMA_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case TRUNC_MOD_EXPR:
    case RDIV_EXPR:
      /* Division and multiplication are usually expensive.  */
      return LIM_EXPENSIVE;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case WIDEN_LSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      /* Shifts and rotates are usually expensive.  */
      return LIM_EXPENSIVE;

    case CONSTRUCTOR:
      /* Make vector construction cost proportional to the number
	 of elements.  */
      return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));

    case SSA_NAME:
    case PAREN_EXPR:
      /* Whether or not something is wrapped inside a PAREN_EXPR
	 should not change move cost.  Nor should an intermediate
	 unpropagated SSA name copy.  */
      return 0;

    default:
      return 1;
    }
}
/* Finds the outermost loop between OUTER and LOOP in which the memory
   reference REF is independent.  If REF is not independent in LOOP, NULL
   is returned instead.  */

static struct loop *
outermost_indep_loop (struct loop *outer, struct loop *loop, mem_ref_p ref)
{
  struct loop *aloop;

  if (bitmap_bit_p (&ref->stored, loop->num))
    return NULL;

  for (aloop = outer;
       aloop != loop;
       aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
    if (!bitmap_bit_p (&ref->stored, aloop->num)
	&& ref_indep_loop_p (aloop, ref))
      return aloop;

  if (ref_indep_loop_p (loop, ref))
    return loop;
  else
    return NULL;
}
/* If there is a simple load or store to a memory reference in STMT, returns
   the location of the memory reference, and sets IS_STORE according to
   whether it is a store or load.  Otherwise, returns NULL.  */

static tree *
simple_mem_ref_in_stmt (gimple stmt, bool *is_store)
{
  tree *lhs, *rhs;

  /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns.  */
  if (!gimple_assign_single_p (stmt))
    return NULL;

  lhs = gimple_assign_lhs_ptr (stmt);
  rhs = gimple_assign_rhs1_ptr (stmt);

  if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
    {
      *is_store = false;
      return rhs;
    }
  else if (gimple_vdef (stmt)
	   && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
    {
      *is_store = true;
      return lhs;
    }
  else
    return NULL;
}

/* Returns the memory reference contained in STMT.  */

static mem_ref_p
mem_ref_in_stmt (gimple stmt)
{
  bool store;
  tree *mem = simple_mem_ref_in_stmt (stmt, &store);
  hashval_t hash;
  mem_ref_p ref;

  if (!mem)
    return NULL;
  gcc_assert (!store);

  hash = iterative_hash_expr (*mem, 0);
  ref = memory_accesses.refs.find_with_hash (*mem, hash);

  gcc_assert (ref != NULL);
  return ref;
}
/* From a controlling predicate in DOM determine the arguments from
   the PHI node PHI that are chosen if the predicate evaluates to
   true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if
   they are non-NULL.  Returns true if the arguments can be determined,
   else return false.  */

static bool
extract_true_false_args_from_phi (basic_block dom, gimple phi,
				  tree *true_arg_p, tree *false_arg_p)
{
  basic_block bb = gimple_bb (phi);
  edge true_edge, false_edge, tem;
  tree arg0 = NULL_TREE, arg1 = NULL_TREE;

  /* We have to verify that one edge into the PHI node is dominated
     by the true edge of the predicate block and the other edge
     dominated by the false edge.  This ensures that the PHI argument
     we are going to take is completely determined by the path we
     take from the predicate block.
     We can only use BB dominance checks below if the destination of
     the true/false edges are dominated by their edge, thus only
     have a single predecessor.  */
  extract_true_false_edges_from_block (dom, &true_edge, &false_edge);
  tem = EDGE_PRED (bb, 0);
  if (tem == true_edge
      || (single_pred_p (true_edge->dest)
	  && (tem->src == true_edge->dest
	      || dominated_by_p (CDI_DOMINATORS,
				 tem->src, true_edge->dest))))
    arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
  else if (tem == false_edge
	   || (single_pred_p (false_edge->dest)
	       && (tem->src == false_edge->dest
		   || dominated_by_p (CDI_DOMINATORS,
				      tem->src, false_edge->dest))))
    arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
  else
    return false;
  tem = EDGE_PRED (bb, 1);
  if (tem == true_edge
      || (single_pred_p (true_edge->dest)
	  && (tem->src == true_edge->dest
	      || dominated_by_p (CDI_DOMINATORS,
				 tem->src, true_edge->dest))))
    arg0 = PHI_ARG_DEF (phi, tem->dest_idx);
  else if (tem == false_edge
	   || (single_pred_p (false_edge->dest)
	       && (tem->src == false_edge->dest
		   || dominated_by_p (CDI_DOMINATORS,
				      tem->src, false_edge->dest))))
    arg1 = PHI_ARG_DEF (phi, tem->dest_idx);
  else
    return false;
  if (!arg0 || !arg1)
    return false;

  if (true_arg_p)
    *true_arg_p = arg0;
  if (false_arg_p)
    *false_arg_p = arg1;

  return true;
}
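
/* For illustration, the shape being verified is an (extended) diamond:

       DOM:  if (cond)
	    /          \
       true edge    false edge
	   |            |
	  ...          ...
	    \          /
	BB:  x = PHI <a, b>

   where one PHI edge is (dominated by) the true edge and the other by the
   false edge; the function then returns true with *TRUE_ARG_P == a and
   *FALSE_ARG_P == b.  */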
/* Determine the outermost loop to which it is possible to hoist a statement
   STMT and store it to LIM_DATA (STMT)->max_loop.  To do this we determine
   the outermost loop in which the value computed by STMT is invariant.
   If MUST_PRESERVE_EXEC is true, additionally choose such a loop that
   we preserve the fact whether STMT is executed.  It also fills other
   related information to LIM_DATA (STMT).

   The function returns false if STMT cannot be hoisted outside of the loop
   it is defined in, and true otherwise.  */

static bool
determine_max_movement (gimple stmt, bool must_preserve_exec)
{
  basic_block bb = gimple_bb (stmt);
  struct loop *loop = bb->loop_father;
  struct loop *level;
  struct lim_aux_data *lim_data = get_lim_data (stmt);
  tree val;
  ssa_op_iter iter;

  if (must_preserve_exec)
    level = ALWAYS_EXECUTED_IN (bb);
  else
    level = superloop_at_depth (loop, 1);
  lim_data->max_loop = level;

  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      use_operand_p use_p;
      unsigned min_cost = UINT_MAX;
      unsigned total_cost = 0;
      struct lim_aux_data *def_data;

      /* We will end up promoting dependencies to be unconditionally
	 evaluated.  For this reason the PHI cost (and thus the
	 cost we remove from the loop by doing the invariant motion)
	 is that of the cheapest PHI argument dependency chain.  */
      FOR_EACH_PHI_ARG (use_p, stmt, iter, SSA_OP_USE)
	{
	  val = USE_FROM_PTR (use_p);
	  if (TREE_CODE (val) != SSA_NAME)
	    continue;
	  if (!add_dependency (val, lim_data, loop, false))
	    return false;
	  def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
	  if (def_data)
	    {
	      min_cost = MIN (min_cost, def_data->cost);
	      total_cost += def_data->cost;
	    }
	}

      lim_data->cost += min_cost;

      if (gimple_phi_num_args (stmt) > 1)
	{
	  basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
	  gimple cond;
	  if (gsi_end_p (gsi_last_bb (dom)))
	    return false;
	  cond = gsi_stmt (gsi_last_bb (dom));
	  if (gimple_code (cond) != GIMPLE_COND)
	    return false;
	  /* Verify that this is an extended form of a diamond and
	     the PHI arguments are completely controlled by the
	     predicate in DOM.  */
	  if (!extract_true_false_args_from_phi (dom, stmt, NULL, NULL))
	    return false;

	  /* Fold in dependencies and cost of the condition.  */
	  FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
	    {
	      if (!add_dependency (val, lim_data, loop, false))
		return false;
	      def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
	      if (def_data)
		total_cost += def_data->cost;
	    }

	  /* We want to avoid unconditionally executing very expensive
	     operations.  As costs for our dependencies cannot be
	     negative just claim we are not invariant for this case.
	     We also are not sure whether the control-flow inside the
	     loop will vanish.  */
	  if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
	      && !(min_cost != 0
		   && total_cost / min_cost <= 2))
	    return false;

	  /* Assume that the control-flow in the loop will vanish.
	     ??? We should verify this and not artificially increase
	     the cost if that is not the case.  */
	  lim_data->cost += stmt_cost (stmt);
	}

      return true;
    }
  else
    FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
      if (!add_dependency (val, lim_data, loop, true))
	return false;

  if (gimple_vuse (stmt))
    {
      mem_ref_p ref = mem_ref_in_stmt (stmt);

      if (ref)
	{
	  lim_data->max_loop
	    = outermost_indep_loop (lim_data->max_loop, loop, ref);
	  if (!lim_data->max_loop)
	    return false;
	}
      else
	{
	  if ((val = gimple_vuse (stmt)) != NULL_TREE)
	    {
	      if (!add_dependency (val, lim_data, loop, false))
		return false;
	    }
	}
    }

  lim_data->cost += stmt_cost (stmt);

  return true;
}
/* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
   and that one of the operands of this statement is computed by STMT.
   Ensure that STMT (together with all the statements that define its
   operands) is hoisted at least out of the loop LEVEL.  */

static void
set_level (gimple stmt, struct loop *orig_loop, struct loop *level)
{
  struct loop *stmt_loop = gimple_bb (stmt)->loop_father;
  struct lim_aux_data *lim_data;
  gimple dep_stmt;
  unsigned i;

  stmt_loop = find_common_loop (orig_loop, stmt_loop);
  lim_data = get_lim_data (stmt);
  if (lim_data != NULL && lim_data->tgt_loop != NULL)
    stmt_loop = find_common_loop (stmt_loop,
				  loop_outer (lim_data->tgt_loop));
  if (flow_loop_nested_p (stmt_loop, level))
    return;

  gcc_assert (level == lim_data->max_loop
	      || flow_loop_nested_p (lim_data->max_loop, level));

  lim_data->tgt_loop = level;
  FOR_EACH_VEC_ELT (lim_data->depends, i, dep_stmt)
    set_level (dep_stmt, orig_loop, level);
}

/* Determines an outermost loop from which we want to hoist the statement
   STMT.  For now we choose the outermost possible loop.  TODO -- use
   profiling information to set it more sanely.  */

static void
set_profitable_level (gimple stmt)
{
  set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
}
/* Returns true if STMT is a call that has side effects.  */

static bool
nonpure_call_p (gimple stmt)
{
  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  return gimple_has_side_effects (stmt);
}
/* Rewrite a/b to a*(1/b).  Return the invariant stmt to process.  */

static gimple
rewrite_reciprocal (gimple_stmt_iterator *bsi)
{
  gimple stmt, stmt1, stmt2;
  tree name, lhs, type;
  tree real_one;
  gimple_stmt_iterator gsi;

  stmt = gsi_stmt (*bsi);
  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);

  real_one = build_one_cst (type);

  name = make_temp_ssa_name (type, NULL, "reciptmp");
  stmt1 = gimple_build_assign_with_ops (RDIV_EXPR, name, real_one,
					gimple_assign_rhs2 (stmt));

  stmt2 = gimple_build_assign_with_ops (MULT_EXPR, lhs, name,
					gimple_assign_rhs1 (stmt));

  /* Replace division stmt with reciprocal and multiply stmts.
     The multiply stmt is not invariant, so update iterator
     and avoid rescanning.  */
  gsi = *bsi;
  gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
  gsi_replace (&gsi, stmt2, true);

  /* Continue processing with invariant reciprocal statement.  */
  return stmt1;
}
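
/* For example, for an in-loop division with invariant divisor B:

     X = A / B;

   becomes

     reciptmp = 1.0 / B;
     X = A * reciptmp;

   and the returned reciprocal statement is the candidate for hoisting.  */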
/* Check if the pattern at *BSI is a bittest of the form
   (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0.  */

static gimple
rewrite_bittest (gimple_stmt_iterator *bsi)
{
  gimple stmt, use_stmt, stmt1, stmt2;
  tree lhs, name, t, a, b;
  use_operand_p use;

  stmt = gsi_stmt (*bsi);
  lhs = gimple_assign_lhs (stmt);

  /* Verify that the single use of lhs is a comparison against zero.  */
  if (TREE_CODE (lhs) != SSA_NAME
      || !single_imm_use (lhs, &use, &use_stmt)
      || gimple_code (use_stmt) != GIMPLE_COND)
    return stmt;
  if (gimple_cond_lhs (use_stmt) != lhs
      || (gimple_cond_code (use_stmt) != NE_EXPR
	  && gimple_cond_code (use_stmt) != EQ_EXPR)
      || !integer_zerop (gimple_cond_rhs (use_stmt)))
    return stmt;

  /* Get at the operands of the shift.  The rhs is TMP1 & 1.  */
  stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
  if (gimple_code (stmt1) != GIMPLE_ASSIGN)
    return stmt;

  /* There is a conversion in between possibly inserted by fold.  */
  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
    {
      t = gimple_assign_rhs1 (stmt1);
      if (TREE_CODE (t) != SSA_NAME
	  || !has_single_use (t))
	return stmt;
      stmt1 = SSA_NAME_DEF_STMT (t);
      if (gimple_code (stmt1) != GIMPLE_ASSIGN)
	return stmt;
    }

  /* Verify that B is loop invariant but A is not.  Verify that with
     all the stmt walking we are still in the same loop.  */
  if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
      || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
    return stmt;

  a = gimple_assign_rhs1 (stmt1);
  b = gimple_assign_rhs2 (stmt1);

  if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
      && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
    {
      gimple_stmt_iterator rsi;

      /* 1 << B */
      t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
		       build_int_cst (TREE_TYPE (a), 1), b);
      name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
      stmt1 = gimple_build_assign (name, t);

      /* A & (1 << B) */
      t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
      name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
      stmt2 = gimple_build_assign (name, t);

      /* Replace the SSA_NAME we compare against zero.  Adjust
	 the type of zero accordingly.  */
      SET_USE (use, name);
      gimple_cond_set_rhs (use_stmt, build_int_cst_type (TREE_TYPE (name), 0));

      /* Don't use gsi_replace here, none of the new assignments sets
	 the variable originally set in stmt.  Move bsi to stmt1, and
	 then remove the original stmt, so that we get a chance to
	 retain debug info for it.  */
      rsi = *bsi;
      gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
      gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
      gsi_remove (&rsi, true);

      return stmt1;
    }

  return stmt;
}
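
/* For example, with B invariant in the loop but A not:

     T1 = A >> B;
     T2 = T1 & 1;
     if (T2 != 0)

   becomes

     shifttmp = 1 << B;
     T3 = A & shifttmp;
     if (T3 != 0)

   so that the mask computation 1 << B can be hoisted.  */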
/* Determine the outermost loops in which statements in basic block BB are
   invariant, and record them to the LIM_DATA associated with the
   statements.  Callback for walk_dominator_tree.  */

static void
determine_invariantness_stmt (struct dom_walk_data *dw_data ATTRIBUTE_UNUSED,
			      basic_block bb)
{
  enum move_pos pos;
  gimple_stmt_iterator bsi;
  gimple stmt;
  bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
  struct loop *outermost = ALWAYS_EXECUTED_IN (bb);
  struct lim_aux_data *lim_data;

  if (!loop_outer (bb->loop_father))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
	     bb->index, bb->loop_father->num, loop_depth (bb->loop_father));

  /* Look at PHI nodes, but only if there are at most two of them.
     ??? We could relax this further by post-processing the inserted
     code and transforming adjacent cond-exprs with the same predicate
     to control flow again.  */
  bsi = gsi_start_phis (bb);
  if (!gsi_end_p (bsi)
      && ((gsi_next (&bsi), gsi_end_p (bsi))
	  || (gsi_next (&bsi), gsi_end_p (bsi))))
    for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
      {
	stmt = gsi_stmt (bsi);

	pos = movement_possibility (stmt);
	if (pos == MOVE_IMPOSSIBLE)
	  continue;

	lim_data = init_lim_data (stmt);
	lim_data->always_executed_in = outermost;

	if (!determine_max_movement (stmt, false))
	  {
	    lim_data->max_loop = NULL;
	    continue;
	  }

	if (dump_file && (dump_flags & TDF_DETAILS))
	  {
	    print_gimple_stmt (dump_file, stmt, 2, 0);
	    fprintf (dump_file, "  invariant up to level %d, cost %d.\n\n",
		     loop_depth (lim_data->max_loop),
		     lim_data->cost);
	  }

	if (lim_data->cost >= LIM_EXPENSIVE)
	  set_profitable_level (stmt);
      }

  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
    {
      stmt = gsi_stmt (bsi);

      pos = movement_possibility (stmt);
      if (pos == MOVE_IMPOSSIBLE)
	{
	  if (nonpure_call_p (stmt))
	    {
	      maybe_never = true;
	      outermost = NULL;
	    }
	  /* Make sure to note always_executed_in for stores to make
	     store-motion work.  */
	  else if (stmt_makes_single_store (stmt))
	    {
	      struct lim_aux_data *lim_data = init_lim_data (stmt);
	      lim_data->always_executed_in = outermost;
	    }
	  continue;
	}

      if (is_gimple_assign (stmt)
	  && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
	      == GIMPLE_BINARY_RHS))
	{
	  tree op0 = gimple_assign_rhs1 (stmt);
	  tree op1 = gimple_assign_rhs2 (stmt);
	  struct loop *ol1 = outermost_invariant_loop (op1,
					loop_containing_stmt (stmt));

	  /* If the divisor is invariant, convert a/b to a*(1/b), allowing the
	     reciprocal to be hoisted out of the loop, saving an expensive
	     divide.  */
	  if (pos == MOVE_POSSIBLE
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR
	      && flag_unsafe_math_optimizations
	      && !flag_trapping_math
	      && ol1 != NULL
	      && outermost_invariant_loop (op0, ol1) == NULL)
	    stmt = rewrite_reciprocal (&bsi);

	  /* If the shift count is invariant, convert (A >> B) & 1 to
	     A & (1 << B) allowing the bit mask to be hoisted out of the loop,
	     saving an expensive shift.  */
	  if (pos == MOVE_POSSIBLE
	      && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
	      && integer_onep (op1)
	      && TREE_CODE (op0) == SSA_NAME
	      && has_single_use (op0))
	    stmt = rewrite_bittest (&bsi);
	}

      lim_data = init_lim_data (stmt);
      lim_data->always_executed_in = outermost;

      if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
	continue;

      if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
	{
	  lim_data->max_loop = NULL;
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  print_gimple_stmt (dump_file, stmt, 2, 0);
	  fprintf (dump_file, "  invariant up to level %d, cost %d.\n\n",
		   loop_depth (lim_data->max_loop),
		   lim_data->cost);
	}

      if (lim_data->cost >= LIM_EXPENSIVE)
	set_profitable_level (stmt);
    }
}
/* For each statement determines the outermost loop in which it is invariant,
   the statements on whose motion it depends and the cost of the computation.
   This information is stored to the LIM_DATA structure associated with
   each statement.  */

static void
determine_invariantness (void)
{
  struct dom_walk_data walk_data;

  memset (&walk_data, 0, sizeof (struct dom_walk_data));
  walk_data.dom_direction = CDI_DOMINATORS;
  walk_data.before_dom_children = determine_invariantness_stmt;

  init_walk_dominator_tree (&walk_data);
  walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
  fini_walk_dominator_tree (&walk_data);
}
/* Hoist the statements in basic block BB out of the loops prescribed by
   data stored in LIM_DATA structures associated with each statement.
   Callback for walk_dominator_tree.  */

static void
move_computations_stmt (struct dom_walk_data *dw_data,
			basic_block bb)
{
  struct loop *level;
  gimple_stmt_iterator bsi;
  gimple stmt;
  unsigned cost = 0;
  struct lim_aux_data *lim_data;

  if (!loop_outer (bb->loop_father))
    return;

  for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
    {
      gimple new_stmt;
      stmt = gsi_stmt (bsi);

      lim_data = get_lim_data (stmt);
      if (lim_data == NULL)
	{
	  gsi_next (&bsi);
	  continue;
	}

      cost = lim_data->cost;
      level = lim_data->tgt_loop;
      clear_lim_data (stmt);

      if (!level)
	{
	  gsi_next (&bsi);
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Moving PHI node\n");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	  fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
		   cost, level->num);
	}

      if (gimple_phi_num_args (stmt) == 1)
	{
	  tree arg = PHI_ARG_DEF (stmt, 0);
	  new_stmt = gimple_build_assign_with_ops (TREE_CODE (arg),
						   gimple_phi_result (stmt),
						   arg, NULL_TREE);
	  SSA_NAME_DEF_STMT (gimple_phi_result (stmt)) = new_stmt;
	}
      else
	{
	  basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
	  gimple cond = gsi_stmt (gsi_last_bb (dom));
	  tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
	  /* Get the PHI arguments corresponding to the true and false
	     edges of COND.  */
	  extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
	  gcc_assert (arg0 && arg1);
	  t = build2 (gimple_cond_code (cond), boolean_type_node,
		      gimple_cond_lhs (cond), gimple_cond_rhs (cond));
	  new_stmt = gimple_build_assign_with_ops (COND_EXPR,
						   gimple_phi_result (stmt),
						   t, arg0, arg1);
	  SSA_NAME_DEF_STMT (gimple_phi_result (stmt)) = new_stmt;
	  *((unsigned int *)(dw_data->global_data)) |= TODO_cleanup_cfg;
	}
      gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
      remove_phi_node (&bsi, false);
    }

  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
    {
      edge e;

      stmt = gsi_stmt (bsi);

      lim_data = get_lim_data (stmt);
      if (lim_data == NULL)
	{
	  gsi_next (&bsi);
	  continue;
	}

      cost = lim_data->cost;
      level = lim_data->tgt_loop;
      clear_lim_data (stmt);

      if (!level)
	{
	  gsi_next (&bsi);
	  continue;
	}

      /* We do not really want to move conditionals out of the loop; we just
	 placed them here to force their operands to be moved if necessary.  */
      if (gimple_code (stmt) == GIMPLE_COND)
	continue;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Moving statement\n");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	  fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
		   cost, level->num);
	}

      e = loop_preheader_edge (level);
      gcc_assert (!gimple_vdef (stmt));
      if (gimple_vuse (stmt))
	{
	  /* The new VUSE is the one from the virtual PHI in the loop
	     header or the one already present.  */
	  gimple_stmt_iterator gsi2;
	  for (gsi2 = gsi_start_phis (e->dest);
	       !gsi_end_p (gsi2); gsi_next (&gsi2))
	    {
	      gimple phi = gsi_stmt (gsi2);
	      if (virtual_operand_p (gimple_phi_result (phi)))
		{
		  gimple_set_vuse (stmt, PHI_ARG_DEF_FROM_EDGE (phi, e));
		  break;
		}
	    }
	}
      gsi_remove (&bsi, false);
      gsi_insert_on_edge (e, stmt);
    }
}
/* Hoist the statements out of the loops prescribed by data stored in
   LIM_DATA structures associated with each statement.  */

static unsigned int
move_computations (void)
{
  struct dom_walk_data walk_data;
  unsigned int todo = 0;

  memset (&walk_data, 0, sizeof (struct dom_walk_data));
  walk_data.global_data = &todo;
  walk_data.dom_direction = CDI_DOMINATORS;
  walk_data.before_dom_children = move_computations_stmt;

  init_walk_dominator_tree (&walk_data);
  walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
  fini_walk_dominator_tree (&walk_data);

  gsi_commit_edge_inserts ();
  if (need_ssa_update_p (cfun))
    rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);

  return todo;
}
/* Checks whether the statement defining variable *INDEX can be hoisted
   out of the loop passed in DATA.  Callback for for_each_index.  */

static bool
may_move_till (tree ref, tree *index, void *data)
{
  struct loop *loop = (struct loop *) data, *max_loop;

  /* If REF is an array reference, check also that the step and the lower
     bound are invariant in LOOP.  */
  if (TREE_CODE (ref) == ARRAY_REF)
    {
      tree step = TREE_OPERAND (ref, 3);
      tree lbound = TREE_OPERAND (ref, 2);

      max_loop = outermost_invariant_loop (step, loop);
      if (!max_loop)
	return false;

      max_loop = outermost_invariant_loop (lbound, loop);
      if (!max_loop)
	return false;
    }

  max_loop = outermost_invariant_loop (*index, loop);
  if (!max_loop)
    return false;

  return true;
}
/* If OP is an SSA_NAME, force the statement that defines it to be
   moved out of the LOOP.  ORIG_LOOP is the loop in which EXPR is used.  */

static void
force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
{
  gimple stmt;

  if (!op
      || is_gimple_min_invariant (op))
    return;

  gcc_assert (TREE_CODE (op) == SSA_NAME);

  stmt = SSA_NAME_DEF_STMT (op);
  if (gimple_nop_p (stmt))
    return;

  set_level (stmt, orig_loop, loop);
}

/* Forces statements defining invariants in REF (and *INDEX) to be moved out
   of the LOOP.  The reference REF is used in the loop ORIG_LOOP.  Callback
   for for_each_index.  */

struct fmt_data
{
  struct loop *loop;
  struct loop *orig_loop;
};

static bool
force_move_till (tree ref, tree *index, void *data)
{
  struct fmt_data *fmt_data = (struct fmt_data *) data;

  if (TREE_CODE (ref) == ARRAY_REF)
    {
      tree step = TREE_OPERAND (ref, 3);
      tree lbound = TREE_OPERAND (ref, 2);

      force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
      force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
    }

  force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);

  return true;
}
/* A function to free the mem_ref object MEM.  */

static void
memref_free (struct mem_ref *mem)
{
  unsigned i;
  vec<mem_ref_loc> *accs;

  FOR_EACH_VEC_ELT (mem->accesses_in_loop, i, accs)
    accs->release ();
  mem->accesses_in_loop.release ();

  free (mem);
}

/* Allocates and returns a memory reference description for MEM whose hash
   value is HASH and id is ID.  */

static mem_ref_p
mem_ref_alloc (tree mem, unsigned hash, unsigned id)
{
  mem_ref_p ref = XNEW (struct mem_ref);
  ao_ref_init (&ref->mem, mem);
  ref->id = id;
  ref->hash = hash;
  bitmap_initialize (&ref->stored, &lim_bitmap_obstack);
  bitmap_initialize (&ref->indep_loop, &lim_bitmap_obstack);
  bitmap_initialize (&ref->dep_loop, &lim_bitmap_obstack);
  ref->accesses_in_loop.create (0);

  return ref;
}

/* Records memory reference location *LOC in LOOP to the memory reference
   description REF.  The reference occurs in statement STMT.  */

static void
record_mem_ref_loc (mem_ref_p ref, struct loop *loop, gimple stmt, tree *loc)
{
  mem_ref_loc aref;

  if (ref->accesses_in_loop.length ()
      <= (unsigned) loop->num)
    ref->accesses_in_loop.safe_grow_cleared (loop->num + 1);

  aref.stmt = stmt;
  aref.ref = loc;
  ref->accesses_in_loop[loop->num].safe_push (aref);
}
/* Marks reference REF as stored in LOOP.  */

static void
mark_ref_stored (mem_ref_p ref, struct loop *loop)
{
  while (loop != current_loops->tree_root
	 && bitmap_set_bit (&ref->stored, loop->num))
    loop = loop_outer (loop);
}

/* Gathers memory references in statement STMT in LOOP, storing the
   information about them in the memory_accesses structure.  Marks
   the vops accessed through unrecognized statements there as
   well.  */

static void
gather_mem_refs_stmt (struct loop *loop, gimple stmt)
{
  tree *mem = NULL;
  hashval_t hash;
  mem_ref **slot;
  mem_ref_p ref;
  bool is_stored;
  unsigned id;

  if (!gimple_vuse (stmt))
    return;

  mem = simple_mem_ref_in_stmt (stmt, &is_stored);
  if (!mem)
    {
      /* We use the shared mem_ref for all unanalyzable refs.  */
      id = UNANALYZABLE_MEM_ID;
      ref = memory_accesses.refs_list[id];
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	}
      is_stored = gimple_vdef (stmt);
    }
  else
    {
      hash = iterative_hash_expr (*mem, 0);
      slot = memory_accesses.refs.find_slot_with_hash (*mem, hash, INSERT);
      if (*slot)
	{
	  ref = (mem_ref_p) *slot;
	  id = ref->id;
	}
      else
	{
	  id = memory_accesses.refs_list.length ();
	  ref = mem_ref_alloc (*mem, hash, id);
	  memory_accesses.refs_list.safe_push (ref);
	  *slot = ref;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Memory reference %u: ", id);
	      print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM);
	      fprintf (dump_file, "\n");
	    }
	}

      record_mem_ref_loc (ref, loop, stmt, mem);
    }
  bitmap_set_bit (&memory_accesses.refs_in_loop[loop->num], ref->id);
  if (is_stored)
    {
      bitmap_set_bit (&memory_accesses.refs_stored_in_loop[loop->num], ref->id);
      mark_ref_stored (ref, loop);
    }
  return;
}
static unsigned *bb_loop_postorder;

/* qsort comparison function to sort blocks by their loop fathers'
   postorder.  */

static int
sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_)
{
  basic_block bb1 = *(basic_block *)const_cast<void *>(bb1_);
  basic_block bb2 = *(basic_block *)const_cast<void *>(bb2_);
  struct loop *loop1 = bb1->loop_father;
  struct loop *loop2 = bb2->loop_father;
  if (loop1->num == loop2->num)
    return 0;
  return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
}
/* Gathers memory references in loops.  */

static void
analyze_memory_references (void)
{
  gimple_stmt_iterator bsi;
  basic_block bb, *bbs;
  struct loop *loop, *outer;
  loop_iterator li;
  unsigned i, n;

  /* Initialize bb_loop_postorder with a mapping from loop->num to
     its postorder index.  */
  i = 0;
  bb_loop_postorder = XNEWVEC (unsigned, number_of_loops ());
  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    bb_loop_postorder[loop->num] = i++;
  /* Collect all basic-blocks in loops and sort them after their
     loops postorder.  */
  i = 0;
  bbs = XNEWVEC (basic_block, n_basic_blocks - NUM_FIXED_BLOCKS);
  FOR_EACH_BB (bb)
    if (bb->loop_father != current_loops->tree_root)
      bbs[i++] = bb;
  n = i;
  qsort (bbs, n, sizeof (basic_block), sort_bbs_in_loop_postorder_cmp);
  free (bb_loop_postorder);

  /* Visit blocks in loop postorder and assign mem-ref IDs in that order.
     That results in better locality for all the bitmaps.  */
  for (i = 0; i < n; ++i)
    {
      basic_block bb = bbs[i];
      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	gather_mem_refs_stmt (bb->loop_father, gsi_stmt (bsi));
    }

  free (bbs);

  /* Propagate the information about accessed memory references up
     the loop hierarchy.  */
  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      /* Finalize the overall touched references (including subloops).  */
      bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
		       &memory_accesses.refs_stored_in_loop[loop->num]);

      /* Propagate the information about accessed memory references up
	 the loop hierarchy.  */
      outer = loop_outer (loop);
      if (outer == current_loops->tree_root)
	continue;

      bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[outer->num],
		       &memory_accesses.all_refs_stored_in_loop[loop->num]);
    }
}
/* Returns true if MEM1 and MEM2 may alias.  TTAE_CACHE is used as a cache in
   tree_to_aff_combination_expand.  */

static bool
mem_refs_may_alias_p (mem_ref_p mem1, mem_ref_p mem2,
		      struct pointer_map_t **ttae_cache)
{
  /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
     object and their offsets differ in such a way that the locations cannot
     overlap, then they cannot alias.  */
  double_int size1, size2;
  aff_tree off1, off2;

  /* Perform basic offset and type-based disambiguation.  */
  if (!refs_may_alias_p_1 (&mem1->mem, &mem2->mem, true))
    return false;

  /* The expansion of addresses may be a bit expensive, thus we only do
     the check at -O2 and higher optimization levels.  */
  if (optimize < 2)
    return true;

  get_inner_reference_aff (mem1->mem.ref, &off1, &size1);
  get_inner_reference_aff (mem2->mem.ref, &off2, &size2);
  aff_combination_expand (&off1, ttae_cache);
  aff_combination_expand (&off2, ttae_cache);
  aff_combination_scale (&off1, double_int_minus_one);
  aff_combination_add (&off2, &off1);

  if (aff_comb_cannot_overlap_p (&off2, size1, size2))
    return false;

  return true;
}
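
/* For example (with 4-byte array elements), a[i] and a[i + 1] expand to
   affine offsets 4 * i and 4 * i + 4, both with size 4; their difference
   is the constant 4, so aff_comb_cannot_overlap_p proves the accesses
   disjoint and the function returns false (no alias).  */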
/* Iterates over all locations of REF in LOOP and its subloops calling
   fn.operator() with the location as argument.  When that operator
   returns true the iteration is stopped and true is returned.
   Otherwise false is returned.  */

template <typename FN>
static bool
for_all_locs_in_loop (struct loop *loop, mem_ref_p ref, FN fn)
{
  unsigned i;
  mem_ref_loc_p loc;
  struct loop *subloop;

  if (ref->accesses_in_loop.length () > (unsigned) loop->num)
    FOR_EACH_VEC_ELT (ref->accesses_in_loop[loop->num], i, loc)
      if (fn (loc))
	return true;

  for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
    if (for_all_locs_in_loop (subloop, ref, fn))
      return true;

  return false;
}

/* Rewrites location LOC by TMP_VAR.  */

struct rewrite_mem_ref_loc
{
  rewrite_mem_ref_loc (tree tmp_var_) : tmp_var (tmp_var_) {}
  bool operator()(mem_ref_loc_p loc);
  tree tmp_var;
};

bool
rewrite_mem_ref_loc::operator()(mem_ref_loc_p loc)
{
  *loc->ref = tmp_var;
  update_stmt (loc->stmt);
  return false;
}

/* Rewrites all references to REF in LOOP by variable TMP_VAR.  */

static void
rewrite_mem_refs (struct loop *loop, mem_ref_p ref, tree tmp_var)
{
  for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var));
}
/* The name and the length of the currently generated variable
   for lsm.  */
#define MAX_LSM_NAME_LENGTH 40
static char lsm_tmp_name[MAX_LSM_NAME_LENGTH + 1];
static int lsm_tmp_name_length;

/* Adds S to lsm_tmp_name.  */

static void
lsm_tmp_name_add (const char *s)
{
  int l = strlen (s) + lsm_tmp_name_length;
  if (l > MAX_LSM_NAME_LENGTH)
    return;

  strcpy (lsm_tmp_name + lsm_tmp_name_length, s);
  lsm_tmp_name_length = l;
}

/* Stores the name for the temporary variable that replaces REF to
   lsm_tmp_name.  */

static void
gen_lsm_tmp_name (tree ref)
{
  const char *name;

  switch (TREE_CODE (ref))
    {
    case MEM_REF:
    case TARGET_MEM_REF:
      gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
      lsm_tmp_name_add ("_");
      break;

    case ADDR_EXPR:
      gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
      break;

    case BIT_FIELD_REF:
    case VIEW_CONVERT_EXPR:
    case ARRAY_RANGE_REF:
      gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
      break;

    case REALPART_EXPR:
      gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
      lsm_tmp_name_add ("_RE");
      break;

    case IMAGPART_EXPR:
      gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
      lsm_tmp_name_add ("_IM");
      break;

    case COMPONENT_REF:
      gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
      lsm_tmp_name_add ("_");
      name = get_name (TREE_OPERAND (ref, 1));
      if (!name)
	name = "F";
      lsm_tmp_name_add (name);
      break;

    case ARRAY_REF:
      gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
      lsm_tmp_name_add ("_I");
      break;

    case SSA_NAME:
    case VAR_DECL:
    case PARM_DECL:
      name = get_name (ref);
      if (!name)
	name = "D";
      lsm_tmp_name_add (name);
      break;

    case STRING_CST:
      lsm_tmp_name_add ("S");
      break;

    case RESULT_DECL:
      lsm_tmp_name_add ("R");
      break;

    case INTEGER_CST:
      /* Nothing.  */
      break;

    default:
      gcc_unreachable ();
    }
}

/* Determines the name for a temporary variable that replaces REF.
   The name is accumulated into the lsm_tmp_name variable.
   N is added to the name of the temporary.  */

char *
get_lsm_tmp_name (tree ref, unsigned n)
{
  char ns[2];

  lsm_tmp_name_length = 0;
  gen_lsm_tmp_name (ref);
  lsm_tmp_name_add ("_lsm");
  if (n < 10)
    {
      ns[0] = '0' + n;
      ns[1] = 0;
      lsm_tmp_name_add (ns);
    }
  return lsm_tmp_name;
}
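
/* For example, for a reference g.data[i] (G a VAR_DECL) and N == 0 this
   yields "g_data_I_lsm0": the base name, "_" plus the field name for the
   COMPONENT_REF, "_I" for the ARRAY_REF, then "_lsm" and the digit.  */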
struct prev_flag_edges {
  /* Edge to insert new flag comparison code.  */
  edge append_cond_position;

  /* Edge for fall through from previous flag comparison.  */
  edge last_cond_fallthru;
};

/* Helper function for execute_sm.  Emit code to store TMP_VAR into
   MEM along edge EX.

   The store is only done if MEM has changed.  We do this so no
   changes to MEM occur on code paths that did not originally store
   into it.

   The common case for execute_sm will transform:

     for (...) {
       if (foo)
         stuff;
       else
         MEM = TMP_VAR;
     }

   into:

     lsm = MEM;
     for (...) {
       if (foo)
         stuff;
       else
         lsm = TMP_VAR;
     }
     MEM = lsm;

   This function will generate:

     lsm = MEM;

     lsm_flag = false;

     for (...) {
       if (foo)
         stuff;
       else {
         lsm = TMP_VAR;
         lsm_flag = true;
       }
     }
     if (lsm_flag)	<--
       MEM = lsm;	<--
*/

static void
execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag)
{
  basic_block new_bb, then_bb, old_dest;
  bool loop_has_only_one_exit;
  edge then_old_edge, orig_ex = ex;
  gimple_stmt_iterator gsi;
  gimple stmt;
  struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;

  /* ?? Insert store after previous store if applicable.  See note
     below.  */
  if (prev_edges)
    ex = prev_edges->append_cond_position;

  loop_has_only_one_exit = single_pred_p (ex->dest);

  if (loop_has_only_one_exit)
    ex = split_block_after_labels (ex->dest);

  old_dest = ex->dest;
  new_bb = split_edge (ex);
  then_bb = create_empty_bb (new_bb);
  if (current_loops && new_bb->loop_father)
    add_bb_to_loop (then_bb, new_bb->loop_father);

  gsi = gsi_start_bb (new_bb);
  stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
			    NULL_TREE, NULL_TREE);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  gsi = gsi_start_bb (then_bb);
  /* Insert actual store.  */
  stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  make_edge (new_bb, then_bb, EDGE_TRUE_VALUE);
  make_edge (new_bb, old_dest, EDGE_FALSE_VALUE);
  then_old_edge = make_edge (then_bb, old_dest, EDGE_FALLTHRU);

  set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);

  if (prev_edges)
    {
      basic_block prevbb = prev_edges->last_cond_fallthru->src;
      redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
      set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
      set_immediate_dominator (CDI_DOMINATORS, old_dest,
			       recompute_dominator (CDI_DOMINATORS, old_dest));
    }

  /* ?? Because stores may alias, they must happen in the exact
     sequence they originally happened.  Save the position right after
     the (_lsm) store we just created so we can continue appending after
     it and maintain the original order.  */
  {
    struct prev_flag_edges *p;

    if (orig_ex->aux)
      orig_ex->aux = NULL;
    alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
    p = (struct prev_flag_edges *) orig_ex->aux;
    p->append_cond_position = then_old_edge;
    p->last_cond_fallthru = find_edge (new_bb, old_dest);
    orig_ex->aux = (void *) p;
  }

  if (!loop_has_only_one_exit)
    for (gsi = gsi_start_phis (old_dest); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple phi = gsi_stmt (gsi);
	unsigned i;

	for (i = 0; i < gimple_phi_num_args (phi); i++)
	  if (gimple_phi_arg_edge (phi, i)->src == new_bb)
	    {
	      tree arg = gimple_phi_arg_def (phi, i);
	      add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
	      update_stmt (phi);
	    }
      }

  /* Remove the original fall through edge.  This was the
     single_succ_edge (new_bb).  */
  EDGE_SUCC (new_bb, 0)->flags &= ~EDGE_FALLTHRU;
}
/* When REF is set on the location, set the flag indicating the store.  */

struct sm_set_flag_if_changed
{
  sm_set_flag_if_changed (tree flag_) : flag (flag_) {}
  bool operator()(mem_ref_loc_p loc);
  tree flag;
};

bool
sm_set_flag_if_changed::operator()(mem_ref_loc_p loc)
{
  /* Only set the flag for writes.  */
  if (is_gimple_assign (loc->stmt)
      && gimple_assign_lhs_ptr (loc->stmt) == loc->ref)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (loc->stmt);
      gimple stmt = gimple_build_assign (flag, boolean_true_node);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }
  return false;
}

/* Helper function for execute_sm.  On every location where REF is
   set, set an appropriate flag indicating the store.  */

static tree
execute_sm_if_changed_flag_set (struct loop *loop, mem_ref_p ref)
{
  tree flag;
  char *str = get_lsm_tmp_name (ref->mem.ref, ~0);
  lsm_tmp_name_add ("_flag");
  flag = create_tmp_reg (boolean_type_node, str);
  for_all_locs_in_loop (loop, ref, sm_set_flag_if_changed (flag));
  return flag;
}
2017 /* Executes store motion of memory reference REF from LOOP.
2018 Exits from the LOOP are stored in EXITS. The initialization of the
2019 temporary variable is put to the preheader of the loop, and assignments
2020 to the reference from the temporary variable are emitted to exits. */
2022 static void
2023 execute_sm (struct loop *loop, vec<edge> exits, mem_ref_p ref)
2025 tree tmp_var, store_flag;
2026 unsigned i;
2027 gimple load;
2028 struct fmt_data fmt_data;
2029 edge ex, latch_edge;
2030 struct lim_aux_data *lim_data;
2031 bool multi_threaded_model_p = false;
2033 if (dump_file && (dump_flags & TDF_DETAILS))
2035 fprintf (dump_file, "Executing store motion of ");
2036 print_generic_expr (dump_file, ref->mem.ref, 0);
2037 fprintf (dump_file, " from loop %d\n", loop->num);
2040 tmp_var = create_tmp_reg (TREE_TYPE (ref->mem.ref),
2041 get_lsm_tmp_name (ref->mem.ref, ~0));
2043 fmt_data.loop = loop;
2044 fmt_data.orig_loop = loop;
2045 for_each_index (&ref->mem.ref, force_move_till, &fmt_data);
2047 if (block_in_transaction (loop_preheader_edge (loop)->src)
2048 || !PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES))
2049 multi_threaded_model_p = true;
2051 if (multi_threaded_model_p)
2052 store_flag = execute_sm_if_changed_flag_set (loop, ref);
2054 rewrite_mem_refs (loop, ref, tmp_var);
2056 /* Emit the load code on the latch edge, so that we are sure it will
2057 be processed after all dependencies; move_computations then hoists
it out of the loop. */
2058 latch_edge = loop_latch_edge (loop);
2060 /* FIXME/TODO: For the multi-threaded variant, we could avoid this
2061 load altogether, since the store is predicated by a flag.  We
2062 could do the load only if it was originally in the loop. */
2063 load = gimple_build_assign (tmp_var, unshare_expr (ref->mem.ref));
2064 lim_data = init_lim_data (load);
2065 lim_data->max_loop = loop;
2066 lim_data->tgt_loop = loop;
2067 gsi_insert_on_edge (latch_edge, load);
2069 if (multi_threaded_model_p)
2071 load = gimple_build_assign (store_flag, boolean_false_node);
2072 lim_data = init_lim_data (load);
2073 lim_data->max_loop = loop;
2074 lim_data->tgt_loop = loop;
2075 gsi_insert_on_edge (latch_edge, load);
2078 /* Sink the store to every exit from the loop. */
2079 FOR_EACH_VEC_ELT (exits, i, ex)
2080 if (!multi_threaded_model_p)
2082 gimple store;
2083 store = gimple_build_assign (unshare_expr (ref->mem.ref), tmp_var);
2084 gsi_insert_on_edge (ex, store);
2086 else
2087 execute_sm_if_changed (ex, ref->mem.ref, tmp_var, store_flag);
2090 /* Hoists memory references MEM_REFS out of LOOP. EXITS is the list of exit
2091 edges of the LOOP. */
2093 static void
2094 hoist_memory_references (struct loop *loop, bitmap mem_refs,
2095 vec<edge> exits)
2097 mem_ref_p ref;
2098 unsigned i;
2099 bitmap_iterator bi;
2101 EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
2103 ref = memory_accesses.refs_list[i];
2104 execute_sm (loop, exits, ref);
2108 struct ref_always_accessed
2110 ref_always_accessed (struct loop *loop_, tree base_, bool stored_p_)
2111 : loop (loop_), base (base_), stored_p (stored_p_) {}
2112 bool operator()(mem_ref_loc_p loc);
2113 struct loop *loop;
2114 tree base;
2115 bool stored_p;
2118 bool
2119 ref_always_accessed::operator()(mem_ref_loc_p loc)
2121 struct loop *must_exec;
2123 if (!get_lim_data (loc->stmt))
2124 return false;
2126 /* If we require an always executed store, make sure the statement
2127 stores to the reference. */
2128 if (stored_p)
2130 tree lhs;
2131 if (!gimple_get_lhs (loc->stmt))
2132 return false;
2133 lhs = get_base_address (gimple_get_lhs (loc->stmt));
2134 if (!lhs)
2135 return false;
2136 if (INDIRECT_REF_P (lhs)
2137 || TREE_CODE (lhs) == MEM_REF)
2138 lhs = TREE_OPERAND (lhs, 0);
2139 if (lhs != base)
2140 return false;
2143 must_exec = get_lim_data (loc->stmt)->always_executed_in;
2144 if (!must_exec)
2145 return false;
2147 if (must_exec == loop
2148 || flow_loop_nested_p (must_exec, loop))
2149 return true;
2151 return false;
2154 /* Returns true if REF is always accessed in LOOP.  If STORED_P is true,
2155 require that REF is always stored to in LOOP. */
2157 static bool
2158 ref_always_accessed_p (struct loop *loop, mem_ref_p ref, bool stored_p)
2160 tree base = ao_ref_base (&ref->mem);
2161 if (TREE_CODE (base) == MEM_REF)
2162 base = TREE_OPERAND (base, 0);
2164 return for_all_locs_in_loop (loop, ref,
2165 ref_always_accessed (loop, base, stored_p));
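/* For illustration of ref_always_accessed_p (a sketch):

     for (...)
       {
         *p = x;        (stored on every iteration)
         if (cond)
           *q = y;      (stored only conditionally)
       }

   With STORED_P true the predicate holds for *p but not for *q, since
   the store to *q is not always executed once the loop is entered.  */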
2168 /* Returns true if REF1 and REF2 are independent. */
2170 static bool
2171 refs_independent_p (mem_ref_p ref1, mem_ref_p ref2)
2173 if (ref1 == ref2)
2174 return true;
2176 if (dump_file && (dump_flags & TDF_DETAILS))
2177 fprintf (dump_file, "Querying dependency of refs %u and %u: ",
2178 ref1->id, ref2->id);
2180 if (mem_refs_may_alias_p (ref1, ref2, &memory_accesses.ttae_cache))
2182 if (dump_file && (dump_flags & TDF_DETAILS))
2183 fprintf (dump_file, "dependent.\n");
2184 return false;
2186 else
2188 if (dump_file && (dump_flags & TDF_DETAILS))
2189 fprintf (dump_file, "independent.\n");
2190 return true;
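/* For illustration: references like a[0] and a[1], or *p and *q whose
   points-to sets do not overlap, are reported independent by the alias
   oracle query above; a reference is trivially independent of
   itself.  */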
2194 /* Mark REF dependent on stores or loads (according to STORED_P) in LOOP
2195 and its super-loops. */
2197 static void
2198 record_dep_loop (struct loop *loop, mem_ref_p ref, bool stored_p)
2200 /* We can propagate dependent-in-loop bits up the loop
2201 hierarchy to all outer loops. */
2202 while (loop != current_loops->tree_root
2203 && bitmap_set_bit (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2204 loop = loop_outer (loop);
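/* For illustration: with loop L2 nested in loop L1, a call
   record_dep_loop (L2, ref, stored_p) sets the dependence bit for L2
   and then for L1; the walk stops early if a bit was already set,
   because the bits of the enclosing loops were then already set by an
   earlier call.  */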
2207 /* Returns true if REF is independent of all other memory references in
2208 LOOP. */
2210 static bool
2211 ref_indep_loop_p_1 (struct loop *loop, mem_ref_p ref, bool stored_p)
2213 bitmap refs_to_check;
2214 unsigned i;
2215 bitmap_iterator bi;
2216 mem_ref_p aref;
2218 if (stored_p)
2219 refs_to_check = &memory_accesses.refs_in_loop[loop->num];
2220 else
2221 refs_to_check = &memory_accesses.refs_stored_in_loop[loop->num];
2223 if (bitmap_bit_p (refs_to_check, UNANALYZABLE_MEM_ID))
2224 return false;
2226 EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
2228 aref = memory_accesses.refs_list[i];
2229 if (!refs_independent_p (ref, aref))
2230 return false;
2233 return true;
2236 /* Returns true if REF is independent of all other memory references in
2237 LOOP.  Wrapper over ref_indep_loop_p_1, caching its results. */
2239 static bool
2240 ref_indep_loop_p_2 (struct loop *loop, mem_ref_p ref, bool stored_p)
2242 stored_p |= bitmap_bit_p (&ref->stored, loop->num);
2244 if (bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2245 return true;
2246 if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2247 return false;
2249 struct loop *inner = loop->inner;
2250 while (inner)
2252 if (!ref_indep_loop_p_2 (inner, ref, stored_p))
2253 return false;
2254 inner = inner->next;
2257 bool indep_p = ref_indep_loop_p_1 (loop, ref, stored_p);
2259 if (dump_file && (dump_flags & TDF_DETAILS))
2260 fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
2261 ref->id, loop->num, indep_p ? "independent" : "dependent");
2263 /* Record the computed result in the cache. */
2264 if (indep_p)
2266 if (bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p))
2267 && stored_p)
2269 /* If it's independent of all refs then it's independent of
2270 stores, too. */
2271 bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, false));
2274 else
2276 record_dep_loop (loop, ref, stored_p);
2277 if (!stored_p)
2279 /* If it's dependent on stores it's dependent on
2280 all refs, too. */
2281 record_dep_loop (loop, ref, true);
2285 return indep_p;
2288 /* Returns true if REF is independent of all other memory references in
2289 LOOP. */
2291 static bool
2292 ref_indep_loop_p (struct loop *loop, mem_ref_p ref)
2294 gcc_checking_assert (MEM_ANALYZABLE (ref));
2296 return ref_indep_loop_p_2 (loop, ref, false);
2299 /* Returns true if we can perform store motion of REF from LOOP. */
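/* For illustration (a sketch): in

     while (cond)
       if (p != NULL)
         *p = x;

   the store to *p may trap when p is NULL and is not always executed
   in the loop, so it is rejected below; volatile references and
   references that could throw are rejected as well.  */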
2301 static bool
2302 can_sm_ref_p (struct loop *loop, mem_ref_p ref)
2304 tree base;
2306 /* Can't hoist unanalyzable refs. */
2307 if (!MEM_ANALYZABLE (ref))
2308 return false;
2310 /* It should be movable. */
2311 if (!is_gimple_reg_type (TREE_TYPE (ref->mem.ref))
2312 || TREE_THIS_VOLATILE (ref->mem.ref)
2313 || !for_each_index (&ref->mem.ref, may_move_till, loop))
2314 return false;
2316 /* If it can throw, fail; we do not properly update EH info. */
2317 if (tree_could_throw_p (ref->mem.ref))
2318 return false;
2320 /* If it can trap, it must be always executed in LOOP.
2321 Readonly memory locations may trap when storing to them, but
2322 tree_could_trap_p is a predicate for rvalues, so check that
2323 explicitly. */
2324 base = get_base_address (ref->mem.ref);
2325 if ((tree_could_trap_p (ref->mem.ref)
2326 || (DECL_P (base) && TREE_READONLY (base)))
2327 && !ref_always_accessed_p (loop, ref, true))
2328 return false;
2330 /* And it must be independent of all other memory references
2331 in LOOP. */
2332 if (!ref_indep_loop_p (loop, ref))
2333 return false;
2335 return true;
2338 /* Marks in REFS_TO_SM the references in LOOP for which store motion
2339 should be performed.  SM_EXECUTED is the set of references for which
2340 store motion was already performed in one of the outer loops. */
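/* For illustration: if store motion of *p was already executed in an
   outer loop, its bit is set in SM_EXECUTED and the bitmap walk below
   skips it, so the reference is not moved a second time.  */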
2342 static void
2343 find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
2345 bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
2346 unsigned i;
2347 bitmap_iterator bi;
2348 mem_ref_p ref;
2350 EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
2352 ref = memory_accesses.refs_list[i];
2353 if (can_sm_ref_p (loop, ref))
2354 bitmap_set_bit (refs_to_sm, i);
2358 /* Checks whether LOOP (with exits stored in the EXITS array) is suitable
2359 for a store motion optimization (i.e. whether we can insert statements
2360 on its exits). */
2362 static bool
2363 loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
2364 vec<edge> exits)
2366 unsigned i;
2367 edge ex;
2369 FOR_EACH_VEC_ELT (exits, i, ex)
2370 if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
2371 return false;
2373 return true;
2376 /* Try to perform store motion for all memory references modified inside
2377 LOOP.  SM_EXECUTED is the bitmap of the memory references for which
2378 store motion was already executed in one of the outer loops. */
2380 static void
2381 store_motion_loop (struct loop *loop, bitmap sm_executed)
2383 vec<edge> exits = get_loop_exit_edges (loop);
2384 struct loop *subloop;
2385 bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);
2387 if (loop_suitable_for_sm (loop, exits))
2389 find_refs_for_sm (loop, sm_executed, sm_in_loop);
2390 hoist_memory_references (loop, sm_in_loop, exits);
2392 exits.release ();
2394 bitmap_ior_into (sm_executed, sm_in_loop);
2395 for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
2396 store_motion_loop (subloop, sm_executed);
2397 bitmap_and_compl_into (sm_executed, sm_in_loop);
2398 BITMAP_FREE (sm_in_loop);
2401 /* Try to perform store motion for all memory references modified inside
2402 loops. */
2404 static void
2405 store_motion (void)
2407 struct loop *loop;
2408 bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);
2410 for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
2411 store_motion_loop (loop, sm_executed);
2413 BITMAP_FREE (sm_executed);
2414 gsi_commit_edge_inserts ();
2417 /* Fills ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e.
2418 for each such basic block bb records the outermost loop for which
2419 execution of its header implies execution of bb.  CONTAINS_CALL is the
2420 bitmap of blocks that contain a nonpure call. */
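/* For illustration (a sketch):

     while (cond1)          (loop header)
       {
         bb1;
         if (cond2)
           bb2;
         bb3;               (dominates the latch)
       }

   bb1 and bb3 get ALWAYS_EXECUTED_IN set to the loop, bb2 does not.
   A nonpure call in bb1 would stop the walk there, so bb3 would not
   be recorded.  */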
2422 static void
2423 fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
2425 basic_block bb = NULL, *bbs, last = NULL;
2426 unsigned i;
2427 edge e;
2428 struct loop *inn_loop = loop;
2430 if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
2432 bbs = get_loop_body_in_dom_order (loop);
2434 for (i = 0; i < loop->num_nodes; i++)
2436 edge_iterator ei;
2437 bb = bbs[i];
2439 if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2440 last = bb;
2442 if (bitmap_bit_p (contains_call, bb->index))
2443 break;
2445 FOR_EACH_EDGE (e, ei, bb->succs)
2446 if (!flow_bb_inside_loop_p (loop, e->dest))
2447 break;
2448 if (e)
2449 break;
2451 /* A loop might be infinite (TODO: use simple loop analysis
2452 to disprove this if possible). */
2453 if (bb->flags & BB_IRREDUCIBLE_LOOP)
2454 break;
2456 if (!flow_bb_inside_loop_p (inn_loop, bb))
2457 break;
2459 if (bb->loop_father->header == bb)
2461 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2462 break;
2464 /* In a loop that is always entered we may proceed anyway.
2465 But record that we entered it and stop once we leave it. */
2466 inn_loop = bb->loop_father;
2470 while (1)
2472 SET_ALWAYS_EXECUTED_IN (last, loop);
2473 if (last == loop->header)
2474 break;
2475 last = get_immediate_dominator (CDI_DOMINATORS, last);
2478 free (bbs);
2481 for (loop = loop->inner; loop; loop = loop->next)
2482 fill_always_executed_in_1 (loop, contains_call);
2485 /* Fills ALWAYS_EXECUTED_IN information for basic blocks, i.e.
2486 for each basic block bb records the outermost loop for which execution
2487 of its header implies execution of bb. */
2489 static void
2490 fill_always_executed_in (void)
2492 sbitmap contains_call = sbitmap_alloc (last_basic_block);
2493 basic_block bb;
2494 struct loop *loop;
2496 bitmap_clear (contains_call);
2497 FOR_EACH_BB (bb)
2499 gimple_stmt_iterator gsi;
2500 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2502 if (nonpure_call_p (gsi_stmt (gsi)))
2503 break;
2506 if (!gsi_end_p (gsi))
2507 bitmap_set_bit (contains_call, bb->index);
2510 for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
2511 fill_always_executed_in_1 (loop, contains_call);
2513 sbitmap_free (contains_call);
2517 /* Compute the global information needed by the loop invariant motion pass. */
2519 static void
2520 tree_ssa_lim_initialize (void)
2522 unsigned i;
2524 bitmap_obstack_initialize (&lim_bitmap_obstack);
2525 lim_aux_data_map = pointer_map_create ();
2527 if (flag_tm)
2528 compute_transaction_bits ();
2530 alloc_aux_for_edges (0);
2532 memory_accesses.refs.create (100);
2533 memory_accesses.refs_list.create (100);
2534 /* Allocate a special, unanalyzable mem-ref with ID zero. */
2535 memory_accesses.refs_list.quick_push
2536 (mem_ref_alloc (error_mark_node, 0, UNANALYZABLE_MEM_ID));
2538 memory_accesses.refs_in_loop.create (number_of_loops ());
2539 memory_accesses.refs_in_loop.quick_grow (number_of_loops ());
2540 memory_accesses.refs_stored_in_loop.create (number_of_loops ());
2541 memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops ());
2542 memory_accesses.all_refs_stored_in_loop.create (number_of_loops ());
2543 memory_accesses.all_refs_stored_in_loop.quick_grow (number_of_loops ());
2545 for (i = 0; i < number_of_loops (); i++)
2547 bitmap_initialize (&memory_accesses.refs_in_loop[i],
2548 &lim_bitmap_obstack);
2549 bitmap_initialize (&memory_accesses.refs_stored_in_loop[i],
2550 &lim_bitmap_obstack);
2551 bitmap_initialize (&memory_accesses.all_refs_stored_in_loop[i],
2552 &lim_bitmap_obstack);
2555 memory_accesses.ttae_cache = NULL;
2558 /* Cleans up after the invariant motion pass. */
2560 static void
2561 tree_ssa_lim_finalize (void)
2563 basic_block bb;
2564 unsigned i;
2565 mem_ref_p ref;
2567 free_aux_for_edges ();
2569 FOR_EACH_BB (bb)
2570 SET_ALWAYS_EXECUTED_IN (bb, NULL);
2572 bitmap_obstack_release (&lim_bitmap_obstack);
2573 pointer_map_destroy (lim_aux_data_map);
2575 memory_accesses.refs.dispose ();
2577 FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
2578 memref_free (ref);
2579 memory_accesses.refs_list.release ();
2581 memory_accesses.refs_in_loop.release ();
2582 memory_accesses.refs_stored_in_loop.release ();
2583 memory_accesses.all_refs_stored_in_loop.release ();
2585 if (memory_accesses.ttae_cache)
2586 free_affine_expand_cache (&memory_accesses.ttae_cache);
2589 /* Moves invariants from loops. Only "expensive" invariants are moved out --
2590 i.e. those that are likely to be a win regardless of the register pressure. */
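/* For illustration, a sketch of the overall effect on

     for (i = 0; i < n; i++)
       a[i] = x * y;

   which becomes

     t = x * y;
     for (i = 0; i < n; i++)
       a[i] = t;

   provided the invariant x * y is judged expensive enough to be worth
   keeping in a register.  */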
2592 unsigned int
2593 tree_ssa_lim (void)
2595 unsigned int todo;
2597 tree_ssa_lim_initialize ();
2599 /* Gathers information about memory accesses in the loops. */
2600 analyze_memory_references ();
2602 /* Fills ALWAYS_EXECUTED_IN information for basic blocks. */
2603 fill_always_executed_in ();
2605 /* For each statement determine the outermost loop in which it is
2606 invariant and the cost of computing the invariant. */
2607 determine_invariantness ();
2609 /* Execute store motion. Force the necessary invariants to be moved
2610 out of the loops as well. */
2611 store_motion ();
2613 /* Move the expressions that are expensive enough. */
2614 todo = move_computations ();
2616 tree_ssa_lim_finalize ();
2618 return todo;