/* Loop distribution.
   Copyright (C) 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Georges-Andre Silber <Georges-Andre.Silber@ensmp.fr>
   and Sebastian Pop <sebastian.pop@amd.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This pass performs loop distribution: for example, the loop

   |DO I = 2, N
   |    A(I) = B(I) + C
   |    D(I) = A(I-1)*E
   |ENDDO

   is transformed to

   |DOALL I = 2, N
   |   A(I) = B(I) + C
   |ENDDO
   |
   |DOALL I = 2, N
   |   D(I) = A(I-1)*E
   |ENDDO

   This pass uses an RDG (Reduced Dependence Graph) built on top of
   the data dependence relations.  The RDG is then topologically
   sorted to obtain a map of information producers/consumers, based
   on which the pass generates the new loops.  */
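
/* Editorial illustration (not part of the original sources): a C
   equivalent of the example above, as seen under
   -ftree-loop-distribution, is

     for (i = 1; i < n; i++)
       {
         a[i] = b[i] + c;
         d[i] = a[i - 1] * e;
       }

   which is distributed into

     for (i = 1; i < n; i++)
       a[i] = b[i] + c;

     for (i = 1; i < n; i++)
       d[i] = a[i - 1] * e;

   Each resulting loop is free of loop-carried dependences (a DOALL
   loop), which is what makes the transformation useful for later
   parallelization or vectorization.  */
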
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "basic-block.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "timevar.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-pass.h"
#include "lambda.h"
#include "langhooks.h"
#include "tree-vectorizer.h"

/* If bit I is not set, it means that this node represents an
   operation that has already been performed, and that should not be
   performed again.  This is the subgraph of remaining important
   computations that is passed to the DFS algorithm to avoid
   including the same stores several times in different loops.  */
static bitmap remaining_stmts;

/* A node of the RDG is marked in this bitmap when it has as a
   predecessor a node that writes to memory.  */
static bitmap upstream_mem_writes;

/* Update the PHI nodes of NEW_LOOP.  NEW_LOOP is a duplicate of
   ORIG_LOOP.  */

static void
update_phis_for_loop_copy (struct loop *orig_loop, struct loop *new_loop)
{
  tree new_ssa_name;
  gimple_stmt_iterator si_new, si_orig;
  edge orig_loop_latch = loop_latch_edge (orig_loop);
  edge orig_entry_e = loop_preheader_edge (orig_loop);
  edge new_loop_entry_e = loop_preheader_edge (new_loop);

  /* Scan the phis in the headers of the old and new loops
     (they are organized in exactly the same order).  */
  for (si_new = gsi_start_phis (new_loop->header),
       si_orig = gsi_start_phis (orig_loop->header);
       !gsi_end_p (si_new) && !gsi_end_p (si_orig);
       gsi_next (&si_new), gsi_next (&si_orig))
    {
      tree def;
      source_location locus;
      gimple phi_new = gsi_stmt (si_new);
      gimple phi_orig = gsi_stmt (si_orig);

      /* Add the first phi argument for the phi in NEW_LOOP (the one
	 associated with the entry of NEW_LOOP).  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_entry_e);
      locus = gimple_phi_arg_location_from_edge (phi_orig, orig_entry_e);
      add_phi_arg (phi_new, def, new_loop_entry_e, locus);

      /* Add the second phi argument for the phi in NEW_LOOP (the one
	 associated with the latch of NEW_LOOP).  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_loop_latch);
      locus = gimple_phi_arg_location_from_edge (phi_orig, orig_loop_latch);

      if (TREE_CODE (def) == SSA_NAME)
	{
	  new_ssa_name = get_current_def (def);

	  if (!new_ssa_name)
	    /* This only happens if there are no definitions inside the
	       loop.  Use the invariant in the new loop as is.  */
	    new_ssa_name = def;
	}
      else
	/* Could be an integer.  */
	new_ssa_name = def;

      add_phi_arg (phi_new, new_ssa_name, loop_latch_edge (new_loop), locus);
    }
}

/* Return a copy of LOOP placed before LOOP.  */

static struct loop *
copy_loop_before (struct loop *loop)
{
  struct loop *res;
  edge preheader = loop_preheader_edge (loop);

  if (!single_exit (loop))
    return NULL;

  initialize_original_copy_tables ();
  res = slpeel_tree_duplicate_loop_to_edge_cfg (loop, preheader);
  free_original_copy_tables ();

  if (!res)
    return NULL;

  update_phis_for_loop_copy (loop, res);
  rename_variables_in_loop (res);

  return res;
}

/* Creates an empty basic block after LOOP.  */

static void
create_bb_after_loop (struct loop *loop)
{
  edge exit = single_exit (loop);

  if (!exit)
    return;

  split_edge (exit);
}

/* Generate code for PARTITION from the code in LOOP.  The loop is
   copied when COPY_P is true.  All the statements not flagged in the
   PARTITION bitmap are removed from the loop or from its copy.  The
   statements are indexed in sequence inside a basic block, and the
   basic blocks of a loop are taken in dom order.  Returns true when
   the code gen succeeded.  */

static bool
generate_loops_for_partition (struct loop *loop, bitmap partition, bool copy_p)
{
  unsigned i, x;
  gimple_stmt_iterator bsi;
  basic_block *bbs;

  if (copy_p)
    {
      loop = copy_loop_before (loop);
      create_preheader (loop, CP_SIMPLE_PREHEADERS);
      create_bb_after_loop (loop);
    }

  if (loop == NULL)
    return false;

  /* Remove stmts not in the PARTITION bitmap.  The order in which we
     visit the phi nodes and the statements is exactly as in
     stmts_from_loop.  */
  bbs = get_loop_body_in_dom_order (loop);

  for (x = 0, i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi);)
	if (!bitmap_bit_p (partition, x++))
	  {
	    gimple phi = gsi_stmt (bsi);
	    if (!is_gimple_reg (gimple_phi_result (phi)))
	      mark_virtual_phi_result_for_renaming (phi);
	    remove_phi_node (&bsi, true);
	  }
	else
	  gsi_next (&bsi);

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi);)
	{
	  gimple stmt = gsi_stmt (bsi);
	  if (gimple_code (stmt) != GIMPLE_LABEL
	      && !bitmap_bit_p (partition, x++))
	    {
	      unlink_stmt_vdef (stmt);
	      gsi_remove (&bsi, true);
	      release_defs (stmt);
	    }
	  else
	    gsi_next (&bsi);
	}
    }

  free (bbs);
  return true;
}

/* Build the size argument for a memset call.  */

static inline tree
build_size_arg_loc (location_t loc, tree nb_iter, tree op,
		    gimple_seq *stmt_list)
{
  gimple_seq stmts;
  tree x = size_binop_loc (loc, MULT_EXPR,
			   fold_convert_loc (loc, sizetype, nb_iter),
			   TYPE_SIZE_UNIT (TREE_TYPE (op)));
  x = force_gimple_operand (x, &stmts, true, NULL);
  gimple_seq_add_seq (stmt_list, stmts);

  return x;
}

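/* Editorial illustration (not part of the original sources): for a
   store to an "int" array and NB_ITER equal to some SSA name N, the
   helper above builds the byte count

     nb_bytes = (sizetype) N * sizeof (int)

   and gimplifies it into STMT_LIST; the result feeds the third
   argument of the memset call built in generate_memset_zero.  */
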
/* Generate a call to memset.  Return true when the operation succeeded.  */

static bool
generate_memset_zero (gimple stmt, tree op0, tree nb_iter,
		      gimple_stmt_iterator bsi)
{
  tree addr_base, nb_bytes;
  bool res = false;
  gimple_seq stmt_list = NULL, stmts;
  gimple fn_call;
  tree mem, fn;
  struct data_reference *dr = XCNEW (struct data_reference);
  location_t loc = gimple_location (stmt);

  DR_STMT (dr) = stmt;
  DR_REF (dr) = op0;
  if (!dr_analyze_innermost (dr))
    goto end;

  /* Test for a positive stride, iterating over every element.  */
  if (integer_zerop (size_binop (MINUS_EXPR,
				 fold_convert (sizetype, DR_STEP (dr)),
				 TYPE_SIZE_UNIT (TREE_TYPE (op0)))))
    {
      addr_base = fold_convert_loc (loc, sizetype,
				    size_binop_loc (loc, PLUS_EXPR,
						    DR_OFFSET (dr),
						    DR_INIT (dr)));
      addr_base = fold_build2_loc (loc, POINTER_PLUS_EXPR,
				   TREE_TYPE (DR_BASE_ADDRESS (dr)),
				   DR_BASE_ADDRESS (dr), addr_base);

      nb_bytes = build_size_arg_loc (loc, nb_iter, op0, &stmt_list);
    }

  /* Test for a negative stride, iterating over every element.  */
  else if (integer_zerop (size_binop (PLUS_EXPR,
				      TYPE_SIZE_UNIT (TREE_TYPE (op0)),
				      fold_convert (sizetype, DR_STEP (dr)))))
    {
      nb_bytes = build_size_arg_loc (loc, nb_iter, op0, &stmt_list);

      addr_base = size_binop_loc (loc, PLUS_EXPR, DR_OFFSET (dr), DR_INIT (dr));
      addr_base = fold_convert_loc (loc, sizetype, addr_base);
      addr_base = size_binop_loc (loc, MINUS_EXPR, addr_base,
				  fold_convert_loc (loc, sizetype, nb_bytes));
      addr_base = size_binop_loc (loc, PLUS_EXPR, addr_base,
				  TYPE_SIZE_UNIT (TREE_TYPE (op0)));
      addr_base = fold_build2_loc (loc, POINTER_PLUS_EXPR,
				   TREE_TYPE (DR_BASE_ADDRESS (dr)),
				   DR_BASE_ADDRESS (dr), addr_base);
    }
  else
    goto end;

  mem = force_gimple_operand (addr_base, &stmts, true, NULL);
  gimple_seq_add_seq (&stmt_list, stmts);

  fn = build_fold_addr_expr (implicit_built_in_decls[BUILT_IN_MEMSET]);
  fn_call = gimple_build_call (fn, 3, mem, integer_zero_node, nb_bytes);
  gimple_seq_add_stmt (&stmt_list, fn_call);
  gsi_insert_seq_after (&bsi, stmt_list, GSI_CONTINUE_LINKING);
  res = true;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "generated memset zero\n");

 end:
  free_data_ref (dr);
  return res;
}

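/* Editorial illustration (not part of the original sources): for the
   single-store loop

     for (i = 0; i < n; i++)
       a[i] = 0;

   the sequence inserted before the loop amounts to

     memset (&a[0], 0, n * sizeof (a[0]));

   and the caller, generate_builtin, then removes the loop itself when
   this partition is the last one.  */
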
/* Propagate phis in BB B to their uses and remove them.  */

static void
prop_phis (basic_block b)
{
  gimple_stmt_iterator psi;
  gimple_seq phis = phi_nodes (b);

  for (psi = gsi_start (phis); !gsi_end_p (psi); )
    {
      gimple phi = gsi_stmt (psi);
      tree def = gimple_phi_result (phi), use = gimple_phi_arg_def (phi, 0);

      gcc_assert (gimple_phi_num_args (phi) == 1);

      if (!is_gimple_reg (def))
	{
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  gimple stmt;

	  FOR_EACH_IMM_USE_STMT (stmt, iter, def)
	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
	      SET_USE (use_p, use);
	}
      else
	replace_uses_by (def, use);

      remove_phi_node (&psi, true);
    }
}

/* Tries to generate a builtin function for the instructions of LOOP
   pointed to by the bits set in PARTITION.  Returns true when the
   operation succeeded.  */

static bool
generate_builtin (struct loop *loop, bitmap partition, bool copy_p)
{
  bool res = false;
  unsigned i, x = 0;
  basic_block *bbs;
  gimple write = NULL;
  tree op0, op1;
  gimple_stmt_iterator bsi;
  tree nb_iter = number_of_exit_cond_executions (loop);

  if (!nb_iter || nb_iter == chrec_dont_know)
    return false;

  bbs = get_loop_body_in_dom_order (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	x++;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  gimple stmt = gsi_stmt (bsi);

	  if (bitmap_bit_p (partition, x++)
	      && is_gimple_assign (stmt)
	      && !is_gimple_reg (gimple_assign_lhs (stmt)))
	    {
	      /* Don't generate the builtins when there is more than
		 one memory write.  */
	      if (write != NULL)
		goto end;

	      write = stmt;
	      if (bb == loop->latch)
		nb_iter = number_of_latch_executions (loop);
	    }
	}
    }

  if (!write)
    goto end;

  op0 = gimple_assign_lhs (write);
  op1 = gimple_assign_rhs1 (write);

  if (!(TREE_CODE (op0) == ARRAY_REF
	|| TREE_CODE (op0) == MEM_REF))
    goto end;

  /* The new statements will be placed before LOOP.  */
  bsi = gsi_last_bb (loop_preheader_edge (loop)->src);

  if (gimple_assign_rhs_code (write) == INTEGER_CST
      && (integer_zerop (op1) || real_zerop (op1)))
    res = generate_memset_zero (write, op0, nb_iter, bsi);

  /* If this is the last partition for which we generate code, we have
     to destroy the loop.  */
  if (res && !copy_p)
    {
      unsigned nbbs = loop->num_nodes;
      basic_block src = loop_preheader_edge (loop)->src;
      basic_block dest = single_exit (loop)->dest;
      prop_phis (dest);
      make_edge (src, dest, EDGE_FALLTHRU);
      cancel_loop_tree (loop);

      for (i = 0; i < nbbs; i++)
	delete_basic_block (bbs[i]);

      set_immediate_dominator (CDI_DOMINATORS, dest,
			       recompute_dominator (CDI_DOMINATORS, dest));
    }

 end:
  free (bbs);
  return res;
}

/* Generates code for PARTITION.  For simple loops, this function can
   generate a built-in.  */

static bool
generate_code_for_partition (struct loop *loop, bitmap partition, bool copy_p)
{
  if (generate_builtin (loop, partition, copy_p))
    return true;

  return generate_loops_for_partition (loop, partition, copy_p);
}

/* Returns true if the node V of RDG cannot be recomputed.  */

static bool
rdg_cannot_recompute_vertex_p (struct graph *rdg, int v)
{
  if (RDG_MEM_WRITE_STMT (rdg, v))
    return true;

  return false;
}

/* Returns true when the vertex V has already been generated in the
   current partition (V is in PROCESSED), or when V belongs to another
   partition and cannot be recomputed (V is not in REMAINING_STMTS).  */

static inline bool
already_processed_vertex_p (bitmap processed, int v)
{
  return (bitmap_bit_p (processed, v)
	  || !bitmap_bit_p (remaining_stmts, v));
}

/* Returns NULL when there is no anti-dependence among the successors
   of vertex V, otherwise returns the edge with the anti-dep.  */

static struct graph_edge *
has_anti_dependence (struct vertex *v)
{
  struct graph_edge *e;

  if (v->succ)
    for (e = v->succ; e; e = e->succ_next)
      if (RDGE_TYPE (e) == anti_dd)
	return e;

  return NULL;
}

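/* Editorial illustration (not part of the original sources): an
   anti-dependence is a read followed by a write to the same location,
   as between S1 and S2 in

     S1:  t = a[i];
     S2:  a[i] = s;

   Splitting S1 and S2 into different loops could let the write execute
   before the read, so such statements have to stay together in one
   partition.  */
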
/* Returns true when V has a predecessor with upstream memory writes,
   not counting predecessors that are themselves memory writes: those
   flow channels are where the RDG may be split into partitions.  */

static bool
predecessor_has_mem_write (struct graph *rdg, struct vertex *v)
{
  struct graph_edge *e;

  if (v->pred)
    for (e = v->pred; e; e = e->pred_next)
      if (bitmap_bit_p (upstream_mem_writes, e->src)
	  /* Don't consider flow channels: a write to memory followed
	     by a read from memory.  These channels allow the split of
	     the RDG in different partitions.  */
	  && !RDG_MEM_WRITE_STMT (rdg, e->src))
	return true;

  return false;
}

/* Initializes the upstream_mem_writes bitmap following the
   information from RDG.  */

static void
mark_nodes_having_upstream_mem_writes (struct graph *rdg)
{
  int v, x;
  bitmap seen = BITMAP_ALLOC (NULL);

  for (v = rdg->n_vertices - 1; v >= 0; v--)
    if (!bitmap_bit_p (seen, v))
      {
	unsigned i;
	VEC (int, heap) *nodes = VEC_alloc (int, heap, 3);

	graphds_dfs (rdg, &v, 1, &nodes, false, NULL);

	for (i = 0; VEC_iterate (int, nodes, i, x); i++)
	  {
	    if (bitmap_bit_p (seen, x))
	      continue;

	    bitmap_set_bit (seen, x);

	    if (RDG_MEM_WRITE_STMT (rdg, x)
		|| predecessor_has_mem_write (rdg, &(rdg->vertices[x]))
		/* In anti-dependences the read should occur before
		   the write; this is why both the read and the write
		   should be placed in the same partition.  */
		|| has_anti_dependence (&(rdg->vertices[x])))
	      bitmap_set_bit (upstream_mem_writes, x);
	  }

	VEC_free (int, heap, nodes);
      }

  BITMAP_FREE (seen);
}

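/* Editorial illustration (not part of the original sources): given

     S1:  t = a[i];	read with an anti-dependence on S2
     S2:  a[i] = s;	memory write
     S3:  u = t + 1;	scalar use of S1

   S2 is marked because it is a memory write, S1 because of its
   anti-dependence, and S3 because its predecessor S1 is marked and is
   not itself a write.  A write followed by a read (a flow channel)
   does not propagate the mark: that is where the RDG may be split.  */
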
/* Returns true when vertex U has an upstream memory write in RDG.  */

static bool
has_upstream_mem_writes (int u)
{
  return bitmap_bit_p (upstream_mem_writes, u);
}

static void rdg_flag_vertex_and_dependent (struct graph *, int, bitmap, bitmap,
					   bitmap, bool *);

/* Flag all the uses of U.  */

static void
rdg_flag_all_uses (struct graph *rdg, int u, bitmap partition, bitmap loops,
		   bitmap processed, bool *part_has_writes)
{
  struct graph_edge *e;

  for (e = rdg->vertices[u].succ; e; e = e->succ_next)
    if (!bitmap_bit_p (processed, e->dest))
      {
	rdg_flag_vertex_and_dependent (rdg, e->dest, partition, loops,
				       processed, part_has_writes);
	rdg_flag_all_uses (rdg, e->dest, partition, loops, processed,
			   part_has_writes);
      }
}

/* Flag the uses of U, stopping the traversal according to the
   information in UPSTREAM_MEM_WRITES.  */

static void
rdg_flag_uses (struct graph *rdg, int u, bitmap partition, bitmap loops,
	       bitmap processed, bool *part_has_writes)
{
  use_operand_p use_p;
  struct vertex *x = &(rdg->vertices[u]);
  gimple stmt = RDGV_STMT (x);
  struct graph_edge *anti_dep = has_anti_dependence (x);

  /* Keep in the same partition the destination of an anti-dependence,
     because this is a store to the exact same location.  Putting this
     in another partition is bad for cache locality.  */
  if (anti_dep)
    {
      int v = anti_dep->dest;

      if (!already_processed_vertex_p (processed, v))
	rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
				       processed, part_has_writes);
    }

  if (gimple_code (stmt) != GIMPLE_PHI)
    {
      if ((use_p = gimple_vuse_op (stmt)) != NULL_USE_OPERAND_P)
	{
	  tree use = USE_FROM_PTR (use_p);

	  if (TREE_CODE (use) == SSA_NAME)
	    {
	      gimple def_stmt = SSA_NAME_DEF_STMT (use);
	      int v = rdg_vertex_for_stmt (rdg, def_stmt);

	      if (v >= 0
		  && !already_processed_vertex_p (processed, v))
		rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
					       processed, part_has_writes);
	    }
	}
    }

  if (is_gimple_assign (stmt) && has_upstream_mem_writes (u))
    {
      tree op0 = gimple_assign_lhs (stmt);

      /* Scalar channels don't have enough space for transmitting data
	 between tasks, unless we add more storage by privatizing.  */
      if (is_gimple_reg (op0))
	{
	  use_operand_p use_p;
	  imm_use_iterator iter;

	  FOR_EACH_IMM_USE_FAST (use_p, iter, op0)
	    {
	      int v = rdg_vertex_for_stmt (rdg, USE_STMT (use_p));

	      if (!already_processed_vertex_p (processed, v))
		rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
					       processed, part_has_writes);
	    }
	}
    }
}

/* Flag V from RDG as part of PARTITION, and also flag its loop number
   in LOOPS.  */

static void
rdg_flag_vertex (struct graph *rdg, int v, bitmap partition, bitmap loops,
		 bool *part_has_writes)
{
  struct loop *loop;

  if (bitmap_bit_p (partition, v))
    return;

  loop = loop_containing_stmt (RDG_STMT (rdg, v));
  bitmap_set_bit (loops, loop->num);
  bitmap_set_bit (partition, v);

  if (rdg_cannot_recompute_vertex_p (rdg, v))
    {
      *part_has_writes = true;
      bitmap_clear_bit (remaining_stmts, v);
    }
}

/* Flag in the bitmap PARTITION the vertex V and all its predecessors.
   Also flag their loop number in LOOPS.  */

static void
rdg_flag_vertex_and_dependent (struct graph *rdg, int v, bitmap partition,
			       bitmap loops, bitmap processed,
			       bool *part_has_writes)
{
  unsigned i;
  VEC (int, heap) *nodes = VEC_alloc (int, heap, 3);
  int x;

  bitmap_set_bit (processed, v);
  rdg_flag_uses (rdg, v, partition, loops, processed, part_has_writes);
  graphds_dfs (rdg, &v, 1, &nodes, false, remaining_stmts);
  rdg_flag_vertex (rdg, v, partition, loops, part_has_writes);

  for (i = 0; VEC_iterate (int, nodes, i, x); i++)
    if (!already_processed_vertex_p (processed, x))
      rdg_flag_vertex_and_dependent (rdg, x, partition, loops, processed,
				     part_has_writes);

  VEC_free (int, heap, nodes);
}

/* Initialize CONDS with all the condition statements from the basic
   blocks of LOOP.  */

static void
collect_condition_stmts (struct loop *loop, VEC (gimple, heap) **conds)
{
  unsigned i;
  edge e;
  VEC (edge, heap) *exits = get_loop_exit_edges (loop);

  for (i = 0; VEC_iterate (edge, exits, i, e); i++)
    {
      gimple cond = last_stmt (e->src);

      if (cond)
	VEC_safe_push (gimple, heap, *conds, cond);
    }

  VEC_free (edge, heap, exits);
}

/* Add to PARTITION all the exit condition statements for LOOPS
   together with all their dependent statements determined from
   RDG.  */

static void
rdg_flag_loop_exits (struct graph *rdg, bitmap loops, bitmap partition,
		     bitmap processed, bool *part_has_writes)
{
  unsigned i;
  bitmap_iterator bi;
  VEC (gimple, heap) *conds = VEC_alloc (gimple, heap, 3);

  EXECUTE_IF_SET_IN_BITMAP (loops, 0, i, bi)
    collect_condition_stmts (get_loop (i), &conds);

  while (!VEC_empty (gimple, conds))
    {
      gimple cond = VEC_pop (gimple, conds);
      int v = rdg_vertex_for_stmt (rdg, cond);
      bitmap new_loops = BITMAP_ALLOC (NULL);

      if (!already_processed_vertex_p (processed, v))
	rdg_flag_vertex_and_dependent (rdg, v, partition, new_loops, processed,
				       part_has_writes);

      EXECUTE_IF_SET_IN_BITMAP (new_loops, 0, i, bi)
	if (!bitmap_bit_p (loops, i))
	  {
	    bitmap_set_bit (loops, i);
	    collect_condition_stmts (get_loop (i), &conds);
	  }

      BITMAP_FREE (new_loops);
    }

  VEC_free (gimple, heap, conds);
}

/* Flag all the nodes of RDG containing memory accesses that could
   potentially belong to arrays already accessed in the current
   PARTITION.  */

static void
rdg_flag_similar_memory_accesses (struct graph *rdg, bitmap partition,
				  bitmap loops, bitmap processed,
				  VEC (int, heap) **other_stores)
{
  bool foo;
  unsigned i, n;
  int j, k, kk;
  bitmap_iterator ii;
  struct graph_edge *e;

  EXECUTE_IF_SET_IN_BITMAP (partition, 0, i, ii)
    if (RDG_MEM_WRITE_STMT (rdg, i)
	|| RDG_MEM_READS_STMT (rdg, i))
      {
	for (j = 0; j < rdg->n_vertices; j++)
	  if (!bitmap_bit_p (processed, j)
	      && (RDG_MEM_WRITE_STMT (rdg, j)
		  || RDG_MEM_READS_STMT (rdg, j))
	      && rdg_has_similar_memory_accesses (rdg, i, j))
	    {
	      /* Flag first the node J itself, and all the nodes that
		 are needed to compute J.  */
	      rdg_flag_vertex_and_dependent (rdg, j, partition, loops,
					     processed, &foo);

	      /* When J is a read, we want to coalesce in the same
		 PARTITION all the nodes that are using J: this is
		 needed for better cache locality.  */
	      rdg_flag_all_uses (rdg, j, partition, loops, processed, &foo);

	      /* Remove from OTHER_STORES the vertex that we flagged.  */
	      if (RDG_MEM_WRITE_STMT (rdg, j))
		for (k = 0; VEC_iterate (int, *other_stores, k, kk); k++)
		  if (kk == j)
		    {
		      VEC_unordered_remove (int, *other_stores, k);
		      break;
		    }
	    }

	/* If the node I has more than one use, keep all its uses in
	   the same PARTITION.  */
	for (n = 0, e = rdg->vertices[i].succ; e; e = e->succ_next, n++);

	if (n > 1)
	  rdg_flag_all_uses (rdg, i, partition, loops, processed, &foo);
      }
}

/* Returns a bitmap in which all the statements needed for computing
   the strongly connected component C of the RDG are flagged, also
   including the loop exit conditions.  */

static bitmap
build_rdg_partition_for_component (struct graph *rdg, rdgc c,
				   bool *part_has_writes,
				   VEC (int, heap) **other_stores)
{
  int i, v;
  bitmap partition = BITMAP_ALLOC (NULL);
  bitmap loops = BITMAP_ALLOC (NULL);
  bitmap processed = BITMAP_ALLOC (NULL);

  for (i = 0; VEC_iterate (int, c->vertices, i, v); i++)
    if (!already_processed_vertex_p (processed, v))
      rdg_flag_vertex_and_dependent (rdg, v, partition, loops, processed,
				     part_has_writes);

  /* Also iterate on the array of stores not in the starting vertices,
     and determine those vertices that have some memory affinity with
     the current nodes in the component: these are stores to the same
     arrays, i.e. we're taking care of cache locality.  */
  rdg_flag_similar_memory_accesses (rdg, partition, loops, processed,
				    other_stores);

  rdg_flag_loop_exits (rdg, loops, partition, processed, part_has_writes);

  BITMAP_FREE (processed);
  BITMAP_FREE (loops);
  return partition;
}

/* Free memory for COMPONENTS.  */

static void
free_rdg_components (VEC (rdgc, heap) *components)
{
  int i;
  rdgc x;

  for (i = 0; VEC_iterate (rdgc, components, i, x); i++)
    {
      VEC_free (int, heap, x->vertices);
      free (x);
    }

  VEC_free (rdgc, heap, components);
}

/* Build the COMPONENTS vector with the strongly connected components
   of RDG in which the STARTING_VERTICES occur.  */

static void
rdg_build_components (struct graph *rdg, VEC (int, heap) *starting_vertices,
		      VEC (rdgc, heap) **components)
{
  int i, v;
  bitmap saved_components = BITMAP_ALLOC (NULL);
  int n_components = graphds_scc (rdg, NULL);
  VEC (int, heap) **all_components = XNEWVEC (VEC (int, heap) *, n_components);

  for (i = 0; i < n_components; i++)
    all_components[i] = VEC_alloc (int, heap, 3);

  for (i = 0; i < rdg->n_vertices; i++)
    VEC_safe_push (int, heap, all_components[rdg->vertices[i].component], i);

  for (i = 0; VEC_iterate (int, starting_vertices, i, v); i++)
    {
      int c = rdg->vertices[v].component;

      if (!bitmap_bit_p (saved_components, c))
	{
	  rdgc x = XCNEW (struct rdg_component);
	  x->num = c;
	  x->vertices = all_components[c];

	  VEC_safe_push (rdgc, heap, *components, x);
	  bitmap_set_bit (saved_components, c);
	}
    }

  for (i = 0; i < n_components; i++)
    if (!bitmap_bit_p (saved_components, i))
      VEC_free (int, heap, all_components[i]);

  free (all_components);
  BITMAP_FREE (saved_components);
}

/* Aggregate several components into a useful partition that is
   registered in the PARTITIONS vector.  Partitions will be
   distributed in different loops.  */

static void
rdg_build_partitions (struct graph *rdg, VEC (rdgc, heap) *components,
		      VEC (int, heap) **other_stores,
		      VEC (bitmap, heap) **partitions, bitmap processed)
{
  int i;
  rdgc x;
  bitmap partition = BITMAP_ALLOC (NULL);

  for (i = 0; VEC_iterate (rdgc, components, i, x); i++)
    {
      bitmap np;
      bool part_has_writes = false;
      int v = VEC_index (int, x->vertices, 0);

      if (bitmap_bit_p (processed, v))
	continue;

      np = build_rdg_partition_for_component (rdg, x, &part_has_writes,
					      other_stores);
      bitmap_ior_into (partition, np);
      bitmap_ior_into (processed, np);
      BITMAP_FREE (np);

      if (part_has_writes)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "ldist useful partition:\n");
	      dump_bitmap (dump_file, partition);
	    }

	  VEC_safe_push (bitmap, heap, *partitions, partition);
	  partition = BITMAP_ALLOC (NULL);
	}
    }

  /* Add the nodes from the RDG that were not marked as processed, and
     that are used outside the current loop.  These are scalar
     computations that are not yet part of previous partitions.  */
  for (i = 0; i < rdg->n_vertices; i++)
    if (!bitmap_bit_p (processed, i)
	&& rdg_defs_used_in_other_loops_p (rdg, i))
      VEC_safe_push (int, heap, *other_stores, i);

  /* If there are still statements left in the OTHER_STORES array,
     create other components and partitions with these stores and
     their dependences.  */
  if (VEC_length (int, *other_stores) > 0)
    {
      VEC (rdgc, heap) *comps = VEC_alloc (rdgc, heap, 3);
      VEC (int, heap) *foo = VEC_alloc (int, heap, 3);

      rdg_build_components (rdg, *other_stores, &comps);
      rdg_build_partitions (rdg, comps, &foo, partitions, processed);

      VEC_free (int, heap, foo);
      free_rdg_components (comps);
    }

  /* If there is something left in the last partition, save it.  */
  if (bitmap_count_bits (partition) > 0)
    VEC_safe_push (bitmap, heap, *partitions, partition);
  else
    BITMAP_FREE (partition);
}

/* Dump to FILE the PARTITIONS.  */

static void
dump_rdg_partitions (FILE *file, VEC (bitmap, heap) *partitions)
{
  int i;
  bitmap partition;

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    debug_bitmap_file (file, partition);
}

/* Debug PARTITIONS.  */
extern void debug_rdg_partitions (VEC (bitmap, heap) *);

DEBUG_FUNCTION void
debug_rdg_partitions (VEC (bitmap, heap) *partitions)
{
  dump_rdg_partitions (stderr, partitions);
}

/* Returns the number of read and write operations in the RDG.  */

static int
number_of_rw_in_rdg (struct graph *rdg)
{
  int i, res = 0;

  for (i = 0; i < rdg->n_vertices; i++)
    {
      if (RDG_MEM_WRITE_STMT (rdg, i))
	++res;

      if (RDG_MEM_READS_STMT (rdg, i))
	++res;
    }

  return res;
}

/* Returns the number of read and write operations in a PARTITION of
   the RDG.  */

static int
number_of_rw_in_partition (struct graph *rdg, bitmap partition)
{
  int res = 0;
  unsigned i;
  bitmap_iterator ii;

  EXECUTE_IF_SET_IN_BITMAP (partition, 0, i, ii)
    {
      if (RDG_MEM_WRITE_STMT (rdg, i))
	++res;

      if (RDG_MEM_READS_STMT (rdg, i))
	++res;
    }

  return res;
}

/* Returns true when one of the PARTITIONS contains all the read or
   write operations of RDG.  */

static bool
partition_contains_all_rw (struct graph *rdg, VEC (bitmap, heap) *partitions)
{
  int i;
  bitmap partition;
  int nrw = number_of_rw_in_rdg (rdg);

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    if (nrw == number_of_rw_in_partition (rdg, partition))
      return true;

  return false;
}

/* Generate code from STARTING_VERTICES in RDG.  Returns the number of
   distributed loops.  */

static int
ldist_gen (struct loop *loop, struct graph *rdg,
	   VEC (int, heap) *starting_vertices)
{
  int i, nbp;
  VEC (rdgc, heap) *components = VEC_alloc (rdgc, heap, 3);
  VEC (bitmap, heap) *partitions = VEC_alloc (bitmap, heap, 3);
  VEC (int, heap) *other_stores = VEC_alloc (int, heap, 3);
  bitmap partition, processed = BITMAP_ALLOC (NULL);

  remaining_stmts = BITMAP_ALLOC (NULL);
  upstream_mem_writes = BITMAP_ALLOC (NULL);

  for (i = 0; i < rdg->n_vertices; i++)
    {
      bitmap_set_bit (remaining_stmts, i);

      /* Save in OTHER_STORES all the memory writes that are not in
	 STARTING_VERTICES.  */
      if (RDG_MEM_WRITE_STMT (rdg, i))
	{
	  int v;
	  unsigned j;
	  bool found = false;

	  for (j = 0; VEC_iterate (int, starting_vertices, j, v); j++)
	    if (i == v)
	      {
		found = true;
		break;
	      }

	  if (!found)
	    VEC_safe_push (int, heap, other_stores, i);
	}
    }

  mark_nodes_having_upstream_mem_writes (rdg);
  rdg_build_components (rdg, starting_vertices, &components);
  rdg_build_partitions (rdg, components, &other_stores, &partitions,
			processed);
  BITMAP_FREE (processed);
  nbp = VEC_length (bitmap, partitions);

  if (nbp <= 1
      || partition_contains_all_rw (rdg, partitions))
    goto ldist_done;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_rdg_partitions (dump_file, partitions);

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    if (!generate_code_for_partition (loop, partition, i < nbp - 1))
      goto ldist_done;

  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  update_ssa (TODO_update_ssa_only_virtuals | TODO_update_ssa);

 ldist_done:

  BITMAP_FREE (remaining_stmts);
  BITMAP_FREE (upstream_mem_writes);

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    BITMAP_FREE (partition);

  VEC_free (int, heap, other_stores);
  VEC_free (bitmap, heap, partitions);
  free_rdg_components (components);
  return nbp;
}

/* Distributes the code from LOOP in such a way that producer
   statements are placed before consumer statements.  When STMTS is
   NULL, performs the maximal distribution; when STMTS is not NULL,
   tries to separate only these statements from the LOOP's body.
   Returns the number of distributed loops.  */

static int
distribute_loop (struct loop *loop, VEC (gimple, heap) *stmts)
{
  int res = 0;
  struct graph *rdg;
  gimple s;
  unsigned i;
  VEC (int, heap) *vertices;

  if (loop->num_nodes > 2)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "FIXME: Loop %d not distributed: it has more than two basic blocks.\n",
		 loop->num);

      return res;
    }

  rdg = build_rdg (loop);

  if (!rdg)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "FIXME: Loop %d not distributed: failed to build the RDG.\n",
		 loop->num);

      return res;
    }

  vertices = VEC_alloc (int, heap, 3);

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_rdg (dump_file, rdg);

  for (i = 0; VEC_iterate (gimple, stmts, i, s); i++)
    {
      int v = rdg_vertex_for_stmt (rdg, s);

      if (v >= 0)
	{
	  VEC_safe_push (int, heap, vertices, v);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "ldist asked to generate code for vertex %d\n", v);
	}
    }

  res = ldist_gen (loop, rdg, vertices);
  VEC_free (int, heap, vertices);
  free_rdg (rdg);

  return res;
}

/* Distribute all loops in the current function.  */

static unsigned int
tree_loop_distribution (void)
{
  struct loop *loop;
  loop_iterator li;
  int nb_generated_loops = 0;

  FOR_EACH_LOOP (li, loop, 0)
    {
      VEC (gimple, heap) *work_list = VEC_alloc (gimple, heap, 3);

      /* With the following working list, we're asking distribute_loop
	 to separate the stores of the loop: when dependences allow,
	 it will end up with one store per loop.  */
      stores_from_loop (loop, &work_list);

      /* A simple heuristic for cache locality is to not split stores
	 to the same array.  Without this call, an unrolled loop would
	 be split into as many loops as the unroll factor, each loop
	 storing in the same array.  */
      remove_similar_memory_refs (&work_list);

      nb_generated_loops = distribute_loop (loop, work_list);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  if (nb_generated_loops > 1)
	    fprintf (dump_file, "Loop %d distributed: split to %d loops.\n",
		     loop->num, nb_generated_loops);
	  else
	    fprintf (dump_file, "Loop %d is the same.\n", loop->num);
	}

      verify_loop_structure ();

      VEC_free (gimple, heap, work_list);
    }

  return 0;
}

static bool
gate_tree_loop_distribution (void)
{
  return flag_tree_loop_distribution != 0;
}
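
/* Usage note (editorial addition): the gate above means this pass
   only runs when -ftree-loop-distribution is given; its dump, where
   the "ldist" messages printed throughout this file end up, is
   enabled with the standard per-pass dump option, e.g.

     gcc -O2 -ftree-loop-distribution -fdump-tree-ldist-details f.c

   which is a plausible way to exercise the pass on a single file.  */
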
struct gimple_opt_pass pass_loop_distribution =
{
 {
  GIMPLE_PASS,
  "ldist",                      /* name */
  gate_tree_loop_distribution,  /* gate */
  tree_loop_distribution,       /* execute */
  NULL,                         /* sub */
  NULL,                         /* next */
  0,                            /* static_pass_number */
  TV_TREE_LOOP_DISTRIBUTION,    /* tv_id */
  PROP_cfg | PROP_ssa,          /* properties_required */
  0,                            /* properties_provided */
  0,                            /* properties_destroyed */
  0,                            /* todo_flags_start */
  TODO_dump_func                /* todo_flags_finish */
 }
};