/* Loop distribution.
   Copyright (C) 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Georges-Andre Silber <Georges-Andre.Silber@ensmp.fr>
   and Sebastian Pop <sebastian.pop@amd.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This pass performs loop distribution: for example, the loop
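
   |DO I = 2, N
   |    A(I) = B(I) + C
   |    D(I) = A(I-1)*E
   |ENDDO

   is transformed to

   |DO I = 2, N
   |   A(I) = B(I) + C
   |ENDDO
   |DO I = 2, N
   |   D(I) = A(I-1)*E
   |ENDDO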

   This pass uses an RDG, Reduced Dependence Graph built on top of the
   data dependence relations.  The RDG is then topologically sorted to
   obtain a map of information producers/consumers based on which it
   generates the new loops.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "basic-block.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "timevar.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-pass.h"
#include "langhooks.h"
#include "tree-vectorizer.h"

/* If bit I is not set, it means that this node represents an
   operation that has already been performed, and that should not be
   performed again.  This is the subgraph of remaining important
   computations that is passed to the DFS algorithm to avoid including
   the same stores several times in different loops.  */
static bitmap remaining_stmts;

/* A node of the RDG is marked in this bitmap when it has as a
   predecessor a node that writes to memory.  */
static bitmap upstream_mem_writes;

/* Update the PHI nodes of NEW_LOOP.  NEW_LOOP is a duplicate of
   ORIG_LOOP.  */

static void
update_phis_for_loop_copy (struct loop *orig_loop, struct loop *new_loop)
{
  tree new_ssa_name;
  gimple_stmt_iterator si_new, si_orig;
  edge orig_loop_latch = loop_latch_edge (orig_loop);
  edge orig_entry_e = loop_preheader_edge (orig_loop);
  edge new_loop_entry_e = loop_preheader_edge (new_loop);

  /* Scan the phis in the headers of the old and new loops
     (they are organized in exactly the same order).  */
  for (si_new = gsi_start_phis (new_loop->header),
       si_orig = gsi_start_phis (orig_loop->header);
       !gsi_end_p (si_new) && !gsi_end_p (si_orig);
       gsi_next (&si_new), gsi_next (&si_orig))
    {
      tree def;
      source_location locus;
      gimple phi_new = gsi_stmt (si_new);
      gimple phi_orig = gsi_stmt (si_orig);

      /* Add the first phi argument for the phi in NEW_LOOP (the one
         associated with the entry of NEW_LOOP).  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_entry_e);
      locus = gimple_phi_arg_location_from_edge (phi_orig, orig_entry_e);
      add_phi_arg (phi_new, def, new_loop_entry_e, locus);

      /* Add the second phi argument for the phi in NEW_LOOP (the one
         associated with the latch of NEW_LOOP).  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_loop_latch);
      locus = gimple_phi_arg_location_from_edge (phi_orig, orig_loop_latch);

      if (TREE_CODE (def) == SSA_NAME)
        {
          new_ssa_name = get_current_def (def);

          if (!new_ssa_name)
            /* This only happens if there are no definitions inside the
               loop.  Use the invariant in the new loop as is.  */
            new_ssa_name = def;
        }
      else
        /* Could be an integer.  */
        new_ssa_name = def;

      add_phi_arg (phi_new, new_ssa_name, loop_latch_edge (new_loop), locus);
    }
}

/* Return a copy of LOOP placed before LOOP.  */

static struct loop *
copy_loop_before (struct loop *loop)
{
  struct loop *res;
  edge preheader = loop_preheader_edge (loop);

  if (!single_exit (loop))
    return NULL;

  initialize_original_copy_tables ();
  res = slpeel_tree_duplicate_loop_to_edge_cfg (loop, preheader);
  free_original_copy_tables ();

  if (!res)
    return NULL;

  update_phis_for_loop_copy (loop, res);
  rename_variables_in_loop (res);

  return res;
}

/* Creates an empty basic block after LOOP.  */

static void
create_bb_after_loop (struct loop *loop)
{
  edge exit = single_exit (loop);

  if (!exit)
    return;

  split_edge (exit);
}

/* Generate code for PARTITION from the code in LOOP.  The loop is
   copied when COPY_P is true.  All the statements not flagged in the
   PARTITION bitmap are removed from the loop or from its copy.  The
   statements are indexed in sequence inside a basic block, and the
   basic blocks of a loop are taken in dom order.  Returns true when
   the code gen succeeded.  */

static bool
generate_loops_for_partition (struct loop *loop, bitmap partition, bool copy_p)
{
  unsigned i, x;
  gimple_stmt_iterator bsi;
  basic_block *bbs;

  if (copy_p)
    {
      loop = copy_loop_before (loop);
      create_preheader (loop, CP_SIMPLE_PREHEADERS);
      create_bb_after_loop (loop);
    }

  if (loop == NULL)
    return false;

  /* Remove stmts not in the PARTITION bitmap.  The order in which we
     visit the phi nodes and the statements is exactly as in
     stmts_from_loop.  */
  bbs = get_loop_body_in_dom_order (loop);

  for (x = 0, i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi);)
        if (!bitmap_bit_p (partition, x++))
          {
            gimple phi = gsi_stmt (bsi);
            if (!is_gimple_reg (gimple_phi_result (phi)))
              mark_virtual_phi_result_for_renaming (phi);
            remove_phi_node (&bsi, true);
          }
        else
          gsi_next (&bsi);

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi);)
        {
          gimple stmt = gsi_stmt (bsi);
          if (gimple_code (stmt) != GIMPLE_LABEL
              && !bitmap_bit_p (partition, x++))
            {
              unlink_stmt_vdef (stmt);
              gsi_remove (&bsi, true);
            }
          else
            gsi_next (&bsi);
        }
    }

  free (bbs);
  return true;
}

/* Build the size argument for a memset call.  */

static inline tree
build_size_arg_loc (location_t loc, tree nb_iter, tree op,
                    gimple_seq *stmt_list)
{
  gimple_seq stmts;
  tree x = size_binop_loc (loc, MULT_EXPR,
                           fold_convert_loc (loc, sizetype, nb_iter),
                           TYPE_SIZE_UNIT (TREE_TYPE (op)));
  x = force_gimple_operand (x, &stmts, true, NULL);
  gimple_seq_add_seq (stmt_list, stmts);

  return x;
}
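
/* For instance, zeroing N elements of an "int" array yields the size
   expression N * 4 on a target where TYPE_SIZE_UNIT of "int" is 4.  */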

/* Generate a call to memset.  Return true when the operation succeeded.  */

static bool
generate_memset_zero (gimple stmt, tree op0, tree nb_iter,
                      gimple_stmt_iterator bsi)
{
  tree addr_base, nb_bytes;
  bool res = false;
  gimple_seq stmt_list = NULL, stmts;
  gimple fn_call;
  tree mem, fn;
  struct data_reference *dr = XCNEW (struct data_reference);
  location_t loc = gimple_location (stmt);

  DR_STMT (dr) = stmt;
  DR_REF (dr) = op0;
  if (!dr_analyze_innermost (dr))
    goto end;

  /* Test for a positive stride, iterating over every element.  */
  if (integer_zerop (size_binop (MINUS_EXPR,
                                 fold_convert (sizetype, DR_STEP (dr)),
                                 TYPE_SIZE_UNIT (TREE_TYPE (op0)))))
    {
      addr_base = fold_convert_loc (loc, sizetype,
                                    size_binop_loc (loc, PLUS_EXPR,
                                                    DR_OFFSET (dr),
                                                    DR_INIT (dr)));
      addr_base = fold_build2_loc (loc, POINTER_PLUS_EXPR,
                                   TREE_TYPE (DR_BASE_ADDRESS (dr)),
                                   DR_BASE_ADDRESS (dr), addr_base);

      nb_bytes = build_size_arg_loc (loc, nb_iter, op0, &stmt_list);
    }

  /* Test for a negative stride, iterating over every element.  */
  else if (integer_zerop (size_binop (PLUS_EXPR,
                                      TYPE_SIZE_UNIT (TREE_TYPE (op0)),
                                      fold_convert (sizetype, DR_STEP (dr)))))
    {
      nb_bytes = build_size_arg_loc (loc, nb_iter, op0, &stmt_list);

      addr_base = size_binop_loc (loc, PLUS_EXPR, DR_OFFSET (dr), DR_INIT (dr));
      addr_base = fold_convert_loc (loc, sizetype, addr_base);
      addr_base = size_binop_loc (loc, MINUS_EXPR, addr_base,
                                  fold_convert_loc (loc, sizetype, nb_bytes));
      addr_base = size_binop_loc (loc, PLUS_EXPR, addr_base,
                                  TYPE_SIZE_UNIT (TREE_TYPE (op0)));
      addr_base = fold_build2_loc (loc, POINTER_PLUS_EXPR,
                                   TREE_TYPE (DR_BASE_ADDRESS (dr)),
                                   DR_BASE_ADDRESS (dr), addr_base);
    }
  else
    goto end;

  mem = force_gimple_operand (addr_base, &stmts, true, NULL);
  gimple_seq_add_seq (&stmt_list, stmts);

  fn = build_fold_addr_expr (implicit_built_in_decls[BUILT_IN_MEMSET]);
  fn_call = gimple_build_call (fn, 3, mem, integer_zero_node, nb_bytes);
  gimple_seq_add_stmt (&stmt_list, fn_call);
  gsi_insert_seq_after (&bsi, stmt_list, GSI_CONTINUE_LINKING);
  res = true;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "generated memset zero\n");

 end:
  free_data_ref (dr);
  return res;
}

/* Propagate phis in BB b to their uses and remove them.  */

static void
prop_phis (basic_block b)
{
  gimple_stmt_iterator psi;
  gimple_seq phis = phi_nodes (b);

  for (psi = gsi_start (phis); !gsi_end_p (psi); )
    {
      gimple phi = gsi_stmt (psi);
      tree def = gimple_phi_result (phi), use = gimple_phi_arg_def (phi, 0);

      gcc_assert (gimple_phi_num_args (phi) == 1);

      if (!is_gimple_reg (def))
        {
          imm_use_iterator iter;
          use_operand_p use_p;
          gimple stmt;

          FOR_EACH_IMM_USE_STMT (stmt, iter, def)
            FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
              SET_USE (use_p, use);
        }
      else
        replace_uses_by (def, use);

      remove_phi_node (&psi, true);
    }
}
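
/* E.g. a degenerate phi "x_1 = PHI <y_2>" in B is removed, and every
   use of x_1 is rewritten to use y_2 directly.  */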

/* Tries to generate a builtin function for the instructions of LOOP
   pointed to by the bits set in PARTITION.  Returns true when the
   operation succeeded.  */

static bool
generate_builtin (struct loop *loop, bitmap partition, bool copy_p)
{
  bool res = false;
  unsigned i, x = 0;
  basic_block *bbs;
  gimple write = NULL;
  tree op0, op1;
  gimple_stmt_iterator bsi;
  tree nb_iter = number_of_exit_cond_executions (loop);

  if (!nb_iter || nb_iter == chrec_dont_know)
    return false;

  bbs = get_loop_body_in_dom_order (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        x++;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        {
          gimple stmt = gsi_stmt (bsi);

          if (bitmap_bit_p (partition, x++)
              && is_gimple_assign (stmt)
              && !is_gimple_reg (gimple_assign_lhs (stmt)))
            {
              /* Don't generate the builtins when there is more than
                 one memory write.  */
              if (write != NULL)
                goto end;

              write = stmt;
              if (bb == loop->latch)
                nb_iter = number_of_latch_executions (loop);
            }
        }
    }

  if (!write)
    goto end;

  op0 = gimple_assign_lhs (write);
  op1 = gimple_assign_rhs1 (write);

  if (!(TREE_CODE (op0) == ARRAY_REF
        || TREE_CODE (op0) == MEM_REF))
    goto end;

  /* The new statements will be placed before LOOP.  */
  bsi = gsi_last_bb (loop_preheader_edge (loop)->src);

  if (gimple_assign_rhs_code (write) == INTEGER_CST
      && (integer_zerop (op1) || real_zerop (op1)))
    res = generate_memset_zero (write, op0, nb_iter, bsi);

  /* If this is the last partition for which we generate code, we have
     to destroy the loop.  */
  if (res && !copy_p)
    {
      unsigned nbbs = loop->num_nodes;
      basic_block src = loop_preheader_edge (loop)->src;
      basic_block dest = single_exit (loop)->dest;
      prop_phis (dest);
      make_edge (src, dest, EDGE_FALLTHRU);
      cancel_loop_tree (loop);

      for (i = 0; i < nbbs; i++)
        delete_basic_block (bbs[i]);

      set_immediate_dominator (CDI_DOMINATORS, dest,
                               recompute_dominator (CDI_DOMINATORS, dest));
    }

 end:
  free (bbs);
  return res;
}

/* Generates code for PARTITION.  For simple loops, this function can
   generate a built-in.  */

static bool
generate_code_for_partition (struct loop *loop, bitmap partition, bool copy_p)
{
  if (generate_builtin (loop, partition, copy_p))
    return true;

  return generate_loops_for_partition (loop, partition, copy_p);
}

/* Returns true if the node V of RDG cannot be recomputed.  */

static bool
rdg_cannot_recompute_vertex_p (struct graph *rdg, int v)
{
  if (RDG_MEM_WRITE_STMT (rdg, v))
    return true;

  return false;
}

/* Returns true when the vertex V has already been generated in the
   current partition (V is in PROCESSED), or when V belongs to another
   partition and cannot be recomputed (V is not in REMAINING_STMTS).  */

static inline bool
already_processed_vertex_p (bitmap processed, int v)
{
  return (bitmap_bit_p (processed, v)
          || !bitmap_bit_p (remaining_stmts, v));
}

/* Returns NULL when there is no anti-dependence among the successors
   of vertex V, otherwise returns the edge with the anti-dep.  */

static struct graph_edge *
has_anti_dependence (struct vertex *v)
{
  struct graph_edge *e;

  if (v->succ)
    for (e = v->succ; e; e = e->succ_next)
      if (RDGE_TYPE (e) == anti_dd)
        return e;

  return NULL;
}

/* Returns true when V has a predecessor node that writes to memory.  */

static bool
predecessor_has_mem_write (struct graph *rdg, struct vertex *v)
{
  struct graph_edge *e;

  if (v->pred)
    for (e = v->pred; e; e = e->pred_next)
      if (bitmap_bit_p (upstream_mem_writes, e->src)
          /* Don't consider flow channels: a write to memory followed
             by a read from memory.  These channels allow the split of
             the RDG in different partitions.  */
          && !RDG_MEM_WRITE_STMT (rdg, e->src))
        return true;

  return false;
}

/* Initializes the upstream_mem_writes bitmap following the
   information from RDG.  */

static void
mark_nodes_having_upstream_mem_writes (struct graph *rdg)
{
  int v, x;
  bitmap seen = BITMAP_ALLOC (NULL);

  for (v = rdg->n_vertices - 1; v >= 0; v--)
    if (!bitmap_bit_p (seen, v))
      {
        unsigned i;
        VEC (int, heap) *nodes = VEC_alloc (int, heap, 3);

        graphds_dfs (rdg, &v, 1, &nodes, false, NULL);

        FOR_EACH_VEC_ELT (int, nodes, i, x)
          {
            if (!bitmap_set_bit (seen, x))
              continue;

            if (RDG_MEM_WRITE_STMT (rdg, x)
                || predecessor_has_mem_write (rdg, &(rdg->vertices[x]))
                /* In anti dependences the read should occur before
                   the write; this is why both the read and the write
                   should be placed in the same partition.  */
                || has_anti_dependence (&(rdg->vertices[x])))
              bitmap_set_bit (upstream_mem_writes, x);
          }

        VEC_free (int, heap, nodes);
      }
}

/* Returns true when vertex u has a memory write node as a
   predecessor.  */

static bool
has_upstream_mem_writes (int u)
{
  return bitmap_bit_p (upstream_mem_writes, u);
}

static void rdg_flag_vertex_and_dependent (struct graph *, int, bitmap, bitmap,
                                           bitmap, bool *);

/* Flag all the uses of U.  */

static void
rdg_flag_all_uses (struct graph *rdg, int u, bitmap partition, bitmap loops,
                   bitmap processed, bool *part_has_writes)
{
  struct graph_edge *e;

  for (e = rdg->vertices[u].succ; e; e = e->succ_next)
    if (!bitmap_bit_p (processed, e->dest))
      {
        rdg_flag_vertex_and_dependent (rdg, e->dest, partition, loops,
                                       processed, part_has_writes);
        rdg_flag_all_uses (rdg, e->dest, partition, loops, processed,
                           part_has_writes);
      }
}

/* Flag the uses of U, stopping the traversal according to the
   information in upstream_mem_writes.  */

static void
rdg_flag_uses (struct graph *rdg, int u, bitmap partition, bitmap loops,
               bitmap processed, bool *part_has_writes)
{
  use_operand_p use_p;
  struct vertex *x = &(rdg->vertices[u]);
  gimple stmt = RDGV_STMT (x);
  struct graph_edge *anti_dep = has_anti_dependence (x);

  /* Keep in the same partition the destination of an antidependence,
     because this is a store to the exact same location.  Putting this
     in another partition is bad for cache locality.  */
  if (anti_dep)
    {
      int v = anti_dep->dest;

      if (!already_processed_vertex_p (processed, v))
        rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
                                       processed, part_has_writes);
    }

  if (gimple_code (stmt) != GIMPLE_PHI)
    {
      if ((use_p = gimple_vuse_op (stmt)) != NULL_USE_OPERAND_P)
        {
          tree use = USE_FROM_PTR (use_p);

          if (TREE_CODE (use) == SSA_NAME)
            {
              gimple def_stmt = SSA_NAME_DEF_STMT (use);
              int v = rdg_vertex_for_stmt (rdg, def_stmt);

              if (v >= 0
                  && !already_processed_vertex_p (processed, v))
                rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
                                               processed, part_has_writes);
            }
        }
    }

  if (is_gimple_assign (stmt) && has_upstream_mem_writes (u))
    {
      tree op0 = gimple_assign_lhs (stmt);

      /* Scalar channels don't have enough space for transmitting data
         between tasks, unless we add more storage by privatizing.  */
      if (is_gimple_reg (op0))
        {
          imm_use_iterator iter;

          FOR_EACH_IMM_USE_FAST (use_p, iter, op0)
            {
              int v = rdg_vertex_for_stmt (rdg, USE_STMT (use_p));

              if (!already_processed_vertex_p (processed, v))
                rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
                                               processed, part_has_writes);
            }
        }
    }
}

/* Flag V from RDG as part of PARTITION, and also flag its loop number
   in LOOPS.  */

static void
rdg_flag_vertex (struct graph *rdg, int v, bitmap partition, bitmap loops,
                 bool *part_has_writes)
{
  struct loop *loop;

  if (!bitmap_set_bit (partition, v))
    return;

  loop = loop_containing_stmt (RDG_STMT (rdg, v));
  bitmap_set_bit (loops, loop->num);

  if (rdg_cannot_recompute_vertex_p (rdg, v))
    {
      *part_has_writes = true;
      bitmap_clear_bit (remaining_stmts, v);
    }
}

/* Flag in the bitmap PARTITION the vertex V and all its predecessors.
   Also flag their loop number in LOOPS.  */

static void
rdg_flag_vertex_and_dependent (struct graph *rdg, int v, bitmap partition,
                               bitmap loops, bitmap processed,
                               bool *part_has_writes)
{
  unsigned i;
  VEC (int, heap) *nodes = VEC_alloc (int, heap, 3);
  int x;

  bitmap_set_bit (processed, v);
  rdg_flag_uses (rdg, v, partition, loops, processed, part_has_writes);
  graphds_dfs (rdg, &v, 1, &nodes, false, remaining_stmts);
  rdg_flag_vertex (rdg, v, partition, loops, part_has_writes);

  FOR_EACH_VEC_ELT (int, nodes, i, x)
    if (!already_processed_vertex_p (processed, x))
      rdg_flag_vertex_and_dependent (rdg, x, partition, loops, processed,
                                     part_has_writes);

  VEC_free (int, heap, nodes);
}

/* Initialize CONDS with all the condition statements from the basic
   blocks that dominate the exit of LOOP.  */

static void
collect_condition_stmts (struct loop *loop, VEC (gimple, heap) **conds)
{
  unsigned i;
  edge e;
  VEC (edge, heap) *exits = get_loop_exit_edges (loop);

  FOR_EACH_VEC_ELT (edge, exits, i, e)
    {
      gimple cond = last_stmt (e->src);

      if (cond)
        VEC_safe_push (gimple, heap, *conds, cond);
    }

  VEC_free (edge, heap, exits);
}

/* Add to PARTITION all the exit condition statements for LOOPS
   together with all their dependent statements determined from
   RDG.  */

static void
rdg_flag_loop_exits (struct graph *rdg, bitmap loops, bitmap partition,
                     bitmap processed, bool *part_has_writes)
{
  unsigned i;
  bitmap_iterator bi;
  VEC (gimple, heap) *conds = VEC_alloc (gimple, heap, 3);

  EXECUTE_IF_SET_IN_BITMAP (loops, 0, i, bi)
    collect_condition_stmts (get_loop (i), &conds);

  while (!VEC_empty (gimple, conds))
    {
      gimple cond = VEC_pop (gimple, conds);
      int v = rdg_vertex_for_stmt (rdg, cond);
      bitmap new_loops = BITMAP_ALLOC (NULL);

      if (!already_processed_vertex_p (processed, v))
        rdg_flag_vertex_and_dependent (rdg, v, partition, new_loops, processed,
                                       part_has_writes);

      EXECUTE_IF_SET_IN_BITMAP (new_loops, 0, i, bi)
        if (bitmap_set_bit (loops, i))
          collect_condition_stmts (get_loop (i), &conds);

      BITMAP_FREE (new_loops);
    }

  VEC_free (gimple, heap, conds);
}

/* Flag all the nodes of RDG containing memory accesses that could
   potentially belong to arrays already accessed in the current
   PARTITION.  */

static void
rdg_flag_similar_memory_accesses (struct graph *rdg, bitmap partition,
                                  bitmap loops, bitmap processed,
                                  VEC (int, heap) **other_stores)
{
  bool foo;
  unsigned i, n;
  int j, k, kk;
  bitmap_iterator ii;
  struct graph_edge *e;

  EXECUTE_IF_SET_IN_BITMAP (partition, 0, i, ii)
    if (RDG_MEM_WRITE_STMT (rdg, i)
        || RDG_MEM_READS_STMT (rdg, i))
      {
        for (j = 0; j < rdg->n_vertices; j++)
          if (!bitmap_bit_p (processed, j)
              && (RDG_MEM_WRITE_STMT (rdg, j)
                  || RDG_MEM_READS_STMT (rdg, j))
              && rdg_has_similar_memory_accesses (rdg, i, j))
            {
              /* Flag first the node J itself, and all the nodes that
                 are needed to compute J.  */
              rdg_flag_vertex_and_dependent (rdg, j, partition, loops,
                                             processed, &foo);

              /* When J is a read, we want to coalesce in the same
                 PARTITION all the nodes that are using J: this is
                 needed for better cache locality.  */
              rdg_flag_all_uses (rdg, j, partition, loops, processed, &foo);

              /* Remove from OTHER_STORES the vertex that we flagged.  */
              if (RDG_MEM_WRITE_STMT (rdg, j))
                FOR_EACH_VEC_ELT (int, *other_stores, k, kk)
                  if (kk == j)
                    {
                      VEC_unordered_remove (int, *other_stores, k);
                      break;
                    }
            }

        /* If the node I has more than one use, keep all its uses in
           the same PARTITION.  */
        for (n = 0, e = rdg->vertices[i].succ; e; e = e->succ_next, n++);

        if (n > 1)
          rdg_flag_all_uses (rdg, i, partition, loops, processed, &foo);
      }
}

/* Returns a bitmap in which all the statements needed for computing
   the strongly connected component C of the RDG are flagged, also
   including the loop exit conditions.  */

static bitmap
build_rdg_partition_for_component (struct graph *rdg, rdgc c,
                                   bool *part_has_writes,
                                   VEC (int, heap) **other_stores)
{
  int i, v;
  bitmap partition = BITMAP_ALLOC (NULL);
  bitmap loops = BITMAP_ALLOC (NULL);
  bitmap processed = BITMAP_ALLOC (NULL);

  FOR_EACH_VEC_ELT (int, c->vertices, i, v)
    if (!already_processed_vertex_p (processed, v))
      rdg_flag_vertex_and_dependent (rdg, v, partition, loops, processed,
                                     part_has_writes);

  /* Also iterate on the array of stores not in the starting vertices,
     and determine those vertices that have some memory affinity with
     the current nodes in the component: these are stores to the same
     arrays, i.e. we're taking care of cache locality.  */
  rdg_flag_similar_memory_accesses (rdg, partition, loops, processed,
                                    other_stores);

  rdg_flag_loop_exits (rdg, loops, partition, processed, part_has_writes);

  BITMAP_FREE (processed);
  BITMAP_FREE (loops);
  return partition;
}

/* Free memory for COMPONENTS.  */

static void
free_rdg_components (VEC (rdgc, heap) *components)
{
  int i;
  rdgc x;

  FOR_EACH_VEC_ELT (rdgc, components, i, x)
    {
      VEC_free (int, heap, x->vertices);
      free (x);
    }

  VEC_free (rdgc, heap, components);
}

/* Build the COMPONENTS vector with the strongly connected components
   of RDG in which the STARTING_VERTICES occur.  */

static void
rdg_build_components (struct graph *rdg, VEC (int, heap) *starting_vertices,
                      VEC (rdgc, heap) **components)
{
  int i, v;
  bitmap saved_components = BITMAP_ALLOC (NULL);
  int n_components = graphds_scc (rdg, NULL);
  VEC (int, heap) **all_components = XNEWVEC (VEC (int, heap) *, n_components);

  for (i = 0; i < n_components; i++)
    all_components[i] = VEC_alloc (int, heap, 3);

  for (i = 0; i < rdg->n_vertices; i++)
    VEC_safe_push (int, heap, all_components[rdg->vertices[i].component], i);

  FOR_EACH_VEC_ELT (int, starting_vertices, i, v)
    {
      int c = rdg->vertices[v].component;

      if (bitmap_set_bit (saved_components, c))
        {
          rdgc x = XCNEW (struct rdg_component);
          x->num = c;
          x->vertices = all_components[c];

          VEC_safe_push (rdgc, heap, *components, x);
        }
    }

  for (i = 0; i < n_components; i++)
    if (!bitmap_bit_p (saved_components, i))
      VEC_free (int, heap, all_components[i]);

  free (all_components);
  BITMAP_FREE (saved_components);
}

/* Aggregate several components into a useful partition that is
   registered in the PARTITIONS vector.  Partitions will be
   distributed in different loops.  */

static void
rdg_build_partitions (struct graph *rdg, VEC (rdgc, heap) *components,
                      VEC (int, heap) **other_stores,
                      VEC (bitmap, heap) **partitions, bitmap processed)
{
  int i;
  rdgc x;
  bitmap partition = BITMAP_ALLOC (NULL);

  FOR_EACH_VEC_ELT (rdgc, components, i, x)
    {
      bitmap np;
      bool part_has_writes = false;
      int v = VEC_index (int, x->vertices, 0);

      if (bitmap_bit_p (processed, v))
        continue;

      np = build_rdg_partition_for_component (rdg, x, &part_has_writes,
                                              other_stores);
      bitmap_ior_into (partition, np);
      bitmap_ior_into (processed, np);
      BITMAP_FREE (np);

      if (part_has_writes)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "ldist useful partition:\n");
              dump_bitmap (dump_file, partition);
            }

          VEC_safe_push (bitmap, heap, *partitions, partition);
          partition = BITMAP_ALLOC (NULL);
        }
    }

  /* Add the nodes from the RDG that were not marked as processed, and
     that are used outside the current loop.  These are scalar
     computations that are not yet part of previous partitions.  */
  for (i = 0; i < rdg->n_vertices; i++)
    if (!bitmap_bit_p (processed, i)
        && rdg_defs_used_in_other_loops_p (rdg, i))
      VEC_safe_push (int, heap, *other_stores, i);

  /* If there are still statements left in the OTHER_STORES array,
     create other components and partitions with these stores and
     their dependences.  */
  if (VEC_length (int, *other_stores) > 0)
    {
      VEC (rdgc, heap) *comps = VEC_alloc (rdgc, heap, 3);
      VEC (int, heap) *foo = VEC_alloc (int, heap, 3);

      rdg_build_components (rdg, *other_stores, &comps);
      rdg_build_partitions (rdg, comps, &foo, partitions, processed);

      VEC_free (int, heap, foo);
      free_rdg_components (comps);
    }

  /* If there is something left in the last partition, save it.  */
  if (bitmap_count_bits (partition) > 0)
    VEC_safe_push (bitmap, heap, *partitions, partition);
  else
    BITMAP_FREE (partition);
}

/* Dump to FILE the PARTITIONS.  */

static void
dump_rdg_partitions (FILE *file, VEC (bitmap, heap) *partitions)
{
  int i;
  bitmap partition;

  FOR_EACH_VEC_ELT (bitmap, partitions, i, partition)
    debug_bitmap_file (file, partition);
}

/* Debug PARTITIONS.  */
extern void debug_rdg_partitions (VEC (bitmap, heap) *);

void
debug_rdg_partitions (VEC (bitmap, heap) *partitions)
{
  dump_rdg_partitions (stderr, partitions);
}

/* Returns the number of read and write operations in the RDG.  */

static int
number_of_rw_in_rdg (struct graph *rdg)
{
  int i, res = 0;

  for (i = 0; i < rdg->n_vertices; i++)
    {
      if (RDG_MEM_WRITE_STMT (rdg, i))
        ++res;

      if (RDG_MEM_READS_STMT (rdg, i))
        ++res;
    }

  return res;
}

/* Returns the number of read and write operations in a PARTITION of
   the RDG.  */

static int
number_of_rw_in_partition (struct graph *rdg, bitmap partition)
{
  int res = 0;
  unsigned i;
  bitmap_iterator ii;

  EXECUTE_IF_SET_IN_BITMAP (partition, 0, i, ii)
    {
      if (RDG_MEM_WRITE_STMT (rdg, i))
        ++res;

      if (RDG_MEM_READS_STMT (rdg, i))
        ++res;
    }

  return res;
}

/* Returns true when one of the PARTITIONS contains all the read or
   write operations of RDG.  */

static bool
partition_contains_all_rw (struct graph *rdg, VEC (bitmap, heap) *partitions)
{
  int i;
  bitmap partition;
  int nrw = number_of_rw_in_rdg (rdg);

  FOR_EACH_VEC_ELT (bitmap, partitions, i, partition)
    if (nrw == number_of_rw_in_partition (rdg, partition))
      return true;

  return false;
}

/* Generate code from STARTING_VERTICES in RDG.  Returns the number of
   distributed loops.  */

static int
ldist_gen (struct loop *loop, struct graph *rdg,
           VEC (int, heap) *starting_vertices)
{
  int i, nbp;
  VEC (rdgc, heap) *components = VEC_alloc (rdgc, heap, 3);
  VEC (bitmap, heap) *partitions = VEC_alloc (bitmap, heap, 3);
  VEC (int, heap) *other_stores = VEC_alloc (int, heap, 3);
  bitmap partition, processed = BITMAP_ALLOC (NULL);

  remaining_stmts = BITMAP_ALLOC (NULL);
  upstream_mem_writes = BITMAP_ALLOC (NULL);

  for (i = 0; i < rdg->n_vertices; i++)
    {
      bitmap_set_bit (remaining_stmts, i);

      /* Save in OTHER_STORES all the memory writes that are not in
         STARTING_VERTICES.  */
      if (RDG_MEM_WRITE_STMT (rdg, i))
        {
          int v;
          unsigned j;
          bool found = false;

          FOR_EACH_VEC_ELT (int, starting_vertices, j, v)
            if (i == v)
              {
                found = true;
                break;
              }

          if (!found)
            VEC_safe_push (int, heap, other_stores, i);
        }
    }

  mark_nodes_having_upstream_mem_writes (rdg);
  rdg_build_components (rdg, starting_vertices, &components);
  rdg_build_partitions (rdg, components, &other_stores, &partitions,
                        processed);
  BITMAP_FREE (processed);
  nbp = VEC_length (bitmap, partitions);

  if (nbp <= 1
      || partition_contains_all_rw (rdg, partitions))
    goto ldist_done;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_rdg_partitions (dump_file, partitions);

  FOR_EACH_VEC_ELT (bitmap, partitions, i, partition)
    if (!generate_code_for_partition (loop, partition, i < nbp - 1))
      goto ldist_done;

  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  update_ssa (TODO_update_ssa_only_virtuals | TODO_update_ssa);

 ldist_done:

  BITMAP_FREE (remaining_stmts);
  BITMAP_FREE (upstream_mem_writes);

  FOR_EACH_VEC_ELT (bitmap, partitions, i, partition)
    BITMAP_FREE (partition);

  VEC_free (int, heap, other_stores);
  VEC_free (bitmap, heap, partitions);
  free_rdg_components (components);
  return nbp;
}

/* Distributes the code from LOOP in such a way that producer
   statements are placed before consumer statements.  When STMTS is
   NULL, this performs a maximal distribution; otherwise it tries to
   separate only these statements from the LOOP's body.  Returns the
   number of distributed loops.  */

static int
distribute_loop (struct loop *loop, VEC (gimple, heap) *stmts)
{
  int res = 0;
  struct graph *rdg;
  gimple s;
  unsigned i;
  VEC (int, heap) *vertices;

  if (loop->num_nodes > 2)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "FIXME: Loop %d not distributed: it has more than two basic blocks.\n",
                 loop->num);

      return res;
    }

  rdg = build_rdg (loop);

  if (!rdg)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "FIXME: Loop %d not distributed: failed to build the RDG.\n",
                 loop->num);

      return res;
    }

  vertices = VEC_alloc (int, heap, 3);

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_rdg (dump_file, rdg);

  FOR_EACH_VEC_ELT (gimple, stmts, i, s)
    {
      int v = rdg_vertex_for_stmt (rdg, s);

      if (v >= 0)
        {
          VEC_safe_push (int, heap, vertices, v);

          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file,
                     "ldist asked to generate code for vertex %d\n", v);
        }
    }

  res = ldist_gen (loop, rdg, vertices);
  VEC_free (int, heap, vertices);
  free_rdg (rdg);

  return res;
}

/* Distribute all loops in the current function.  */

static unsigned int
tree_loop_distribution (void)
{
  struct loop *loop;
  loop_iterator li;
  int nb_generated_loops = 0;

  FOR_EACH_LOOP (li, loop, 0)
    {
      VEC (gimple, heap) *work_list = VEC_alloc (gimple, heap, 3);

      /* If both flag_tree_loop_distribute_patterns and
         flag_tree_loop_distribution are set, then only
         distribute_patterns is executed.  */
      if (flag_tree_loop_distribute_patterns)
        {
          /* With the following working list, we're asking
             distribute_loop to separate from the rest of the loop the
             stores of the form "A[i] = 0".  */
          stores_zero_from_loop (loop, &work_list);

          /* Do nothing if there are no patterns to be distributed.  */
          if (VEC_length (gimple, work_list) > 0)
            nb_generated_loops = distribute_loop (loop, work_list);
        }
      else if (flag_tree_loop_distribution)
        {
          /* With the following working list, we're asking
             distribute_loop to separate the stores of the loop: when
             dependences allow it, this ends up with one store per
             loop.  */
          stores_from_loop (loop, &work_list);

          /* A simple heuristic for cache locality is to not split
             stores to the same array.  Without this call, an unrolled
             loop would be split into as many loops as the unroll
             factor, each loop storing in the same array.  */
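          /* For instance, if a loop unrolled by two stores both
             "A[i] = ..." and "A[i+1] = ...", this drops one of the two
             stores from the working list, so both seed the same
             partition instead of two loops that each traverse all
             of A.  */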
          remove_similar_memory_refs (&work_list);

          nb_generated_loops = distribute_loop (loop, work_list);
        }

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          if (nb_generated_loops > 1)
            fprintf (dump_file, "Loop %d distributed: split to %d loops.\n",
                     loop->num, nb_generated_loops);
          else
            fprintf (dump_file, "Loop %d is the same.\n", loop->num);
        }

      verify_loop_structure ();

      VEC_free (gimple, heap, work_list);
    }

  return 0;
}

/* The gate of the pass: run it when either -ftree-loop-distribution
   or -ftree-loop-distribute-patterns is given.  */

static bool
gate_tree_loop_distribution (void)
{
  return flag_tree_loop_distribution
    || flag_tree_loop_distribute_patterns;
}
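
/* As an illustration, the gate above fires for command lines such as

     gcc -O2 -ftree-loop-distribution file.c

   while -ftree-loop-distribute-patterns enables only the pattern
   (memset) path of tree_loop_distribution above.  */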

struct gimple_opt_pass pass_loop_distribution =
{
 {
  GIMPLE_PASS,
  "ldist",                      /* name */
  gate_tree_loop_distribution,  /* gate */
  tree_loop_distribution,       /* execute */
  NULL,                         /* sub */
  NULL,                         /* next */
  0,                            /* static_pass_number */
  TV_TREE_LOOP_DISTRIBUTION,    /* tv_id */
  PROP_cfg | PROP_ssa,          /* properties_required */
  0,                            /* properties_provided */
  0,                            /* properties_destroyed */
  0,                            /* todo_flags_start */
  TODO_dump_func                /* todo_flags_finish */
 }
};