/* Loop distribution.
   Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Georges-Andre Silber <Georges-Andre.Silber@ensmp.fr>
   and Sebastian Pop <sebastian.pop@amd.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This pass performs loop distribution: for example, the loop

   |DO I = 2, N
   |    A(I) = B(I) + C
   |    D(I) = A(I-1)*E
   |ENDDO

   is transformed to

   |DOALL I = 2, N
   |   A(I) = B(I) + C
   |ENDDO
   |
   |DOALL I = 2, N
   |   D(I) = A(I-1)*E
   |ENDDO

   This pass uses an RDG, Reduced Dependence Graph built on top of the
   data dependence relations.  The RDG is then topologically sorted to
   obtain a map of information producers/consumers based on which it
   generates the new loops.  */
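/* For instance, in the example above the RDG has a flow edge from the
   statement producing A(I) to the statement reading A(I-1): the
   topological sort therefore places the loop computing A before the
   loop consuming it.  */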
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"

#include "rtl.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "timevar.h"
#include "cfgloop.h"
#include "expr.h"
#include "optabs.h"
#include "tree-chrec.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-pass.h"
#include "lambda.h"
#include "langhooks.h"
#include "tree-vectorizer.h"
/* If bit I is not set, it means that node I represents an operation
   that has already been performed, and that should not be performed
   again.  This is the subgraph of remaining important computations
   that is passed to the DFS algorithm to avoid including the same
   stores several times in different loops.  */
static bitmap remaining_stmts;

/* A node of the RDG is marked in this bitmap when it has as a
   predecessor a node that writes to memory.  */
static bitmap upstream_mem_writes;

/* TODOs we need to run after the pass.  */
static unsigned int todo;
/* Update the PHI nodes of NEW_LOOP.  NEW_LOOP is a duplicate of
   ORIG_LOOP.  */

static void
update_phis_for_loop_copy (struct loop *orig_loop, struct loop *new_loop)
{
  tree new_ssa_name;
  gimple_stmt_iterator si_new, si_orig;
  edge orig_loop_latch = loop_latch_edge (orig_loop);
  edge orig_entry_e = loop_preheader_edge (orig_loop);
  edge new_loop_entry_e = loop_preheader_edge (new_loop);

  /* Scan the phis in the headers of the old and new loops
     (they are organized in exactly the same order).  */
  for (si_new = gsi_start_phis (new_loop->header),
       si_orig = gsi_start_phis (orig_loop->header);
       !gsi_end_p (si_new) && !gsi_end_p (si_orig);
       gsi_next (&si_new), gsi_next (&si_orig))
    {
      tree def;
      gimple phi_new = gsi_stmt (si_new);
      gimple phi_orig = gsi_stmt (si_orig);

      /* Add the first phi argument for the phi in NEW_LOOP (the one
	 associated with the entry of NEW_LOOP).  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_entry_e);
      add_phi_arg (phi_new, def, new_loop_entry_e);

      /* Add the second phi argument for the phi in NEW_LOOP (the one
	 associated with the latch of NEW_LOOP).  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_loop_latch);

      if (TREE_CODE (def) == SSA_NAME)
	{
	  new_ssa_name = get_current_def (def);

	  if (!new_ssa_name)
	    /* This only happens if there are no definitions inside the
	       loop.  Use the phi_result in this case.  */
	    new_ssa_name = PHI_RESULT (phi_new);
	}
      else
	/* Could be an integer.  */
	new_ssa_name = def;

      add_phi_arg (phi_new, new_ssa_name, loop_latch_edge (new_loop));
    }
}
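/* Illustration: given an original header phi

     x_1 = PHI <x_0 (preheader), x_2 (latch)>

   the copied loop receives a phi whose entry argument is again x_0,
   and whose latch argument is the current definition of x_2 in the
   copy, or the phi result itself when x_2 has no definition inside
   the loop.  */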
/* Return a copy of LOOP placed before LOOP.  */

static struct loop *
copy_loop_before (struct loop *loop)
{
  struct loop *res;
  edge preheader = loop_preheader_edge (loop);

  if (!single_exit (loop))
    return NULL;

  initialize_original_copy_tables ();
  res = slpeel_tree_duplicate_loop_to_edge_cfg (loop, preheader);
  free_original_copy_tables ();

  if (!res)
    return NULL;

  update_phis_for_loop_copy (loop, res);
  rename_variables_in_loop (res);

  return res;
}
/* Creates an empty basic block after LOOP.  */

static void
create_bb_after_loop (struct loop *loop)
{
  edge exit = single_exit (loop);

  if (!exit)
    return;

  split_edge (exit);
}
/* Generate code for PARTITION from the code in LOOP.  The loop is
   copied when COPY_P is true.  All the statements not flagged in the
   PARTITION bitmap are removed from the loop or from its copy.  The
   statements are indexed in sequence inside a basic block, and the
   basic blocks of a loop are taken in dom order.  Returns true when
   the code gen succeeded.  */

static bool
generate_loops_for_partition (struct loop *loop, bitmap partition, bool copy_p)
{
  unsigned i, x;
  gimple_stmt_iterator bsi;
  basic_block *bbs;

  if (copy_p)
    {
      loop = copy_loop_before (loop);
      create_preheader (loop, CP_SIMPLE_PREHEADERS);
      create_bb_after_loop (loop);
    }

  if (loop == NULL)
    return false;

  /* Remove stmts not in the PARTITION bitmap.  The order in which we
     visit the phi nodes and the statements is exactly as in
     stmts_from_loop.  */
  bbs = get_loop_body_in_dom_order (loop);

  for (x = 0, i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi);)
	if (!bitmap_bit_p (partition, x++))
	  remove_phi_node (&bsi, true);
	else
	  gsi_next (&bsi);

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi);)
	if (gimple_code (gsi_stmt (bsi)) != GIMPLE_LABEL
	    && !bitmap_bit_p (partition, x++))
	  gsi_remove (&bsi, false);
	else
	  gsi_next (&bsi);

      mark_virtual_ops_in_bb (bb);
    }

  free (bbs);
  return true;
}
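/* Illustration of the indexing scheme used above: for a loop body

     x_1 = PHI <...>       <- index 0
     a[i_2] = b[i_2];      <- index 1
     c[i_2] = a[i_2];      <- index 2

   the PARTITION bitmap {0, 1} keeps the phi node and the first store,
   and removes the second store from this copy of the loop.  */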
/* Build the size argument for a memory builtin call: the number of
   iterations NB_ITER times the size of one element of OP, gimplified
   into STMT_LIST.  */

static tree
build_size_arg (tree nb_iter, tree op, gimple_seq *stmt_list)
{
  tree nb_bytes;
  gimple_seq stmts = NULL;

  nb_bytes = fold_build2 (MULT_EXPR, TREE_TYPE (nb_iter),
			  nb_iter, TYPE_SIZE_UNIT (TREE_TYPE (op)));
  nb_bytes = force_gimple_operand (nb_bytes, &stmts, true, NULL);
  gimple_seq_add_seq (stmt_list, stmts);

  return nb_bytes;
}
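/* For instance, for a store to "short a[n]" with NB_ITER equal to
   n_5, the sequence gimplified into STMT_LIST computes n_5 * 2, the
   total number of bytes accessed.  */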
/* Generate a call to memset.  Return true when the operation succeeded.  */

static bool
generate_memset_zero (gimple stmt, tree op0, tree nb_iter,
		      gimple_stmt_iterator bsi)
{
  tree t, addr_base;
  tree nb_bytes = NULL;
  bool res = false;
  gimple_seq stmts = NULL, stmt_list = NULL;
  gimple fn_call;
  tree mem, fndecl, fntype, fn;
  gimple_stmt_iterator i;
  ssa_op_iter iter;
  struct data_reference *dr = XCNEW (struct data_reference);

  DR_STMT (dr) = stmt;
  DR_REF (dr) = op0;
  if (!dr_analyze_innermost (dr))
    goto end;

  /* Test for a positive stride, iterating over every element.  */
  if (integer_zerop (fold_build2 (MINUS_EXPR, integer_type_node, DR_STEP (dr),
				  TYPE_SIZE_UNIT (TREE_TYPE (op0)))))
    {
      tree offset = fold_convert (sizetype,
				  size_binop (PLUS_EXPR,
					      DR_OFFSET (dr),
					      DR_INIT (dr)));
      addr_base = fold_build2 (POINTER_PLUS_EXPR,
			       TREE_TYPE (DR_BASE_ADDRESS (dr)),
			       DR_BASE_ADDRESS (dr), offset);
    }

  /* Test for a negative stride, iterating over every element.  */
  else if (integer_zerop (fold_build2 (PLUS_EXPR, integer_type_node,
				       TYPE_SIZE_UNIT (TREE_TYPE (op0)),
				       DR_STEP (dr))))
    {
      nb_bytes = build_size_arg (nb_iter, op0, &stmt_list);
      addr_base = size_binop (PLUS_EXPR, DR_OFFSET (dr), DR_INIT (dr));
      addr_base = fold_build2 (MINUS_EXPR, sizetype, addr_base, nb_bytes);
      addr_base = force_gimple_operand (addr_base, &stmts, true, NULL);
      gimple_seq_add_seq (&stmt_list, stmts);

      addr_base = fold_build2 (POINTER_PLUS_EXPR,
			       TREE_TYPE (DR_BASE_ADDRESS (dr)),
			       DR_BASE_ADDRESS (dr), addr_base);
    }
  else
    goto end;

  mem = force_gimple_operand (addr_base, &stmts, true, NULL);
  gimple_seq_add_seq (&stmt_list, stmts);

  fndecl = implicit_built_in_decls[BUILT_IN_MEMSET];
  fntype = TREE_TYPE (fndecl);
  fn = build1 (ADDR_EXPR, build_pointer_type (fntype), fndecl);

  if (!nb_bytes)
    nb_bytes = build_size_arg (nb_iter, op0, &stmt_list);
  fn_call = gimple_build_call (fn, 3, mem, integer_zero_node, nb_bytes);
  gimple_seq_add_stmt (&stmt_list, fn_call);

  /* Mark the virtual definitions of the new statements for renaming.  */
  for (i = gsi_start (stmt_list); !gsi_end_p (i); gsi_next (&i))
    {
      gimple s = gsi_stmt (i);
      update_stmt_if_modified (s);

      FOR_EACH_SSA_TREE_OPERAND (t, s, iter, SSA_OP_VIRTUAL_DEFS)
	{
	  if (TREE_CODE (t) == SSA_NAME)
	    t = SSA_NAME_VAR (t);
	  mark_sym_for_renaming (t);
	}
    }

  /* Mark also the uses of the VDEFS of STMT to be renamed.  */
  FOR_EACH_SSA_TREE_OPERAND (t, stmt, iter, SSA_OP_VIRTUAL_DEFS)
    {
      if (TREE_CODE (t) == SSA_NAME)
	{
	  gimple s;
	  imm_use_iterator imm_iter;

	  FOR_EACH_IMM_USE_STMT (s, imm_iter, t)
	    update_stmt (s);

	  t = SSA_NAME_VAR (t);
	  mark_sym_for_renaming (t);
	}
    }

  gsi_insert_seq_after (&bsi, stmt_list, GSI_CONTINUE_LINKING);
  res = true;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "generated memset zero\n");

  todo |= TODO_rebuild_alias;

 end:
  free_data_ref (dr);
  return res;
}
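/* Illustration: starting from a C loop like

     for (i = 0; i < n; i++)
       a[i] = 0;

   the call generated in the preheader is roughly

     memset (&a[0], 0, n * sizeof (a[0]));

   In the negative stride case, the base address is moved back by
   NB_BYTES so that the memset still starts at the lowest accessed
   byte.  */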
/* Propagate the phi nodes of basic block B to their uses and remove
   them; every phi is expected to have exactly one argument.  */

static void
prop_phis (basic_block b)
{
  gimple_stmt_iterator psi;
  gimple_seq phis = phi_nodes (b);

  for (psi = gsi_start (phis); !gsi_end_p (psi); )
    {
      gimple phi = gsi_stmt (psi);
      tree def = gimple_phi_result (phi), use = gimple_phi_arg_def (phi, 0);

      gcc_assert (gimple_phi_num_args (phi) == 1);

      if (!is_gimple_reg (def))
	{
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  gimple stmt;

	  FOR_EACH_IMM_USE_STMT (stmt, iter, def)
	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
	      SET_USE (use_p, use);
	}
      else
	replace_uses_by (def, use);

      remove_phi_node (&psi, true);
    }
}
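/* For instance, a single-argument phi "x_1 = PHI <x_0 (e)>" is
   removed after rewriting every use of x_1 into a use of x_0.  */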
/* Tries to generate a builtin function for the instructions of LOOP
   pointed to by the bits set in PARTITION.  Returns true when the
   operation succeeded.  */

static bool
generate_builtin (struct loop *loop, bitmap partition, bool copy_p)
{
  bool res = false;
  unsigned i, x = 0;
  basic_block *bbs;
  gimple write = NULL;
  tree op0, op1;
  gimple_stmt_iterator bsi;
  tree nb_iter = number_of_exit_cond_executions (loop);

  if (!nb_iter || nb_iter == chrec_dont_know)
    return false;

  bbs = get_loop_body_in_dom_order (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	x++;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  gimple stmt = gsi_stmt (bsi);

	  if (bitmap_bit_p (partition, x++)
	      && is_gimple_assign (stmt)
	      && !is_gimple_reg (gimple_assign_lhs (stmt)))
	    {
	      /* Don't generate the builtins when there are more than
		 one memory write.  */
	      if (write != NULL)
		goto end;

	      write = stmt;
	    }
	}
    }

  if (!write)
    goto end;

  op0 = gimple_assign_lhs (write);
  op1 = gimple_assign_rhs1 (write);

  if (!(TREE_CODE (op0) == ARRAY_REF
	|| TREE_CODE (op0) == INDIRECT_REF))
    goto end;

  /* The new statements will be placed before LOOP.  */
  bsi = gsi_last_bb (loop_preheader_edge (loop)->src);

  if (gimple_assign_rhs_code (write) == INTEGER_CST
      && (integer_zerop (op1) || real_zerop (op1)))
    res = generate_memset_zero (write, op0, nb_iter, bsi);

  /* If this is the last partition for which we generate code, we have
     to destroy the loop.  */
  if (res && !copy_p)
    {
      unsigned nbbs = loop->num_nodes;
      basic_block src = loop_preheader_edge (loop)->src;
      basic_block dest = single_exit (loop)->dest;
      prop_phis (dest);
      make_edge (src, dest, EDGE_FALLTHRU);
      cancel_loop_tree (loop);

      for (i = 0; i < nbbs; i++)
	delete_basic_block (bbs[i]);

      set_immediate_dominator (CDI_DOMINATORS, dest,
			       recompute_dominator (CDI_DOMINATORS, dest));
    }

 end:
  free (bbs);
  return res;
}
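/* For instance, a partition reduced to the single store "a[i] = 0"
   with a known iteration count is rewritten as a memset, whereas a
   partition with several memory writes, or a store of a non-zero
   constant, keeps its loop form: generate_builtin returns false and
   the caller falls back to generate_loops_for_partition.  */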
/* Generates code for PARTITION.  For simple loops, this function can
   generate a built-in.  */

static bool
generate_code_for_partition (struct loop *loop, bitmap partition, bool copy_p)
{
  if (generate_builtin (loop, partition, copy_p))
    return true;

  return generate_loops_for_partition (loop, partition, copy_p);
}
/* Returns true if the node V of RDG cannot be recomputed.  */

static bool
rdg_cannot_recompute_vertex_p (struct graph *rdg, int v)
{
  if (RDG_MEM_WRITE_STMT (rdg, v))
    return true;

  return false;
}

/* Returns true when the vertex V has already been generated in the
   current partition (V is in PROCESSED), or when V belongs to another
   partition and cannot be recomputed (V is not in REMAINING_STMTS).  */

static inline bool
already_processed_vertex_p (bitmap processed, int v)
{
  return (bitmap_bit_p (processed, v)
	  || !bitmap_bit_p (remaining_stmts, v));
}
/* Returns NULL when there is no anti-dependence among the successors
   of vertex V, otherwise returns the edge with the anti-dep.  */

static struct graph_edge *
has_anti_dependence (struct vertex *v)
{
  struct graph_edge *e;

  if (v->pred)
    for (e = v->succ; e; e = e->succ_next)
      if (RDGE_TYPE (e) == anti_dd)
	return e;

  return NULL;
}
/* Returns true when one of the predecessors of vertex V carries an
   upstream memory write, ignoring flow channels.  */

static bool
predecessor_has_mem_write (struct graph *rdg, struct vertex *v)
{
  struct graph_edge *e;

  for (e = v->pred; e; e = e->pred_next)
    if (bitmap_bit_p (upstream_mem_writes, e->src)
	/* Don't consider flow channels: a write to memory followed
	   by a read from memory.  These channels allow the split of
	   the RDG in different partitions.  */
	&& !RDG_MEM_WRITE_STMT (rdg, e->src))
      return true;

  return false;
}
/* Initializes the upstream_mem_writes bitmap following the
   information from RDG.  */

static void
mark_nodes_having_upstream_mem_writes (struct graph *rdg)
{
  int v, x;
  bitmap seen = BITMAP_ALLOC (NULL);

  for (v = rdg->n_vertices - 1; v >= 0; v--)
    if (!bitmap_bit_p (seen, v))
      {
	unsigned i;
	VEC (int, heap) *nodes = VEC_alloc (int, heap, 3);
	bool has_upstream_mem_write_p = false;

	graphds_dfs (rdg, &v, 1, &nodes, false, NULL);

	for (i = 0; VEC_iterate (int, nodes, i, x); i++)
	  {
	    if (bitmap_bit_p (seen, x))
	      continue;

	    bitmap_set_bit (seen, x);

	    if (RDG_MEM_WRITE_STMT (rdg, x)
		|| predecessor_has_mem_write (rdg, &(rdg->vertices[x]))
		/* In anti dependences the read should occur before
		   the write, this is why both the read and the write
		   should be placed in the same partition.  */
		|| has_anti_dependence (&(rdg->vertices[x])))
	      {
		has_upstream_mem_write_p = true;
		bitmap_set_bit (upstream_mem_writes, x);
	      }
	  }

	VEC_free (int, heap, nodes);
      }
}
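/* For instance, with "x_1 = a[i_2]; a[i_2] = y_3;" the anti
   dependence from the read to the write marks the read here, and the
   write is then marked through predecessor_has_mem_write, so later
   code keeps both in the same partition.  */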
/* Returns true when vertex U has a memory write node as a predecessor
   in the RDG.  */

static bool
has_upstream_mem_writes (int u)
{
  return bitmap_bit_p (upstream_mem_writes, u);
}

static void rdg_flag_vertex_and_dependent (struct graph *, int, bitmap, bitmap,
					   bitmap, bool *);
/* Flag all the uses of U.  */

static void
rdg_flag_all_uses (struct graph *rdg, int u, bitmap partition, bitmap loops,
		   bitmap processed, bool *part_has_writes)
{
  struct graph_edge *e;

  for (e = rdg->vertices[u].succ; e; e = e->succ_next)
    if (!bitmap_bit_p (processed, e->dest))
      {
	rdg_flag_vertex_and_dependent (rdg, e->dest, partition, loops,
				       processed, part_has_writes);
	rdg_flag_all_uses (rdg, e->dest, partition, loops, processed,
			   part_has_writes);
      }
}
/* Flag the uses of U, stopping following the information from
   upstream_mem_writes.  */

static void
rdg_flag_uses (struct graph *rdg, int u, bitmap partition, bitmap loops,
	       bitmap processed, bool *part_has_writes)
{
  use_operand_p use_p;
  ssa_op_iter iter;
  struct vertex *x = &(rdg->vertices[u]);
  gimple stmt = RDGV_STMT (x);
  struct graph_edge *anti_dep = has_anti_dependence (x);

  /* Keep in the same partition the destination of an antidependence,
     because this is a store to the exact same location.  Putting this
     in another partition is bad for cache locality.  */
  if (anti_dep)
    {
      int v = anti_dep->dest;

      if (!already_processed_vertex_p (processed, v))
	rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
				       processed, part_has_writes);
    }

  if (gimple_code (stmt) != GIMPLE_PHI)
    {
      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_VIRTUAL_USES)
	{
	  tree use = USE_FROM_PTR (use_p);

	  if (TREE_CODE (use) == SSA_NAME)
	    {
	      gimple def_stmt = SSA_NAME_DEF_STMT (use);
	      int v = rdg_vertex_for_stmt (rdg, def_stmt);

	      if (v >= 0
		  && !already_processed_vertex_p (processed, v))
		rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
					       processed, part_has_writes);
	    }
	}
    }

  if (is_gimple_assign (stmt) && has_upstream_mem_writes (u))
    {
      tree op0 = gimple_assign_lhs (stmt);

      /* Scalar channels don't have enough space for transmitting data
	 between tasks, unless we add more storage by privatizing.  */
      if (is_gimple_reg (op0))
	{
	  imm_use_iterator iter;

	  FOR_EACH_IMM_USE_FAST (use_p, iter, op0)
	    {
	      int v = rdg_vertex_for_stmt (rdg, USE_STMT (use_p));

	      if (!already_processed_vertex_p (processed, v))
		rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
					       processed, part_has_writes);
	    }
	}
    }
}
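/* For instance, with "x_1 = a[i_2]; b[i_2] = x_1;" the scalar x_1
   cannot transmit data across distributed loops without
   privatization, so every consumer of x_1 is flagged into the same
   partition as its definition.  */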
/* Flag V from RDG as part of PARTITION, and also flag its loop number
   in LOOPS.  */

static void
rdg_flag_vertex (struct graph *rdg, int v, bitmap partition, bitmap loops,
		 bool *part_has_writes)
{
  struct loop *loop;

  if (bitmap_bit_p (partition, v))
    return;

  loop = loop_containing_stmt (RDG_STMT (rdg, v));
  bitmap_set_bit (loops, loop->num);
  bitmap_set_bit (partition, v);

  if (rdg_cannot_recompute_vertex_p (rdg, v))
    {
      *part_has_writes = true;
      bitmap_clear_bit (remaining_stmts, v);
    }
}
/* Flag in the bitmap PARTITION the vertex V and all its predecessors.
   Also flag their loop number in LOOPS.  */

static void
rdg_flag_vertex_and_dependent (struct graph *rdg, int v, bitmap partition,
			       bitmap loops, bitmap processed,
			       bool *part_has_writes)
{
  unsigned i;
  VEC (int, heap) *nodes = VEC_alloc (int, heap, 3);
  int x;

  bitmap_set_bit (processed, v);
  rdg_flag_uses (rdg, v, partition, loops, processed, part_has_writes);
  graphds_dfs (rdg, &v, 1, &nodes, false, remaining_stmts);
  rdg_flag_vertex (rdg, v, partition, loops, part_has_writes);

  for (i = 0; VEC_iterate (int, nodes, i, x); i++)
    if (!already_processed_vertex_p (processed, x))
      rdg_flag_vertex_and_dependent (rdg, x, partition, loops, processed,
				     part_has_writes);

  VEC_free (int, heap, nodes);
}
/* Initialize CONDS with all the condition statements from the basic
   blocks of LOOP.  */

static void
collect_condition_stmts (struct loop *loop, VEC (gimple, heap) **conds)
{
  unsigned i;
  edge e;
  VEC (edge, heap) *exits = get_loop_exit_edges (loop);

  for (i = 0; VEC_iterate (edge, exits, i, e); i++)
    {
      gimple cond = last_stmt (e->src);

      if (cond)
	VEC_safe_push (gimple, heap, *conds, cond);
    }

  VEC_free (edge, heap, exits);
}
/* Add to PARTITION all the exit condition statements for LOOPS
   together with all their dependent statements determined from
   RDG.  */

static void
rdg_flag_loop_exits (struct graph *rdg, bitmap loops, bitmap partition,
		     bitmap processed, bool *part_has_writes)
{
  unsigned i;
  bitmap_iterator bi;
  VEC (gimple, heap) *conds = VEC_alloc (gimple, heap, 3);

  EXECUTE_IF_SET_IN_BITMAP (loops, 0, i, bi)
    collect_condition_stmts (get_loop (i), &conds);

  while (!VEC_empty (gimple, conds))
    {
      gimple cond = VEC_pop (gimple, conds);
      int v = rdg_vertex_for_stmt (rdg, cond);
      bitmap new_loops = BITMAP_ALLOC (NULL);

      if (!already_processed_vertex_p (processed, v))
	rdg_flag_vertex_and_dependent (rdg, v, partition, new_loops, processed,
				       part_has_writes);

      EXECUTE_IF_SET_IN_BITMAP (new_loops, 0, i, bi)
	if (!bitmap_bit_p (loops, i))
	  {
	    bitmap_set_bit (loops, i);
	    collect_condition_stmts (get_loop (i), &conds);
	  }

      BITMAP_FREE (new_loops);
    }

  VEC_free (gimple, heap, conds);
}
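/* CONDS acts as a worklist: flagging the dependences of an exit
   condition may pull statements from loops not yet in LOOPS, whose
   exit conditions must in turn be flagged, until no new loop is
   discovered.  */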
/* Flag all the nodes of RDG containing memory accesses that could
   potentially belong to arrays already accessed in the current
   PARTITION.  */

static void
rdg_flag_similar_memory_accesses (struct graph *rdg, bitmap partition,
				  bitmap loops, bitmap processed,
				  VEC (int, heap) **other_stores)
{
  bool foo;
  unsigned i, n;
  int j, k, kk;
  bitmap_iterator ii;
  struct graph_edge *e;

  EXECUTE_IF_SET_IN_BITMAP (partition, 0, i, ii)
    if (RDG_MEM_WRITE_STMT (rdg, i)
	|| RDG_MEM_READS_STMT (rdg, i))
      {
	for (j = 0; j < rdg->n_vertices; j++)
	  if (!bitmap_bit_p (processed, j)
	      && (RDG_MEM_WRITE_STMT (rdg, j)
		  || RDG_MEM_READS_STMT (rdg, j))
	      && rdg_has_similar_memory_accesses (rdg, i, j))
	    {
	      /* Flag first the node J itself, and all the nodes that
		 are needed to compute J.  */
	      rdg_flag_vertex_and_dependent (rdg, j, partition, loops,
					     processed, &foo);

	      /* When J is a read, we want to coalesce in the same
		 PARTITION all the nodes that are using J: this is
		 needed for better cache locality.  */
	      rdg_flag_all_uses (rdg, j, partition, loops, processed, &foo);

	      /* Remove from OTHER_STORES the vertex that we flagged.  */
	      if (RDG_MEM_WRITE_STMT (rdg, j))
		for (k = 0; VEC_iterate (int, *other_stores, k, kk); k++)
		  if (kk == j)
		    {
		      VEC_unordered_remove (int, *other_stores, k);
		      break;
		    }
	    }

	/* If the node I has two uses, then keep these together in the
	   same PARTITION.  */
	for (n = 0, e = rdg->vertices[i].succ; e; e = e->succ_next, n++);

	if (n > 1)
	  rdg_flag_all_uses (rdg, i, partition, loops, processed, &foo);
      }
}
/* Returns a bitmap in which all the statements needed for computing
   the strongly connected component C of the RDG are flagged, also
   including the loop exit conditions.  */

static bitmap
build_rdg_partition_for_component (struct graph *rdg, rdgc c,
				   bool *part_has_writes,
				   VEC (int, heap) **other_stores)
{
  int i, v;
  bitmap partition = BITMAP_ALLOC (NULL);
  bitmap loops = BITMAP_ALLOC (NULL);
  bitmap processed = BITMAP_ALLOC (NULL);

  for (i = 0; VEC_iterate (int, c->vertices, i, v); i++)
    if (!already_processed_vertex_p (processed, v))
      rdg_flag_vertex_and_dependent (rdg, v, partition, loops, processed,
				     part_has_writes);

  /* Also iterate on the array of stores not in the starting vertices,
     and determine those vertices that have some memory affinity with
     the current nodes in the component: these are stores to the same
     arrays, i.e. we're taking care of cache locality.  */
  rdg_flag_similar_memory_accesses (rdg, partition, loops, processed,
				    other_stores);

  rdg_flag_loop_exits (rdg, loops, partition, processed, part_has_writes);

  BITMAP_FREE (processed);
  BITMAP_FREE (loops);
  return partition;
}
/* Free memory for COMPONENTS.  */

static void
free_rdg_components (VEC (rdgc, heap) *components)
{
  int i;
  rdgc x;

  for (i = 0; VEC_iterate (rdgc, components, i, x); i++)
    {
      VEC_free (int, heap, x->vertices);
      free (x);
    }
}
/* Build the COMPONENTS vector with the strongly connected components
   of RDG in which the STARTING_VERTICES occur.  */

static void
rdg_build_components (struct graph *rdg, VEC (int, heap) *starting_vertices,
		      VEC (rdgc, heap) **components)
{
  int i, v;
  bitmap saved_components = BITMAP_ALLOC (NULL);
  int n_components = graphds_scc (rdg, NULL);
  VEC (int, heap) **all_components = XNEWVEC (VEC (int, heap) *, n_components);

  for (i = 0; i < n_components; i++)
    all_components[i] = VEC_alloc (int, heap, 3);

  for (i = 0; i < rdg->n_vertices; i++)
    VEC_safe_push (int, heap, all_components[rdg->vertices[i].component], i);

  for (i = 0; VEC_iterate (int, starting_vertices, i, v); i++)
    {
      int c = rdg->vertices[v].component;

      if (!bitmap_bit_p (saved_components, c))
	{
	  rdgc x = XCNEW (struct rdg_component);
	  x->num = c;
	  x->vertices = all_components[c];

	  VEC_safe_push (rdgc, heap, *components, x);
	  bitmap_set_bit (saved_components, c);
	}
    }

  for (i = 0; i < n_components; i++)
    if (!bitmap_bit_p (saved_components, i))
      VEC_free (int, heap, all_components[i]);

  free (all_components);
  BITMAP_FREE (saved_components);
}
/* Aggregate several components into a useful partition that is
   registered in the PARTITIONS vector.  Partitions will be
   distributed in different loops.  */

static void
rdg_build_partitions (struct graph *rdg, VEC (rdgc, heap) *components,
		      VEC (int, heap) **other_stores,
		      VEC (bitmap, heap) **partitions, bitmap processed)
{
  int i;
  rdgc x;
  bitmap partition = BITMAP_ALLOC (NULL);

  for (i = 0; VEC_iterate (rdgc, components, i, x); i++)
    {
      bitmap np;
      bool part_has_writes = false;
      int v = VEC_index (int, x->vertices, 0);

      if (bitmap_bit_p (processed, v))
	continue;

      np = build_rdg_partition_for_component (rdg, x, &part_has_writes,
					      other_stores);
      bitmap_ior_into (partition, np);
      bitmap_ior_into (processed, np);
      BITMAP_FREE (np);

      if (part_has_writes)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "ldist useful partition:\n");
	      dump_bitmap (dump_file, partition);
	    }

	  VEC_safe_push (bitmap, heap, *partitions, partition);
	  partition = BITMAP_ALLOC (NULL);
	}
    }

  /* Add the nodes from the RDG that were not marked as processed, and
     that are used outside the current loop.  These are scalar
     computations that are not yet part of previous partitions.  */
  for (i = 0; i < rdg->n_vertices; i++)
    if (!bitmap_bit_p (processed, i)
	&& rdg_defs_used_in_other_loops_p (rdg, i))
      VEC_safe_push (int, heap, *other_stores, i);

  /* If there are still statements left in the OTHER_STORES array,
     create other components and partitions with these stores and
     their dependences.  */
  if (VEC_length (int, *other_stores) > 0)
    {
      VEC (rdgc, heap) *comps = VEC_alloc (rdgc, heap, 3);
      VEC (int, heap) *foo = VEC_alloc (int, heap, 3);

      rdg_build_components (rdg, *other_stores, &comps);
      rdg_build_partitions (rdg, comps, &foo, partitions, processed);

      VEC_free (int, heap, foo);
      free_rdg_components (comps);
    }

  /* If there is something left in the last partition, save it.  */
  if (bitmap_count_bits (partition) > 0)
    VEC_safe_push (bitmap, heap, *partitions, partition);
  else
    BITMAP_FREE (partition);
}
/* Dump to FILE the PARTITIONS.  */

static void
dump_rdg_partitions (FILE *file, VEC (bitmap, heap) *partitions)
{
  int i;
  bitmap partition;

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    debug_bitmap_file (file, partition);
}
/* Debug PARTITIONS.  */
extern void debug_rdg_partitions (VEC (bitmap, heap) *);

void
debug_rdg_partitions (VEC (bitmap, heap) *partitions)
{
  dump_rdg_partitions (stderr, partitions);
}
/* Returns the number of read and write operations in the RDG.  */

static int
number_of_rw_in_rdg (struct graph *rdg)
{
  int i, res = 0;

  for (i = 0; i < rdg->n_vertices; i++)
    {
      if (RDG_MEM_WRITE_STMT (rdg, i))
	++res;

      if (RDG_MEM_READS_STMT (rdg, i))
	++res;
    }

  return res;
}
/* Returns the number of read and write operations in a PARTITION of
   the RDG.  */

static int
number_of_rw_in_partition (struct graph *rdg, bitmap partition)
{
  int res = 0;
  unsigned i;
  bitmap_iterator ii;

  EXECUTE_IF_SET_IN_BITMAP (partition, 0, i, ii)
    {
      if (RDG_MEM_WRITE_STMT (rdg, i))
	++res;

      if (RDG_MEM_READS_STMT (rdg, i))
	++res;
    }

  return res;
}
/* Returns true when one of the PARTITIONS contains all the read or
   write operations of RDG.  */

static bool
partition_contains_all_rw (struct graph *rdg, VEC (bitmap, heap) *partitions)
{
  int i;
  bitmap partition;
  int nrw = number_of_rw_in_rdg (rdg);

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    if (nrw == number_of_rw_in_partition (rdg, partition))
      return true;

  return false;
}
/* Generate code from STARTING_VERTICES in RDG.  Returns the number of
   distributed loops.  */

static int
ldist_gen (struct loop *loop, struct graph *rdg,
	   VEC (int, heap) *starting_vertices)
{
  int i, nbp;
  VEC (rdgc, heap) *components = VEC_alloc (rdgc, heap, 3);
  VEC (bitmap, heap) *partitions = VEC_alloc (bitmap, heap, 3);
  VEC (int, heap) *other_stores = VEC_alloc (int, heap, 3);
  bitmap partition, processed = BITMAP_ALLOC (NULL);

  remaining_stmts = BITMAP_ALLOC (NULL);
  upstream_mem_writes = BITMAP_ALLOC (NULL);

  for (i = 0; i < rdg->n_vertices; i++)
    {
      bitmap_set_bit (remaining_stmts, i);

      /* Save in OTHER_STORES all the memory writes that are not in
	 STARTING_VERTICES.  */
      if (RDG_MEM_WRITE_STMT (rdg, i))
	{
	  int v;
	  unsigned j;
	  bool found = false;

	  for (j = 0; VEC_iterate (int, starting_vertices, j, v); j++)
	    if (i == v)
	      {
		found = true;
		break;
	      }

	  if (!found)
	    VEC_safe_push (int, heap, other_stores, i);
	}
    }

  mark_nodes_having_upstream_mem_writes (rdg);
  rdg_build_components (rdg, starting_vertices, &components);
  rdg_build_partitions (rdg, components, &other_stores, &partitions,
			processed);
  BITMAP_FREE (processed);
  nbp = VEC_length (bitmap, partitions);

  if (nbp <= 1
      || partition_contains_all_rw (rdg, partitions))
    goto ldist_done;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_rdg_partitions (dump_file, partitions);

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    if (!generate_code_for_partition (loop, partition, i < nbp - 1))
      goto ldist_done;

  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  update_ssa (TODO_update_ssa_only_virtuals | TODO_update_ssa);

 ldist_done:

  BITMAP_FREE (remaining_stmts);
  BITMAP_FREE (upstream_mem_writes);

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    BITMAP_FREE (partition);

  VEC_free (int, heap, other_stores);
  VEC_free (bitmap, heap, partitions);
  free_rdg_components (components);
  return nbp;
}
/* Distributes the code from LOOP in such a way that producer
   statements are placed before consumer statements.  When STMTS is
   NULL, performs the maximal distribution; if STMTS is not NULL,
   tries to separate only these statements from the LOOP's body.
   Returns the number of distributed loops.  */

static int
distribute_loop (struct loop *loop, VEC (gimple, heap) *stmts)
{
  int res = 0;
  struct graph *rdg;
  gimple s;
  unsigned i;
  VEC (int, heap) *vertices;

  if (loop->num_nodes > 2)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "FIXME: Loop %d not distributed: it has more than two basic blocks.\n",
		 loop->num);

      return res;
    }

  rdg = build_rdg (loop);

  if (!rdg)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "FIXME: Loop %d not distributed: failed to build the RDG.\n",
		 loop->num);

      return res;
    }

  vertices = VEC_alloc (int, heap, 3);

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_rdg (dump_file, rdg);

  for (i = 0; VEC_iterate (gimple, stmts, i, s); i++)
    {
      int v = rdg_vertex_for_stmt (rdg, s);

      if (v >= 0)
	{
	  VEC_safe_push (int, heap, vertices, v);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "ldist asked to generate code for vertex %d\n", v);
	}
    }

  res = ldist_gen (loop, rdg, vertices);
  VEC_free (int, heap, vertices);
  free_rdg (rdg);

  return res;
}
/* Distribute all loops in the current function.  */

static unsigned int
tree_loop_distribution (void)
{
  struct loop *loop;
  loop_iterator li;
  int nb_generated_loops = 0;

  todo = 0;

  FOR_EACH_LOOP (li, loop, 0)
    {
      VEC (gimple, heap) *work_list = VEC_alloc (gimple, heap, 3);

      /* With the following working list, we're asking distribute_loop
	 to separate the stores of the loop: when dependences allow,
	 it will end up with one store per loop.  */
      stores_from_loop (loop, &work_list);

      /* A simple heuristic for cache locality is to not split stores
	 to the same array.  Without this call, an unrolled loop would
	 be split into as many loops as unroll factor, each loop
	 storing in the same array.  */
      remove_similar_memory_refs (&work_list);

      nb_generated_loops = distribute_loop (loop, work_list);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  if (nb_generated_loops > 1)
	    fprintf (dump_file, "Loop %d distributed: split to %d loops.\n",
		     loop->num, nb_generated_loops);
	  else
	    fprintf (dump_file, "Loop %d is the same.\n", loop->num);
	}

      verify_loop_structure ();

      VEC_free (gimple, heap, work_list);
    }

  return todo;
}
static bool
gate_tree_loop_distribution (void)
{
  return flag_tree_loop_distribution != 0;
}

struct gimple_opt_pass pass_loop_distribution =
{
 {
  GIMPLE_PASS,
  "ldist",			/* name */
  gate_tree_loop_distribution,  /* gate */
  tree_loop_distribution,       /* execute */
  NULL,				/* sub */
  NULL,				/* next */
  0,				/* static_pass_number */
  TV_TREE_LOOP_DISTRIBUTION,	/* tv_id */
  PROP_cfg | PROP_ssa,		/* properties_required */
  0,				/* properties_provided */
  0,				/* properties_destroyed */
  0,				/* todo_flags_start */
  TODO_dump_func | TODO_verify_loops	/* todo_flags_finish */
 }
};