1 /* Loop distribution.
2 Copyright (C) 2006-2017 Free Software Foundation, Inc.
3 Contributed by Georges-Andre Silber <Georges-Andre.Silber@ensmp.fr>
4 and Sebastian Pop <sebastian.pop@amd.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by the
10 Free Software Foundation; either version 3, or (at your option) any
11 later version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* This pass performs loop distribution: for example, the loop
24 |DO I = 2, N
25 | A(I) = B(I) + C
26 | D(I) = A(I-1)*E
27 |ENDDO
29 is transformed to
31 |DOALL I = 2, N
32 | A(I) = B(I) + C
33 |ENDDO
35 |DOALL I = 2, N
36 | D(I) = A(I-1)*E
37 |ENDDO
39 Loop distribution is the dual of loop fusion. It separates statements
40 of a loop (or loop nest) into multiple loops (or loop nests) with the
41 same loop header. The major goal is to separate statements that may
42 be vectorized from those that cannot. This pass implements distribution
43 in the following steps:
45 1) Seed partitions with specific types of statements. For now we
46 support two types of seed statements: a statement defining a variable
47 used outside of the loop, and a statement storing to memory.
48 2) Build reduced dependence graph (RDG) for loop to be distributed.
49 The vertices (RDG:V) model all statements in the loop and the edges
50 (RDG:E) model flow and control dependencies between statements.
51 3) Apart from RDG, compute data dependencies between memory references.
52 4) Starting from a seed statement, build up a partition by adding
53 dependent statements according to the RDG's dependence information.
54 A partition is classified as parallel if it can be executed in
55 parallel, or as sequential if it can't. A parallel partition is
56 further classified into different builtin kinds if it can be
57 implemented as builtin function calls (see the example below).
58 5) Build partition dependence graph (PG) based on data dependencies.
59 The vertices (PG:V) model all partitions and the edges (PG:E) model
60 all data dependencies between every pair of partitions. In general,
61 a data dependence is either known or unknown at compilation time. In
62 C family languages, there exist quite a few dependencies unknown at
63 compilation time because of possible aliasing between data references.
64 We categorize PG's edges into two types: "true" edges that represent
65 compilation time known data dependencies, and "alias" edges for all
66 other data dependencies.
67 6) Traverse the subgraph of PG as if all "alias" edges didn't exist.
68 Merge partitions in each strongly connected component (SCC)
69 correspondingly. Build a new PG for the merged partitions.
70 7) Traverse PG again, this time with both "true" and "alias" edges
71 included. We try to break SCCs by removing some edges. Because
72 SCCs formed by "true" edges were all fused in step 6), we can break
73 the remaining SCCs by removing some "alias" edges. It's NP-hard to
74 choose an optimal edge set; fortunately, a simple approximation is
75 good enough for us given the small problem scale.
76 8) Collect all data dependencies of the removed "alias" edges. Create
77 runtime alias checks for collected data dependencies.
78 9) Version the loop under the condition of the runtime alias checks.
79 Since loop distribution generally introduces additional overhead, it
80 is only useful if vectorization is achieved in a distributed loop.
81 We version the loop with the internal function call
82 IFN_LOOP_DIST_ALIAS. If no distributed loop can be vectorized, we
83 simply remove the distributed loops and fall back to the original one.
85 TODO:
86 1) We only distribute innermost loops now. This pass should handle loop
87 nests in the future.
88 2) We only fuse partitions in SCCs now. A better fusion algorithm is
89 desired to minimize loop overhead, maximize parallelism and maximize
90 data reuse. */
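/* As a concrete illustration (an editor's sketch, not from the GCC
   sources), a C loop such as

     for (i = 0; i < n; i++)
       {
         a[i] = 0;
         d[i] = b[i] * e;
       }

   would be distributed into two partitions: the store of zeros is
   classified as PKIND_MEMSET and the computation stays
   PKIND_NORMAL/PTYPE_PARALLEL, giving roughly

     __builtin_memset (a, 0, n * sizeof (a[0]));
     for (i = 0; i < n; i++)
       d[i] = b[i] * e;  */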
92 #include "config.h"
93 #include "system.h"
94 #include "coretypes.h"
95 #include "backend.h"
96 #include "tree.h"
97 #include "gimple.h"
98 #include "cfghooks.h"
99 #include "tree-pass.h"
100 #include "ssa.h"
101 #include "gimple-pretty-print.h"
102 #include "fold-const.h"
103 #include "cfganal.h"
104 #include "gimple-iterator.h"
105 #include "gimplify-me.h"
106 #include "stor-layout.h"
107 #include "tree-cfg.h"
108 #include "tree-ssa-loop-manip.h"
109 #include "tree-ssa-loop.h"
110 #include "tree-into-ssa.h"
111 #include "tree-ssa.h"
112 #include "cfgloop.h"
113 #include "tree-scalar-evolution.h"
114 #include "params.h"
115 #include "tree-vectorizer.h"
118 #define MAX_DATAREFS_NUM \
119 ((unsigned) PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
121 /* Hashtable helpers. */
123 struct ddr_hasher : nofree_ptr_hash <struct data_dependence_relation>
125 static inline hashval_t hash (const data_dependence_relation *);
126 static inline bool equal (const data_dependence_relation *,
127 const data_dependence_relation *);
130 /* Hash function for data dependence. */
132 inline hashval_t
133 ddr_hasher::hash (const data_dependence_relation *ddr)
135 inchash::hash h;
136 h.add_ptr (DDR_A (ddr));
137 h.add_ptr (DDR_B (ddr));
138 return h.end ();
141 /* Hash table equality function for data dependence. */
143 inline bool
144 ddr_hasher::equal (const data_dependence_relation *ddr1,
145 const data_dependence_relation *ddr2)
147 return (DDR_A (ddr1) == DDR_A (ddr2) && DDR_B (ddr1) == DDR_B (ddr2));
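/* Editor's note: the table is keyed on the (DR_A, DR_B) pointer pair, so
   callers must look up a dependence with the two references in a canonical
   order (see the ordering assert in get_data_dependence below) to get
   cache hits.  */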
150 /* The loop (nest) to be distributed. */
151 static vec<loop_p> loop_nest;
153 /* Vector of data references in the loop to be distributed. */
154 static vec<data_reference_p> datarefs_vec;
156 /* Store index of data reference in aux field. */
157 #define DR_INDEX(dr) ((uintptr_t) (dr)->aux)
159 /* Hash table for data dependence relation in the loop to be distributed. */
160 static hash_table<ddr_hasher> *ddrs_table;
162 /* A Reduced Dependence Graph (RDG) vertex representing a statement. */
163 struct rdg_vertex
165 /* The statement represented by this vertex. */
166 gimple *stmt;
168 /* Vector of data-references in this statement. */
169 vec<data_reference_p> datarefs;
171 /* True when the statement contains a write to memory. */
172 bool has_mem_write;
174 /* True when the statement contains a read from memory. */
175 bool has_mem_reads;
178 #define RDGV_STMT(V) ((struct rdg_vertex *) ((V)->data))->stmt
179 #define RDGV_DATAREFS(V) ((struct rdg_vertex *) ((V)->data))->datarefs
180 #define RDGV_HAS_MEM_WRITE(V) ((struct rdg_vertex *) ((V)->data))->has_mem_write
181 #define RDGV_HAS_MEM_READS(V) ((struct rdg_vertex *) ((V)->data))->has_mem_reads
182 #define RDG_STMT(RDG, I) RDGV_STMT (&(RDG->vertices[I]))
183 #define RDG_DATAREFS(RDG, I) RDGV_DATAREFS (&(RDG->vertices[I]))
184 #define RDG_MEM_WRITE_STMT(RDG, I) RDGV_HAS_MEM_WRITE (&(RDG->vertices[I]))
185 #define RDG_MEM_READS_STMT(RDG, I) RDGV_HAS_MEM_READS (&(RDG->vertices[I]))
187 /* Data dependence type. */
189 enum rdg_dep_type
191 /* Read After Write (RAW). */
192 flow_dd = 'f',
194 /* Control dependence (execute conditional on). */
195 control_dd = 'c'
198 /* Dependence information attached to an edge of the RDG. */
200 struct rdg_edge
202 /* Type of the dependence. */
203 enum rdg_dep_type type;
206 #define RDGE_TYPE(E) ((struct rdg_edge *) ((E)->data))->type
208 /* Dump vertex I in RDG to FILE. */
210 static void
211 dump_rdg_vertex (FILE *file, struct graph *rdg, int i)
213 struct vertex *v = &(rdg->vertices[i]);
214 struct graph_edge *e;
216 fprintf (file, "(vertex %d: (%s%s) (in:", i,
217 RDG_MEM_WRITE_STMT (rdg, i) ? "w" : "",
218 RDG_MEM_READS_STMT (rdg, i) ? "r" : "");
220 if (v->pred)
221 for (e = v->pred; e; e = e->pred_next)
222 fprintf (file, " %d", e->src);
224 fprintf (file, ") (out:");
226 if (v->succ)
227 for (e = v->succ; e; e = e->succ_next)
228 fprintf (file, " %d", e->dest);
230 fprintf (file, ")\n");
231 print_gimple_stmt (file, RDGV_STMT (v), 0, TDF_VOPS|TDF_MEMSYMS);
232 fprintf (file, ")\n");
235 /* Call dump_rdg_vertex on stderr. */
237 DEBUG_FUNCTION void
238 debug_rdg_vertex (struct graph *rdg, int i)
240 dump_rdg_vertex (stderr, rdg, i);
243 /* Dump the reduced dependence graph RDG to FILE. */
245 static void
246 dump_rdg (FILE *file, struct graph *rdg)
248 fprintf (file, "(rdg\n");
249 for (int i = 0; i < rdg->n_vertices; i++)
250 dump_rdg_vertex (file, rdg, i);
251 fprintf (file, ")\n");
254 /* Call dump_rdg on stderr. */
256 DEBUG_FUNCTION void
257 debug_rdg (struct graph *rdg)
259 dump_rdg (stderr, rdg);
262 static void
263 dot_rdg_1 (FILE *file, struct graph *rdg)
265 int i;
266 pretty_printer buffer;
267 pp_needs_newline (&buffer) = false;
268 buffer.buffer->stream = file;
270 fprintf (file, "digraph RDG {\n");
272 for (i = 0; i < rdg->n_vertices; i++)
274 struct vertex *v = &(rdg->vertices[i]);
275 struct graph_edge *e;
277 fprintf (file, "%d [label=\"[%d] ", i, i);
278 pp_gimple_stmt_1 (&buffer, RDGV_STMT (v), 0, TDF_SLIM);
279 pp_flush (&buffer);
280 fprintf (file, "\"]\n");
282 /* Highlight reads from memory. */
283 if (RDG_MEM_READS_STMT (rdg, i))
284 fprintf (file, "%d [style=filled, fillcolor=green]\n", i);
286 /* Highlight stores to memory. */
287 if (RDG_MEM_WRITE_STMT (rdg, i))
288 fprintf (file, "%d [style=filled, fillcolor=red]\n", i);
290 if (v->succ)
291 for (e = v->succ; e; e = e->succ_next)
292 switch (RDGE_TYPE (e))
294 case flow_dd:
295 /* These are the most common dependences: don't print these. */
296 fprintf (file, "%d -> %d \n", i, e->dest);
297 break;
299 case control_dd:
300 fprintf (file, "%d -> %d [label=control] \n", i, e->dest);
301 break;
303 default:
304 gcc_unreachable ();
308 fprintf (file, "}\n\n");
311 /* Display the Reduced Dependence Graph using dotty. */
313 DEBUG_FUNCTION void
314 dot_rdg (struct graph *rdg)
316 /* When debugging, you may want to enable the following code. */
317 #ifdef HAVE_POPEN
318 FILE *file = popen ("dot -Tx11", "w");
319 if (!file)
320 return;
321 dot_rdg_1 (file, rdg);
322 fflush (file);
323 close (fileno (file));
324 pclose (file);
325 #else
326 dot_rdg_1 (stderr, rdg);
327 #endif
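/* Usage sketch (assuming a graphviz installation): from a debugger,
   evaluating "call dot_rdg (rdg)" pipes the RDG through "dot -Tx11"
   when popen is available.  */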
330 /* Returns the index of STMT in RDG. */
332 static int
333 rdg_vertex_for_stmt (struct graph *rdg ATTRIBUTE_UNUSED, gimple *stmt)
335 int index = gimple_uid (stmt);
336 gcc_checking_assert (index == -1 || RDG_STMT (rdg, index) == stmt);
337 return index;
340 /* Creates dependence edges in RDG for all the uses of DEF. IDEF is
341 the index of DEF in RDG. */
343 static void
344 create_rdg_edges_for_scalar (struct graph *rdg, tree def, int idef)
346 use_operand_p imm_use_p;
347 imm_use_iterator iterator;
349 FOR_EACH_IMM_USE_FAST (imm_use_p, iterator, def)
351 struct graph_edge *e;
352 int use = rdg_vertex_for_stmt (rdg, USE_STMT (imm_use_p));
354 if (use < 0)
355 continue;
357 e = add_edge (rdg, idef, use);
358 e->data = XNEW (struct rdg_edge);
359 RDGE_TYPE (e) = flow_dd;
363 /* Creates an edge for the control dependences of BB to the vertex V. */
365 static void
366 create_edge_for_control_dependence (struct graph *rdg, basic_block bb,
367 int v, control_dependences *cd)
369 bitmap_iterator bi;
370 unsigned edge_n;
371 EXECUTE_IF_SET_IN_BITMAP (cd->get_edges_dependent_on (bb->index),
372 0, edge_n, bi)
374 basic_block cond_bb = cd->get_edge_src (edge_n);
375 gimple *stmt = last_stmt (cond_bb);
376 if (stmt && is_ctrl_stmt (stmt))
378 struct graph_edge *e;
379 int c = rdg_vertex_for_stmt (rdg, stmt);
380 if (c < 0)
381 continue;
383 e = add_edge (rdg, c, v);
384 e->data = XNEW (struct rdg_edge);
385 RDGE_TYPE (e) = control_dd;
390 /* Creates the edges of the reduced dependence graph RDG. */
392 static void
393 create_rdg_flow_edges (struct graph *rdg)
395 int i;
396 def_operand_p def_p;
397 ssa_op_iter iter;
399 for (i = 0; i < rdg->n_vertices; i++)
400 FOR_EACH_PHI_OR_STMT_DEF (def_p, RDG_STMT (rdg, i),
401 iter, SSA_OP_DEF)
402 create_rdg_edges_for_scalar (rdg, DEF_FROM_PTR (def_p), i);
405 /* Creates the control dependence edges of the reduced dependence graph RDG. */
407 static void
408 create_rdg_cd_edges (struct graph *rdg, control_dependences *cd, loop_p loop)
410 int i;
412 for (i = 0; i < rdg->n_vertices; i++)
414 gimple *stmt = RDG_STMT (rdg, i);
415 if (gimple_code (stmt) == GIMPLE_PHI)
417 edge_iterator ei;
418 edge e;
419 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->preds)
420 if (flow_bb_inside_loop_p (loop, e->src))
421 create_edge_for_control_dependence (rdg, e->src, i, cd);
423 else
424 create_edge_for_control_dependence (rdg, gimple_bb (stmt), i, cd);
428 /* Build the vertices of the reduced dependence graph RDG. Return false
429 if that failed. */
431 static bool
432 create_rdg_vertices (struct graph *rdg, vec<gimple *> stmts, loop_p loop)
434 int i;
435 gimple *stmt;
437 FOR_EACH_VEC_ELT (stmts, i, stmt)
439 struct vertex *v = &(rdg->vertices[i]);
441 /* Record statement to vertex mapping. */
442 gimple_set_uid (stmt, i);
444 v->data = XNEW (struct rdg_vertex);
445 RDGV_STMT (v) = stmt;
446 RDGV_DATAREFS (v).create (0);
447 RDGV_HAS_MEM_WRITE (v) = false;
448 RDGV_HAS_MEM_READS (v) = false;
449 if (gimple_code (stmt) == GIMPLE_PHI)
450 continue;
452 unsigned drp = datarefs_vec.length ();
453 if (!find_data_references_in_stmt (loop, stmt, &datarefs_vec))
454 return false;
455 for (unsigned j = drp; j < datarefs_vec.length (); ++j)
457 data_reference_p dr = datarefs_vec[j];
458 if (DR_IS_READ (dr))
459 RDGV_HAS_MEM_READS (v) = true;
460 else
461 RDGV_HAS_MEM_WRITE (v) = true;
462 RDGV_DATAREFS (v).safe_push (dr);
465 return true;
468 /* Array mapping basic block's index to its topological order. */
469 static int *bb_top_order_index;
470 /* And size of the array. */
471 static int bb_top_order_index_size;
473 /* If X has a smaller topological sort number than Y, returns -1;
474 if greater, returns 1. */
476 static int
477 bb_top_order_cmp (const void *x, const void *y)
479 basic_block bb1 = *(const basic_block *) x;
480 basic_block bb2 = *(const basic_block *) y;
482 gcc_assert (bb1->index < bb_top_order_index_size
483 && bb2->index < bb_top_order_index_size);
484 gcc_assert (bb1 == bb2
485 || bb_top_order_index[bb1->index]
486 != bb_top_order_index[bb2->index]);
488 return (bb_top_order_index[bb1->index] - bb_top_order_index[bb2->index]);
491 /* Initialize STMTS with all the statements of LOOP. We use topological
492 order to discover all statements. The order is important because
493 generate_loops_for_partition uses the same traversal to identify
494 statements in loop copies. */
496 static void
497 stmts_from_loop (struct loop *loop, vec<gimple *> *stmts)
499 unsigned int i;
500 basic_block *bbs = get_loop_body_in_custom_order (loop, bb_top_order_cmp);
502 for (i = 0; i < loop->num_nodes; i++)
504 basic_block bb = bbs[i];
506 for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
507 gsi_next (&bsi))
508 if (!virtual_operand_p (gimple_phi_result (bsi.phi ())))
509 stmts->safe_push (bsi.phi ());
511 for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
512 gsi_next (&bsi))
514 gimple *stmt = gsi_stmt (bsi);
515 if (gimple_code (stmt) != GIMPLE_LABEL && !is_gimple_debug (stmt))
516 stmts->safe_push (stmt);
520 free (bbs);
523 /* Free the reduced dependence graph RDG. */
525 static void
526 free_rdg (struct graph *rdg)
528 int i;
530 for (i = 0; i < rdg->n_vertices; i++)
532 struct vertex *v = &(rdg->vertices[i]);
533 struct graph_edge *e;
535 for (e = v->succ; e; e = e->succ_next)
536 free (e->data);
538 if (v->data)
540 gimple_set_uid (RDGV_STMT (v), -1);
541 (RDGV_DATAREFS (v)).release ();
542 free (v->data);
546 free_graph (rdg);
549 /* Build the Reduced Dependence Graph (RDG) with one vertex per statement of
550 LOOP, and one edge per flow dependence or control dependence from control
551 dependence CD. While visiting each statement, data references are
552 also collected and recorded in the global vector DATAREFS_VEC. */
554 static struct graph *
555 build_rdg (struct loop *loop, control_dependences *cd)
557 struct graph *rdg;
559 /* Create the RDG vertices from the stmts of the loop nest. */
560 auto_vec<gimple *, 10> stmts;
561 stmts_from_loop (loop, &stmts);
562 rdg = new_graph (stmts.length ());
563 if (!create_rdg_vertices (rdg, stmts, loop))
565 free_rdg (rdg);
566 return NULL;
568 stmts.release ();
570 create_rdg_flow_edges (rdg);
571 if (cd)
572 create_rdg_cd_edges (rdg, cd, loop);
574 return rdg;
578 /* Kind of distributed loop. */
579 enum partition_kind {
580 PKIND_NORMAL, PKIND_MEMSET, PKIND_MEMCPY, PKIND_MEMMOVE
583 /* Type of distributed loop. */
584 enum partition_type {
585 /* The distributed loop can be executed in parallel. */
586 PTYPE_PARALLEL = 0,
587 /* The distributed loop has to be executed sequentially. */
588 PTYPE_SEQUENTIAL
591 /* Partition for loop distribution. */
592 struct partition
594 /* Statements of the partition. */
595 bitmap stmts;
596 /* True if the partition defines a variable which is used outside of the loop. */
597 bool reduction_p;
598 /* For a builtin partition, true if it executes one more iteration than
599 the number of loop (latch) iterations. */
600 bool plus_one;
601 enum partition_kind kind;
602 enum partition_type type;
603 /* data-references a kind != PKIND_NORMAL partition is about. */
604 data_reference_p main_dr;
605 data_reference_p secondary_dr;
606 /* Number of loop (latch) iterations. */
607 tree niter;
608 /* Data references in the partition. */
609 bitmap datarefs;
613 /* Allocate and initialize a new partition. */
615 static partition *
616 partition_alloc (void)
618 partition *partition = XCNEW (struct partition);
619 partition->stmts = BITMAP_ALLOC (NULL);
620 partition->reduction_p = false;
621 partition->kind = PKIND_NORMAL;
622 partition->datarefs = BITMAP_ALLOC (NULL);
623 return partition;
626 /* Free PARTITION. */
628 static void
629 partition_free (partition *partition)
631 BITMAP_FREE (partition->stmts);
632 BITMAP_FREE (partition->datarefs);
633 free (partition);
636 /* Returns true if the partition can be generated as a builtin. */
638 static bool
639 partition_builtin_p (partition *partition)
641 return partition->kind != PKIND_NORMAL;
644 /* Returns true if the partition contains a reduction. */
646 static bool
647 partition_reduction_p (partition *partition)
649 return partition->reduction_p;
652 /* Partitions are fused for different reasons. */
653 enum fuse_type
655 FUSE_NON_BUILTIN = 0,
656 FUSE_REDUCTION = 1,
657 FUSE_SHARE_REF = 2,
658 FUSE_SAME_SCC = 3,
659 FUSE_FINALIZE = 4
662 /* Descriptions of the different fusing reasons. */
663 static const char *fuse_message[] = {
664 "they are non-builtins",
665 "they have reductions",
666 "they have shared memory refs",
667 "they are in the same dependence scc",
668 "there is no point to distribute loop"};
670 static void
671 update_type_for_merge (struct graph *, partition *, partition *);
673 /* Merge PARTITION into the partition DEST. RDG is the reduced dependence
674 graph; if it is non-NULL, the type of the result partition is updated. */
676 static void
677 partition_merge_into (struct graph *rdg, partition *dest,
678 partition *partition, enum fuse_type ft)
680 if (dump_file && (dump_flags & TDF_DETAILS))
682 fprintf (dump_file, "Fuse partitions because %s:\n", fuse_message[ft]);
683 fprintf (dump_file, " Part 1: ");
684 dump_bitmap (dump_file, dest->stmts);
685 fprintf (dump_file, " Part 2: ");
686 dump_bitmap (dump_file, partition->stmts);
689 dest->kind = PKIND_NORMAL;
690 if (dest->type == PTYPE_PARALLEL)
691 dest->type = partition->type;
693 bitmap_ior_into (dest->stmts, partition->stmts);
694 if (partition_reduction_p (partition))
695 dest->reduction_p = true;
697 /* Further check if any data dependence prevents us from executing the
698 new partition in parallel. */
699 if (dest->type == PTYPE_PARALLEL && rdg != NULL)
700 update_type_for_merge (rdg, dest, partition);
702 bitmap_ior_into (dest->datarefs, partition->datarefs);
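/* For example (illustrative): merging a PTYPE_SEQUENTIAL partition into a
   PTYPE_PARALLEL DEST demotes DEST to sequential, and update_type_for_merge
   above may demote it as well; a merge never turns a sequential DEST back
   into a parallel one.  */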
706 /* Returns true when DEF is an SSA_NAME defined in LOOP and used after
707 the LOOP. */
709 static bool
710 ssa_name_has_uses_outside_loop_p (tree def, loop_p loop)
712 imm_use_iterator imm_iter;
713 use_operand_p use_p;
715 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, def)
717 gimple *use_stmt = USE_STMT (use_p);
718 if (!is_gimple_debug (use_stmt)
719 && loop != loop_containing_stmt (use_stmt))
720 return true;
723 return false;
726 /* Returns true when STMT defines a scalar variable used after the
727 loop LOOP. */
729 static bool
730 stmt_has_scalar_dependences_outside_loop (loop_p loop, gimple *stmt)
732 def_operand_p def_p;
733 ssa_op_iter op_iter;
735 if (gimple_code (stmt) == GIMPLE_PHI)
736 return ssa_name_has_uses_outside_loop_p (gimple_phi_result (stmt), loop);
738 FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
739 if (ssa_name_has_uses_outside_loop_p (DEF_FROM_PTR (def_p), loop))
740 return true;
742 return false;
745 /* Return a copy of LOOP placed before LOOP. */
747 static struct loop *
748 copy_loop_before (struct loop *loop)
750 struct loop *res;
751 edge preheader = loop_preheader_edge (loop);
753 initialize_original_copy_tables ();
754 res = slpeel_tree_duplicate_loop_to_edge_cfg (loop, NULL, preheader);
755 gcc_assert (res != NULL);
756 free_original_copy_tables ();
757 delete_update_ssa ();
759 return res;
762 /* Creates an empty basic block after LOOP. */
764 static void
765 create_bb_after_loop (struct loop *loop)
767 edge exit = single_exit (loop);
769 if (!exit)
770 return;
772 split_edge (exit);
775 /* Generate code for PARTITION from the code in LOOP. The loop is
776 copied when COPY_P is true. All the statements not flagged in the
777 PARTITION bitmap are removed from the loop or from its copy. The
778 statements are indexed in sequence inside a basic block, and the
779 basic blocks of a loop are taken in dom order. */
781 static void
782 generate_loops_for_partition (struct loop *loop, partition *partition,
783 bool copy_p)
785 unsigned i;
786 basic_block *bbs;
788 if (copy_p)
790 int orig_loop_num = loop->orig_loop_num;
791 loop = copy_loop_before (loop);
792 gcc_assert (loop != NULL);
793 loop->orig_loop_num = orig_loop_num;
794 create_preheader (loop, CP_SIMPLE_PREHEADERS);
795 create_bb_after_loop (loop);
797 else
799 /* Origin number is set to the new versioned loop's num. */
800 gcc_assert (loop->orig_loop_num != loop->num);
803 /* Remove stmts not in the PARTITION bitmap. */
804 bbs = get_loop_body_in_dom_order (loop);
806 if (MAY_HAVE_DEBUG_STMTS)
807 for (i = 0; i < loop->num_nodes; i++)
809 basic_block bb = bbs[i];
811 for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
812 gsi_next (&bsi))
814 gphi *phi = bsi.phi ();
815 if (!virtual_operand_p (gimple_phi_result (phi))
816 && !bitmap_bit_p (partition->stmts, gimple_uid (phi)))
817 reset_debug_uses (phi);
820 for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
822 gimple *stmt = gsi_stmt (bsi);
823 if (gimple_code (stmt) != GIMPLE_LABEL
824 && !is_gimple_debug (stmt)
825 && !bitmap_bit_p (partition->stmts, gimple_uid (stmt)))
826 reset_debug_uses (stmt);
830 for (i = 0; i < loop->num_nodes; i++)
832 basic_block bb = bbs[i];
833 edge inner_exit = NULL;
835 if (loop != bb->loop_father)
836 inner_exit = single_exit (bb->loop_father);
838 for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);)
840 gphi *phi = bsi.phi ();
841 if (!virtual_operand_p (gimple_phi_result (phi))
842 && !bitmap_bit_p (partition->stmts, gimple_uid (phi)))
843 remove_phi_node (&bsi, true);
844 else
845 gsi_next (&bsi);
848 for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);)
850 gimple *stmt = gsi_stmt (bsi);
851 if (gimple_code (stmt) != GIMPLE_LABEL
852 && !is_gimple_debug (stmt)
853 && !bitmap_bit_p (partition->stmts, gimple_uid (stmt)))
855 /* When distributing a loop nest, if bb is the inner loop's exit_bb,
856 we choose its exit edge/path in order to avoid generating an
857 infinite loop. For all other cases, we choose an arbitrary
858 path through the empty CFG part that this unnecessary
859 control stmt controls. */
860 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
862 if (inner_exit && inner_exit->flags & EDGE_TRUE_VALUE)
863 gimple_cond_make_true (cond_stmt);
864 else
865 gimple_cond_make_false (cond_stmt);
866 update_stmt (stmt);
868 else if (gimple_code (stmt) == GIMPLE_SWITCH)
870 gswitch *switch_stmt = as_a <gswitch *> (stmt);
871 gimple_switch_set_index
872 (switch_stmt, CASE_LOW (gimple_switch_label (switch_stmt, 1)));
873 update_stmt (stmt);
875 else
877 unlink_stmt_vdef (stmt);
878 gsi_remove (&bsi, true);
879 release_defs (stmt);
880 continue;
883 gsi_next (&bsi);
887 free (bbs);
890 /* Build the size argument for a memory operation call. */
892 static tree
893 build_size_arg_loc (location_t loc, data_reference_p dr, tree nb_iter,
894 bool plus_one)
896 tree size = fold_convert_loc (loc, sizetype, nb_iter);
897 if (plus_one)
898 size = size_binop (PLUS_EXPR, size, size_one_node);
899 size = fold_build2_loc (loc, MULT_EXPR, sizetype, size,
900 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr))));
901 size = fold_convert_loc (loc, size_type_node, size);
902 return size;
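/* Worked example (illustrative, assuming a 4-byte "int"): for a store to
   an int array with NB_ITER latch iterations and PLUS_ONE true, the size
   built above folds to (nb_iter + 1) * 4 bytes.  */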
905 /* Build an address argument for a memory operation call. */
907 static tree
908 build_addr_arg_loc (location_t loc, data_reference_p dr, tree nb_bytes)
910 tree addr_base;
912 addr_base = size_binop_loc (loc, PLUS_EXPR, DR_OFFSET (dr), DR_INIT (dr));
913 addr_base = fold_convert_loc (loc, sizetype, addr_base);
915 /* Test for a negative stride, iterating over every element. */
916 if (tree_int_cst_sgn (DR_STEP (dr)) == -1)
918 addr_base = size_binop_loc (loc, MINUS_EXPR, addr_base,
919 fold_convert_loc (loc, sizetype, nb_bytes));
920 addr_base = size_binop_loc (loc, PLUS_EXPR, addr_base,
921 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr))));
924 return fold_build_pointer_plus_loc (loc, DR_BASE_ADDRESS (dr), addr_base);
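/* E.g. (illustrative), for a negative-stride store

     for (i = n - 1; i >= 0; --i)
       a[i] = 0;

   the first access is a[n-1]; rewinding the address by NB_BYTES and adding
   back one element size, as done above, yields &a[0], the lowest address
   written, which is what the generated memset/memcpy call needs.  */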
927 /* If VAL's memory representation contains the same value in all bytes,
928 return that value, otherwise return -1.
929 E.g. for 0x24242424 return 0x24, for IEEE double
930 747708026454360457216.0 return 0x44, etc. */
932 static int
933 const_with_all_bytes_same (tree val)
935 unsigned char buf[64];
936 int i, len;
938 if (integer_zerop (val)
939 || (TREE_CODE (val) == CONSTRUCTOR
940 && !TREE_CLOBBER_P (val)
941 && CONSTRUCTOR_NELTS (val) == 0))
942 return 0;
944 if (real_zerop (val))
946 /* Only return 0 for +0.0, not for -0.0, which doesn't have
947 an all bytes same memory representation. Don't transform
948 -0.0 stores into +0.0 even for !HONOR_SIGNED_ZEROS. */
949 switch (TREE_CODE (val))
951 case REAL_CST:
952 if (!real_isneg (TREE_REAL_CST_PTR (val)))
953 return 0;
954 break;
955 case COMPLEX_CST:
956 if (!const_with_all_bytes_same (TREE_REALPART (val))
957 && !const_with_all_bytes_same (TREE_IMAGPART (val)))
958 return 0;
959 break;
960 case VECTOR_CST:
961 unsigned int j;
962 for (j = 0; j < VECTOR_CST_NELTS (val); ++j)
963 if (const_with_all_bytes_same (VECTOR_CST_ELT (val, j)))
964 break;
965 if (j == VECTOR_CST_NELTS (val))
966 return 0;
967 break;
968 default:
969 break;
973 if (CHAR_BIT != 8 || BITS_PER_UNIT != 8)
974 return -1;
976 len = native_encode_expr (val, buf, sizeof (buf));
977 if (len == 0)
978 return -1;
979 for (i = 1; i < len; i++)
980 if (buf[i] != buf[0])
981 return -1;
982 return buf[0];
985 /* Generate a call to memset for PARTITION in LOOP. */
987 static void
988 generate_memset_builtin (struct loop *loop, partition *partition)
990 gimple_stmt_iterator gsi;
991 gimple *stmt, *fn_call;
992 tree mem, fn, nb_bytes;
993 location_t loc;
994 tree val;
996 stmt = DR_STMT (partition->main_dr);
997 loc = gimple_location (stmt);
999 /* The new statements will be placed before LOOP. */
1000 gsi = gsi_last_bb (loop_preheader_edge (loop)->src);
1002 nb_bytes = build_size_arg_loc (loc, partition->main_dr, partition->niter,
1003 partition->plus_one);
1004 nb_bytes = force_gimple_operand_gsi (&gsi, nb_bytes, true, NULL_TREE,
1005 false, GSI_CONTINUE_LINKING);
1006 mem = build_addr_arg_loc (loc, partition->main_dr, nb_bytes);
1007 mem = force_gimple_operand_gsi (&gsi, mem, true, NULL_TREE,
1008 false, GSI_CONTINUE_LINKING);
1010 /* This exactly matches the pattern recognition in classify_partition. */
1011 val = gimple_assign_rhs1 (stmt);
1012 /* Handle constants like 0x15151515 and similarly
1013 floating point constants etc. where all bytes are the same. */
1014 int bytev = const_with_all_bytes_same (val);
1015 if (bytev != -1)
1016 val = build_int_cst (integer_type_node, bytev);
1017 else if (TREE_CODE (val) == INTEGER_CST)
1018 val = fold_convert (integer_type_node, val);
1019 else if (!useless_type_conversion_p (integer_type_node, TREE_TYPE (val)))
1021 tree tem = make_ssa_name (integer_type_node);
1022 gimple *cstmt = gimple_build_assign (tem, NOP_EXPR, val);
1023 gsi_insert_after (&gsi, cstmt, GSI_CONTINUE_LINKING);
1024 val = tem;
1027 fn = build_fold_addr_expr (builtin_decl_implicit (BUILT_IN_MEMSET));
1028 fn_call = gimple_build_call (fn, 3, mem, val, nb_bytes);
1029 gsi_insert_after (&gsi, fn_call, GSI_CONTINUE_LINKING);
1031 if (dump_file && (dump_flags & TDF_DETAILS))
1033 fprintf (dump_file, "generated memset");
1034 if (bytev == 0)
1035 fprintf (dump_file, " zero\n");
1036 else
1037 fprintf (dump_file, "\n");
1041 /* Generate a call to memcpy for PARTITION in LOOP. */
1043 static void
1044 generate_memcpy_builtin (struct loop *loop, partition *partition)
1046 gimple_stmt_iterator gsi;
1047 gimple *stmt, *fn_call;
1048 tree dest, src, fn, nb_bytes;
1049 location_t loc;
1050 enum built_in_function kind;
1052 stmt = DR_STMT (partition->main_dr);
1053 loc = gimple_location (stmt);
1055 /* The new statements will be placed before LOOP. */
1056 gsi = gsi_last_bb (loop_preheader_edge (loop)->src);
1058 nb_bytes = build_size_arg_loc (loc, partition->main_dr, partition->niter,
1059 partition->plus_one);
1060 nb_bytes = force_gimple_operand_gsi (&gsi, nb_bytes, true, NULL_TREE,
1061 false, GSI_CONTINUE_LINKING);
1062 dest = build_addr_arg_loc (loc, partition->main_dr, nb_bytes);
1063 src = build_addr_arg_loc (loc, partition->secondary_dr, nb_bytes);
1064 if (partition->kind == PKIND_MEMCPY
1065 || ! ptr_derefs_may_alias_p (dest, src))
1066 kind = BUILT_IN_MEMCPY;
1067 else
1068 kind = BUILT_IN_MEMMOVE;
1070 dest = force_gimple_operand_gsi (&gsi, dest, true, NULL_TREE,
1071 false, GSI_CONTINUE_LINKING);
1072 src = force_gimple_operand_gsi (&gsi, src, true, NULL_TREE,
1073 false, GSI_CONTINUE_LINKING);
1074 fn = build_fold_addr_expr (builtin_decl_implicit (kind));
1075 fn_call = gimple_build_call (fn, 3, dest, src, nb_bytes);
1076 gsi_insert_after (&gsi, fn_call, GSI_CONTINUE_LINKING);
1078 if (dump_file && (dump_flags & TDF_DETAILS))
1080 if (kind == BUILT_IN_MEMCPY)
1081 fprintf (dump_file, "generated memcpy\n");
1082 else
1083 fprintf (dump_file, "generated memmove\n");
1087 /* Remove and destroy the loop LOOP. */
1089 static void
1090 destroy_loop (struct loop *loop)
1092 unsigned nbbs = loop->num_nodes;
1093 edge exit = single_exit (loop);
1094 basic_block src = loop_preheader_edge (loop)->src, dest = exit->dest;
1095 basic_block *bbs;
1096 unsigned i;
1098 bbs = get_loop_body_in_dom_order (loop);
1100 redirect_edge_pred (exit, src);
1101 exit->flags &= ~(EDGE_TRUE_VALUE|EDGE_FALSE_VALUE);
1102 exit->flags |= EDGE_FALLTHRU;
1103 cancel_loop_tree (loop);
1104 rescan_loop_exit (exit, false, true);
1106 i = nbbs;
1109 /* We have made sure to not leave any dangling uses of SSA
1110 names defined in the loop, with the exception of virtuals.
1111 Make sure we replace all uses of virtual defs that will remain
1112 outside of the loop with the bare symbol as delete_basic_block
1113 will release them. */
1114 --i;
1115 for (gphi_iterator gsi = gsi_start_phis (bbs[i]); !gsi_end_p (gsi);
1116 gsi_next (&gsi))
1118 gphi *phi = gsi.phi ();
1119 if (virtual_operand_p (gimple_phi_result (phi)))
1120 mark_virtual_phi_result_for_renaming (phi);
1122 for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]); !gsi_end_p (gsi);
1123 gsi_next (&gsi))
1125 gimple *stmt = gsi_stmt (gsi);
1126 tree vdef = gimple_vdef (stmt);
1127 if (vdef && TREE_CODE (vdef) == SSA_NAME)
1128 mark_virtual_operand_for_renaming (vdef);
1130 delete_basic_block (bbs[i]);
1132 while (i != 0);
1134 free (bbs);
1136 set_immediate_dominator (CDI_DOMINATORS, dest,
1137 recompute_dominator (CDI_DOMINATORS, dest));
1140 /* Generates code for PARTITION. Return whether LOOP needs to be destroyed. */
1142 static bool
1143 generate_code_for_partition (struct loop *loop,
1144 partition *partition, bool copy_p)
1146 switch (partition->kind)
1148 case PKIND_NORMAL:
1149 /* Reductions all have to be in the last partition. */
1150 gcc_assert (!partition_reduction_p (partition)
1151 || !copy_p);
1152 generate_loops_for_partition (loop, partition, copy_p);
1153 return false;
1155 case PKIND_MEMSET:
1156 generate_memset_builtin (loop, partition);
1157 break;
1159 case PKIND_MEMCPY:
1160 case PKIND_MEMMOVE:
1161 generate_memcpy_builtin (loop, partition);
1162 break;
1164 default:
1165 gcc_unreachable ();
1168 /* Common tail for partitions we turn into a call. If this was the last
1169 partition for which we generate code, we have to destroy the loop. */
1170 if (!copy_p)
1171 return true;
1172 return false;
1175 /* Return the data dependence relation for data references A and B. The
1176 two data references must be in lexicographic order with respect to the
1177 reduced dependence graph RDG. We first try to find the ddr in the
1178 global hash table; if it doesn't exist, compute the ddr and cache it. */
1180 static data_dependence_relation *
1181 get_data_dependence (struct graph *rdg, data_reference_p a, data_reference_p b)
1183 struct data_dependence_relation ent, **slot;
1184 struct data_dependence_relation *ddr;
1186 gcc_assert (DR_IS_WRITE (a) || DR_IS_WRITE (b));
1187 gcc_assert (rdg_vertex_for_stmt (rdg, DR_STMT (a))
1188 <= rdg_vertex_for_stmt (rdg, DR_STMT (b)));
1189 ent.a = a;
1190 ent.b = b;
1191 slot = ddrs_table->find_slot (&ent, INSERT);
1192 if (*slot == NULL)
1194 ddr = initialize_data_dependence_relation (a, b, loop_nest);
1195 compute_affine_dependence (ddr, loop_nest[0]);
1196 *slot = ddr;
1199 return *slot;
1202 /* In the reduced dependence graph RDG for loop distribution, return true
1203 if the dependence between references DR1 and DR2 leads to a dependence
1204 cycle that can't be resolved by a runtime alias check. */
1206 static bool
1207 data_dep_in_cycle_p (struct graph *rdg,
1208 data_reference_p dr1, data_reference_p dr2)
1210 struct data_dependence_relation *ddr;
1212 /* Re-shuffle data-refs to be in topological order. */
1213 if (rdg_vertex_for_stmt (rdg, DR_STMT (dr1))
1214 > rdg_vertex_for_stmt (rdg, DR_STMT (dr2)))
1215 std::swap (dr1, dr2);
1217 ddr = get_data_dependence (rdg, dr1, dr2);
1219 /* In case of no data dependence. */
1220 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
1221 return false;
1222 /* For an unknown data dependence, or a known data dependence which can't
1223 be expressed as a classic distance vector, we check if it can be
1224 resolved by a runtime alias check. If yes, we consider the dependence
1225 as one that won't introduce a data dependence cycle. */
1226 else if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know
1227 || DDR_NUM_DIST_VECTS (ddr) == 0)
1228 return !runtime_alias_check_p (ddr, NULL, true);
1229 else if (DDR_NUM_DIST_VECTS (ddr) > 1)
1230 return true;
1231 else if (DDR_REVERSED_P (ddr)
1232 || lambda_vector_zerop (DDR_DIST_VECT (ddr, 0), 1))
1233 return false;
1235 return true;
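/* Illustrative example: in

     for (i = 1; i < n; i++)
       a[i] = a[i-1] + 1;

   the write to a[i] and the read of a[i-1] have the known distance
   vector (1), which is neither reversed nor zero, so this function
   returns true and the containing partition must stay sequential.  */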
1238 /* Given reduced dependence graph RDG, PARTITION1 and PARTITION2, update
1239 PARTITION1's type after merging PARTITION2 into PARTITION1. */
1241 static void
1242 update_type_for_merge (struct graph *rdg,
1243 partition *partition1, partition *partition2)
1245 unsigned i, j;
1246 bitmap_iterator bi, bj;
1247 data_reference_p dr1, dr2;
1249 EXECUTE_IF_SET_IN_BITMAP (partition1->datarefs, 0, i, bi)
1251 unsigned start = (partition1 == partition2) ? i + 1 : 0;
1253 dr1 = datarefs_vec[i];
1254 EXECUTE_IF_SET_IN_BITMAP (partition2->datarefs, start, j, bj)
1256 dr2 = datarefs_vec[j];
1257 if (DR_IS_READ (dr1) && DR_IS_READ (dr2))
1258 continue;
1260 /* Partition can only be executed sequentially if there is any
1261 data dependence cycle. */
1262 if (data_dep_in_cycle_p (rdg, dr1, dr2))
1264 partition1->type = PTYPE_SEQUENTIAL;
1265 return;
1271 /* Returns a partition with all the statements needed for computing
1272 the vertex V of the RDG, also including the loop exit conditions. */
1274 static partition *
1275 build_rdg_partition_for_vertex (struct graph *rdg, int v)
1277 partition *partition = partition_alloc ();
1278 auto_vec<int, 3> nodes;
1279 unsigned i, j;
1280 int x;
1281 data_reference_p dr;
1283 graphds_dfs (rdg, &v, 1, &nodes, false, NULL);
1285 FOR_EACH_VEC_ELT (nodes, i, x)
1287 bitmap_set_bit (partition->stmts, x);
1289 for (j = 0; RDG_DATAREFS (rdg, x).iterate (j, &dr); ++j)
1291 unsigned idx = (unsigned) DR_INDEX (dr);
1292 gcc_assert (idx < datarefs_vec.length ());
1294 /* Partition can only be executed sequentially if there is any
1295 unknown data reference. */
1296 if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr)
1297 || !DR_INIT (dr) || !DR_STEP (dr))
1298 partition->type = PTYPE_SEQUENTIAL;
1300 bitmap_set_bit (partition->datarefs, idx);
1304 if (partition->type == PTYPE_SEQUENTIAL)
1305 return partition;
1307 /* Further check if any data dependence prevents us from executing the
1308 partition in parallel. */
1309 update_type_for_merge (rdg, partition, partition);
1311 return partition;
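/* E.g. (a sketch): seeding at the store in "a[i] = b[i] + c" pulls in,
   via the backward DFS over RDG edges above, the statements computing
   b[i] + c as well as the loop exit condition controlling the store.  */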
1314 /* Classifies the builtin kind we can generate for PARTITION of RDG and LOOP.
1315 For the moment we detect memset, memcpy and memmove patterns. Bitmap
1316 STMT_IN_ALL_PARTITIONS contains statements belonging to all partitions. */
1318 static void
1319 classify_partition (loop_p loop, struct graph *rdg, partition *partition,
1320 bitmap stmt_in_all_partitions)
1322 bitmap_iterator bi;
1323 unsigned i;
1324 tree nb_iter;
1325 data_reference_p single_load, single_store;
1326 bool volatiles_p = false, plus_one = false, has_reduction = false;
1328 partition->kind = PKIND_NORMAL;
1329 partition->main_dr = NULL;
1330 partition->secondary_dr = NULL;
1331 partition->niter = NULL_TREE;
1332 partition->plus_one = false;
1334 EXECUTE_IF_SET_IN_BITMAP (partition->stmts, 0, i, bi)
1336 gimple *stmt = RDG_STMT (rdg, i);
1338 if (gimple_has_volatile_ops (stmt))
1339 volatiles_p = true;
1341 /* If the stmt is not included in all partitions and has uses outside
1342 of the loop, then mark the partition as a reduction. */
1343 if (stmt_has_scalar_dependences_outside_loop (loop, stmt))
1345 /* Due to a limitation in the transform phase we have to fuse all
1346 reduction partitions. As a result, this could cancel valid
1347 loop distribution, especially for a loop whose induction variable
1348 is used outside of the loop. To work around this issue, we skip
1349 marking the partition as a reduction if the reduction stmt belongs
1350 to all partitions. In such a case, the reduction will be computed
1351 correctly no matter how partitions are fused/distributed. */
1352 if (!bitmap_bit_p (stmt_in_all_partitions, i))
1354 partition->reduction_p = true;
1355 return;
1357 has_reduction = true;
1361 /* Perform general partition disqualification for builtins. */
1362 if (volatiles_p
1363 /* Simple workaround to prevent classifying the partition as builtin
1364 if it contains any use outside of loop. */
1365 || has_reduction
1366 || !flag_tree_loop_distribute_patterns)
1367 return;
1369 /* Detect memset and memcpy. */
1370 single_load = NULL;
1371 single_store = NULL;
1372 EXECUTE_IF_SET_IN_BITMAP (partition->stmts, 0, i, bi)
1374 gimple *stmt = RDG_STMT (rdg, i);
1375 data_reference_p dr;
1376 unsigned j;
1378 if (gimple_code (stmt) == GIMPLE_PHI)
1379 continue;
1381 /* Any scalar stmts are ok. */
1382 if (!gimple_vuse (stmt))
1383 continue;
1385 /* Otherwise just regular loads/stores. */
1386 if (!gimple_assign_single_p (stmt))
1387 return;
1389 /* But exactly one store and/or load. */
1390 for (j = 0; RDG_DATAREFS (rdg, i).iterate (j, &dr); ++j)
1392 tree type = TREE_TYPE (DR_REF (dr));
1394 /* The memset, memcpy and memmove library calls are only
1395 able to deal with generic address space. */
1396 if (!ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (type)))
1397 return;
1399 if (DR_IS_READ (dr))
1401 if (single_load != NULL)
1402 return;
1403 single_load = dr;
1405 else
1407 if (single_store != NULL)
1408 return;
1409 single_store = dr;
1414 if (!single_store)
1415 return;
1417 nb_iter = number_of_latch_executions (loop);
1418 gcc_assert (nb_iter && nb_iter != chrec_dont_know);
1419 if (dominated_by_p (CDI_DOMINATORS, single_exit (loop)->src,
1420 gimple_bb (DR_STMT (single_store))))
1421 plus_one = true;
1423 if (single_store && !single_load)
1425 gimple *stmt = DR_STMT (single_store);
1426 tree rhs = gimple_assign_rhs1 (stmt);
1427 if (const_with_all_bytes_same (rhs) == -1
1428 && (!INTEGRAL_TYPE_P (TREE_TYPE (rhs))
1429 || (TYPE_MODE (TREE_TYPE (rhs))
1430 != TYPE_MODE (unsigned_char_type_node))))
1431 return;
1432 if (TREE_CODE (rhs) == SSA_NAME
1433 && !SSA_NAME_IS_DEFAULT_DEF (rhs)
1434 && flow_bb_inside_loop_p (loop, gimple_bb (SSA_NAME_DEF_STMT (rhs))))
1435 return;
1436 if (!adjacent_dr_p (single_store)
1437 || !dominated_by_p (CDI_DOMINATORS,
1438 loop->latch, gimple_bb (stmt)))
1439 return;
1440 partition->kind = PKIND_MEMSET;
1441 partition->main_dr = single_store;
1442 partition->niter = nb_iter;
1443 partition->plus_one = plus_one;
1445 else if (single_store && single_load)
1447 gimple *store = DR_STMT (single_store);
1448 gimple *load = DR_STMT (single_load);
1449 /* Direct aggregate copy or via an SSA name temporary. */
1450 if (load != store
1451 && gimple_assign_lhs (load) != gimple_assign_rhs1 (store))
1452 return;
1453 if (!adjacent_dr_p (single_store)
1454 || !adjacent_dr_p (single_load)
1455 || !operand_equal_p (DR_STEP (single_store),
1456 DR_STEP (single_load), 0)
1457 || !dominated_by_p (CDI_DOMINATORS,
1458 loop->latch, gimple_bb (store)))
1459 return;
1460 /* Now check that if there is a dependence this dependence is
1461 of a suitable form for memmove. */
1462 ddr_p ddr = get_data_dependence (rdg, single_load, single_store);
1463 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
1464 return;
1466 if (DDR_ARE_DEPENDENT (ddr) != chrec_known)
1468 if (DDR_NUM_DIST_VECTS (ddr) == 0)
1469 return;
1471 lambda_vector dist_v;
1472 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
1474 int dist = dist_v[index_in_loop_nest (loop->num,
1475 DDR_LOOP_NEST (ddr))];
1476 if (dist > 0 && !DDR_REVERSED_P (ddr))
1477 return;
1479 partition->kind = PKIND_MEMMOVE;
1481 else
1482 partition->kind = PKIND_MEMCPY;
1483 partition->main_dr = single_store;
1484 partition->secondary_dr = single_load;
1485 partition->niter = nb_iter;
1486 partition->plus_one = plus_one;
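/* Illustrative examples (editor's sketch) of the patterns detected above:

     for (i = 0; i < n; i++) a[i] = c;         -> PKIND_MEMSET
     for (i = 0; i < n; i++) a[i] = b[i];      -> PKIND_MEMCPY when no
                                                  dependence is known
     for (i = 0; i < n; i++) a[i] = a[i + 1];  -> may become PKIND_MEMMOVE,
                                                  as the known overlap is
                                                  safe for memmove  */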
1490 /* Returns true when PARTITION1 and PARTITION2 access the same memory
1491 object in RDG. */
1493 static bool
1494 share_memory_accesses (struct graph *rdg,
1495 partition *partition1, partition *partition2)
1497 unsigned i, j;
1498 bitmap_iterator bi, bj;
1499 data_reference_p dr1, dr2;
1501 /* First check whether there are any loads or stores in the
1502 intersection of the two partitions. Common loads are the
1503 situation that happens most often. */
1504 EXECUTE_IF_AND_IN_BITMAP (partition1->stmts, partition2->stmts, 0, i, bi)
1505 if (RDG_MEM_WRITE_STMT (rdg, i)
1506 || RDG_MEM_READS_STMT (rdg, i))
1507 return true;
1509 /* Then check whether the two partitions access the same memory object. */
1510 EXECUTE_IF_SET_IN_BITMAP (partition1->datarefs, 0, i, bi)
1512 dr1 = datarefs_vec[i];
1514 if (!DR_BASE_ADDRESS (dr1)
1515 || !DR_OFFSET (dr1) || !DR_INIT (dr1) || !DR_STEP (dr1))
1516 continue;
1518 EXECUTE_IF_SET_IN_BITMAP (partition2->datarefs, 0, j, bj)
1520 dr2 = datarefs_vec[j];
1522 if (!DR_BASE_ADDRESS (dr2)
1523 || !DR_OFFSET (dr2) || !DR_INIT (dr2) || !DR_STEP (dr2))
1524 continue;
1526 if (operand_equal_p (DR_BASE_ADDRESS (dr1), DR_BASE_ADDRESS (dr2), 0)
1527 && operand_equal_p (DR_OFFSET (dr1), DR_OFFSET (dr2), 0)
1528 && operand_equal_p (DR_INIT (dr1), DR_INIT (dr2), 0)
1529 && operand_equal_p (DR_STEP (dr1), DR_STEP (dr2), 0))
1530 return true;
1534 return false;
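/* E.g. (illustrative): two partitions that both access a[i] with equal
   DR_BASE_ADDRESS, DR_OFFSET, DR_INIT and DR_STEP are reported as sharing
   a memory reference; the caller can then fuse them (FUSE_SHARE_REF) for
   better data reuse.  */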
1537 /* For each seed statement in STARTING_STMTS, this function builds a
1538 partition for it by adding dependent statements according to the RDG.
1539 All partitions are recorded in PARTITIONS. */
1541 static void
1542 rdg_build_partitions (struct graph *rdg,
1543 vec<gimple *> starting_stmts,
1544 vec<partition *> *partitions)
1546 auto_bitmap processed;
1547 int i;
1548 gimple *stmt;
1550 FOR_EACH_VEC_ELT (starting_stmts, i, stmt)
1552 int v = rdg_vertex_for_stmt (rdg, stmt);
1554 if (dump_file && (dump_flags & TDF_DETAILS))
1555 fprintf (dump_file,
1556 "ldist asked to generate code for vertex %d\n", v);
1558 /* If the vertex is already contained in another partition so
1559 is the partition rooted at it. */
1560 if (bitmap_bit_p (processed, v))
1561 continue;
1563 partition *partition = build_rdg_partition_for_vertex (rdg, v);
1564 bitmap_ior_into (processed, partition->stmts);
1566 if (dump_file && (dump_flags & TDF_DETAILS))
1568 fprintf (dump_file, "ldist creates useful %s partition:\n",
1569 partition->type == PTYPE_PARALLEL ? "parallel" : "sequent");
1570 bitmap_print (dump_file, partition->stmts, " ", "\n");
1573 partitions->safe_push (partition);
1576 /* All vertices should have been assigned to at least one partition now,
1577 other than vertices belonging to dead code. */
1580 /* Dump to FILE the PARTITIONS. */
1582 static void
1583 dump_rdg_partitions (FILE *file, vec<partition *> partitions)
1585 int i;
1586 partition *partition;
1588 FOR_EACH_VEC_ELT (partitions, i, partition)
1589 debug_bitmap_file (file, partition->stmts);
1592 /* Debug PARTITIONS. */
1593 extern void debug_rdg_partitions (vec<partition *> );
1595 DEBUG_FUNCTION void
1596 debug_rdg_partitions (vec<partition *> partitions)
1598 dump_rdg_partitions (stderr, partitions);
1601 /* Returns the number of read and write operations in the RDG. */
1603 static int
1604 number_of_rw_in_rdg (struct graph *rdg)
1606 int i, res = 0;
1608 for (i = 0; i < rdg->n_vertices; i++)
1610 if (RDG_MEM_WRITE_STMT (rdg, i))
1611 ++res;
1613 if (RDG_MEM_READS_STMT (rdg, i))
1614 ++res;
1617 return res;
1620 /* Returns the number of read and write operations in a PARTITION of
1621 the RDG. */
1623 static int
1624 number_of_rw_in_partition (struct graph *rdg, partition *partition)
1626 int res = 0;
1627 unsigned i;
1628 bitmap_iterator ii;
1630 EXECUTE_IF_SET_IN_BITMAP (partition->stmts, 0, i, ii)
1632 if (RDG_MEM_WRITE_STMT (rdg, i))
1633 ++res;
1635 if (RDG_MEM_READS_STMT (rdg, i))
1636 ++res;
1639 return res;
1642 /* Returns true when one of the PARTITIONS contains all the read or
1643 write operations of RDG. */
1645 static bool
1646 partition_contains_all_rw (struct graph *rdg,
1647 vec<partition *> partitions)
1649 int i;
1650 partition *partition;
1651 int nrw = number_of_rw_in_rdg (rdg);
1653 FOR_EACH_VEC_ELT (partitions, i, partition)
1654 if (nrw == number_of_rw_in_partition (rdg, partition))
1655 return true;
1657 return false;
1660 /* Compute the partition dependence created by the data references in
1661 DRS1 and DRS2, and modify and return DIR according to that. If
1662 ALIAS_DDRS is not NULL, we record dependences introduced by possible
1663 aliasing between two data references in ALIAS_DDRS; otherwise, we
1664 simply ignore such dependences, as if they didn't exist at all. */
1666 static int
1667 pg_add_dependence_edges (struct graph *rdg, int dir,
1668 bitmap drs1, bitmap drs2, vec<ddr_p> *alias_ddrs)
1670 unsigned i, j;
1671 bitmap_iterator bi, bj;
1672 data_reference_p dr1, dr2, saved_dr1;
1674 /* dependence direction - 0 is no dependence, -1 is back,
1675 1 is forth, 2 is both (we can stop then, merging will occur). */
1676 EXECUTE_IF_SET_IN_BITMAP (drs1, 0, i, bi)
1678 dr1 = datarefs_vec[i];
1680 EXECUTE_IF_SET_IN_BITMAP (drs2, 0, j, bj)
1682 int res, this_dir = 1;
1683 ddr_p ddr;
1685 dr2 = datarefs_vec[j];
1687 /* Skip all <read, read> data dependence. */
1688 if (DR_IS_READ (dr1) && DR_IS_READ (dr2))
1689 continue;
1691 saved_dr1 = dr1;
1692 /* Re-shuffle data-refs to be in topological order. */
1693 if (rdg_vertex_for_stmt (rdg, DR_STMT (dr1))
1694 > rdg_vertex_for_stmt (rdg, DR_STMT (dr2)))
1696 std::swap (dr1, dr2);
1697 this_dir = -this_dir;
1699 ddr = get_data_dependence (rdg, dr1, dr2);
1700 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
1702 this_dir = 0;
1703 res = data_ref_compare_tree (DR_BASE_ADDRESS (dr1),
1704 DR_BASE_ADDRESS (dr2));
1705 /* Be conservative. If the data references are not well analyzed,
1706 or the two data references have the same base address and
1707 offset, add the dependence and consider them aliases of each
1708 other. In other words, the dependence cannot be resolved by
1709 a runtime alias check. */
1710 if (!DR_BASE_ADDRESS (dr1) || !DR_BASE_ADDRESS (dr2)
1711 || !DR_OFFSET (dr1) || !DR_OFFSET (dr2)
1712 || !DR_INIT (dr1) || !DR_INIT (dr2)
1713 || !DR_STEP (dr1) || !tree_fits_uhwi_p (DR_STEP (dr1))
1714 || !DR_STEP (dr2) || !tree_fits_uhwi_p (DR_STEP (dr2))
1715 || res == 0)
1716 this_dir = 2;
1717 /* Data dependence could be resolved by runtime alias check,
1718 record it in ALIAS_DDRS. */
1719 else if (alias_ddrs != NULL)
1720 alias_ddrs->safe_push (ddr);
1721 /* Or simply ignore it. */
1723 else if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
1725 if (DDR_REVERSED_P (ddr))
1726 this_dir = -this_dir;
1728 /* Known dependences can still be unordered throughout the
1729 iteration space, see gcc.dg/tree-ssa/ldist-16.c. */
1730 if (DDR_NUM_DIST_VECTS (ddr) != 1)
1731 this_dir = 2;
1732 /* If the overlap is exact, preserve stmt order. */
1733 else if (lambda_vector_zerop (DDR_DIST_VECT (ddr, 0), 1))
1735 /* Else, as the distance vector is lexicographically positive, swap
1736 the dependence direction. */
1737 else
1738 this_dir = -this_dir;
1740 else
1741 this_dir = 0;
1742 if (this_dir == 2)
1743 return 2;
1744 else if (dir == 0)
1745 dir = this_dir;
1746 else if (this_dir != 0 && dir != this_dir)
1747 return 2;
1748 /* Shuffle "back" dr1. */
1749 dr1 = saved_dr1;
1752 return dir;
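/* Worked example (illustrative): if one reference pair requires partition 1
   to run before partition 2 (this_dir == 1) while another pair requires the
   opposite order (this_dir == -1), the loop above returns 2; the caller then
   treats the two partitions as a dependence cycle and fuses them.  */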
1755 /* Compare postorder number of the partition graph vertices V1 and V2. */
1757 static int
1758 pgcmp (const void *v1_, const void *v2_)
1760 const vertex *v1 = (const vertex *)v1_;
1761 const vertex *v2 = (const vertex *)v2_;
1762 return v2->post - v1->post;
1765 /* Data attached to vertices of partition dependence graph. */
1766 struct pg_vdata
1768 /* ID of the corresponding partition. */
1769 int id;
1770 /* The partition. */
1771 struct partition *partition;
1774 /* Data attached to edges of partition dependence graph. */
1775 struct pg_edata
1777 /* If the dependence edge can be resolved by runtime alias check,
1778 this vector contains data dependence relations for runtime alias
1779 check. On the other hand, if the dependence edge is introduced
1780 because of compilation time known data dependence, this vector
1781 contains nothing. */
1782 vec<ddr_p> alias_ddrs;
1785 /* Callback data for traversing edges in graph. */
1786 struct pg_edge_callback_data
1788 /* Bitmap of the strongly connected components that should be merged. */
1789 bitmap sccs_to_merge;
1790 /* Array containing component information for all vertices. */
1791 int *vertices_component;
1792 /* Vector to record all data dependence relations which are needed
1793 to break strongly connected components by runtime alias checks. */
1794 vec<ddr_p> *alias_ddrs;
1797 /* Initialize the vertices' data for partition dependence graph PG with
1798 PARTITIONS. */
1800 static void
1801 init_partition_graph_vertices (struct graph *pg,
1802 vec<struct partition *> *partitions)
1804 int i;
1805 partition *partition;
1806 struct pg_vdata *data;
1808 for (i = 0; partitions->iterate (i, &partition); ++i)
1810 data = new pg_vdata;
1811 pg->vertices[i].data = data;
1812 data->id = i;
1813 data->partition = partition;
1817 /* Add edge <I, J> to partition dependence graph PG. Attach vector of data
1818 dependence relations to the EDGE if DDRS isn't NULL. */
1820 static void
1821 add_partition_graph_edge (struct graph *pg, int i, int j, vec<ddr_p> *ddrs)
1823 struct graph_edge *e = add_edge (pg, i, j);
1825 /* If the edge is attached with data dependence relations, it means this
1826 dependence edge can be resolved by runtime alias checks. */
1827 if (ddrs != NULL)
1829 struct pg_edata *data = new pg_edata;
1831 gcc_assert (ddrs->length () > 0);
1832 e->data = data;
1833 data->alias_ddrs = vNULL;
1834 data->alias_ddrs.safe_splice (*ddrs);
1838 /* Callback function for the graph traversal algorithm. It returns true
1839 if edge E should be skipped when traversing the graph. */
1841 static bool
1842 pg_skip_alias_edge (struct graph_edge *e)
1844 struct pg_edata *data = (struct pg_edata *)e->data;
1845 return (data != NULL && data->alias_ddrs.length () > 0);
1848 /* Callback function freeing data attached to edge E of graph. */
1850 static void
1851 free_partition_graph_edata_cb (struct graph *, struct graph_edge *e, void *)
1853 if (e->data != NULL)
1855 struct pg_edata *data = (struct pg_edata *)e->data;
1856 data->alias_ddrs.release ();
1857 delete data;
1861 /* Free data attached to the vertices of partition dependence graph PG. */
1863 static void
1864 free_partition_graph_vdata (struct graph *pg)
1866 int i;
1867 struct pg_vdata *data;
1869 for (i = 0; i < pg->n_vertices; ++i)
1871 data = (struct pg_vdata *)pg->vertices[i].data;
1872 delete data;
1876 /* Build and return the partition dependence graph for PARTITIONS. RDG is
1877 the reduced dependence graph for the loop to be distributed. If
1878 IGNORE_ALIAS_P is true, data dependence caused by possible aliasing
1879 between references is ignored, as if it didn't exist at all; otherwise
1880 all dependences are considered. */
1882 static struct graph *
1883 build_partition_graph (struct graph *rdg,
1884 vec<struct partition *> *partitions,
1885 bool ignore_alias_p)
1887 int i, j;
1888 struct partition *partition1, *partition2;
1889 graph *pg = new_graph (partitions->length ());
1890 auto_vec<ddr_p> alias_ddrs, *alias_ddrs_p;
1892 alias_ddrs_p = ignore_alias_p ? NULL : &alias_ddrs;
1894 init_partition_graph_vertices (pg, partitions);
1896 for (i = 0; partitions->iterate (i, &partition1); ++i)
1898 for (j = i + 1; partitions->iterate (j, &partition2); ++j)
1900 /* dependence direction - 0 is no dependence, -1 is back,
1901 1 is forth, 2 is both (we can stop then, merging will occur). */
1902 int dir = 0;
1904 /* If the first partition has a reduction, add a back edge; if the
1905 second partition has a reduction, add a forth edge. This makes
1906 sure that the reduction partition will be sorted as the last one. */
1907 if (partition_reduction_p (partition1))
1908 dir = -1;
1909 else if (partition_reduction_p (partition2))
1910 dir = 1;
1912 /* Clean up the temporary vector. */
1913 alias_ddrs.truncate (0);
1915 dir = pg_add_dependence_edges (rdg, dir, partition1->datarefs,
1916 partition2->datarefs, alias_ddrs_p);
1918 /* Add an edge to the partition graph if a dependence exists. There
1919 are two types of edges. One type of edge is caused by a compilation
1920 time known dependence; this type cannot be resolved by a runtime
1921 alias check. The other type can be resolved by a runtime alias
1922 check. */
1923 if (dir == 1 || dir == 2
1924 || alias_ddrs.length () > 0)
1926 /* Attach data dependence relations to edge that can be resolved
1927 by runtime alias check. */
1928 bool alias_edge_p = (dir != 1 && dir != 2);
1929 add_partition_graph_edge (pg, i, j,
1930 (alias_edge_p) ? &alias_ddrs : NULL);
1932 if (dir == -1 || dir == 2
1933 || alias_ddrs.length () > 0)
1935 /* Attach data dependence relations to edge that can be resolved
1936 by runtime alias check. */
1937 bool alias_edge_p = (dir != -1 && dir != 2);
1938 add_partition_graph_edge (pg, j, i,
1939 (alias_edge_p) ? &alias_ddrs : NULL);
1943 return pg;
1946 /* Sort partitions in PG in descending post order and store them in
1947 PARTITIONS. */
1949 static void
1950 sort_partitions_by_post_order (struct graph *pg,
1951 vec<struct partition *> *partitions)
1953 int i;
1954 struct pg_vdata *data;
1956 /* Now order the remaining nodes in descending postorder. */
1957 qsort (pg->vertices, pg->n_vertices, sizeof (vertex), pgcmp);
1958 partitions->truncate (0);
1959 for (i = 0; i < pg->n_vertices; ++i)
1961 data = (struct pg_vdata *)pg->vertices[i].data;
1962 if (data->partition)
1963 partitions->safe_push (data->partition);
1967 /* Given reduced dependence graph RDG, merge strongly connected components
1968 of PARTITIONS. In this function, data dependences caused by possible
1969 aliasing between references are ignored, as if they don't exist at all. */
1971 static void
1972 merge_dep_scc_partitions (struct graph *rdg,
1973 vec<struct partition *> *partitions)
1975 struct partition *partition1, *partition2;
1976 struct pg_vdata *data;
1977 graph *pg = build_partition_graph (rdg, partitions, true);
1978 int i, j, num_sccs = graphds_scc (pg, NULL);
1980 /* A strongly connected component indicates a dependence cycle, which we
1981 cannot distribute. So fuse its partitions together. */
1982 if ((unsigned) num_sccs < partitions->length ())
1984 for (i = 0; i < num_sccs; ++i)
1986 for (j = 0; partitions->iterate (j, &partition1); ++j)
1987 if (pg->vertices[j].component == i)
1988 break;
1989 for (j = j + 1; partitions->iterate (j, &partition2); ++j)
1990 if (pg->vertices[j].component == i)
1992 partition_merge_into (NULL, partition1,
1993 partition2, FUSE_SAME_SCC);
1994 partition1->type = PTYPE_SEQUENTIAL;
1995 (*partitions)[j] = NULL;
1996 partition_free (partition2);
1997 data = (struct pg_vdata *)pg->vertices[j].data;
1998 data->partition = NULL;
2003 sort_partitions_by_post_order (pg, partitions);
2004 gcc_assert (partitions->length () == (unsigned)num_sccs);
2005 free_partition_graph_vdata (pg);
2006 free_graph (pg);
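/* Editor's sketch (illustrative, not part of the pass): a dependence cycle
   puts partitions into one SCC, which must be fused.  In

     for (i = 1; i < n; i++)
       {
         x[i] = y[i - 1] + 1;   /* partition P1 */
         y[i] = x[i] * 2;       /* partition P2 */
       }

   P2 reads x[i] defined by P1 in the same iteration, while P1 reads
   y[i - 1] defined by P2 in the previous iteration.  The "true" edges
   P1 -> P2 and P2 -> P1 form an SCC, so the partitions are merged with
   FUSE_SAME_SCC and the result is marked PTYPE_SEQUENTIAL.  */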
2009 /* Callback function for traversing edge E in graph G. DATA is private
2010 callback data. */
2012 static void
2013 pg_collect_alias_ddrs (struct graph *g, struct graph_edge *e, void *data)
2015 int i, j, component;
2016 struct pg_edge_callback_data *cbdata;
2017 struct pg_edata *edata = (struct pg_edata *) e->data;
2019 /* If the edge doesn't have attached data dependences, it represents
2020 compilation time known dependences. This type of dependence cannot
2021 be resolved by a runtime alias check. */
2022 if (edata == NULL || edata->alias_ddrs.length () == 0)
2023 return;
2025 cbdata = (struct pg_edge_callback_data *) data;
2026 i = e->src;
2027 j = e->dest;
2028 component = cbdata->vertices_component[i];
2029 /* Vertices are topologically sorted according to compilation time
2030 known dependences, so we can break strongly connected components
2031 by removing edges of the opposite direction, i.e., edges pointing
2032 from a vertex with a smaller post number to a vertex with a bigger
2033 post number. */
2034 if (g->vertices[i].post < g->vertices[j].post
2035 /* We only need to remove edges connecting vertices in the same
2036 strongly connected component to break it. */
2037 && component == cbdata->vertices_component[j]
2038 /* Check if we want to break the strong connected component or not. */
2039 && !bitmap_bit_p (cbdata->sccs_to_merge, component))
2040 cbdata->alias_ddrs->safe_splice (edata->alias_ddrs);
2043 /* This is the main function breaking strongly connected components in
2044 PARTITIONS, given reduced dependence graph RDG. Store data dependence
2045 relations for runtime alias checks in ALIAS_DDRS. */
2047 static void
2048 break_alias_scc_partitions (struct graph *rdg,
2049 vec<struct partition *> *partitions,
2050 vec<ddr_p> *alias_ddrs)
2052 int i, j, k, num_sccs, num_sccs_no_alias;
2053 /* Build partition dependence graph. */
2054 graph *pg = build_partition_graph (rdg, partitions, false);
2056 alias_ddrs->truncate (0);
2057 /* Find strongly connected components in the graph, with all dependence
2058 edges considered. */
2059 num_sccs = graphds_scc (pg, NULL);
2060 /* All remaining SCCs can now be broken by runtime alias checks, because
2061 SCCs caused by compilation time known dependences were merged before this function. */
2062 if ((unsigned) num_sccs < partitions->length ())
2064 struct pg_edge_callback_data cbdata;
2065 auto_bitmap sccs_to_merge;
2066 auto_vec<enum partition_type> scc_types;
2067 struct partition *partition, *first;
2069 /* If all partitions in an SCC have the same type, we can simply merge the
2070 SCC. This loop finds such SCCs and records them in the bitmap. */
2071 bitmap_set_range (sccs_to_merge, 0, (unsigned) num_sccs);
2072 for (i = 0; i < num_sccs; ++i)
2074 for (j = 0; partitions->iterate (j, &first); ++j)
2075 if (pg->vertices[j].component == i)
2076 break;
2077 for (++j; partitions->iterate (j, &partition); ++j)
2079 if (pg->vertices[j].component != i)
2080 continue;
2082 if (first->type != partition->type)
2084 bitmap_clear_bit (sccs_to_merge, i);
2085 break;
2090 /* Initialize callback data for traversing. */
2091 cbdata.sccs_to_merge = sccs_to_merge;
2092 cbdata.alias_ddrs = alias_ddrs;
2093 cbdata.vertices_component = XNEWVEC (int, pg->n_vertices);
2094 /* Record the component information, which will be corrupted by the next
2095 SCC-finding call on the graph. */
2096 for (i = 0; i < pg->n_vertices; ++i)
2097 cbdata.vertices_component[i] = pg->vertices[i].component;
2099 /* Collect data dependences for runtime alias checks to break SCCs. */
2100 if (bitmap_count_bits (sccs_to_merge) != (unsigned) num_sccs)
2102 /* Run the SCC finding algorithm again, with alias dependence edges
2103 skipped. This topologically sorts partitions according to
2104 compilation time known dependences. Note the topological order
2105 is stored in the form of pg's post order numbers. */
2106 num_sccs_no_alias = graphds_scc (pg, NULL, pg_skip_alias_edge);
2107 gcc_assert (partitions->length () == (unsigned) num_sccs_no_alias);
2108 /* With the topological order, we can construct two subgraphs L and R.
2109 L contains edge <x, y> where x < y in terms of post order, while
2110 R contains edge <x, y> where x > y. Edges for compilation time
2111 known dependences all fall in R, so we break SCCs by removing all
2112 (alias) edges in subgraph L; see the sketch after this function. */
2113 for_each_edge (pg, pg_collect_alias_ddrs, &cbdata);
2116 /* For each SCC that doesn't need to be broken, merge its partitions. */
2117 for (i = 0; i < num_sccs; ++i)
2119 if (!bitmap_bit_p (sccs_to_merge, i))
2120 continue;
2122 for (j = 0; partitions->iterate (j, &first); ++j)
2123 if (cbdata.vertices_component[j] == i)
2124 break;
2125 for (k = j + 1; partitions->iterate (k, &partition); ++k)
2127 struct pg_vdata *data;
2129 if (cbdata.vertices_component[k] != i)
2130 continue;
2132 /* Update postorder number so that merged reduction partition is
2133 sorted after other partitions. */
2134 if (!partition_reduction_p (first)
2135 && partition_reduction_p (partition))
2137 gcc_assert (pg->vertices[k].post < pg->vertices[j].post);
2138 pg->vertices[j].post = pg->vertices[k].post;
2140 partition_merge_into (NULL, first, partition, FUSE_SAME_SCC);
2141 (*partitions)[k] = NULL;
2142 partition_free (partition);
2143 data = (struct pg_vdata *)pg->vertices[k].data;
2144 gcc_assert (data->id == k);
2145 data->partition = NULL;
2150 sort_partitions_by_post_order (pg, partitions);
2151 free_partition_graph_vdata (pg);
2152 for_each_edge (pg, free_partition_graph_edata_cb, NULL);
2153 free_graph (pg);
2155 if (dump_file && (dump_flags & TDF_DETAILS))
2157 fprintf (dump_file, "Possible alias data dependence to break:\n");
2158 dump_data_dependence_relations (dump_file, *alias_ddrs);
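/* Editor's sketch (illustrative, not part of the pass): suppose p and q
   below might alias, while no compilation time known dependence connects
   the two partitions:

     for (i = 1; i < n; i++)
       {
         p[i] = s[i] + 1;        /* partition P0, parallel */
         t[i] = t[i - 1] + q[i]; /* partition P1, sequential */
       }

   The "alias" edges P0 <-> P1 form an SCC whose partitions have different
   types, so its bit in SCCS_TO_MERGE is cleared.  Once the second SCC pass
   (alias edges skipped) has assigned post order numbers, the alias edge
   pointing from the smaller to the larger post number lands in subgraph L
   and its ddrs are collected into ALIAS_DDRS, enabling a runtime check
   that p and q do not overlap.  */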
2162 /* Compute and return an expression whose value is the segment length which
2163 will be accessed by DR in NITERS iterations. */
2165 static tree
2166 data_ref_segment_size (struct data_reference *dr, tree niters)
2168 tree segment_length;
2170 if (integer_zerop (DR_STEP (dr)))
2171 segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
2172 else
2173 segment_length = size_binop (MULT_EXPR,
2174 fold_convert (sizetype, DR_STEP (dr)),
2175 fold_convert (sizetype, niters));
2177 return segment_length;
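/* Editor's worked example (illustrative): for a reference to "int"
   elements advancing by DR_STEP == 4 bytes per iteration, NITERS
   iterations access a segment of 4 * NITERS bytes; for a loop invariant
   reference (DR_STEP == 0) the segment is just TYPE_SIZE_UNIT of the
   accessed type, e.g. 4 bytes for a single int.  */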
2180 /* Return true if LOOP's latch is dominated by statement for data reference
2181 DR. */
2183 static inline bool
2184 latch_dominated_by_data_ref (struct loop *loop, data_reference *dr)
2186 return dominated_by_p (CDI_DOMINATORS, single_exit (loop)->src,
2187 gimple_bb (DR_STMT (dr)));
2190 /* Compute alias check pairs and store them in COMP_ALIAS_PAIRS for LOOP's
2191 data dependence relations ALIAS_DDRS. */
2193 static void
2194 compute_alias_check_pairs (struct loop *loop, vec<ddr_p> *alias_ddrs,
2195 vec<dr_with_seg_len_pair_t> *comp_alias_pairs)
2197 unsigned int i;
2198 unsigned HOST_WIDE_INT factor = 1;
2199 tree niters_plus_one, niters = number_of_latch_executions (loop);
2201 gcc_assert (niters != NULL_TREE && niters != chrec_dont_know);
2202 niters = fold_convert (sizetype, niters);
2203 niters_plus_one = size_binop (PLUS_EXPR, niters, size_one_node);
2205 if (dump_file && (dump_flags & TDF_DETAILS))
2206 fprintf (dump_file, "Creating alias check pairs:\n");
2208 /* Iterate all data dependence relations and compute alias check pairs. */
2209 for (i = 0; i < alias_ddrs->length (); i++)
2211 ddr_p ddr = (*alias_ddrs)[i];
2212 struct data_reference *dr_a = DDR_A (ddr);
2213 struct data_reference *dr_b = DDR_B (ddr);
2214 tree seg_length_a, seg_length_b;
2215 int comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (dr_a),
2216 DR_BASE_ADDRESS (dr_b));
2218 if (comp_res == 0)
2219 comp_res = data_ref_compare_tree (DR_OFFSET (dr_a), DR_OFFSET (dr_b));
2220 gcc_assert (comp_res != 0);
2222 if (latch_dominated_by_data_ref (loop, dr_a))
2223 seg_length_a = data_ref_segment_size (dr_a, niters_plus_one);
2224 else
2225 seg_length_a = data_ref_segment_size (dr_a, niters);
2227 if (latch_dominated_by_data_ref (loop, dr_b))
2228 seg_length_b = data_ref_segment_size (dr_b, niters_plus_one);
2229 else
2230 seg_length_b = data_ref_segment_size (dr_b, niters);
2232 dr_with_seg_len_pair_t dr_with_seg_len_pair
2233 (dr_with_seg_len (dr_a, seg_length_a),
2234 dr_with_seg_len (dr_b, seg_length_b));
2236 /* Canonicalize pairs by sorting the two DR members. */
2237 if (comp_res > 0)
2238 std::swap (dr_with_seg_len_pair.first, dr_with_seg_len_pair.second);
2240 comp_alias_pairs->safe_push (dr_with_seg_len_pair);
2243 if (tree_fits_uhwi_p (niters))
2244 factor = tree_to_uhwi (niters);
2246 /* Prune alias check pairs. */
2247 prune_runtime_alias_test_list (comp_alias_pairs, factor);
2248 if (dump_file && (dump_flags & TDF_DETAILS))
2249 fprintf (dump_file,
2250 "Improved number of alias checks from %d to %d\n",
2251 alias_ddrs->length (), comp_alias_pairs->length ());
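/* Editor's sketch (illustrative, not part of the pass): each pair of data
   references with segment lengths SEG_A and SEG_B is disambiguated at
   runtime by a no-overlap condition of the general shape

     addr_a + seg_a <= addr_b || addr_b + seg_b <= addr_a

   create_runtime_alias_checks (called by the versioning code below) builds
   and folds the actual condition from the pruned pairs computed above.  */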
2254 /* Given data dependence relations in ALIAS_DDRS, generate runtime alias
2255 checks and version LOOP under condition of these runtime alias checks. */
2257 static void
2258 version_loop_by_alias_check (struct loop *loop, vec<ddr_p> *alias_ddrs)
2260 profile_probability prob;
2261 basic_block cond_bb;
2262 struct loop *nloop;
2263 tree lhs, arg0, cond_expr = NULL_TREE;
2264 gimple_seq cond_stmts = NULL;
2265 gimple *call_stmt = NULL;
2266 auto_vec<dr_with_seg_len_pair_t> comp_alias_pairs;
2268 /* Generate code for runtime alias checks if necessary. */
2269 gcc_assert (alias_ddrs->length () > 0);
2271 if (dump_file && (dump_flags & TDF_DETAILS))
2272 fprintf (dump_file,
2273 "Version loop <%d> with runtime alias check\n", loop->num);
2275 compute_alias_check_pairs (loop, alias_ddrs, &comp_alias_pairs);
2276 create_runtime_alias_checks (loop, &comp_alias_pairs, &cond_expr);
2277 cond_expr = force_gimple_operand_1 (cond_expr, &cond_stmts,
2278 is_gimple_val, NULL_TREE);
2280 /* Depend on vectorizer to fold IFN_LOOP_DIST_ALIAS. */
2281 if (flag_tree_loop_vectorize)
2283 /* Generate internal function call for loop distribution alias check. */
2284 call_stmt = gimple_build_call_internal (IFN_LOOP_DIST_ALIAS,
2285 2, NULL_TREE, cond_expr);
2286 lhs = make_ssa_name (boolean_type_node);
2287 gimple_call_set_lhs (call_stmt, lhs);
2289 else
2290 lhs = cond_expr;
2292 prob = profile_probability::guessed_always ().apply_scale (9, 10);
2293 initialize_original_copy_tables ();
2294 nloop = loop_version (loop, lhs, &cond_bb, prob, prob.invert (),
2295 prob, prob.invert (), true);
2296 free_original_copy_tables ();
2297 /* Record the original loop number in newly generated loops. In case of
2298 distribution, the original loop will be distributed and the new loop
2299 is kept. */
2300 loop->orig_loop_num = nloop->num;
2301 nloop->orig_loop_num = nloop->num;
2302 nloop->dont_vectorize = true;
2303 nloop->force_vectorize = false;
2305 if (call_stmt)
2307 /* Record new loop's num in IFN_LOOP_DIST_ALIAS because the original
2308 loop could be destroyed. */
2309 arg0 = build_int_cst (integer_type_node, loop->orig_loop_num);
2310 gimple_call_set_arg (call_stmt, 0, arg0);
2311 gimple_seq_add_stmt_without_update (&cond_stmts, call_stmt);
2314 if (cond_stmts)
2316 gimple_stmt_iterator cond_gsi = gsi_last_bb (cond_bb);
2317 gsi_insert_seq_before (&cond_gsi, cond_stmts, GSI_SAME_STMT);
2319 update_ssa (TODO_update_ssa);
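/* Editor's sketch (illustrative, not part of the pass): after versioning,
   the CFG has the shape

     if (IFN_LOOP_DIST_ALIAS (orig_loop_num, <no-alias condition>))
       <original loop>   -- distributed afterwards
     else
       <copied loop>     -- dont_vectorize set; runs when references may overlap

   As noted above, the vectorizer is relied upon to fold the
   IFN_LOOP_DIST_ALIAS call later.  */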
2322 /* Return true if loop versioning is needed to distribute PARTITIONS.
2323 ALIAS_DDRS are data dependence relations for runtime alias checks. */
2325 static inline bool
2326 version_for_distribution_p (vec<struct partition *> *partitions,
2327 vec<ddr_p> *alias_ddrs)
2329 /* No need to version loop if we have only one partition. */
2330 if (partitions->length () == 1)
2331 return false;
2333 /* Need to version loop if runtime alias check is necessary. */
2334 return (alias_ddrs->length () > 0);
2337 /* Fuse all partitions if necessary before finalizing distribution. */
2339 static void
2340 finalize_partitions (vec<struct partition *> *partitions,
2341 vec<ddr_p> *alias_ddrs)
2343 unsigned i;
2344 struct partition *a, *partition;
2346 if (partitions->length () == 1
2347 || alias_ddrs->length () > 0)
2348 return;
2350 a = (*partitions)[0];
2351 if (a->kind != PKIND_NORMAL)
2352 return;
2354 for (i = 1; partitions->iterate (i, &partition); ++i)
2356 /* Don't fuse if a partition has a different type or is a builtin. */
2357 if (partition->type != a->type
2358 || partition->kind != PKIND_NORMAL)
2359 return;
2362 /* Fuse all partitions. */
2363 for (i = 1; partitions->iterate (i, &partition); ++i)
2365 partition_merge_into (NULL, a, partition, FUSE_FINALIZE);
2366 partition_free (partition);
2368 partitions->truncate (1);
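/* Editor's note (interpretation): if there is no builtin partition, no
   runtime alias check to version for, and all partitions have the same
   type, e.g. several PKIND_NORMAL parallel partitions, distribution alone
   is not expected to pay off, so everything is fused back into a single
   partition and the loop stays whole.  */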
2371 /* Distributes the code from LOOP in such a way that producer statements
2372 are placed before consumer statements. Tries to separate only the
2373 statements from STMTS into separate loops. Returns the number of
2374 distributed loops. Set NB_CALLS to number of generated builtin calls.
2375 Set *DESTROY_P to whether LOOP needs to be destroyed. */
2377 static int
2378 distribute_loop (struct loop *loop, vec<gimple *> stmts,
2379 control_dependences *cd, int *nb_calls, bool *destroy_p)
2381 ddrs_table = new hash_table<ddr_hasher> (389);
2382 struct graph *rdg;
2383 partition *partition;
2384 bool any_builtin;
2385 int i, nbp;
2387 *destroy_p = false;
2388 *nb_calls = 0;
2389 loop_nest.create (0);
2390 if (!find_loop_nest (loop, &loop_nest))
2392 loop_nest.release ();
2393 delete ddrs_table;
2394 return 0;
2397 datarefs_vec.create (20);
2398 rdg = build_rdg (loop, cd);
2399 if (!rdg)
2401 if (dump_file && (dump_flags & TDF_DETAILS))
2402 fprintf (dump_file,
2403 "Loop %d not distributed: failed to build the RDG.\n",
2404 loop->num);
2406 loop_nest.release ();
2407 free_data_refs (datarefs_vec);
2408 delete ddrs_table;
2409 return 0;
2412 if (datarefs_vec.length () > MAX_DATAREFS_NUM)
2414 if (dump_file && (dump_flags & TDF_DETAILS))
2415 fprintf (dump_file,
2416 "Loop %d not distributed: too many memory references.\n",
2417 loop->num);
2419 free_rdg (rdg);
2420 loop_nest.release ();
2421 free_data_refs (datarefs_vec);
2422 delete ddrs_table;
2423 return 0;
2426 data_reference_p dref;
2427 for (i = 0; datarefs_vec.iterate (i, &dref); ++i)
2428 dref->aux = (void *) (uintptr_t) i;
2430 if (dump_file && (dump_flags & TDF_DETAILS))
2431 dump_rdg (dump_file, rdg);
2433 auto_vec<struct partition *, 3> partitions;
2434 rdg_build_partitions (rdg, stmts, &partitions);
2436 auto_vec<ddr_p> alias_ddrs;
2438 auto_bitmap stmt_in_all_partitions;
2439 bitmap_copy (stmt_in_all_partitions, partitions[0]->stmts);
2440 for (i = 1; partitions.iterate (i, &partition); ++i)
2441 bitmap_and_into (stmt_in_all_partitions, partitions[i]->stmts);
2443 any_builtin = false;
2444 FOR_EACH_VEC_ELT (partitions, i, partition)
2446 classify_partition (loop, rdg, partition, stmt_in_all_partitions);
2447 any_builtin |= partition_builtin_p (partition);
2450 /* If we are only distributing patterns but did not detect any,
2451 simply bail out. */
2452 if (!flag_tree_loop_distribution
2453 && !any_builtin)
2455 nbp = 0;
2456 goto ldist_done;
2459 /* If we are only distributing patterns, fuse all partitions that
2460 were not classified as builtins. This also avoids chopping
2461 a loop into pieces separated by builtin calls. That is, we
2462 want either no loop body or a single one remaining. */
2463 struct partition *into;
2464 if (!flag_tree_loop_distribution)
2466 for (i = 0; partitions.iterate (i, &into); ++i)
2467 if (!partition_builtin_p (into))
2468 break;
2469 for (++i; partitions.iterate (i, &partition); ++i)
2470 if (!partition_builtin_p (partition))
2472 partition_merge_into (NULL, into, partition, FUSE_NON_BUILTIN);
2473 partitions.unordered_remove (i);
2474 partition_free (partition);
2475 i--;
2479 /* Due to limitations in the transform phase we have to fuse all
2480 reduction partitions into the last partition so the existing
2481 loop will contain all loop-closed PHI nodes. */
2482 for (i = 0; partitions.iterate (i, &into); ++i)
2483 if (partition_reduction_p (into))
2484 break;
2485 for (i = i + 1; partitions.iterate (i, &partition); ++i)
2486 if (partition_reduction_p (partition))
2488 partition_merge_into (rdg, into, partition, FUSE_REDUCTION);
2489 partitions.unordered_remove (i);
2490 partition_free (partition);
2491 i--;
2494 /* Apply our simple cost model - fuse partitions with similar
2495 memory accesses. */
2496 for (i = 0; partitions.iterate (i, &into); ++i)
2498 bool changed = false;
2499 if (partition_builtin_p (into))
2500 continue;
2501 for (int j = i + 1;
2502 partitions.iterate (j, &partition); ++j)
2504 if (share_memory_accesses (rdg, into, partition))
2506 partition_merge_into (rdg, into, partition, FUSE_SHARE_REF);
2507 partitions.unordered_remove (j);
2508 partition_free (partition);
2509 j--;
2510 changed = true;
2513 /* Suppose partitions 0 and 2 have similar accesses but 0 and 1 do
2514 not, so the first pass fuses 0 1 2 into 0,2 1. If 1 and 2 also
2515 have similar accesses, we would then fail to consider merging 1
2516 into 0,2 on the next iteration. So try again if we merged anything into 0. */
2517 if (changed)
2518 i--;
2521 /* Build the partition dependency graph. */
2522 if (partitions.length () > 1)
2524 merge_dep_scc_partitions (rdg, &partitions);
2525 alias_ddrs.truncate (0);
2526 if (partitions.length () > 1)
2527 break_alias_scc_partitions (rdg, &partitions, &alias_ddrs);
2530 finalize_partitions (&partitions, &alias_ddrs);
2532 nbp = partitions.length ();
2533 if (nbp == 0
2534 || (nbp == 1 && !partition_builtin_p (partitions[0]))
2535 || (nbp > 1 && partition_contains_all_rw (rdg, partitions)))
2537 nbp = 0;
2538 goto ldist_done;
2541 if (version_for_distribution_p (&partitions, &alias_ddrs))
2542 version_loop_by_alias_check (loop, &alias_ddrs);
2544 if (dump_file && (dump_flags & TDF_DETAILS))
2546 fprintf (dump_file,
2547 "distribute loop <%d> into partitions:\n", loop->num);
2548 dump_rdg_partitions (dump_file, partitions);
2551 FOR_EACH_VEC_ELT (partitions, i, partition)
2553 if (partition_builtin_p (partition))
2554 (*nb_calls)++;
2555 *destroy_p |= generate_code_for_partition (loop, partition, i < nbp - 1);
2558 ldist_done:
2559 loop_nest.release ();
2560 free_data_refs (datarefs_vec);
2561 for (hash_table<ddr_hasher>::iterator iter = ddrs_table->begin ();
2562 iter != ddrs_table->end (); ++iter)
2564 free_dependence_relation (*iter);
2565 *iter = NULL;
2567 delete ddrs_table;
2569 FOR_EACH_VEC_ELT (partitions, i, partition)
2570 partition_free (partition);
2572 free_rdg (rdg);
2573 return nbp - *nb_calls;
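/* Editor's sketch (illustrative, not part of the pass): a loop such as

     for (i = 0; i < n; i++)
       {
         x[i] = 0;      /* may be classified as a memset builtin partition */
         sum += y[i];   /* reduction partition, kept as a loop */
       }

   may be distributed into a memset call plus a residual reduction loop;
   in that case *NB_CALLS is set to 1 and the return value counts the
   remaining generated loops.  */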
2576 /* Distribute all loops in the current function. */
2578 namespace {
2580 const pass_data pass_data_loop_distribution =
2582 GIMPLE_PASS, /* type */
2583 "ldist", /* name */
2584 OPTGROUP_LOOP, /* optinfo_flags */
2585 TV_TREE_LOOP_DISTRIBUTION, /* tv_id */
2586 ( PROP_cfg | PROP_ssa ), /* properties_required */
2587 0, /* properties_provided */
2588 0, /* properties_destroyed */
2589 0, /* todo_flags_start */
2590 0, /* todo_flags_finish */
2593 class pass_loop_distribution : public gimple_opt_pass
2595 public:
2596 pass_loop_distribution (gcc::context *ctxt)
2597 : gimple_opt_pass (pass_data_loop_distribution, ctxt)
2600 /* opt_pass methods: */
2601 virtual bool gate (function *)
2603 return flag_tree_loop_distribution
2604 || flag_tree_loop_distribute_patterns;
2607 virtual unsigned int execute (function *);
2609 }; // class pass_loop_distribution
2611 unsigned int
2612 pass_loop_distribution::execute (function *fun)
2614 struct loop *loop;
2615 bool changed = false;
2616 basic_block bb;
2617 control_dependences *cd = NULL;
2618 auto_vec<loop_p> loops_to_be_destroyed;
2620 if (number_of_loops (fun) <= 1)
2621 return 0;
2623 /* Compute topological order for basic blocks. Topological order is
2624 needed because data dependence is computed for data references in
2625 lexicographical order. */
2626 if (bb_top_order_index == NULL)
2628 int rpo_num;
2629 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
2631 bb_top_order_index = XNEWVEC (int, last_basic_block_for_fn (cfun));
2632 bb_top_order_index_size = last_basic_block_for_fn (cfun);
2633 rpo_num = pre_and_rev_post_order_compute_fn (cfun, NULL, rpo, true);
2634 for (int i = 0; i < rpo_num; i++)
2635 bb_top_order_index[rpo[i]] = i;
2637 free (rpo);
2640 FOR_ALL_BB_FN (bb, fun)
2642 gimple_stmt_iterator gsi;
2643 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2644 gimple_set_uid (gsi_stmt (gsi), -1);
2645 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2646 gimple_set_uid (gsi_stmt (gsi), -1);
2649 /* We can at the moment only distribute non-nested loops, thus restrict
2650 walking to innermost loops. */
2651 FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
2653 auto_vec<gimple *> work_list;
2654 basic_block *bbs;
2655 int num = loop->num;
2656 unsigned int i;
2658 /* If the loop doesn't have a single exit we will fail anyway,
2659 so check that early. */
2660 if (!single_exit (loop))
2661 continue;
2663 /* Only optimize hot loops. */
2664 if (!optimize_loop_for_speed_p (loop))
2665 continue;
2667 /* Don't distribute loop if niters is unknown. */
2668 tree niters = number_of_latch_executions (loop);
2669 if (niters == NULL_TREE || niters == chrec_dont_know)
2670 continue;
2672 /* Initialize the worklist with stmts we seed the partitions with. */
2673 bbs = get_loop_body_in_dom_order (loop);
2674 for (i = 0; i < loop->num_nodes; ++i)
2676 for (gphi_iterator gsi = gsi_start_phis (bbs[i]);
2677 !gsi_end_p (gsi);
2678 gsi_next (&gsi))
2680 gphi *phi = gsi.phi ();
2681 if (virtual_operand_p (gimple_phi_result (phi)))
2682 continue;
2683 /* Distribute stmts which have defs that are used outside of
2684 the loop. */
2685 if (!stmt_has_scalar_dependences_outside_loop (loop, phi))
2686 continue;
2687 work_list.safe_push (phi);
2689 for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
2690 !gsi_end_p (gsi);
2691 gsi_next (&gsi))
2693 gimple *stmt = gsi_stmt (gsi);
2695 /* If there is a stmt with side-effects bail out - we
2696 cannot and should not distribute this loop. */
2697 if (gimple_has_side_effects (stmt))
2699 work_list.truncate (0);
2700 goto out;
2703 /* Distribute stmts which have defs that are used outside of
2704 the loop. */
2705 if (stmt_has_scalar_dependences_outside_loop (loop, stmt))
2707 /* Otherwise only distribute stores for now. */
2708 else if (!gimple_vdef (stmt))
2709 continue;
2711 work_list.safe_push (stmt);
2714 out:
2715 free (bbs);
2717 int nb_generated_loops = 0;
2718 int nb_generated_calls = 0;
2719 location_t loc = find_loop_location (loop);
2720 if (work_list.length () > 0)
2722 if (!cd)
2724 calculate_dominance_info (CDI_DOMINATORS);
2725 calculate_dominance_info (CDI_POST_DOMINATORS);
2726 cd = new control_dependences ();
2727 free_dominance_info (CDI_POST_DOMINATORS);
2729 bool destroy_p;
2730 nb_generated_loops = distribute_loop (loop, work_list, cd,
2731 &nb_generated_calls,
2732 &destroy_p);
2733 if (destroy_p)
2734 loops_to_be_destroyed.safe_push (loop);
2737 if (nb_generated_loops + nb_generated_calls > 0)
2739 changed = true;
2740 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS,
2741 loc, "Loop %d distributed: split to %d loops "
2742 "and %d library calls.\n",
2743 num, nb_generated_loops, nb_generated_calls);
2745 else if (dump_file && (dump_flags & TDF_DETAILS))
2746 fprintf (dump_file, "Loop %d is the same.\n", num);
2749 if (cd)
2750 delete cd;
2752 if (bb_top_order_index != NULL)
2754 free (bb_top_order_index);
2755 bb_top_order_index = NULL;
2756 bb_top_order_index_size = 0;
2759 if (changed)
2761 /* Destroy loop bodies that could not be reused. Do this late as we
2762 otherwise can end up referring to stale data in control dependences. */
2763 unsigned i;
2764 FOR_EACH_VEC_ELT (loops_to_be_destroyed, i, loop)
2765 destroy_loop (loop);
2767 /* Cached scalar evolutions may now refer to wrong or non-existing
2768 loops. */
2769 scev_reset_htab ();
2770 mark_virtual_operands_for_renaming (fun);
2771 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
2774 checking_verify_loop_structure ();
2776 return 0;
2779 } // anon namespace
2781 gimple_opt_pass *
2782 make_pass_loop_distribution (gcc::context *ctxt)
2784 return new pass_loop_distribution (ctxt);
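/* Editor's usage note: the pass is registered under the name "ldist" and is
   gated on -ftree-loop-distribution or -ftree-loop-distribute-patterns; for
   example

     gcc -O2 -ftree-loop-distribution -fdump-tree-ldist-details foo.c

   writes the partitioning and distribution decisions made above to the
   pass dump file.  */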