/* Expansion pass for OMP directives.  Outlines regions of certain OMP
   directives to separate functions, converts others into explicit calls to
   the runtime library (libgomp) and so forth.

   Copyright (C) 2005-2017 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-pass.h"
#include "pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "internal-fn.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-into-ssa.h"
#include "splay-tree.h"
#include "omp-general.h"
#include "omp-offload.h"
#include "tree-cfgcleanup.h"
#include "symbol-summary.h"
#include "gomp-constants.h"
#include "gimple-pretty-print.h"
#include "hsa-common.h"
/* OMP region information.  Every parallel and workshare
   directive is enclosed between two markers, the OMP_* directive
   and a corresponding GIMPLE_OMP_RETURN statement.  */

struct omp_region
{
  /* The enclosing region.  */
  struct omp_region *outer;

  /* First child region.  */
  struct omp_region *inner;

  /* Next peer region.  */
  struct omp_region *next;

  /* Block containing the omp directive as its last stmt.  */
  basic_block entry;

  /* Block containing the GIMPLE_OMP_RETURN as its last stmt.  */
  basic_block exit;

  /* Block containing the GIMPLE_OMP_CONTINUE as its last stmt.  */
  basic_block cont;

  /* If this is a combined parallel+workshare region, this is a list
     of additional arguments needed by the combined parallel+workshare
     library call.  */
  vec<tree, va_gc> *ws_args;

  /* The code for the omp directive of this region.  */
  enum gimple_code type;

  /* Schedule kind, only used for GIMPLE_OMP_FOR type regions.  */
  enum omp_clause_schedule_kind sched_kind;

  /* Schedule modifiers.  */
  unsigned char sched_modifiers;

  /* True if this is a combined parallel+workshare region.  */
  bool is_combined_parallel;

  /* The ordered stmt if type is GIMPLE_OMP_ORDERED and it has
     a depend clause.  */
  gomp_ordered *ord_stmt;
};

static struct omp_region *root_omp_region;
static bool omp_any_child_fn_dumped;

static void expand_omp_build_assign (gimple_stmt_iterator *, tree, tree,
				     bool = false);
static gphi *find_phi_with_arg_on_edge (tree, edge);
static void expand_omp (struct omp_region *region);
/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB.  If so, then we cannot emit the
   combined call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */
static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple *ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  omp_extract_for_data (as_a <gomp_for *> (ws_stmt), &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}
/* Adjust CHUNK_SIZE from SCHEDULE clause, depending on simd modifier
   presence (SIMD_SCHEDULE).  */

static tree
omp_adjust_chunk_size (tree chunk_size, bool simd_schedule)
{
  if (!simd_schedule)
    return chunk_size;

  int vf = omp_max_vf ();
  if (vf == 1)
    return chunk_size;

  tree type = TREE_TYPE (chunk_size);
  chunk_size = fold_build2 (PLUS_EXPR, type, chunk_size,
			    build_int_cst (type, vf - 1));
  return fold_build2 (BIT_AND_EXPR, type, chunk_size,
		      build_int_cst (type, -vf));
}
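
/* Illustration only (not part of the original sources): assuming
   omp_max_vf () returned a vectorization factor of 8, a chunk size of 13
   is rounded up to the next multiple of that factor:

	(13 + (8 - 1)) & -8  ==  20 & -8  ==  16

   so every chunk handed out by the runtime covers whole SIMD lanes.  */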
/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static vec<tree, va_gc> *
get_ws_args_for (gimple *par_stmt, gimple *ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gomp_for *for_stmt = dyn_cast <gomp_for *> (ws_stmt))
    {
      struct omp_for_data fd;
      tree n1, n2;

      omp_extract_for_data (for_stmt, &fd, NULL);
      n1 = fd.loop.n1;
      n2 = fd.loop.n2;

      if (gimple_omp_for_combined_into_p (for_stmt))
	{
	  tree innerc
	    = omp_find_clause (gimple_omp_parallel_clauses (par_stmt),
			       OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n1 = OMP_CLAUSE_DECL (innerc);
	  innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n2 = OMP_CLAUSE_DECL (innerc);
	}

      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  t = omp_adjust_chunk_size (t, fd.simd_schedule);
	  ws_args->quick_push (t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}
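
/* Illustration only (not part of the original sources): for a combined

	#pragma omp parallel for schedule (dynamic, 16)

   the vector built above would roughly hold { n1, n2, step, 16 }, i.e.
   the extra trailing arguments of the GOMP_parallel_loop_dynamic call
   emitted later by expand_parallel_call, while a parallel sections
   region contributes a single element holding the section count.  */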
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple *par_stmt = last_stmt (par_entry_bb);
      gimple *ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = omp_find_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || ((OMP_CLAUSE_SCHEDULE_KIND (c) & OMP_CLAUSE_SCHEDULE_MASK)
		  == OMP_CLAUSE_SCHEDULE_STATIC)
	      || omp_find_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
    }
}
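
/* Illustration only (not part of the original sources): given

	#pragma omp parallel for schedule (guided)	-> combined call
	#pragma omp parallel for schedule (static)	-> kept separate
	#pragma omp parallel for ordered		-> kept separate

   only the first region ends up with is_combined_parallel set; static
   schedules are open coded and ordered loops would need the extra
   synchronization described in the comment above.  */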
/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	     region->cont->index);

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}
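
/* Illustration only (not part of the original sources): for a parallel
   region enclosing a workshare loop, debug_omp_region would print
   something along the lines of

	bb 2: GIMPLE_OMP_PARALLEL
	    bb 3: GIMPLE_OMP_FOR
	    bb 5: GIMPLE_OMP_CONTINUE
	    bb 6: GIMPLE_OMP_RETURN
	bb 7: GIMPLE_OMP_RETURN

   with inner regions indented by four columns per nesting level; the
   block numbers are hypothetical.  */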
/* Create a new parallel region starting at STMT inside region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}
/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
omp_free_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}
/* A convenience function to build an empty GIMPLE_COND with just the
   condition.  */

static gcond *
gimple_build_cond_empty (tree cond)
{
  enum tree_code pred_code;
  tree lhs, rhs;

  gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
  return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
}
/* Return true if a parallel REGION is within a declare target function or
   within a target region and is not a part of a gridified target.  */

static bool
parallel_needs_hsa_kernel_p (struct omp_region *region)
{
  bool indirect = false;
  for (region = region->outer; region; region = region->outer)
    {
      if (region->type == GIMPLE_OMP_PARALLEL)
	indirect = true;
      else if (region->type == GIMPLE_OMP_TARGET)
	{
	  gomp_target *tgt_stmt
	    = as_a <gomp_target *> (last_stmt (region->entry));

	  if (omp_find_clause (gimple_omp_target_clauses (tgt_stmt),
			       OMP_CLAUSE__GRIDDIM_))
	    return indirect;
	  else
	    return true;
	}
    }

  if (lookup_attribute ("omp declare target",
			DECL_ATTRIBUTES (current_function_decl)))
    return true;
  else
    return false;
}
/* Build the function calls to GOMP_parallel_start etc to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where to insert the code.  WS_ARGS
   will be set if this is a call to a combined parallel+workshare
   construct, it contains the list of additional arguments needed by
   the workshare construct.  */

static void
expand_parallel_call (struct omp_region *region, basic_block bb,
		      gomp_parallel *entry_stmt,
		      vec<tree, va_gc> *ws_args)
{
  tree t, t1, t2, val, cond, c, clauses, flags;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  enum built_in_function start_ix;
  int start_ix2;
  location_t clause_loc;
  vec<tree, va_gc> *args;

  clauses = gimple_omp_parallel_clauses (entry_stmt);

  /* Determine what flavor of GOMP_parallel we will be
     emitting.  */
  start_ix = BUILT_IN_GOMP_PARALLEL;
  if (is_combined_parallel (region))
    {
      switch (region->inner->type)
	{
	case GIMPLE_OMP_FOR:
	  gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
	  switch (region->inner->sched_kind)
	    {
	    case OMP_CLAUSE_SCHEDULE_RUNTIME:
	      start_ix2 = 3;
	      break;
	    case OMP_CLAUSE_SCHEDULE_DYNAMIC:
	    case OMP_CLAUSE_SCHEDULE_GUIDED:
	      if (region->inner->sched_modifiers
		  & OMP_CLAUSE_SCHEDULE_NONMONOTONIC)
		{
		  start_ix2 = 3 + region->inner->sched_kind;
		  break;
		}
	      /* FALLTHRU */
	    default:
	      start_ix2 = region->inner->sched_kind;
	      break;
	    }
	  start_ix2 += (int) BUILT_IN_GOMP_PARALLEL_LOOP_STATIC;
	  start_ix = (enum built_in_function) start_ix2;
	  break;
	case GIMPLE_OMP_SECTIONS:
	  start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* By default, the value of NUM_THREADS is zero (selected at run time)
     and there is no conditional.  */
  cond = NULL_TREE;
  val = build_int_cst (unsigned_type_node, 0);
  flags = build_int_cst (unsigned_type_node, 0);

  c = omp_find_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);

  c = omp_find_clause (clauses, OMP_CLAUSE_NUM_THREADS);
  if (c)
    {
      val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
      clause_loc = OMP_CLAUSE_LOCATION (c);
    }
  else
    clause_loc = gimple_location (entry_stmt);

  c = omp_find_clause (clauses, OMP_CLAUSE_PROC_BIND);
  if (c)
    flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));

  /* Ensure 'val' is of the correct type.  */
  val = fold_convert_loc (clause_loc, unsigned_type_node, val);

  /* If we found the clause 'if (cond)', build either
     (cond != 0) or (cond ? val : 1u).  */
  if (cond)
    {
      cond = gimple_boolify (cond);

      if (integer_zerop (val))
	val = fold_build2_loc (clause_loc,
			       EQ_EXPR, unsigned_type_node, cond,
			       build_int_cst (TREE_TYPE (cond), 0));
      else
	{
	  basic_block cond_bb, then_bb, else_bb;
	  edge e, e_then, e_else;
	  tree tmp_then, tmp_else, tmp_join, tmp_var;

	  tmp_var = create_tmp_var (TREE_TYPE (val));
	  if (gimple_in_ssa_p (cfun))
	    {
	      tmp_then = make_ssa_name (tmp_var);
	      tmp_else = make_ssa_name (tmp_var);
	      tmp_join = make_ssa_name (tmp_var);
	    }
	  else
	    {
	      tmp_then = tmp_var;
	      tmp_else = tmp_var;
	      tmp_join = tmp_var;
	    }

	  e = split_block_after_labels (bb);
	  cond_bb = e->src;
	  bb = e->dest;
	  remove_edge (e);

	  then_bb = create_empty_bb (cond_bb);
	  else_bb = create_empty_bb (then_bb);
	  set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
	  set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);

	  stmt = gimple_build_cond_empty (cond);
	  gsi = gsi_start_bb (cond_bb);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  gsi = gsi_start_bb (then_bb);
	  expand_omp_build_assign (&gsi, tmp_then, val, true);

	  gsi = gsi_start_bb (else_bb);
	  expand_omp_build_assign (&gsi, tmp_else,
				   build_int_cst (unsigned_type_node, 1),
				   true);

	  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
	  make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
	  add_bb_to_loop (then_bb, cond_bb->loop_father);
	  add_bb_to_loop (else_bb, cond_bb->loop_father);
	  e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
	  e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);

	  if (gimple_in_ssa_p (cfun))
	    {
	      gphi *phi = create_phi_node (tmp_join, bb);
	      add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
	      add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
	    }

	  val = tmp_join;
	}

      gsi = gsi_start_bb (bb);
      val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
				      false, GSI_CONTINUE_LINKING);
    }

  gsi = gsi_last_bb (bb);
  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  tree child_fndecl = gimple_omp_parallel_child_fn (entry_stmt);
  t2 = build_fold_addr_expr (child_fndecl);

  vec_alloc (args, 4 + vec_safe_length (ws_args));
  args->quick_push (t2);
  args->quick_push (t1);
  args->quick_push (val);
  if (ws_args)
    args->splice (*ws_args);
  args->quick_push (flags);

  t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
			       builtin_decl_explicit (start_ix), args);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);

  if (hsa_gen_requested_p ()
      && parallel_needs_hsa_kernel_p (region))
    {
      cgraph_node *child_cnode = cgraph_node::get (child_fndecl);
      hsa_register_kernel (child_cnode);
    }
}
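
/* Illustration only (not part of the original sources): a plain

	#pragma omp parallel num_threads (4)
	{ body; }

   is expanded here into roughly

	GOMP_parallel (foo._omp_fn.0, &.omp_data_o.1, 4, 0);

   i.e. child function, shared-data frame address, the NUM_THREADS value
   and the flags word computed above; combined workshare regions splice
   their extra ws_args in front of the flags argument.  The child and
   frame names are hypothetical.  */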
/* Insert a function call whose name is FUNC_NAME with the information from
   ENTRY_STMT into the basic_block BB.  */

static void
expand_cilk_for_call (basic_block bb, gomp_parallel *entry_stmt,
		      vec<tree, va_gc> *ws_args)
{
  tree t, t1, t2;
  gimple_stmt_iterator gsi;
  vec<tree, va_gc> *args;

  gcc_assert (vec_safe_length (ws_args) == 2);
  tree func_name = (*ws_args)[0];
  tree grain = (*ws_args)[1];

  tree clauses = gimple_omp_parallel_clauses (entry_stmt);
  tree count = omp_find_clause (clauses, OMP_CLAUSE__CILK_FOR_COUNT_);
  gcc_assert (count != NULL_TREE);
  count = OMP_CLAUSE_OPERAND (count, 0);

  gsi = gsi_last_bb (bb);
  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));

  vec_alloc (args, 4);
  args->quick_push (t2);
  args->quick_push (t1);
  args->quick_push (count);
  args->quick_push (grain);
  t = build_call_expr_loc_vec (UNKNOWN_LOCATION, func_name, args);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false,
			    GSI_CONTINUE_LINKING);
}
/* Build the function call to GOMP_task to actually
   generate the task operation.  BB is the block where to insert the code.  */

static void
expand_task_call (struct omp_region *region, basic_block bb,
		  gomp_task *entry_stmt)
{
  tree t1, t2, t3;
  gimple_stmt_iterator gsi;
  location_t loc = gimple_location (entry_stmt);

  tree clauses = gimple_omp_task_clauses (entry_stmt);

  tree ifc = omp_find_clause (clauses, OMP_CLAUSE_IF);
  tree untied = omp_find_clause (clauses, OMP_CLAUSE_UNTIED);
  tree mergeable = omp_find_clause (clauses, OMP_CLAUSE_MERGEABLE);
  tree depend = omp_find_clause (clauses, OMP_CLAUSE_DEPEND);
  tree finalc = omp_find_clause (clauses, OMP_CLAUSE_FINAL);
  tree priority = omp_find_clause (clauses, OMP_CLAUSE_PRIORITY);

  unsigned int iflags
    = (untied ? GOMP_TASK_FLAG_UNTIED : 0)
      | (mergeable ? GOMP_TASK_FLAG_MERGEABLE : 0)
      | (depend ? GOMP_TASK_FLAG_DEPEND : 0);

  bool taskloop_p = gimple_omp_task_taskloop_p (entry_stmt);
  tree startvar = NULL_TREE, endvar = NULL_TREE, step = NULL_TREE;
  tree num_tasks = NULL_TREE;
  bool ull = false;
  if (taskloop_p)
    {
      gimple *g = last_stmt (region->outer->entry);
      gcc_assert (gimple_code (g) == GIMPLE_OMP_FOR
		  && gimple_omp_for_kind (g) == GF_OMP_FOR_KIND_TASKLOOP);
      struct omp_for_data fd;
      omp_extract_for_data (as_a <gomp_for *> (g), &fd, NULL);
      startvar = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
      endvar = omp_find_clause (OMP_CLAUSE_CHAIN (startvar),
				OMP_CLAUSE__LOOPTEMP_);
      startvar = OMP_CLAUSE_DECL (startvar);
      endvar = OMP_CLAUSE_DECL (endvar);
      step = fold_convert_loc (loc, fd.iter_type, fd.loop.step);
      if (fd.loop.cond_code == LT_EXPR)
	iflags |= GOMP_TASK_FLAG_UP;
      tree tclauses = gimple_omp_for_clauses (g);
      num_tasks = omp_find_clause (tclauses, OMP_CLAUSE_NUM_TASKS);
      if (num_tasks)
	num_tasks = OMP_CLAUSE_NUM_TASKS_EXPR (num_tasks);
      else
	{
	  num_tasks = omp_find_clause (tclauses, OMP_CLAUSE_GRAINSIZE);
	  if (num_tasks)
	    {
	      iflags |= GOMP_TASK_FLAG_GRAINSIZE;
	      num_tasks = OMP_CLAUSE_GRAINSIZE_EXPR (num_tasks);
	    }
	  else
	    num_tasks = integer_zero_node;
	}
      num_tasks = fold_convert_loc (loc, long_integer_type_node, num_tasks);
      if (ifc == NULL_TREE)
	iflags |= GOMP_TASK_FLAG_IF;
      if (omp_find_clause (tclauses, OMP_CLAUSE_NOGROUP))
	iflags |= GOMP_TASK_FLAG_NOGROUP;
      ull = fd.iter_type == long_long_unsigned_type_node;
    }
  else if (priority)
    iflags |= GOMP_TASK_FLAG_PRIORITY;

  tree flags = build_int_cst (unsigned_type_node, iflags);

  tree cond = boolean_true_node;
  if (ifc)
    {
      if (taskloop_p)
	{
	  tree t = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc));
	  t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t,
			       build_int_cst (unsigned_type_node,
					      GOMP_TASK_FLAG_IF),
			       build_int_cst (unsigned_type_node, 0));
	  flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node,
				   flags, t);
	}
      else
	cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc));
    }

  if (finalc)
    {
      tree t = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (finalc));
      t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t,
			   build_int_cst (unsigned_type_node,
					  GOMP_TASK_FLAG_FINAL),
			   build_int_cst (unsigned_type_node, 0));
      flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, t);
    }
  if (depend)
    depend = OMP_CLAUSE_DECL (depend);
  else
    depend = build_int_cst (ptr_type_node, 0);
  if (priority)
    priority = fold_convert (integer_type_node,
			     OMP_CLAUSE_PRIORITY_EXPR (priority));
  else
    priority = integer_zero_node;

  gsi = gsi_last_bb (bb);
  tree t = gimple_omp_task_data_arg (entry_stmt);
  if (t == NULL)
    t2 = null_pointer_node;
  else
    t2 = build_fold_addr_expr_loc (loc, t);
  t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
  t = gimple_omp_task_copy_fn (entry_stmt);
  if (t == NULL)
    t3 = null_pointer_node;
  else
    t3 = build_fold_addr_expr_loc (loc, t);

  if (taskloop_p)
    t = build_call_expr (ull
			 ? builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP_ULL)
			 : builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP),
			 11, t1, t2, t3,
			 gimple_omp_task_arg_size (entry_stmt),
			 gimple_omp_task_arg_align (entry_stmt), flags,
			 num_tasks, priority, startvar, endvar, step);
  else
    t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
			 9, t1, t2, t3,
			 gimple_omp_task_arg_size (entry_stmt),
			 gimple_omp_task_arg_align (entry_stmt), cond, flags,
			 depend, priority);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);
}
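
/* Illustration only (not part of the original sources): for

	#pragma omp task untied mergeable
	{ body; }

   the flags word assembled above would be
   GOMP_TASK_FLAG_UNTIED | GOMP_TASK_FLAG_MERGEABLE, and the block ends
   with a GOMP_task call carrying the child function, data frame, copy
   constructor, size and alignment of the frame, the if-clause
   condition, these flags, and the depend/priority arguments.  */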
/* Chain all the DECLs in LIST by their TREE_CHAIN fields.  */

static tree
vec2chain (vec<tree, va_gc> *v)
{
  tree chain = NULL_TREE, t;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
    {
      DECL_CHAIN (t) = chain;
      chain = t;
    }

  return chain;
}
/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
   left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
   removed.  */

static void
remove_exit_barrier (struct omp_region *region)
{
  gimple_stmt_iterator gsi;
  basic_block exit_bb;
  edge_iterator ei;
  edge e;
  gimple *stmt;
  int any_addressable_vars = -1;

  exit_bb = region->exit;

  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
  if (! exit_bb)
    return;

  /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN.  The
     workshare's GIMPLE_OMP_RETURN will be in a preceding block.  The kinds of
     statements that can appear in between are extremely limited -- no
     memory operations at all.  Here, we allow nothing at all, so the
     only thing we allow to precede this GIMPLE_OMP_RETURN is a label.  */
  gsi = gsi_last_bb (exit_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  gsi_prev (&gsi);
  if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
    return;

  FOR_EACH_EDGE (e, ei, exit_bb->preds)
    {
      gsi = gsi_last_bb (e->src);
      if (gsi_end_p (gsi))
	continue;
      stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) == GIMPLE_OMP_RETURN
	  && !gimple_omp_return_nowait_p (stmt))
	{
	  /* OpenMP 3.0 tasks unfortunately prevent this optimization
	     in many cases.  If there could be tasks queued, the barrier
	     might be needed to let the tasks run before some local
	     variable of the parallel that the task uses as shared
	     runs out of scope.  The task can be spawned either
	     from within current function (this would be easy to check)
	     or from some function it calls and gets passed an address
	     of such a variable.  */
	  if (any_addressable_vars < 0)
	    {
	      gomp_parallel *parallel_stmt
		= as_a <gomp_parallel *> (last_stmt (region->entry));
	      tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
	      tree local_decls, block, decl;
	      unsigned ix;

	      any_addressable_vars = 0;
	      FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
		if (TREE_ADDRESSABLE (decl))
		  {
		    any_addressable_vars = 1;
		    break;
		  }
	      for (block = gimple_block (stmt);
		   !any_addressable_vars
		   && block
		   && TREE_CODE (block) == BLOCK;
		   block = BLOCK_SUPERCONTEXT (block))
		{
		  for (local_decls = BLOCK_VARS (block);
		       local_decls;
		       local_decls = DECL_CHAIN (local_decls))
		    if (TREE_ADDRESSABLE (local_decls))
		      {
			any_addressable_vars = 1;
			break;
		      }
		  if (block == gimple_block (parallel_stmt))
		    break;
		}
	    }
	  if (!any_addressable_vars)
	    gimple_omp_return_set_nowait (stmt);
	}
    }
}
static void
remove_exit_barriers (struct omp_region *region)
{
  if (region->type == GIMPLE_OMP_PARALLEL)
    remove_exit_barrier (region);

  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
	{
	  region = region->next;
	  remove_exit_barriers (region);
	}
    }
}
/* Optimize omp_get_thread_num () and omp_get_num_threads ()
   calls.  These can't be declared as const functions, but
   within one parallel body they are constant, so they can be
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  Similarly for task body, except
   that in untied task omp_get_thread_num () can change at any task
   scheduling point.  */

static void
optimize_omp_library_calls (gimple *entry_stmt)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
  tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
  tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
  tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
  bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
		      && omp_find_clause (gimple_omp_task_clauses (entry_stmt),
					  OMP_CLAUSE_UNTIED) != NULL);

  FOR_EACH_BB_FN (bb, cfun)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple *call = gsi_stmt (gsi);
	tree decl;

	if (is_gimple_call (call)
	    && (decl = gimple_call_fndecl (call))
	    && DECL_EXTERNAL (decl)
	    && TREE_PUBLIC (decl)
	    && DECL_INITIAL (decl) == NULL)
	  {
	    tree built_in;

	    if (DECL_NAME (decl) == thr_num_id)
	      {
		/* In #pragma omp task untied omp_get_thread_num () can change
		   during the execution of the task region.  */
		if (untied_task)
		  continue;
		built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
	      }
	    else if (DECL_NAME (decl) == num_thr_id)
	      built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
	    else
	      continue;

	    if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
		|| gimple_call_num_args (call) != 0)
	      continue;

	    if (flag_exceptions && !TREE_NOTHROW (decl))
	      continue;

	    if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
		|| !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
					TREE_TYPE (TREE_TYPE (built_in))))
	      continue;

	    gimple_call_set_fndecl (call, built_in);
	  }
      }
}
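
/* Illustration only (not part of the original sources): inside an
   outlined parallel body a statement such as

	tid = omp_get_thread_num ();

   is retargeted by the loop above to the const builtin

	tid = __builtin_omp_get_thread_num ();

   which lets later passes CSE repeated queries; untied tasks keep the
   library call because the executing thread may change at task
   scheduling points.  */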
/* Callback for expand_omp_build_assign.  Return non-NULL if *tp needs to be
   regimplified.  */

static tree
expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (VAR_P (t) && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}
/* Prepend or append TO = FROM assignment before or after *GSI_P.  */

static void
expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from,
			 bool after)
{
  bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
  from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
				   !after, after ? GSI_CONTINUE_LINKING
						 : GSI_SAME_STMT);
  gimple *stmt = gimple_build_assign (to, from);
  if (after)
    gsi_insert_after (gsi_p, stmt, GSI_CONTINUE_LINKING);
  else
    gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
  if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
      || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      gimple_regimplify_operands (stmt, &gsi);
    }
}
/* Expand the OpenMP parallel or task directive starting at REGION.  */

static void
expand_omp_taskreg (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t;
  gimple_stmt_iterator gsi;
  gimple *entry_stmt, *stmt;
  edge e;
  vec<tree, va_gc> *ws_args;

  entry_stmt = last_stmt (region->entry);
  child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  entry_bb = region->entry;
  if (gimple_code (entry_stmt) == GIMPLE_OMP_TASK)
    exit_bb = region->cont;
  else
    exit_bb = region->exit;

  bool is_cilk_for
    = (flag_cilkplus
       && gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL
       && omp_find_clause (gimple_omp_parallel_clauses (entry_stmt),
			   OMP_CLAUSE__CILK_FOR_COUNT_) != NULL_TREE);

  if (is_cilk_for)
    /* If it is a _Cilk_for statement, it is modelled *like* a parallel for,
       and the inner statement contains the name of the built-in function
       and grain.  */
    ws_args = region->inner->ws_args;
  else if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL;

  if (child_cfun->cfg)
    {
      /* Due to inlining, it may happen that we have already outlined
	 the region, in which case all we need to do is make the
	 sub-graph unreachable and emit the parallel call.  */
      edge entry_succ_e, exit_succ_e;

      entry_succ_e = single_succ_edge (entry_bb);

      gsi = gsi_last_bb (entry_bb);
      gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
		  || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
      gsi_remove (&gsi, true);

      new_bb = entry_bb;
      if (exit_bb)
	{
	  exit_succ_e = single_succ_edge (exit_bb);
	  make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
	}
      remove_edge_and_dominated_blocks (entry_succ_e);
    }
  else
    {
      unsigned srcidx, dstidx, num;

      /* If the parallel region needs data sent from the parent
	 function, then the very first statement (except possible
	 tree profile counter updates) of the parallel body
	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
	 &.OMP_DATA_O is passed as an argument to the child function,
	 we need to replace it with the argument as seen by the child
	 function.

	 In most cases, this will end up being the identity assignment
	 .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
	 a function call that has been inlined, the original PARM_DECL
	 .OMP_DATA_I may have been converted into a different local
	 variable.  In which case, we need to keep the assignment.  */
      if (gimple_omp_taskreg_data_arg (entry_stmt))
	{
	  basic_block entry_succ_bb
	    = single_succ_p (entry_bb) ? single_succ (entry_bb)
				       : FALLTHRU_EDGE (entry_bb)->dest;
	  tree arg;
	  gimple *parcopy_stmt = NULL;

	  for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
	    {
	      gcc_assert (!gsi_end_p (gsi));
	      stmt = gsi_stmt (gsi);
	      if (gimple_code (stmt) != GIMPLE_ASSIGN)
		continue;

	      if (gimple_num_ops (stmt) == 2)
		{
		  tree arg = gimple_assign_rhs1 (stmt);

		  /* We're ignoring the subcode because we're
		     effectively doing a STRIP_NOPS.  */

		  if (TREE_CODE (arg) == ADDR_EXPR
		      && TREE_OPERAND (arg, 0)
			 == gimple_omp_taskreg_data_arg (entry_stmt))
		    {
		      parcopy_stmt = stmt;
		      break;
		    }
		}
	    }

	  gcc_assert (parcopy_stmt != NULL);
	  arg = DECL_ARGUMENTS (child_fn);

	  if (!gimple_in_ssa_p (cfun))
	    {
	      if (gimple_assign_lhs (parcopy_stmt) == arg)
		gsi_remove (&gsi, true);
	      else
		{
		  /* ?? Is setting the subcode really necessary ??  */
		  gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
		  gimple_assign_set_rhs1 (parcopy_stmt, arg);
		}
	    }
	  else
	    {
	      tree lhs = gimple_assign_lhs (parcopy_stmt);
	      gcc_assert (SSA_NAME_VAR (lhs) == arg);
	      /* We'd like to set the rhs to the default def in the child_fn,
		 but it's too early to create ssa names in the child_fn.
		 Instead, we set the rhs to the parm.  In
		 move_sese_region_to_fn, we introduce a default def for the
		 parm, map the parm to its default def, and once we encounter
		 this stmt, replace the parm with the default def.  */
	      gimple_assign_set_rhs1 (parcopy_stmt, arg);
	      update_stmt (parcopy_stmt);
	    }
	}

      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
      /* The gimplifier could record temporaries in parallel/task block
	 rather than in containing function's local_decls chain,
	 which would mean cgraph missed finalizing them.  Do it now.  */
      for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
	if (VAR_P (t) && TREE_STATIC (t) && !DECL_EXTERNAL (t))
	  varpool_node::finalize_decl (t);
      DECL_SAVED_TREE (child_fn) = NULL;
      /* We'll create a CFG for child_fn, so no gimple body is needed.  */
      gimple_set_body (child_fn, NULL);
      TREE_USED (block) = 1;

      /* Reset DECL_CONTEXT on function arguments.  */
      for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
	DECL_CONTEXT (t) = child_fn;

      /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
	 so that it can be moved to the child function.  */
      gsi = gsi_last_bb (entry_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
			   || gimple_code (stmt) == GIMPLE_OMP_TASK));
      e = split_block (entry_bb, stmt);
      gsi_remove (&gsi, true);
      entry_bb = e->dest;
      edge e2 = NULL;
      if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
	single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
      else
	{
	  e2 = make_edge (e->src, BRANCH_EDGE (entry_bb)->dest, EDGE_ABNORMAL);
	  gcc_assert (e2->dest == region->exit);
	  remove_edge (BRANCH_EDGE (entry_bb));
	  set_immediate_dominator (CDI_DOMINATORS, e2->dest, e->src);
	  gsi = gsi_last_bb (region->exit);
	  gcc_assert (!gsi_end_p (gsi)
		      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
	  gsi_remove (&gsi, true);
	}

      /* Convert GIMPLE_OMP_{RETURN,CONTINUE} into a RETURN_EXPR.  */
      if (exit_bb)
	{
	  gsi = gsi_last_bb (exit_bb);
	  gcc_assert (!gsi_end_p (gsi)
		      && (gimple_code (gsi_stmt (gsi))
			  == (e2 ? GIMPLE_OMP_CONTINUE : GIMPLE_OMP_RETURN)));
	  stmt = gimple_build_return (NULL);
	  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
	  gsi_remove (&gsi, true);
	}

      /* Move the parallel region into CHILD_CFUN.  */

      if (gimple_in_ssa_p (cfun))
	{
	  init_tree_ssa (child_cfun);
	  init_ssa_operands (child_cfun);
	  child_cfun->gimple_df->in_ssa_p = true;
	  block = NULL_TREE;
	}
      else
	block = gimple_block (entry_stmt);

      /* Make sure to generate early debug for the function before
	 outlining anything.  */
      if (! gimple_in_ssa_p (cfun))
	(*debug_hooks->early_global_decl) (cfun->decl);

      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
      if (exit_bb)
	single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
      if (e2)
	{
	  basic_block dest_bb = e2->dest;
	  if (!exit_bb)
	    make_edge (new_bb, dest_bb, EDGE_FALLTHRU);
	  remove_edge (e2);
	  set_immediate_dominator (CDI_DOMINATORS, dest_bb, new_bb);
	}
      /* When the OMP expansion process cannot guarantee an up-to-date
	 loop tree arrange for the child function to fixup loops.  */
      if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
	child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;

      /* Remove non-local VAR_DECLs from child_cfun->local_decls list.  */
      num = vec_safe_length (child_cfun->local_decls);
      for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
	{
	  t = (*child_cfun->local_decls)[srcidx];
	  if (DECL_CONTEXT (t) == cfun->decl)
	    continue;
	  if (srcidx != dstidx)
	    (*child_cfun->local_decls)[dstidx] = t;
	  dstidx++;
	}
      if (dstidx != num)
	vec_safe_truncate (child_cfun->local_decls, dstidx);

      /* Inform the callgraph about the new function.  */
      child_cfun->curr_properties = cfun->curr_properties;
      child_cfun->has_simduid_loops |= cfun->has_simduid_loops;
      child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops;
      cgraph_node *node = cgraph_node::get_create (child_fn);
      node->parallelized_function = 1;
      cgraph_node::add_new_function (child_fn, true);

      bool need_asm = DECL_ASSEMBLER_NAME_SET_P (current_function_decl)
		      && !DECL_ASSEMBLER_NAME_SET_P (child_fn);

      /* Fix the callgraph edges for child_cfun.  Those for cfun will be
	 fixed in a following pass.  */
      push_cfun (child_cfun);
      if (need_asm)
	assign_assembler_name_if_needed (child_fn);

      if (optimize)
	optimize_omp_library_calls (entry_stmt);
      cgraph_edge::rebuild_edges ();

      /* Some EH regions might become dead, see PR34608.  If
	 pass_cleanup_cfg isn't the first pass to happen with the
	 new child, these dead EH edges might cause problems.
	 Clean them up now.  */
      if (flag_exceptions)
	{
	  basic_block bb;
	  bool changed = false;

	  FOR_EACH_BB_FN (bb, cfun)
	    changed |= gimple_purge_dead_eh_edges (bb);
	  if (changed)
	    cleanup_tree_cfg ();
	}
      if (gimple_in_ssa_p (cfun))
	update_ssa (TODO_update_ssa);
      if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
	verify_loop_structure ();
      pop_cfun ();

      if (dump_file && !gimple_in_ssa_p (cfun))
	{
	  omp_any_child_fn_dumped = true;
	  dump_function_header (dump_file, child_fn, dump_flags);
	  dump_function_to_file (child_fn, dump_file, dump_flags);
	}
    }

  /* Emit a library call to launch the children threads.  */
  if (is_cilk_for)
    expand_cilk_for_call (new_bb,
			  as_a <gomp_parallel *> (entry_stmt), ws_args);
  else if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
    expand_parallel_call (region, new_bb,
			  as_a <gomp_parallel *> (entry_stmt), ws_args);
  else
    expand_task_call (region, new_bb, as_a <gomp_task *> (entry_stmt));
  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_only_virtuals);
}
/* Information about members of an OpenACC collapsed loop nest.  */

struct oacc_collapse
{
  tree base;  /* Base value.  */
  tree iters; /* Number of steps.  */
  tree step;  /* step size.  */
};

/* Helper for expand_oacc_for.  Determine collapsed loop information.
   Fill in COUNTS array.  Emit any initialization code before GSI.
   Return the calculated outer loop bound of BOUND_TYPE.  */
static tree
expand_oacc_collapse_init (const struct omp_for_data *fd,
			   gimple_stmt_iterator *gsi,
			   oacc_collapse *counts, tree bound_type)
{
  tree total = build_int_cst (bound_type, 1);
  int ix;

  gcc_assert (integer_onep (fd->loop.step));
  gcc_assert (integer_zerop (fd->loop.n1));

  for (ix = 0; ix != fd->collapse; ix++)
    {
      const omp_for_data_loop *loop = &fd->loops[ix];

      tree iter_type = TREE_TYPE (loop->v);
      tree diff_type = iter_type;
      tree plus_type = iter_type;

      gcc_assert (loop->cond_code == fd->loop.cond_code);

      if (POINTER_TYPE_P (iter_type))
	plus_type = sizetype;
      if (POINTER_TYPE_P (diff_type) || TYPE_UNSIGNED (diff_type))
	diff_type = signed_type_for (diff_type);

      tree b = loop->n1;
      tree e = loop->n2;
      tree s = loop->step;
      bool up = loop->cond_code == LT_EXPR;
      tree dir = build_int_cst (diff_type, up ? +1 : -1);
      bool negating;
      tree expr;

      b = force_gimple_operand_gsi (gsi, b, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      e = force_gimple_operand_gsi (gsi, e, true, NULL_TREE,
				    true, GSI_SAME_STMT);

      /* Convert the step, avoiding possible unsigned->signed overflow.  */
      negating = !up && TYPE_UNSIGNED (TREE_TYPE (s));
      if (negating)
	s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s);
      s = fold_convert (diff_type, s);
      if (negating)
	s = fold_build1 (NEGATE_EXPR, diff_type, s);
      s = force_gimple_operand_gsi (gsi, s, true, NULL_TREE,
				    true, GSI_SAME_STMT);

      /* Determine the range, avoiding possible unsigned->signed overflow.  */
      negating = !up && TYPE_UNSIGNED (iter_type);
      expr = fold_build2 (MINUS_EXPR, plus_type,
			  fold_convert (plus_type, negating ? b : e),
			  fold_convert (plus_type, negating ? e : b));
      expr = fold_convert (diff_type, expr);
      if (negating)
	expr = fold_build1 (NEGATE_EXPR, diff_type, expr);
      tree range = force_gimple_operand_gsi
	(gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT);

      /* Determine number of iterations.  */
      expr = fold_build2 (MINUS_EXPR, diff_type, range, dir);
      expr = fold_build2 (PLUS_EXPR, diff_type, expr, s);
      expr = fold_build2 (TRUNC_DIV_EXPR, diff_type, expr, s);

      tree iters = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE,
					     true, GSI_SAME_STMT);

      counts[ix].base = b;
      counts[ix].iters = iters;
      counts[ix].step = s;

      total = fold_build2 (MULT_EXPR, bound_type, total,
			   fold_convert (bound_type, iters));
    }

  return total;
}
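
/* Illustration only (not part of the original sources): for a collapsed
   member running over  i = 0; i < 10; i += 3  the computation above gives

	range = 10 - 0 = 10
	iters = (range - dir + step) / step = (10 - 1 + 3) / 3 = 4

   i.e. the four iterations i = 0, 3, 6, 9; the per-member counts are
   multiplied together into the single outer loop bound TOTAL.  */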
/* Emit initializers for collapsed loop members.  IVAR is the outer
   loop iteration variable, from which collapsed loop iteration values
   are calculated.  COUNTS array has been initialized by
   expand_oacc_collapse_init.  */

static void
expand_oacc_collapse_vars (const struct omp_for_data *fd,
			   gimple_stmt_iterator *gsi,
			   const oacc_collapse *counts, tree ivar)
{
  tree ivar_type = TREE_TYPE (ivar);

  /* The most rapidly changing iteration variable is the innermost
     one.  */
  for (int ix = fd->collapse; ix--;)
    {
      const omp_for_data_loop *loop = &fd->loops[ix];
      const oacc_collapse *collapse = &counts[ix];
      tree iter_type = TREE_TYPE (loop->v);
      tree diff_type = TREE_TYPE (collapse->step);
      tree plus_type = iter_type;
      enum tree_code plus_code = PLUS_EXPR;
      tree expr;

      if (POINTER_TYPE_P (iter_type))
	{
	  plus_code = POINTER_PLUS_EXPR;
	  plus_type = sizetype;
	}

      expr = fold_build2 (TRUNC_MOD_EXPR, ivar_type, ivar,
			  fold_convert (ivar_type, collapse->iters));
      expr = fold_build2 (MULT_EXPR, diff_type, fold_convert (diff_type, expr),
			  collapse->step);
      expr = fold_build2 (plus_code, iter_type, collapse->base,
			  fold_convert (plus_type, expr));
      expr = force_gimple_operand_gsi (gsi, expr, false, NULL_TREE,
				       true, GSI_SAME_STMT);
      gassign *ass = gimple_build_assign (loop->v, expr);
      gsi_insert_before (gsi, ass, GSI_SAME_STMT);

      if (ix)
	{
	  expr = fold_build2 (TRUNC_DIV_EXPR, ivar_type, ivar,
			      fold_convert (ivar_type, collapse->iters));
	  ivar = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE,
					   true, GSI_SAME_STMT);
	}
    }
}
/* Helper function for expand_omp_{for_*,simd}.  If this is the outermost
   of the combined collapse > 1 loop constructs, generate code like:
	if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
	if (cond3 is <)
	  adj = STEP3 - 1;
	else
	  adj = STEP3 + 1;
	count3 = (adj + N32 - N31) / STEP3;
	if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
	if (cond2 is <)
	  adj = STEP2 - 1;
	else
	  adj = STEP2 + 1;
	count2 = (adj + N22 - N21) / STEP2;
	if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
	if (cond1 is <)
	  adj = STEP1 - 1;
	else
	  adj = STEP1 + 1;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
   Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
	count = 0;
   and set ZERO_ITER_BB to that bb.  If this isn't the outermost
   of the combined loop constructs, just initialize COUNTS array
   from the _looptemp_ clauses.  */

/* NOTE: It *could* be better to moosh all of the BBs together,
   creating one larger BB with all the computation and the unexpected
   jump at the end.  I.e.

   bool zero3, zero2, zero1, zero;

   zero3 = N32 c3 N31;
   count3 = (N32 - N31) /[cl] STEP3;
   zero2 = N22 c2 N21;
   count2 = (N22 - N21) /[cl] STEP2;
   zero1 = N12 c1 N11;
   count1 = (N12 - N11) /[cl] STEP1;
   zero = zero3 || zero2 || zero1;
   count = count1 * count2 * count3;
   if (__builtin_expect(zero, false)) goto zero_iter_bb;

   After all, we expect the zero=false, and thus we expect to have to
   evaluate all of the comparison expressions, so short-circuiting
   oughtn't be a win.  Since the condition isn't protecting a
   denominator, we're not concerned about divide-by-zero, so we can
   fully evaluate count even if a numerator turned out to be wrong.

   It seems like putting this all together would create much better
   scheduling opportunities, and less pressure on the chip's branch
   predictor.  */
static void
expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
			    basic_block &entry_bb, tree *counts,
			    basic_block &zero_iter1_bb, int &first_zero_iter1,
			    basic_block &zero_iter2_bb, int &first_zero_iter2,
			    basic_block &l2_dom_bb)
{
  tree t, type = TREE_TYPE (fd->loop.v);
  edge e, ne;
  int i;

  /* Collapsed loops need work for expansion into SSA form.  */
  gcc_assert (!gimple_in_ssa_p (cfun));

  if (gimple_omp_for_combined_into_p (fd->for_stmt)
      && TREE_CODE (fd->loop.n2) != INTEGER_CST)
    {
      gcc_assert (fd->ordered == 0);
      /* First two _looptemp_ clauses are for istart/iend, counts[0]
	 isn't supposed to be handled, as the inner loop doesn't
	 use it.  */
      tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
				     OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      for (i = 0; i < fd->collapse; i++)
	{
	  innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  if (i)
	    counts[i] = OMP_CLAUSE_DECL (innerc);
	  else
	    counts[0] = NULL_TREE;
	}
      return;
    }

  for (i = fd->collapse; i < fd->ordered; i++)
    {
      tree itype = TREE_TYPE (fd->loops[i].v);
      counts[i] = NULL_TREE;
      t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
		       fold_convert (itype, fd->loops[i].n1),
		       fold_convert (itype, fd->loops[i].n2));
      if (t && integer_zerop (t))
	{
	  for (i = fd->collapse; i < fd->ordered; i++)
	    counts[i] = build_int_cst (type, 0);
	  break;
	}
    }
  for (i = 0; i < (fd->ordered ? fd->ordered : fd->collapse); i++)
    {
      tree itype = TREE_TYPE (fd->loops[i].v);

      if (i >= fd->collapse && counts[i])
	continue;
      if ((SSA_VAR_P (fd->loop.n2) || i >= fd->collapse)
	  && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
				fold_convert (itype, fd->loops[i].n1),
				fold_convert (itype, fd->loops[i].n2)))
	      == NULL_TREE || !integer_onep (t)))
	{
	  gcond *cond_stmt;
	  tree n1, n2;
	  n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
	  n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
					 true, GSI_SAME_STMT);
	  n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
	  n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
					 true, GSI_SAME_STMT);
	  cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
					 NULL_TREE, NULL_TREE);
	  gsi_insert_before (gsi, cond_stmt, GSI_SAME_STMT);
	  if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
			 expand_omp_regimplify_p, NULL, NULL)
	      || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
			    expand_omp_regimplify_p, NULL, NULL))
	    {
	      *gsi = gsi_for_stmt (cond_stmt);
	      gimple_regimplify_operands (cond_stmt, gsi);
	    }
	  e = split_block (entry_bb, cond_stmt);
	  basic_block &zero_iter_bb
	    = i < fd->collapse ? zero_iter1_bb : zero_iter2_bb;
	  int &first_zero_iter
	    = i < fd->collapse ? first_zero_iter1 : first_zero_iter2;
	  if (zero_iter_bb == NULL)
	    {
	      gassign *assign_stmt;
	      first_zero_iter = i;
	      zero_iter_bb = create_empty_bb (entry_bb);
	      add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
	      *gsi = gsi_after_labels (zero_iter_bb);
	      if (i < fd->collapse)
		assign_stmt = gimple_build_assign (fd->loop.n2,
						   build_zero_cst (type));
	      else
		{
		  counts[i] = create_tmp_reg (type, ".count");
		  assign_stmt
		    = gimple_build_assign (counts[i], build_zero_cst (type));
		}
	      gsi_insert_before (gsi, assign_stmt, GSI_SAME_STMT);
	      set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
				       entry_bb);
	    }
	  ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
	  ne->probability = REG_BR_PROB_BASE / 2000 - 1;
	  e->flags = EDGE_TRUE_VALUE;
	  e->probability = REG_BR_PROB_BASE - ne->probability;
	  if (l2_dom_bb == NULL)
	    l2_dom_bb = entry_bb;
	  entry_bb = e->dest;
	  *gsi = gsi_last_bb (entry_bb);
	}

      if (POINTER_TYPE_P (itype))
	itype = signed_type_for (itype);
      t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
				 ? -1 : 1));
      t = fold_build2 (PLUS_EXPR, itype,
		       fold_convert (itype, fd->loops[i].step), t);
      t = fold_build2 (PLUS_EXPR, itype, t,
		       fold_convert (itype, fd->loops[i].n2));
      t = fold_build2 (MINUS_EXPR, itype, t,
		       fold_convert (itype, fd->loops[i].n1));
      /* ?? We could probably use CEIL_DIV_EXPR instead of
	 TRUNC_DIV_EXPR and adjusting by hand.  Unless we can't
	 generate the same code in the end because generically we
	 don't know that the values involved must be negative for
	 GT??  */
      if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
	t = fold_build2 (TRUNC_DIV_EXPR, itype,
			 fold_build1 (NEGATE_EXPR, itype, t),
			 fold_build1 (NEGATE_EXPR, itype,
				      fold_convert (itype,
						    fd->loops[i].step)));
      else
	t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
			 fold_convert (itype, fd->loops[i].step));
      t = fold_convert (type, t);
      if (TREE_CODE (t) == INTEGER_CST)
	counts[i] = t;
      else
	{
	  if (i < fd->collapse || i != first_zero_iter2)
	    counts[i] = create_tmp_reg (type, ".count");
	  expand_omp_build_assign (gsi, counts[i], t);
	}
      if (SSA_VAR_P (fd->loop.n2) && i < fd->collapse)
	{
	  if (i == 0)
	    t = counts[0];
	  else
	    t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
	  expand_omp_build_assign (gsi, fd->loop.n2, t);
	}
    }
}
/* Helper function for expand_omp_{for_*,simd}.  Generate code like:
	T = V;
	V3 = N31 + (T % count3) * STEP3;
	T = T / count3;
	V2 = N21 + (T % count2) * STEP2;
	T = T / count2;
	V1 = N11 + T * STEP1;
   if this loop doesn't have an inner loop construct combined with it.
   If it does have an inner loop construct combined with it and the
   iteration count isn't known constant, store values from counts array
   into its _looptemp_ temporaries instead.  */

static void
expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
			  tree *counts, gimple *inner_stmt, tree startvar)
{
  int i;
  if (gimple_omp_for_combined_p (fd->for_stmt))
    {
      /* If fd->loop.n2 is constant, then no propagation of the counts
	 is needed, they are constant.  */
      if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
	return;

      tree clauses = gimple_code (inner_stmt) != GIMPLE_OMP_FOR
		     ? gimple_omp_taskreg_clauses (inner_stmt)
		     : gimple_omp_for_clauses (inner_stmt);
      /* First two _looptemp_ clauses are for istart/iend, counts[0]
	 isn't supposed to be handled, as the inner loop doesn't
	 use it.  */
      tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      for (i = 0; i < fd->collapse; i++)
	{
	  innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  if (i)
	    {
	      tree tem = OMP_CLAUSE_DECL (innerc);
	      tree t = fold_convert (TREE_TYPE (tem), counts[i]);
	      t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
					    false, GSI_CONTINUE_LINKING);
	      gassign *stmt = gimple_build_assign (tem, t);
	      gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
	    }
	}
      return;
    }

  tree type = TREE_TYPE (fd->loop.v);
  tree tem = create_tmp_reg (type, ".tem");
  gassign *stmt = gimple_build_assign (tem, startvar);
  gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);

  for (i = fd->collapse - 1; i >= 0; i--)
    {
      tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
      itype = vtype;
      if (POINTER_TYPE_P (vtype))
	itype = signed_type_for (vtype);
      if (i != 0)
	t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
      else
	t = tem;
      t = fold_convert (itype, t);
      t = fold_build2 (MULT_EXPR, itype, t,
		       fold_convert (itype, fd->loops[i].step));
      if (POINTER_TYPE_P (vtype))
	t = fold_build_pointer_plus (fd->loops[i].n1, t);
      else
	t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
      t = force_gimple_operand_gsi (gsi, t,
				    DECL_P (fd->loops[i].v)
				    && TREE_ADDRESSABLE (fd->loops[i].v),
				    NULL_TREE, false,
				    GSI_CONTINUE_LINKING);
      stmt = gimple_build_assign (fd->loops[i].v, t);
      gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
      if (i != 0)
	{
	  t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
	  t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
					false, GSI_CONTINUE_LINKING);
	  stmt = gimple_build_assign (tem, t);
	  gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
	}
    }
}
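
/* Illustration only (not part of the original sources): with collapse(2),
   count2 = 5 and a chunk starting at the flattened index T = 13, the
   sequence above recovers the member iteration variables as

	V2 = N21 + (13 % 5) * STEP2;	-> inner index 3
	T  = 13 / 5;			-> 2
	V1 = N11 + 2 * STEP1;		-> outer index 2

   mirroring the row-major linearisation used when the counts were
   multiplied together.  */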
/* Helper function for expand_omp_for_*.  Generate code like:
    L10:
	V3 += STEP3;
	if (V3 cond3 N32) goto BODY_BB; else goto L11;
    L11:
	V3 = N31;
	V2 += STEP2;
	if (V2 cond2 N22) goto BODY_BB; else goto L12;
    L12:
	V2 = N21;
	V1 += STEP1;
	goto BODY_BB;  */

static basic_block
extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
			     basic_block body_bb)
{
  basic_block last_bb, bb, collapse_bb = NULL;
  int i;
  gimple_stmt_iterator gsi;
  edge e;
  tree t;
  gimple *stmt;

  last_bb = cont_bb;
  for (i = fd->collapse - 1; i >= 0; i--)
    {
      tree vtype = TREE_TYPE (fd->loops[i].v);

      bb = create_empty_bb (last_bb);
      add_bb_to_loop (bb, last_bb->loop_father);
      gsi = gsi_start_bb (bb);

      if (i < fd->collapse - 1)
	{
	  e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
	  e->probability = REG_BR_PROB_BASE / 8;

	  t = fd->loops[i + 1].n1;
	  t = force_gimple_operand_gsi (&gsi, t,
					DECL_P (fd->loops[i + 1].v)
					&& TREE_ADDRESSABLE (fd->loops[i
								       + 1].v),
					NULL_TREE, false,
					GSI_CONTINUE_LINKING);
	  stmt = gimple_build_assign (fd->loops[i + 1].v, t);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	}
      else
	collapse_bb = bb;

      set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);

      if (POINTER_TYPE_P (vtype))
	t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
      else
	t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
      t = force_gimple_operand_gsi (&gsi, t,
				    DECL_P (fd->loops[i].v)
				    && TREE_ADDRESSABLE (fd->loops[i].v),
				    NULL_TREE, false, GSI_CONTINUE_LINKING);
      stmt = gimple_build_assign (fd->loops[i].v, t);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      if (i > 0)
	{
	  t = fd->loops[i].n2;
	  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
					false, GSI_CONTINUE_LINKING);
	  tree v = fd->loops[i].v;
	  if (DECL_P (v) && TREE_ADDRESSABLE (v))
	    v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
					  false, GSI_CONTINUE_LINKING);
	  t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
	  stmt = gimple_build_cond_empty (t);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	  e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
	  e->probability = REG_BR_PROB_BASE * 7 / 8;
	}
      else
	make_edge (bb, body_bb, EDGE_FALLTHRU);
      last_bb = bb;
    }

  return collapse_bb;
}
/* Expand #pragma omp ordered depend(source).  */

static void
expand_omp_ordered_source (gimple_stmt_iterator *gsi, struct omp_for_data *fd,
			   tree *counts, location_t loc)
{
  enum built_in_function source_ix
    = fd->iter_type == long_integer_type_node
      ? BUILT_IN_GOMP_DOACROSS_POST : BUILT_IN_GOMP_DOACROSS_ULL_POST;
  gimple *g
    = gimple_build_call (builtin_decl_explicit (source_ix), 1,
			 build_fold_addr_expr (counts[fd->ordered]));
  gimple_set_location (g, loc);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
}
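
/* Illustration only (not part of the original sources): inside a doacross
   loop nest,

	#pragma omp ordered depend(source)

   becomes a call to GOMP_doacross_post (or the _ull variant for unsigned
   long long iterators) passing the address of the counts array slot that
   holds the current iteration vector.  */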
/* Expand a single depend from #pragma omp ordered depend(sink:...).  */

static void
expand_omp_ordered_sink (gimple_stmt_iterator *gsi, struct omp_for_data *fd,
			 tree *counts, tree c, location_t loc)
{
  auto_vec<tree, 10> args;
  enum built_in_function sink_ix
    = fd->iter_type == long_integer_type_node
      ? BUILT_IN_GOMP_DOACROSS_WAIT : BUILT_IN_GOMP_DOACROSS_ULL_WAIT;
  tree t, off, coff = NULL_TREE, deps = OMP_CLAUSE_DECL (c), cond = NULL_TREE;
  int i;
  gimple_stmt_iterator gsi2 = *gsi;
  bool warned_step = false;

  for (i = 0; i < fd->ordered; i++)
    {
      tree step = NULL_TREE;
      off = TREE_PURPOSE (deps);
      if (TREE_CODE (off) == TRUNC_DIV_EXPR)
	{
	  step = TREE_OPERAND (off, 1);
	  off = TREE_OPERAND (off, 0);
	}
      if (!integer_zerop (off))
	{
	  gcc_assert (fd->loops[i].cond_code == LT_EXPR
		      || fd->loops[i].cond_code == GT_EXPR);
	  bool forward = fd->loops[i].cond_code == LT_EXPR;
	  if (step)
	    {
	      /* Non-simple Fortran DO loops.  If step is variable,
		 we don't know at compile even the direction, so can't
		 warn.  */
	      if (TREE_CODE (step) != INTEGER_CST)
		break;
	      forward = tree_int_cst_sgn (step) != -1;
	    }
	  if (forward ^ OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
	    warning_at (loc, 0, "%<depend(sink)%> clause waiting for "
			"lexically later iteration");
	  break;
	}
      deps = TREE_CHAIN (deps);
    }
  /* If all offsets corresponding to the collapsed loops are zero,
     this depend clause can be ignored.  FIXME: but there is still a
     flush needed.  We need to emit one __sync_synchronize () for it
     though (perhaps conditionally)?  Solve this together with the
     conservative dependence folding optimization.
  if (i >= fd->collapse)
    return;  */

  deps = OMP_CLAUSE_DECL (c);
  gsi_prev (&gsi2);
  edge e1 = split_block (gsi_bb (gsi2), gsi_stmt (gsi2));
  edge e2 = split_block_after_labels (e1->dest);

  gsi2 = gsi_after_labels (e1->dest);
  *gsi = gsi_last_bb (e1->src);
  for (i = 0; i < fd->ordered; i++)
    {
      tree itype = TREE_TYPE (fd->loops[i].v);
      tree step = NULL_TREE;
      tree orig_off = NULL_TREE;
      if (POINTER_TYPE_P (itype))
	itype = sizetype;
      if (i)
	deps = TREE_CHAIN (deps);
      off = TREE_PURPOSE (deps);
      if (TREE_CODE (off) == TRUNC_DIV_EXPR)
	{
	  step = TREE_OPERAND (off, 1);
	  off = TREE_OPERAND (off, 0);
	  gcc_assert (fd->loops[i].cond_code == LT_EXPR
		      && integer_onep (fd->loops[i].step)
		      && !POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)));
	}
      tree s = fold_convert_loc (loc, itype, step ? step : fd->loops[i].step);
      if (step)
	{
	  off = fold_convert_loc (loc, itype, off);
	  orig_off = off;
	  off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, s);
	}

      if (integer_zerop (off))
	t = boolean_true_node;
      else
	{
	  tree a;
	  tree co = fold_convert_loc (loc, itype, off);
	  if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
	    {
	      if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
		co = fold_build1_loc (loc, NEGATE_EXPR, itype, co);
	      a = fold_build2_loc (loc, POINTER_PLUS_EXPR,
				   TREE_TYPE (fd->loops[i].v), fd->loops[i].v,
				   co);
	    }
	  else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
	    a = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
				 fd->loops[i].v, co);
	  else
	    a = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
				 fd->loops[i].v, co);
	  if (step)
	    {
	      tree t1, t2;
	      if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
		t1 = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a,
				      fd->loops[i].n1);
	      else
		t1 = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
				      fd->loops[i].n2);
	      if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
		t2 = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
				      fd->loops[i].n2);
	      else
		t2 = fold_build2_loc (loc, GE_EXPR
, boolean_type_node
, a
,
2077 t
= fold_build2_loc (loc
, LT_EXPR
, boolean_type_node
,
2078 step
, build_int_cst (TREE_TYPE (step
), 0));
2079 if (TREE_CODE (step
) != INTEGER_CST
)
2081 t1
= unshare_expr (t1
);
2082 t1
= force_gimple_operand_gsi (gsi
, t1
, true, NULL_TREE
,
2083 false, GSI_CONTINUE_LINKING
);
2084 t2
= unshare_expr (t2
);
2085 t2
= force_gimple_operand_gsi (gsi
, t2
, true, NULL_TREE
,
2086 false, GSI_CONTINUE_LINKING
);
2088 t
= fold_build3_loc (loc
, COND_EXPR
, boolean_type_node
,
2091 else if (fd
->loops
[i
].cond_code
== LT_EXPR
)
2093 if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps
))
2094 t
= fold_build2_loc (loc
, GE_EXPR
, boolean_type_node
, a
,
2097 t
= fold_build2_loc (loc
, LT_EXPR
, boolean_type_node
, a
,
2100 else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps
))
2101 t
= fold_build2_loc (loc
, GT_EXPR
, boolean_type_node
, a
,
2104 t
= fold_build2_loc (loc
, LE_EXPR
, boolean_type_node
, a
,
2108 cond
= fold_build2_loc (loc
, BIT_AND_EXPR
, boolean_type_node
, cond
, t
);
2112 off
= fold_convert_loc (loc
, itype
, off
);
2115 || (fd
->loops
[i
].cond_code
== LT_EXPR
2116 ? !integer_onep (fd
->loops
[i
].step
)
2117 : !integer_minus_onep (fd
->loops
[i
].step
)))
2119 if (step
== NULL_TREE
2120 && TYPE_UNSIGNED (itype
)
2121 && fd
->loops
[i
].cond_code
== GT_EXPR
)
2122 t
= fold_build2_loc (loc
, TRUNC_MOD_EXPR
, itype
, off
,
2123 fold_build1_loc (loc
, NEGATE_EXPR
, itype
,
2126 t
= fold_build2_loc (loc
, TRUNC_MOD_EXPR
, itype
,
2127 orig_off
? orig_off
: off
, s
);
2128 t
= fold_build2_loc (loc
, EQ_EXPR
, boolean_type_node
, t
,
2129 build_int_cst (itype
, 0));
2130 if (integer_zerop (t
) && !warned_step
)
2132 warning_at (loc
, 0, "%<depend(sink)%> refers to iteration never "
2133 "in the iteration space");
2136 cond
= fold_build2_loc (loc
, BIT_AND_EXPR
, boolean_type_node
,
2140 if (i
<= fd
->collapse
- 1 && fd
->collapse
> 1)
2146 t
= fold_build2_loc (loc
, MINUS_EXPR
, TREE_TYPE (fd
->loops
[i
].v
),
2147 fd
->loops
[i
].v
, fd
->loops
[i
].n1
);
2148 t
= fold_convert_loc (loc
, fd
->iter_type
, t
);
2151 /* We have divided off by step already earlier. */;
2152 else if (TYPE_UNSIGNED (itype
) && fd
->loops
[i
].cond_code
== GT_EXPR
)
2153 off
= fold_build2_loc (loc
, TRUNC_DIV_EXPR
, itype
, off
,
2154 fold_build1_loc (loc
, NEGATE_EXPR
, itype
,
2157 off
= fold_build2_loc (loc
, TRUNC_DIV_EXPR
, itype
, off
, s
);
2158 if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps
))
2159 off
= fold_build1_loc (loc
, NEGATE_EXPR
, itype
, off
);
2160 off
= fold_convert_loc (loc
, fd
->iter_type
, off
);
2161 if (i
<= fd
->collapse
- 1 && fd
->collapse
> 1)
2164 off
= fold_build2_loc (loc
, PLUS_EXPR
, fd
->iter_type
, coff
,
2166 if (i
< fd
->collapse
- 1)
2168 coff
= fold_build2_loc (loc
, MULT_EXPR
, fd
->iter_type
, off
,
2173 off
= unshare_expr (off
);
2174 t
= fold_build2_loc (loc
, PLUS_EXPR
, fd
->iter_type
, t
, off
);
2175 t
= force_gimple_operand_gsi (&gsi2
, t
, true, NULL_TREE
,
2176 true, GSI_SAME_STMT
);
2179 gimple
*g
= gimple_build_call_vec (builtin_decl_explicit (sink_ix
), args
);
2180 gimple_set_location (g
, loc
);
2181 gsi_insert_before (&gsi2
, g
, GSI_SAME_STMT
);
2183 cond
= unshare_expr (cond
);
2184 cond
= force_gimple_operand_gsi (gsi
, cond
, true, NULL_TREE
, false,
2185 GSI_CONTINUE_LINKING
);
2186 gsi_insert_after (gsi
, gimple_build_cond_empty (cond
), GSI_NEW_STMT
);
2187 edge e3
= make_edge (e1
->src
, e2
->dest
, EDGE_FALSE_VALUE
);
2188 e3
->probability
= REG_BR_PROB_BASE
/ 8;
2189 e1
->probability
= REG_BR_PROB_BASE
- e3
->probability
;
2190 e1
->flags
= EDGE_TRUE_VALUE
;
2191 set_immediate_dominator (CDI_DOMINATORS
, e2
->dest
, e1
->src
);
2193 *gsi
= gsi_after_labels (e2
->dest
);
/* Expand all #pragma omp ordered depend(source) and
   #pragma omp ordered depend(sink:...) constructs in the current
   #pragma omp for ordered(n) region.  */

static void
expand_omp_ordered_source_sink (struct omp_region *region,
				struct omp_for_data *fd, tree *counts,
				basic_block cont_bb)
{
  struct omp_region *inner;
  int i;

  for (i = fd->collapse - 1; i < fd->ordered; i++)
    if (i == fd->collapse - 1 && fd->collapse > 1)
      counts[i] = NULL_TREE;
    else if (i >= fd->collapse && !cont_bb)
      counts[i] = build_zero_cst (fd->iter_type);
    else if (!POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v))
	     && integer_onep (fd->loops[i].step))
      counts[i] = NULL_TREE;
    else
      counts[i] = create_tmp_var (fd->iter_type, ".orditer");
  tree atype
    = build_array_type_nelts (fd->iter_type, fd->ordered - fd->collapse + 1);
  counts[fd->ordered] = create_tmp_var (atype, ".orditera");
  TREE_ADDRESSABLE (counts[fd->ordered]) = 1;

  for (inner = region->inner; inner; inner = inner->next)
    if (inner->type == GIMPLE_OMP_ORDERED)
      {
	gomp_ordered *ord_stmt = inner->ord_stmt;
	gimple_stmt_iterator gsi = gsi_for_stmt (ord_stmt);
	location_t loc = gimple_location (ord_stmt);
	tree c;
	for (c = gimple_omp_ordered_clauses (ord_stmt);
	     c; c = OMP_CLAUSE_CHAIN (c))
	  if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE)
	    break;
	if (c)
	  expand_omp_ordered_source (&gsi, fd, counts, loc);
	for (c = gimple_omp_ordered_clauses (ord_stmt);
	     c; c = OMP_CLAUSE_CHAIN (c))
	  if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
	    expand_omp_ordered_sink (&gsi, fd, counts, c, loc);
	gsi_remove (&gsi, true);
      }
}
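/* At this point counts[fd->ordered] is the addressable ".orditera" array
   holding the current iteration vector handed to the GOMP_doacross_* calls,
   while counts[fd->collapse - 1] .. counts[fd->ordered - 1] are scalar
   per-loop counters (or NULL_TREE where no separate counter is needed).  */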
/* Wrap the body into fd->ordered - fd->collapse loops that aren't
   collapsed.  */

static basic_block
expand_omp_for_ordered_loops (struct omp_for_data *fd, tree *counts,
			      basic_block cont_bb, basic_block body_bb,
			      bool ordered_lastprivate)
{
2251 if (fd
->ordered
== fd
->collapse
)
2256 gimple_stmt_iterator gsi
= gsi_after_labels (body_bb
);
2257 for (int i
= fd
->collapse
; i
< fd
->ordered
; i
++)
2259 tree type
= TREE_TYPE (fd
->loops
[i
].v
);
2260 tree n1
= fold_convert (type
, fd
->loops
[i
].n1
);
2261 expand_omp_build_assign (&gsi
, fd
->loops
[i
].v
, n1
);
2262 tree aref
= build4 (ARRAY_REF
, fd
->iter_type
, counts
[fd
->ordered
],
2263 size_int (i
- fd
->collapse
+ 1),
2264 NULL_TREE
, NULL_TREE
);
2265 expand_omp_build_assign (&gsi
, aref
, build_zero_cst (fd
->iter_type
));
2270 for (int i
= fd
->ordered
- 1; i
>= fd
->collapse
; i
--)
2272 tree t
, type
= TREE_TYPE (fd
->loops
[i
].v
);
2273 gimple_stmt_iterator gsi
= gsi_after_labels (body_bb
);
2274 expand_omp_build_assign (&gsi
, fd
->loops
[i
].v
,
2275 fold_convert (type
, fd
->loops
[i
].n1
));
2277 expand_omp_build_assign (&gsi
, counts
[i
],
2278 build_zero_cst (fd
->iter_type
));
2279 tree aref
= build4 (ARRAY_REF
, fd
->iter_type
, counts
[fd
->ordered
],
2280 size_int (i
- fd
->collapse
+ 1),
2281 NULL_TREE
, NULL_TREE
);
2282 expand_omp_build_assign (&gsi
, aref
, build_zero_cst (fd
->iter_type
));
2283 if (!gsi_end_p (gsi
))
2286 gsi
= gsi_last_bb (body_bb
);
2287 edge e1
= split_block (body_bb
, gsi_stmt (gsi
));
2288 basic_block new_body
= e1
->dest
;
2289 if (body_bb
== cont_bb
)
2292 basic_block new_header
;
2293 if (EDGE_COUNT (cont_bb
->preds
) > 0)
2295 gsi
= gsi_last_bb (cont_bb
);
2296 if (POINTER_TYPE_P (type
))
2297 t
= fold_build_pointer_plus (fd
->loops
[i
].v
,
2298 fold_convert (sizetype
,
2299 fd
->loops
[i
].step
));
2301 t
= fold_build2 (PLUS_EXPR
, type
, fd
->loops
[i
].v
,
2302 fold_convert (type
, fd
->loops
[i
].step
));
2303 expand_omp_build_assign (&gsi
, fd
->loops
[i
].v
, t
);
2306 t
= fold_build2 (PLUS_EXPR
, fd
->iter_type
, counts
[i
],
2307 build_int_cst (fd
->iter_type
, 1));
2308 expand_omp_build_assign (&gsi
, counts
[i
], t
);
2313 t
= fold_build2 (MINUS_EXPR
, TREE_TYPE (fd
->loops
[i
].v
),
2314 fd
->loops
[i
].v
, fd
->loops
[i
].n1
);
2315 t
= fold_convert (fd
->iter_type
, t
);
2316 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
2317 true, GSI_SAME_STMT
);
2319 aref
= build4 (ARRAY_REF
, fd
->iter_type
, counts
[fd
->ordered
],
2320 size_int (i
- fd
->collapse
+ 1),
2321 NULL_TREE
, NULL_TREE
);
2322 expand_omp_build_assign (&gsi
, aref
, t
);
2324 e2
= split_block (cont_bb
, gsi_stmt (gsi
));
2325 new_header
= e2
->dest
;
2328 new_header
= cont_bb
;
2329 gsi
= gsi_after_labels (new_header
);
2330 tree v
= force_gimple_operand_gsi (&gsi
, fd
->loops
[i
].v
, true, NULL_TREE
,
2331 true, GSI_SAME_STMT
);
2333 = force_gimple_operand_gsi (&gsi
, fold_convert (type
, fd
->loops
[i
].n2
),
2334 true, NULL_TREE
, true, GSI_SAME_STMT
);
2335 t
= build2 (fd
->loops
[i
].cond_code
, boolean_type_node
, v
, n2
);
2336 gsi_insert_before (&gsi
, gimple_build_cond_empty (t
), GSI_NEW_STMT
);
2337 edge e3
= split_block (new_header
, gsi_stmt (gsi
));
2340 make_edge (body_bb
, new_header
, EDGE_FALLTHRU
);
2341 e3
->flags
= EDGE_FALSE_VALUE
;
2342 e3
->probability
= REG_BR_PROB_BASE
/ 8;
2343 e1
= make_edge (new_header
, new_body
, EDGE_TRUE_VALUE
);
2344 e1
->probability
= REG_BR_PROB_BASE
- e3
->probability
;
2346 set_immediate_dominator (CDI_DOMINATORS
, new_header
, body_bb
);
2347 set_immediate_dominator (CDI_DOMINATORS
, new_body
, new_header
);
2351 struct loop
*loop
= alloc_loop ();
2352 loop
->header
= new_header
;
2353 loop
->latch
= e2
->src
;
2354 add_loop (loop
, body_bb
->loop_father
);
2358 /* If there are any lastprivate clauses and it is possible some loops
2359 might have zero iterations, ensure all the decls are initialized,
2360 otherwise we could crash evaluating C++ class iterators with lastprivate
2362 bool need_inits
= false;
2363 for (int i
= fd
->collapse
; ordered_lastprivate
&& i
< fd
->ordered
; i
++)
2366 tree type
= TREE_TYPE (fd
->loops
[i
].v
);
2367 gimple_stmt_iterator gsi
= gsi_after_labels (body_bb
);
2368 expand_omp_build_assign (&gsi
, fd
->loops
[i
].v
,
2369 fold_convert (type
, fd
->loops
[i
].n1
));
2373 tree type
= TREE_TYPE (fd
->loops
[i
].v
);
2374 tree this_cond
= fold_build2 (fd
->loops
[i
].cond_code
,
2376 fold_convert (type
, fd
->loops
[i
].n1
),
2377 fold_convert (type
, fd
->loops
[i
].n2
));
2378 if (!integer_onep (this_cond
))
2385 /* A subroutine of expand_omp_for. Generate code for a parallel
2386 loop with any schedule. Given parameters:
2388 for (V = N1; V cond N2; V += STEP) BODY;
2390 where COND is "<" or ">", we generate pseudocode
2392 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
2393 if (more) goto L0; else goto L3;
2400 if (V cond iend) goto L1; else goto L2;
2402 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
2405 If this is a combined omp parallel loop, instead of the call to
2406 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
2407 If this is gimple_omp_for_combined_p loop, then instead of assigning
2408 V and iend in L0 we assign the first two _looptemp_ clause decls of the
2409 inner GIMPLE_OMP_FOR and V += STEP; and
2410 if (V cond iend) goto L1; else goto L2; are removed.
2412 For collapsed loops, given parameters:
2414 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
2415 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
2416 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
2419 we generate pseudocode
2421 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
2426 count3 = (adj + N32 - N31) / STEP3;
2427 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
2432 count2 = (adj + N22 - N21) / STEP2;
2433 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
2438 count1 = (adj + N12 - N11) / STEP1;
2439 count = count1 * count2 * count3;
2444 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
2445 if (more) goto L0; else goto L3;
2449 V3 = N31 + (T % count3) * STEP3;
2451 V2 = N21 + (T % count2) * STEP2;
2453 V1 = N11 + T * STEP1;
2458 if (V < iend) goto L10; else goto L2;
2461 if (V3 cond3 N32) goto L1; else goto L11;
2465 if (V2 cond2 N22) goto L1; else goto L12;
2471 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
2477 expand_omp_for_generic (struct omp_region
*region
,
2478 struct omp_for_data
*fd
,
2479 enum built_in_function start_fn
,
2480 enum built_in_function next_fn
,
2483 tree type
, istart0
, iend0
, iend
;
2484 tree t
, vmain
, vback
, bias
= NULL_TREE
;
2485 basic_block entry_bb
, cont_bb
, exit_bb
, l0_bb
, l1_bb
, collapse_bb
;
2486 basic_block l2_bb
= NULL
, l3_bb
= NULL
;
2487 gimple_stmt_iterator gsi
;
2488 gassign
*assign_stmt
;
2489 bool in_combined_parallel
= is_combined_parallel (region
);
2490 bool broken_loop
= region
->cont
== NULL
;
2492 tree
*counts
= NULL
;
2494 bool ordered_lastprivate
= false;
2496 gcc_assert (!broken_loop
|| !in_combined_parallel
);
2497 gcc_assert (fd
->iter_type
== long_integer_type_node
2498 || !in_combined_parallel
);
2500 entry_bb
= region
->entry
;
2501 cont_bb
= region
->cont
;
2503 gcc_assert (EDGE_COUNT (entry_bb
->succs
) == 2);
2504 gcc_assert (broken_loop
2505 || BRANCH_EDGE (entry_bb
)->dest
== FALLTHRU_EDGE (cont_bb
)->dest
);
2506 l0_bb
= split_edge (FALLTHRU_EDGE (entry_bb
));
2507 l1_bb
= single_succ (l0_bb
);
2510 l2_bb
= create_empty_bb (cont_bb
);
2511 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== l1_bb
2512 || (single_succ_edge (BRANCH_EDGE (cont_bb
)->dest
)->dest
2514 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
2518 l3_bb
= BRANCH_EDGE (entry_bb
)->dest
;
2519 exit_bb
= region
->exit
;
2521 gsi
= gsi_last_bb (entry_bb
);
2523 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
2525 && omp_find_clause (gimple_omp_for_clauses (gsi_stmt (gsi
)),
2526 OMP_CLAUSE_LASTPRIVATE
))
2527 ordered_lastprivate
= false;
2528 if (fd
->collapse
> 1 || fd
->ordered
)
2530 int first_zero_iter1
= -1, first_zero_iter2
= -1;
2531 basic_block zero_iter1_bb
= NULL
, zero_iter2_bb
= NULL
, l2_dom_bb
= NULL
;
2533 counts
= XALLOCAVEC (tree
, fd
->ordered
? fd
->ordered
+ 1 : fd
->collapse
);
2534 expand_omp_for_init_counts (fd
, &gsi
, entry_bb
, counts
,
2535 zero_iter1_bb
, first_zero_iter1
,
2536 zero_iter2_bb
, first_zero_iter2
, l2_dom_bb
);
2540 /* Some counts[i] vars might be uninitialized if
2541 some loop has zero iterations. But the body shouldn't
2542 be executed in that case, so just avoid uninit warnings. */
2543 for (i
= first_zero_iter1
;
2544 i
< (fd
->ordered
? fd
->ordered
: fd
->collapse
); i
++)
2545 if (SSA_VAR_P (counts
[i
]))
2546 TREE_NO_WARNING (counts
[i
]) = 1;
2548 e
= split_block (entry_bb
, gsi_stmt (gsi
));
2550 make_edge (zero_iter1_bb
, entry_bb
, EDGE_FALLTHRU
);
2551 gsi
= gsi_last_bb (entry_bb
);
2552 set_immediate_dominator (CDI_DOMINATORS
, entry_bb
,
2553 get_immediate_dominator (CDI_DOMINATORS
,
2558 /* Some counts[i] vars might be uninitialized if
2559 some loop has zero iterations. But the body shouldn't
2560 be executed in that case, so just avoid uninit warnings. */
2561 for (i
= first_zero_iter2
; i
< fd
->ordered
; i
++)
2562 if (SSA_VAR_P (counts
[i
]))
2563 TREE_NO_WARNING (counts
[i
]) = 1;
2565 make_edge (zero_iter2_bb
, entry_bb
, EDGE_FALLTHRU
);
2569 e
= split_block (entry_bb
, gsi_stmt (gsi
));
2571 make_edge (zero_iter2_bb
, entry_bb
, EDGE_FALLTHRU
);
2572 gsi
= gsi_last_bb (entry_bb
);
2573 set_immediate_dominator (CDI_DOMINATORS
, entry_bb
,
2574 get_immediate_dominator
2575 (CDI_DOMINATORS
, zero_iter2_bb
));
2578 if (fd
->collapse
== 1)
2580 counts
[0] = fd
->loop
.n2
;
2581 fd
->loop
= fd
->loops
[0];
2585 type
= TREE_TYPE (fd
->loop
.v
);
2586 istart0
= create_tmp_var (fd
->iter_type
, ".istart0");
2587 iend0
= create_tmp_var (fd
->iter_type
, ".iend0");
2588 TREE_ADDRESSABLE (istart0
) = 1;
2589 TREE_ADDRESSABLE (iend0
) = 1;
2591 /* See if we need to bias by LLONG_MIN. */
2592 if (fd
->iter_type
== long_long_unsigned_type_node
2593 && TREE_CODE (type
) == INTEGER_TYPE
2594 && !TYPE_UNSIGNED (type
)
2595 && fd
->ordered
== 0)
2599 if (fd
->loop
.cond_code
== LT_EXPR
)
2602 n2
= fold_build2 (PLUS_EXPR
, type
, fd
->loop
.n2
, fd
->loop
.step
);
2606 n1
= fold_build2 (MINUS_EXPR
, type
, fd
->loop
.n2
, fd
->loop
.step
);
2609 if (TREE_CODE (n1
) != INTEGER_CST
2610 || TREE_CODE (n2
) != INTEGER_CST
2611 || ((tree_int_cst_sgn (n1
) < 0) ^ (tree_int_cst_sgn (n2
) < 0)))
2612 bias
= fold_convert (fd
->iter_type
, TYPE_MIN_VALUE (type
));
2615 gimple_stmt_iterator gsif
= gsi
;
2618 tree arr
= NULL_TREE
;
2619 if (in_combined_parallel
)
2621 gcc_assert (fd
->ordered
== 0);
2622 /* In a combined parallel loop, emit a call to
2623 GOMP_loop_foo_next. */
2624 t
= build_call_expr (builtin_decl_explicit (next_fn
), 2,
2625 build_fold_addr_expr (istart0
),
2626 build_fold_addr_expr (iend0
));
2630 tree t0
, t1
, t2
, t3
, t4
;
2631 /* If this is not a combined parallel loop, emit a call to
2632 GOMP_loop_foo_start in ENTRY_BB. */
2633 t4
= build_fold_addr_expr (iend0
);
2634 t3
= build_fold_addr_expr (istart0
);
2637 t0
= build_int_cst (unsigned_type_node
,
2638 fd
->ordered
- fd
->collapse
+ 1);
2639 arr
= create_tmp_var (build_array_type_nelts (fd
->iter_type
,
2641 - fd
->collapse
+ 1),
2643 DECL_NAMELESS (arr
) = 1;
2644 TREE_ADDRESSABLE (arr
) = 1;
2645 TREE_STATIC (arr
) = 1;
2646 vec
<constructor_elt
, va_gc
> *v
;
2647 vec_alloc (v
, fd
->ordered
- fd
->collapse
+ 1);
2650 for (idx
= 0; idx
< fd
->ordered
- fd
->collapse
+ 1; idx
++)
2653 if (idx
== 0 && fd
->collapse
> 1)
2656 c
= counts
[idx
+ fd
->collapse
- 1];
2657 tree purpose
= size_int (idx
);
2658 CONSTRUCTOR_APPEND_ELT (v
, purpose
, c
);
2659 if (TREE_CODE (c
) != INTEGER_CST
)
2660 TREE_STATIC (arr
) = 0;
2663 DECL_INITIAL (arr
) = build_constructor (TREE_TYPE (arr
), v
);
2664 if (!TREE_STATIC (arr
))
2665 force_gimple_operand_gsi (&gsi
, build1 (DECL_EXPR
,
2666 void_type_node
, arr
),
2667 true, NULL_TREE
, true, GSI_SAME_STMT
);
2668 t1
= build_fold_addr_expr (arr
);
2673 t2
= fold_convert (fd
->iter_type
, fd
->loop
.step
);
2676 if (gimple_omp_for_combined_into_p (fd
->for_stmt
))
2679 = omp_find_clause (gimple_omp_for_clauses (fd
->for_stmt
),
2680 OMP_CLAUSE__LOOPTEMP_
);
2681 gcc_assert (innerc
);
2682 t0
= OMP_CLAUSE_DECL (innerc
);
2683 innerc
= omp_find_clause (OMP_CLAUSE_CHAIN (innerc
),
2684 OMP_CLAUSE__LOOPTEMP_
);
2685 gcc_assert (innerc
);
2686 t1
= OMP_CLAUSE_DECL (innerc
);
2688 if (POINTER_TYPE_P (TREE_TYPE (t0
))
2689 && TYPE_PRECISION (TREE_TYPE (t0
))
2690 != TYPE_PRECISION (fd
->iter_type
))
2692 /* Avoid casting pointers to integer of a different size. */
2693 tree itype
= signed_type_for (type
);
2694 t1
= fold_convert (fd
->iter_type
, fold_convert (itype
, t1
));
2695 t0
= fold_convert (fd
->iter_type
, fold_convert (itype
, t0
));
2699 t1
= fold_convert (fd
->iter_type
, t1
);
2700 t0
= fold_convert (fd
->iter_type
, t0
);
2704 t1
= fold_build2 (PLUS_EXPR
, fd
->iter_type
, t1
, bias
);
2705 t0
= fold_build2 (PLUS_EXPR
, fd
->iter_type
, t0
, bias
);
2708 if (fd
->iter_type
== long_integer_type_node
|| fd
->ordered
)
2712 t
= fold_convert (fd
->iter_type
, fd
->chunk_size
);
2713 t
= omp_adjust_chunk_size (t
, fd
->simd_schedule
);
2715 t
= build_call_expr (builtin_decl_explicit (start_fn
),
2716 5, t0
, t1
, t
, t3
, t4
);
2718 t
= build_call_expr (builtin_decl_explicit (start_fn
),
2719 6, t0
, t1
, t2
, t
, t3
, t4
);
2721 else if (fd
->ordered
)
2722 t
= build_call_expr (builtin_decl_explicit (start_fn
),
2725 t
= build_call_expr (builtin_decl_explicit (start_fn
),
2726 5, t0
, t1
, t2
, t3
, t4
);
2734 /* The GOMP_loop_ull_*start functions have additional boolean
2735 argument, true for < loops and false for > loops.
2736 In Fortran, the C bool type can be different from
2737 boolean_type_node. */
2738 bfn_decl
= builtin_decl_explicit (start_fn
);
2739 c_bool_type
= TREE_TYPE (TREE_TYPE (bfn_decl
));
2740 t5
= build_int_cst (c_bool_type
,
2741 fd
->loop
.cond_code
== LT_EXPR
? 1 : 0);
2744 tree bfn_decl
= builtin_decl_explicit (start_fn
);
2745 t
= fold_convert (fd
->iter_type
, fd
->chunk_size
);
2746 t
= omp_adjust_chunk_size (t
, fd
->simd_schedule
);
2747 t
= build_call_expr (bfn_decl
, 7, t5
, t0
, t1
, t2
, t
, t3
, t4
);
2750 t
= build_call_expr (builtin_decl_explicit (start_fn
),
2751 6, t5
, t0
, t1
, t2
, t3
, t4
);
2754 if (TREE_TYPE (t
) != boolean_type_node
)
2755 t
= fold_build2 (NE_EXPR
, boolean_type_node
,
2756 t
, build_int_cst (TREE_TYPE (t
), 0));
2757 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
2758 true, GSI_SAME_STMT
);
2759 if (arr
&& !TREE_STATIC (arr
))
2761 tree clobber
= build_constructor (TREE_TYPE (arr
), NULL
);
2762 TREE_THIS_VOLATILE (clobber
) = 1;
2763 gsi_insert_before (&gsi
, gimple_build_assign (arr
, clobber
),
2766 gsi_insert_after (&gsi
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
2768 /* Remove the GIMPLE_OMP_FOR statement. */
2769 gsi_remove (&gsi
, true);
2771 if (gsi_end_p (gsif
))
2772 gsif
= gsi_after_labels (gsi_bb (gsif
));
2775 /* Iteration setup for sequential loop goes in L0_BB. */
2776 tree startvar
= fd
->loop
.v
;
2777 tree endvar
= NULL_TREE
;
2779 if (gimple_omp_for_combined_p (fd
->for_stmt
))
2781 gcc_assert (gimple_code (inner_stmt
) == GIMPLE_OMP_FOR
2782 && gimple_omp_for_kind (inner_stmt
)
2783 == GF_OMP_FOR_KIND_SIMD
);
2784 tree innerc
= omp_find_clause (gimple_omp_for_clauses (inner_stmt
),
2785 OMP_CLAUSE__LOOPTEMP_
);
2786 gcc_assert (innerc
);
2787 startvar
= OMP_CLAUSE_DECL (innerc
);
2788 innerc
= omp_find_clause (OMP_CLAUSE_CHAIN (innerc
),
2789 OMP_CLAUSE__LOOPTEMP_
);
2790 gcc_assert (innerc
);
2791 endvar
= OMP_CLAUSE_DECL (innerc
);
2794 gsi
= gsi_start_bb (l0_bb
);
2796 if (fd
->ordered
&& fd
->collapse
== 1)
2797 t
= fold_build2 (MULT_EXPR
, fd
->iter_type
, t
,
2798 fold_convert (fd
->iter_type
, fd
->loop
.step
));
2800 t
= fold_build2 (MINUS_EXPR
, fd
->iter_type
, t
, bias
);
2801 if (fd
->ordered
&& fd
->collapse
== 1)
2803 if (POINTER_TYPE_P (TREE_TYPE (startvar
)))
2804 t
= fold_build2 (POINTER_PLUS_EXPR
, TREE_TYPE (startvar
),
2805 fd
->loop
.n1
, fold_convert (sizetype
, t
));
2808 t
= fold_convert (TREE_TYPE (startvar
), t
);
2809 t
= fold_build2 (PLUS_EXPR
, TREE_TYPE (startvar
),
2815 if (POINTER_TYPE_P (TREE_TYPE (startvar
)))
2816 t
= fold_convert (signed_type_for (TREE_TYPE (startvar
)), t
);
2817 t
= fold_convert (TREE_TYPE (startvar
), t
);
2819 t
= force_gimple_operand_gsi (&gsi
, t
,
2821 && TREE_ADDRESSABLE (startvar
),
2822 NULL_TREE
, false, GSI_CONTINUE_LINKING
);
2823 assign_stmt
= gimple_build_assign (startvar
, t
);
2824 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
2827 if (fd
->ordered
&& fd
->collapse
== 1)
2828 t
= fold_build2 (MULT_EXPR
, fd
->iter_type
, t
,
2829 fold_convert (fd
->iter_type
, fd
->loop
.step
));
2831 t
= fold_build2 (MINUS_EXPR
, fd
->iter_type
, t
, bias
);
2832 if (fd
->ordered
&& fd
->collapse
== 1)
2834 if (POINTER_TYPE_P (TREE_TYPE (startvar
)))
2835 t
= fold_build2 (POINTER_PLUS_EXPR
, TREE_TYPE (startvar
),
2836 fd
->loop
.n1
, fold_convert (sizetype
, t
));
2839 t
= fold_convert (TREE_TYPE (startvar
), t
);
2840 t
= fold_build2 (PLUS_EXPR
, TREE_TYPE (startvar
),
2846 if (POINTER_TYPE_P (TREE_TYPE (startvar
)))
2847 t
= fold_convert (signed_type_for (TREE_TYPE (startvar
)), t
);
2848 t
= fold_convert (TREE_TYPE (startvar
), t
);
2850 iend
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
2851 false, GSI_CONTINUE_LINKING
);
2854 assign_stmt
= gimple_build_assign (endvar
, iend
);
2855 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
2856 if (useless_type_conversion_p (TREE_TYPE (fd
->loop
.v
), TREE_TYPE (iend
)))
2857 assign_stmt
= gimple_build_assign (fd
->loop
.v
, iend
);
2859 assign_stmt
= gimple_build_assign (fd
->loop
.v
, NOP_EXPR
, iend
);
2860 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
2862 /* Handle linear clause adjustments. */
2863 tree itercnt
= NULL_TREE
;
2864 if (gimple_omp_for_kind (fd
->for_stmt
) == GF_OMP_FOR_KIND_FOR
)
2865 for (tree c
= gimple_omp_for_clauses (fd
->for_stmt
);
2866 c
; c
= OMP_CLAUSE_CHAIN (c
))
2867 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LINEAR
2868 && !OMP_CLAUSE_LINEAR_NO_COPYIN (c
))
2870 tree d
= OMP_CLAUSE_DECL (c
);
2871 bool is_ref
= omp_is_reference (d
);
2872 tree t
= d
, a
, dest
;
2874 t
= build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c
), t
);
2875 tree type
= TREE_TYPE (t
);
2876 if (POINTER_TYPE_P (type
))
2878 dest
= unshare_expr (t
);
2879 tree v
= create_tmp_var (TREE_TYPE (t
), NULL
);
2880 expand_omp_build_assign (&gsif
, v
, t
);
2881 if (itercnt
== NULL_TREE
)
2884 tree n1
= fd
->loop
.n1
;
2885 if (POINTER_TYPE_P (TREE_TYPE (itercnt
)))
2888 = fold_convert (signed_type_for (TREE_TYPE (itercnt
)),
2890 n1
= fold_convert (TREE_TYPE (itercnt
), n1
);
2892 itercnt
= fold_build2 (MINUS_EXPR
, TREE_TYPE (itercnt
),
2894 itercnt
= fold_build2 (EXACT_DIV_EXPR
, TREE_TYPE (itercnt
),
2895 itercnt
, fd
->loop
.step
);
2896 itercnt
= force_gimple_operand_gsi (&gsi
, itercnt
, true,
2898 GSI_CONTINUE_LINKING
);
2900 a
= fold_build2 (MULT_EXPR
, type
,
2901 fold_convert (type
, itercnt
),
2902 fold_convert (type
, OMP_CLAUSE_LINEAR_STEP (c
)));
2903 t
= fold_build2 (type
== TREE_TYPE (t
) ? PLUS_EXPR
2904 : POINTER_PLUS_EXPR
, TREE_TYPE (t
), v
, a
);
2905 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
2906 false, GSI_CONTINUE_LINKING
);
2907 assign_stmt
= gimple_build_assign (dest
, t
);
2908 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
2910 if (fd
->collapse
> 1)
2911 expand_omp_for_init_vars (fd
, &gsi
, counts
, inner_stmt
, startvar
);
2915 /* Until now, counts array contained number of iterations or
2916 variable containing it for ith loop. From now on, we need
2917 those counts only for collapsed loops, and only for the 2nd
2918 till the last collapsed one. Move those one element earlier,
2919 we'll use counts[fd->collapse - 1] for the first source/sink
2920 iteration counter and so on and counts[fd->ordered]
2921 as the array holding the current counter values for
2923 if (fd
->collapse
> 1)
2924 memmove (counts
, counts
+ 1, (fd
->collapse
- 1) * sizeof (counts
[0]));
2928 for (i
= fd
->collapse
; i
< fd
->ordered
; i
++)
2930 tree type
= TREE_TYPE (fd
->loops
[i
].v
);
2932 = fold_build2 (fd
->loops
[i
].cond_code
, boolean_type_node
,
2933 fold_convert (type
, fd
->loops
[i
].n1
),
2934 fold_convert (type
, fd
->loops
[i
].n2
));
2935 if (!integer_onep (this_cond
))
2938 if (i
< fd
->ordered
)
2941 = create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun
)->prev_bb
);
2942 add_bb_to_loop (cont_bb
, l1_bb
->loop_father
);
2943 gimple_stmt_iterator gsi
= gsi_after_labels (cont_bb
);
2944 gimple
*g
= gimple_build_omp_continue (fd
->loop
.v
, fd
->loop
.v
);
2945 gsi_insert_before (&gsi
, g
, GSI_SAME_STMT
);
2946 make_edge (cont_bb
, l3_bb
, EDGE_FALLTHRU
);
2947 make_edge (cont_bb
, l1_bb
, 0);
2948 l2_bb
= create_empty_bb (cont_bb
);
2949 broken_loop
= false;
2952 expand_omp_ordered_source_sink (region
, fd
, counts
, cont_bb
);
2953 cont_bb
= expand_omp_for_ordered_loops (fd
, counts
, cont_bb
, l1_bb
,
2954 ordered_lastprivate
);
2955 if (counts
[fd
->collapse
- 1])
2957 gcc_assert (fd
->collapse
== 1);
2958 gsi
= gsi_last_bb (l0_bb
);
2959 expand_omp_build_assign (&gsi
, counts
[fd
->collapse
- 1],
2961 gsi
= gsi_last_bb (cont_bb
);
2962 t
= fold_build2 (PLUS_EXPR
, fd
->iter_type
, counts
[fd
->collapse
- 1],
2963 build_int_cst (fd
->iter_type
, 1));
2964 expand_omp_build_assign (&gsi
, counts
[fd
->collapse
- 1], t
);
2965 tree aref
= build4 (ARRAY_REF
, fd
->iter_type
, counts
[fd
->ordered
],
2966 size_zero_node
, NULL_TREE
, NULL_TREE
);
2967 expand_omp_build_assign (&gsi
, aref
, counts
[fd
->collapse
- 1]);
2968 t
= counts
[fd
->collapse
- 1];
2970 else if (fd
->collapse
> 1)
2974 t
= fold_build2 (MINUS_EXPR
, TREE_TYPE (fd
->loops
[0].v
),
2975 fd
->loops
[0].v
, fd
->loops
[0].n1
);
2976 t
= fold_convert (fd
->iter_type
, t
);
2978 gsi
= gsi_last_bb (l0_bb
);
2979 tree aref
= build4 (ARRAY_REF
, fd
->iter_type
, counts
[fd
->ordered
],
2980 size_zero_node
, NULL_TREE
, NULL_TREE
);
2981 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
2982 false, GSI_CONTINUE_LINKING
);
2983 expand_omp_build_assign (&gsi
, aref
, t
, true);
2988 /* Code to control the increment and predicate for the sequential
2989 loop goes in the CONT_BB. */
2990 gsi
= gsi_last_bb (cont_bb
);
2991 gomp_continue
*cont_stmt
= as_a
<gomp_continue
*> (gsi_stmt (gsi
));
2992 gcc_assert (gimple_code (cont_stmt
) == GIMPLE_OMP_CONTINUE
);
2993 vmain
= gimple_omp_continue_control_use (cont_stmt
);
2994 vback
= gimple_omp_continue_control_def (cont_stmt
);
2996 if (!gimple_omp_for_combined_p (fd
->for_stmt
))
2998 if (POINTER_TYPE_P (type
))
2999 t
= fold_build_pointer_plus (vmain
, fd
->loop
.step
);
3001 t
= fold_build2 (PLUS_EXPR
, type
, vmain
, fd
->loop
.step
);
3002 t
= force_gimple_operand_gsi (&gsi
, t
,
3004 && TREE_ADDRESSABLE (vback
),
3005 NULL_TREE
, true, GSI_SAME_STMT
);
3006 assign_stmt
= gimple_build_assign (vback
, t
);
3007 gsi_insert_before (&gsi
, assign_stmt
, GSI_SAME_STMT
);
3009 if (fd
->ordered
&& counts
[fd
->collapse
- 1] == NULL_TREE
)
3011 if (fd
->collapse
> 1)
3015 t
= fold_build2 (MINUS_EXPR
, TREE_TYPE (fd
->loops
[0].v
),
3016 fd
->loops
[0].v
, fd
->loops
[0].n1
);
3017 t
= fold_convert (fd
->iter_type
, t
);
3019 tree aref
= build4 (ARRAY_REF
, fd
->iter_type
,
3020 counts
[fd
->ordered
], size_zero_node
,
3021 NULL_TREE
, NULL_TREE
);
3022 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3023 true, GSI_SAME_STMT
);
3024 expand_omp_build_assign (&gsi
, aref
, t
);
3027 t
= build2 (fd
->loop
.cond_code
, boolean_type_node
,
3028 DECL_P (vback
) && TREE_ADDRESSABLE (vback
) ? t
: vback
,
3030 gcond
*cond_stmt
= gimple_build_cond_empty (t
);
3031 gsi_insert_before (&gsi
, cond_stmt
, GSI_SAME_STMT
);
3034 /* Remove GIMPLE_OMP_CONTINUE. */
3035 gsi_remove (&gsi
, true);
3037 if (fd
->collapse
> 1 && !gimple_omp_for_combined_p (fd
->for_stmt
))
3038 collapse_bb
= extract_omp_for_update_vars (fd
, cont_bb
, l1_bb
);
3040 /* Emit code to get the next parallel iteration in L2_BB. */
3041 gsi
= gsi_start_bb (l2_bb
);
3043 t
= build_call_expr (builtin_decl_explicit (next_fn
), 2,
3044 build_fold_addr_expr (istart0
),
3045 build_fold_addr_expr (iend0
));
3046 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3047 false, GSI_CONTINUE_LINKING
);
3048 if (TREE_TYPE (t
) != boolean_type_node
)
3049 t
= fold_build2 (NE_EXPR
, boolean_type_node
,
3050 t
, build_int_cst (TREE_TYPE (t
), 0));
3051 gcond
*cond_stmt
= gimple_build_cond_empty (t
);
3052 gsi_insert_after (&gsi
, cond_stmt
, GSI_CONTINUE_LINKING
);
3055 /* Add the loop cleanup function. */
3056 gsi
= gsi_last_bb (exit_bb
);
3057 if (gimple_omp_return_nowait_p (gsi_stmt (gsi
)))
3058 t
= builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT
);
3059 else if (gimple_omp_return_lhs (gsi_stmt (gsi
)))
3060 t
= builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL
);
3062 t
= builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END
);
3063 gcall
*call_stmt
= gimple_build_call (t
, 0);
3064 if (gimple_omp_return_lhs (gsi_stmt (gsi
)))
3065 gimple_call_set_lhs (call_stmt
, gimple_omp_return_lhs (gsi_stmt (gsi
)));
3066 gsi_insert_after (&gsi
, call_stmt
, GSI_SAME_STMT
);
3069 tree arr
= counts
[fd
->ordered
];
3070 tree clobber
= build_constructor (TREE_TYPE (arr
), NULL
);
3071 TREE_THIS_VOLATILE (clobber
) = 1;
3072 gsi_insert_after (&gsi
, gimple_build_assign (arr
, clobber
),
3075 gsi_remove (&gsi
, true);
3077 /* Connect the new blocks. */
3078 find_edge (entry_bb
, l0_bb
)->flags
= EDGE_TRUE_VALUE
;
3079 find_edge (entry_bb
, l3_bb
)->flags
= EDGE_FALSE_VALUE
;
3085 e
= find_edge (cont_bb
, l3_bb
);
3086 ne
= make_edge (l2_bb
, l3_bb
, EDGE_FALSE_VALUE
);
3088 phis
= phi_nodes (l3_bb
);
3089 for (gsi
= gsi_start (phis
); !gsi_end_p (gsi
); gsi_next (&gsi
))
3091 gimple
*phi
= gsi_stmt (gsi
);
3092 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi
, ne
),
3093 PHI_ARG_DEF_FROM_EDGE (phi
, e
));
3097 make_edge (cont_bb
, l2_bb
, EDGE_FALSE_VALUE
);
3098 e
= find_edge (cont_bb
, l1_bb
);
3101 e
= BRANCH_EDGE (cont_bb
);
3102 gcc_assert (single_succ (e
->dest
) == l1_bb
);
3104 if (gimple_omp_for_combined_p (fd
->for_stmt
))
3109 else if (fd
->collapse
> 1)
3112 e
= make_edge (cont_bb
, collapse_bb
, EDGE_TRUE_VALUE
);
3115 e
->flags
= EDGE_TRUE_VALUE
;
3118 e
->probability
= REG_BR_PROB_BASE
* 7 / 8;
3119 find_edge (cont_bb
, l2_bb
)->probability
= REG_BR_PROB_BASE
/ 8;
3123 e
= find_edge (cont_bb
, l2_bb
);
3124 e
->flags
= EDGE_FALLTHRU
;
3126 make_edge (l2_bb
, l0_bb
, EDGE_TRUE_VALUE
);
3128 if (gimple_in_ssa_p (cfun
))
3130 /* Add phis to the outer loop that connect to the phis in the inner,
3131 original loop, and move the loop entry value of the inner phi to
3132 the loop entry value of the outer phi. */
3134 for (psi
= gsi_start_phis (l3_bb
); !gsi_end_p (psi
); gsi_next (&psi
))
3136 source_location locus
;
3138 gphi
*exit_phi
= psi
.phi ();
3140 edge l2_to_l3
= find_edge (l2_bb
, l3_bb
);
3141 tree exit_res
= PHI_ARG_DEF_FROM_EDGE (exit_phi
, l2_to_l3
);
3143 basic_block latch
= BRANCH_EDGE (cont_bb
)->dest
;
3144 edge latch_to_l1
= find_edge (latch
, l1_bb
);
3146 = find_phi_with_arg_on_edge (exit_res
, latch_to_l1
);
3148 tree t
= gimple_phi_result (exit_phi
);
3149 tree new_res
= copy_ssa_name (t
, NULL
);
3150 nphi
= create_phi_node (new_res
, l0_bb
);
3152 edge l0_to_l1
= find_edge (l0_bb
, l1_bb
);
3153 t
= PHI_ARG_DEF_FROM_EDGE (inner_phi
, l0_to_l1
);
3154 locus
= gimple_phi_arg_location_from_edge (inner_phi
, l0_to_l1
);
3155 edge entry_to_l0
= find_edge (entry_bb
, l0_bb
);
3156 add_phi_arg (nphi
, t
, entry_to_l0
, locus
);
3158 edge l2_to_l0
= find_edge (l2_bb
, l0_bb
);
3159 add_phi_arg (nphi
, exit_res
, l2_to_l0
, UNKNOWN_LOCATION
);
3161 add_phi_arg (inner_phi
, new_res
, l0_to_l1
, UNKNOWN_LOCATION
);
3165 set_immediate_dominator (CDI_DOMINATORS
, l2_bb
,
3166 recompute_dominator (CDI_DOMINATORS
, l2_bb
));
3167 set_immediate_dominator (CDI_DOMINATORS
, l3_bb
,
3168 recompute_dominator (CDI_DOMINATORS
, l3_bb
));
3169 set_immediate_dominator (CDI_DOMINATORS
, l0_bb
,
3170 recompute_dominator (CDI_DOMINATORS
, l0_bb
));
3171 set_immediate_dominator (CDI_DOMINATORS
, l1_bb
,
3172 recompute_dominator (CDI_DOMINATORS
, l1_bb
));
3174 /* We enter expand_omp_for_generic with a loop. This original loop may
3175 have its own loop struct, or it may be part of an outer loop struct
3176 (which may be the fake loop). */
3177 struct loop
*outer_loop
= entry_bb
->loop_father
;
3178 bool orig_loop_has_loop_struct
= l1_bb
->loop_father
!= outer_loop
;
3180 add_bb_to_loop (l2_bb
, outer_loop
);
3182 /* We've added a new loop around the original loop. Allocate the
3183 corresponding loop struct. */
3184 struct loop
*new_loop
= alloc_loop ();
3185 new_loop
->header
= l0_bb
;
3186 new_loop
->latch
= l2_bb
;
3187 add_loop (new_loop
, outer_loop
);
3189 /* Allocate a loop structure for the original loop unless we already
3191 if (!orig_loop_has_loop_struct
3192 && !gimple_omp_for_combined_p (fd
->for_stmt
))
3194 struct loop
*orig_loop
= alloc_loop ();
3195 orig_loop
->header
= l1_bb
;
3196 /* The loop may have multiple latches. */
3197 add_loop (orig_loop
, new_loop
);
3202 /* A subroutine of expand_omp_for. Generate code for a parallel
3203 loop with static schedule and no specified chunk size. Given
3206 for (V = N1; V cond N2; V += STEP) BODY;
3208 where COND is "<" or ">", we generate pseudocode
3210 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
3215 if ((__typeof (V)) -1 > 0 && cond is >)
3216 n = -(adj + N2 - N1) / -STEP;
3218 n = (adj + N2 - N1) / STEP;
3221 if (threadid < tt) goto L3; else goto L4;
3226 s0 = q * threadid + tt;
3229 if (s0 >= e0) goto L2; else goto L0;
3235 if (V cond e) goto L1;
3240 expand_omp_for_static_nochunk (struct omp_region
*region
,
3241 struct omp_for_data
*fd
,
3244 tree n
, q
, s0
, e0
, e
, t
, tt
, nthreads
, threadid
;
3245 tree type
, itype
, vmain
, vback
;
3246 basic_block entry_bb
, second_bb
, third_bb
, exit_bb
, seq_start_bb
;
3247 basic_block body_bb
, cont_bb
, collapse_bb
= NULL
;
3249 gimple_stmt_iterator gsi
;
3251 bool broken_loop
= region
->cont
== NULL
;
3252 tree
*counts
= NULL
;
3255 itype
= type
= TREE_TYPE (fd
->loop
.v
);
3256 if (POINTER_TYPE_P (type
))
3257 itype
= signed_type_for (type
);
3259 entry_bb
= region
->entry
;
3260 cont_bb
= region
->cont
;
3261 gcc_assert (EDGE_COUNT (entry_bb
->succs
) == 2);
3262 fin_bb
= BRANCH_EDGE (entry_bb
)->dest
;
3263 gcc_assert (broken_loop
3264 || (fin_bb
== FALLTHRU_EDGE (cont_bb
)->dest
));
3265 seq_start_bb
= split_edge (FALLTHRU_EDGE (entry_bb
));
3266 body_bb
= single_succ (seq_start_bb
);
3269 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== body_bb
3270 || single_succ (BRANCH_EDGE (cont_bb
)->dest
) == body_bb
);
3271 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
3273 exit_bb
= region
->exit
;
3275 /* Iteration space partitioning goes in ENTRY_BB. */
3276 gsi
= gsi_last_bb (entry_bb
);
3277 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
3279 if (fd
->collapse
> 1)
3281 int first_zero_iter
= -1, dummy
= -1;
3282 basic_block l2_dom_bb
= NULL
, dummy_bb
= NULL
;
3284 counts
= XALLOCAVEC (tree
, fd
->collapse
);
3285 expand_omp_for_init_counts (fd
, &gsi
, entry_bb
, counts
,
3286 fin_bb
, first_zero_iter
,
3287 dummy_bb
, dummy
, l2_dom_bb
);
3290 else if (gimple_omp_for_combined_into_p (fd
->for_stmt
))
3291 t
= integer_one_node
;
3293 t
= fold_binary (fd
->loop
.cond_code
, boolean_type_node
,
3294 fold_convert (type
, fd
->loop
.n1
),
3295 fold_convert (type
, fd
->loop
.n2
));
3296 if (fd
->collapse
== 1
3297 && TYPE_UNSIGNED (type
)
3298 && (t
== NULL_TREE
|| !integer_onep (t
)))
3300 n1
= fold_convert (type
, unshare_expr (fd
->loop
.n1
));
3301 n1
= force_gimple_operand_gsi (&gsi
, n1
, true, NULL_TREE
,
3302 true, GSI_SAME_STMT
);
3303 n2
= fold_convert (type
, unshare_expr (fd
->loop
.n2
));
3304 n2
= force_gimple_operand_gsi (&gsi
, n2
, true, NULL_TREE
,
3305 true, GSI_SAME_STMT
);
3306 gcond
*cond_stmt
= gimple_build_cond (fd
->loop
.cond_code
, n1
, n2
,
3307 NULL_TREE
, NULL_TREE
);
3308 gsi_insert_before (&gsi
, cond_stmt
, GSI_SAME_STMT
);
3309 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt
),
3310 expand_omp_regimplify_p
, NULL
, NULL
)
3311 || walk_tree (gimple_cond_rhs_ptr (cond_stmt
),
3312 expand_omp_regimplify_p
, NULL
, NULL
))
3314 gsi
= gsi_for_stmt (cond_stmt
);
3315 gimple_regimplify_operands (cond_stmt
, &gsi
);
3317 ep
= split_block (entry_bb
, cond_stmt
);
3318 ep
->flags
= EDGE_TRUE_VALUE
;
3319 entry_bb
= ep
->dest
;
3320 ep
->probability
= REG_BR_PROB_BASE
- (REG_BR_PROB_BASE
/ 2000 - 1);
3321 ep
= make_edge (ep
->src
, fin_bb
, EDGE_FALSE_VALUE
);
3322 ep
->probability
= REG_BR_PROB_BASE
/ 2000 - 1;
3323 if (gimple_in_ssa_p (cfun
))
3325 int dest_idx
= find_edge (entry_bb
, fin_bb
)->dest_idx
;
3326 for (gphi_iterator gpi
= gsi_start_phis (fin_bb
);
3327 !gsi_end_p (gpi
); gsi_next (&gpi
))
3329 gphi
*phi
= gpi
.phi ();
3330 add_phi_arg (phi
, gimple_phi_arg_def (phi
, dest_idx
),
3331 ep
, UNKNOWN_LOCATION
);
3334 gsi
= gsi_last_bb (entry_bb
);
3337 switch (gimple_omp_for_kind (fd
->for_stmt
))
3339 case GF_OMP_FOR_KIND_FOR
:
3340 nthreads
= builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS
);
3341 threadid
= builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM
);
3343 case GF_OMP_FOR_KIND_DISTRIBUTE
:
3344 nthreads
= builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS
);
3345 threadid
= builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM
);
3350 nthreads
= build_call_expr (nthreads
, 0);
3351 nthreads
= fold_convert (itype
, nthreads
);
3352 nthreads
= force_gimple_operand_gsi (&gsi
, nthreads
, true, NULL_TREE
,
3353 true, GSI_SAME_STMT
);
3354 threadid
= build_call_expr (threadid
, 0);
3355 threadid
= fold_convert (itype
, threadid
);
3356 threadid
= force_gimple_operand_gsi (&gsi
, threadid
, true, NULL_TREE
,
3357 true, GSI_SAME_STMT
);
3361 step
= fd
->loop
.step
;
3362 if (gimple_omp_for_combined_into_p (fd
->for_stmt
))
3364 tree innerc
= omp_find_clause (gimple_omp_for_clauses (fd
->for_stmt
),
3365 OMP_CLAUSE__LOOPTEMP_
);
3366 gcc_assert (innerc
);
3367 n1
= OMP_CLAUSE_DECL (innerc
);
3368 innerc
= omp_find_clause (OMP_CLAUSE_CHAIN (innerc
),
3369 OMP_CLAUSE__LOOPTEMP_
);
3370 gcc_assert (innerc
);
3371 n2
= OMP_CLAUSE_DECL (innerc
);
3373 n1
= force_gimple_operand_gsi (&gsi
, fold_convert (type
, n1
),
3374 true, NULL_TREE
, true, GSI_SAME_STMT
);
3375 n2
= force_gimple_operand_gsi (&gsi
, fold_convert (itype
, n2
),
3376 true, NULL_TREE
, true, GSI_SAME_STMT
);
3377 step
= force_gimple_operand_gsi (&gsi
, fold_convert (itype
, step
),
3378 true, NULL_TREE
, true, GSI_SAME_STMT
);
3380 t
= build_int_cst (itype
, (fd
->loop
.cond_code
== LT_EXPR
? -1 : 1));
3381 t
= fold_build2 (PLUS_EXPR
, itype
, step
, t
);
3382 t
= fold_build2 (PLUS_EXPR
, itype
, t
, n2
);
3383 t
= fold_build2 (MINUS_EXPR
, itype
, t
, fold_convert (itype
, n1
));
3384 if (TYPE_UNSIGNED (itype
) && fd
->loop
.cond_code
== GT_EXPR
)
3385 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
,
3386 fold_build1 (NEGATE_EXPR
, itype
, t
),
3387 fold_build1 (NEGATE_EXPR
, itype
, step
));
3389 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
, t
, step
);
3390 t
= fold_convert (itype
, t
);
3391 n
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
, true, GSI_SAME_STMT
);
3393 q
= create_tmp_reg (itype
, "q");
3394 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
, n
, nthreads
);
3395 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
, true, GSI_SAME_STMT
);
3396 gsi_insert_before (&gsi
, gimple_build_assign (q
, t
), GSI_SAME_STMT
);
3398 tt
= create_tmp_reg (itype
, "tt");
3399 t
= fold_build2 (TRUNC_MOD_EXPR
, itype
, n
, nthreads
);
3400 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
, true, GSI_SAME_STMT
);
3401 gsi_insert_before (&gsi
, gimple_build_assign (tt
, t
), GSI_SAME_STMT
);
3403 t
= build2 (LT_EXPR
, boolean_type_node
, threadid
, tt
);
3404 gcond
*cond_stmt
= gimple_build_cond_empty (t
);
3405 gsi_insert_before (&gsi
, cond_stmt
, GSI_SAME_STMT
);
3407 second_bb
= split_block (entry_bb
, cond_stmt
)->dest
;
3408 gsi
= gsi_last_bb (second_bb
);
3409 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
3411 gsi_insert_before (&gsi
, gimple_build_assign (tt
, build_int_cst (itype
, 0)),
3413 gassign
*assign_stmt
3414 = gimple_build_assign (q
, PLUS_EXPR
, q
, build_int_cst (itype
, 1));
3415 gsi_insert_before (&gsi
, assign_stmt
, GSI_SAME_STMT
);
3417 third_bb
= split_block (second_bb
, assign_stmt
)->dest
;
3418 gsi
= gsi_last_bb (third_bb
);
3419 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
3421 t
= build2 (MULT_EXPR
, itype
, q
, threadid
);
3422 t
= build2 (PLUS_EXPR
, itype
, t
, tt
);
3423 s0
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
, true, GSI_SAME_STMT
);
3425 t
= fold_build2 (PLUS_EXPR
, itype
, s0
, q
);
3426 e0
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
, true, GSI_SAME_STMT
);
3428 t
= build2 (GE_EXPR
, boolean_type_node
, s0
, e0
);
3429 gsi_insert_before (&gsi
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
3431 /* Remove the GIMPLE_OMP_FOR statement. */
3432 gsi_remove (&gsi
, true);
3434 /* Setup code for sequential iteration goes in SEQ_START_BB. */
3435 gsi
= gsi_start_bb (seq_start_bb
);
3437 tree startvar
= fd
->loop
.v
;
3438 tree endvar
= NULL_TREE
;
3440 if (gimple_omp_for_combined_p (fd
->for_stmt
))
3442 tree clauses
= gimple_code (inner_stmt
) == GIMPLE_OMP_PARALLEL
3443 ? gimple_omp_parallel_clauses (inner_stmt
)
3444 : gimple_omp_for_clauses (inner_stmt
);
3445 tree innerc
= omp_find_clause (clauses
, OMP_CLAUSE__LOOPTEMP_
);
3446 gcc_assert (innerc
);
3447 startvar
= OMP_CLAUSE_DECL (innerc
);
3448 innerc
= omp_find_clause (OMP_CLAUSE_CHAIN (innerc
),
3449 OMP_CLAUSE__LOOPTEMP_
);
3450 gcc_assert (innerc
);
3451 endvar
= OMP_CLAUSE_DECL (innerc
);
3452 if (fd
->collapse
> 1 && TREE_CODE (fd
->loop
.n2
) != INTEGER_CST
3453 && gimple_omp_for_kind (fd
->for_stmt
) == GF_OMP_FOR_KIND_DISTRIBUTE
)
3456 for (i
= 1; i
< fd
->collapse
; i
++)
3458 innerc
= omp_find_clause (OMP_CLAUSE_CHAIN (innerc
),
3459 OMP_CLAUSE__LOOPTEMP_
);
3460 gcc_assert (innerc
);
3462 innerc
= omp_find_clause (OMP_CLAUSE_CHAIN (innerc
),
3463 OMP_CLAUSE__LOOPTEMP_
);
3466 /* If needed (distribute parallel for with lastprivate),
3467 propagate down the total number of iterations. */
3468 tree t
= fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (innerc
)),
3470 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
, false,
3471 GSI_CONTINUE_LINKING
);
3472 assign_stmt
= gimple_build_assign (OMP_CLAUSE_DECL (innerc
), t
);
3473 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
3477 t
= fold_convert (itype
, s0
);
3478 t
= fold_build2 (MULT_EXPR
, itype
, t
, step
);
3479 if (POINTER_TYPE_P (type
))
3480 t
= fold_build_pointer_plus (n1
, t
);
3482 t
= fold_build2 (PLUS_EXPR
, type
, t
, n1
);
3483 t
= fold_convert (TREE_TYPE (startvar
), t
);
3484 t
= force_gimple_operand_gsi (&gsi
, t
,
3486 && TREE_ADDRESSABLE (startvar
),
3487 NULL_TREE
, false, GSI_CONTINUE_LINKING
);
3488 assign_stmt
= gimple_build_assign (startvar
, t
);
3489 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
3491 t
= fold_convert (itype
, e0
);
3492 t
= fold_build2 (MULT_EXPR
, itype
, t
, step
);
3493 if (POINTER_TYPE_P (type
))
3494 t
= fold_build_pointer_plus (n1
, t
);
3496 t
= fold_build2 (PLUS_EXPR
, type
, t
, n1
);
3497 t
= fold_convert (TREE_TYPE (startvar
), t
);
3498 e
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3499 false, GSI_CONTINUE_LINKING
);
3502 assign_stmt
= gimple_build_assign (endvar
, e
);
3503 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
3504 if (useless_type_conversion_p (TREE_TYPE (fd
->loop
.v
), TREE_TYPE (e
)))
3505 assign_stmt
= gimple_build_assign (fd
->loop
.v
, e
);
3507 assign_stmt
= gimple_build_assign (fd
->loop
.v
, NOP_EXPR
, e
);
3508 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
3510 /* Handle linear clause adjustments. */
3511 tree itercnt
= NULL_TREE
;
3512 if (gimple_omp_for_kind (fd
->for_stmt
) == GF_OMP_FOR_KIND_FOR
)
3513 for (tree c
= gimple_omp_for_clauses (fd
->for_stmt
);
3514 c
; c
= OMP_CLAUSE_CHAIN (c
))
3515 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LINEAR
3516 && !OMP_CLAUSE_LINEAR_NO_COPYIN (c
))
3518 tree d
= OMP_CLAUSE_DECL (c
);
3519 bool is_ref
= omp_is_reference (d
);
3520 tree t
= d
, a
, dest
;
3522 t
= build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c
), t
);
3523 if (itercnt
== NULL_TREE
)
3525 if (gimple_omp_for_combined_into_p (fd
->for_stmt
))
3527 itercnt
= fold_build2 (MINUS_EXPR
, itype
,
3528 fold_convert (itype
, n1
),
3529 fold_convert (itype
, fd
->loop
.n1
));
3530 itercnt
= fold_build2 (EXACT_DIV_EXPR
, itype
, itercnt
, step
);
3531 itercnt
= fold_build2 (PLUS_EXPR
, itype
, itercnt
, s0
);
3532 itercnt
= force_gimple_operand_gsi (&gsi
, itercnt
, true,
3534 GSI_CONTINUE_LINKING
);
3539 tree type
= TREE_TYPE (t
);
3540 if (POINTER_TYPE_P (type
))
3542 a
= fold_build2 (MULT_EXPR
, type
,
3543 fold_convert (type
, itercnt
),
3544 fold_convert (type
, OMP_CLAUSE_LINEAR_STEP (c
)));
3545 dest
= unshare_expr (t
);
3546 t
= fold_build2 (type
== TREE_TYPE (t
) ? PLUS_EXPR
3547 : POINTER_PLUS_EXPR
, TREE_TYPE (t
), t
, a
);
3548 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3549 false, GSI_CONTINUE_LINKING
);
3550 assign_stmt
= gimple_build_assign (dest
, t
);
3551 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
3553 if (fd
->collapse
> 1)
3554 expand_omp_for_init_vars (fd
, &gsi
, counts
, inner_stmt
, startvar
);
3558 /* The code controlling the sequential loop replaces the
3559 GIMPLE_OMP_CONTINUE. */
3560 gsi
= gsi_last_bb (cont_bb
);
3561 gomp_continue
*cont_stmt
= as_a
<gomp_continue
*> (gsi_stmt (gsi
));
3562 gcc_assert (gimple_code (cont_stmt
) == GIMPLE_OMP_CONTINUE
);
3563 vmain
= gimple_omp_continue_control_use (cont_stmt
);
3564 vback
= gimple_omp_continue_control_def (cont_stmt
);
3566 if (!gimple_omp_for_combined_p (fd
->for_stmt
))
3568 if (POINTER_TYPE_P (type
))
3569 t
= fold_build_pointer_plus (vmain
, step
);
3571 t
= fold_build2 (PLUS_EXPR
, type
, vmain
, step
);
3572 t
= force_gimple_operand_gsi (&gsi
, t
,
3574 && TREE_ADDRESSABLE (vback
),
3575 NULL_TREE
, true, GSI_SAME_STMT
);
3576 assign_stmt
= gimple_build_assign (vback
, t
);
3577 gsi_insert_before (&gsi
, assign_stmt
, GSI_SAME_STMT
);
3579 t
= build2 (fd
->loop
.cond_code
, boolean_type_node
,
3580 DECL_P (vback
) && TREE_ADDRESSABLE (vback
)
3582 gsi_insert_before (&gsi
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
3585 /* Remove the GIMPLE_OMP_CONTINUE statement. */
3586 gsi_remove (&gsi
, true);
3588 if (fd
->collapse
> 1 && !gimple_omp_for_combined_p (fd
->for_stmt
))
3589 collapse_bb
= extract_omp_for_update_vars (fd
, cont_bb
, body_bb
);
3592 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
3593 gsi
= gsi_last_bb (exit_bb
);
3594 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi
)))
3596 t
= gimple_omp_return_lhs (gsi_stmt (gsi
));
3597 gsi_insert_after (&gsi
, omp_build_barrier (t
), GSI_SAME_STMT
);
3599 gsi_remove (&gsi
, true);
3601 /* Connect all the blocks. */
3602 ep
= make_edge (entry_bb
, third_bb
, EDGE_FALSE_VALUE
);
3603 ep
->probability
= REG_BR_PROB_BASE
/ 4 * 3;
3604 ep
= find_edge (entry_bb
, second_bb
);
3605 ep
->flags
= EDGE_TRUE_VALUE
;
3606 ep
->probability
= REG_BR_PROB_BASE
/ 4;
3607 find_edge (third_bb
, seq_start_bb
)->flags
= EDGE_FALSE_VALUE
;
3608 find_edge (third_bb
, fin_bb
)->flags
= EDGE_TRUE_VALUE
;
3612 ep
= find_edge (cont_bb
, body_bb
);
3615 ep
= BRANCH_EDGE (cont_bb
);
3616 gcc_assert (single_succ (ep
->dest
) == body_bb
);
3618 if (gimple_omp_for_combined_p (fd
->for_stmt
))
3623 else if (fd
->collapse
> 1)
3626 ep
= make_edge (cont_bb
, collapse_bb
, EDGE_TRUE_VALUE
);
3629 ep
->flags
= EDGE_TRUE_VALUE
;
3630 find_edge (cont_bb
, fin_bb
)->flags
3631 = ep
? EDGE_FALSE_VALUE
: EDGE_FALLTHRU
;
3634 set_immediate_dominator (CDI_DOMINATORS
, second_bb
, entry_bb
);
3635 set_immediate_dominator (CDI_DOMINATORS
, third_bb
, entry_bb
);
3636 set_immediate_dominator (CDI_DOMINATORS
, seq_start_bb
, third_bb
);
3638 set_immediate_dominator (CDI_DOMINATORS
, body_bb
,
3639 recompute_dominator (CDI_DOMINATORS
, body_bb
));
3640 set_immediate_dominator (CDI_DOMINATORS
, fin_bb
,
3641 recompute_dominator (CDI_DOMINATORS
, fin_bb
));
3643 struct loop
*loop
= body_bb
->loop_father
;
3644 if (loop
!= entry_bb
->loop_father
)
3646 gcc_assert (broken_loop
|| loop
->header
== body_bb
);
3647 gcc_assert (broken_loop
3648 || loop
->latch
== region
->cont
3649 || single_pred (loop
->latch
) == region
->cont
);
3653 if (!broken_loop
&& !gimple_omp_for_combined_p (fd
->for_stmt
))
3655 loop
= alloc_loop ();
3656 loop
->header
= body_bb
;
3657 if (collapse_bb
== NULL
)
3658 loop
->latch
= cont_bb
;
3659 add_loop (loop
, body_bb
->loop_father
);
/* Return phi in E->DEST with ARG on edge E.  */

static gphi *
find_phi_with_arg_on_edge (tree arg, edge e)
{
  basic_block bb = e->dest;

  for (gphi_iterator gpi = gsi_start_phis (bb);
       !gsi_end_p (gpi);
       gsi_next (&gpi))
    {
      gphi *phi = gpi.phi ();
      if (PHI_ARG_DEF_FROM_EDGE (phi, e) == arg)
	return phi;
    }

  return NULL;
}
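/* find_phi_with_arg_on_edge is used by the SSA fixup in
   expand_omp_for_generic above: given the value a PHI in L3_BB receives on
   the edge from L2_BB, it locates the PHI in the original loop header that
   carries the same value on its latch edge, so a new PHI for the outer loop
   can be created in L0_BB and threaded through to it.  */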
3682 /* A subroutine of expand_omp_for. Generate code for a parallel
3683 loop with static schedule and a specified chunk size. Given
3686 for (V = N1; V cond N2; V += STEP) BODY;
3688 where COND is "<" or ">", we generate pseudocode
3690 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
3695 if ((__typeof (V)) -1 > 0 && cond is >)
3696 n = -(adj + N2 - N1) / -STEP;
3698 n = (adj + N2 - N1) / STEP;
3700 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
3701 here so that V is defined
3702 if the loop is not entered
3704 s0 = (trip * nthreads + threadid) * CHUNK;
3705 e0 = min (s0 + CHUNK, n);
3706 if (s0 < n) goto L1; else goto L4;
3713 if (V cond e) goto L2; else goto L3;
3721 expand_omp_for_static_chunk (struct omp_region
*region
,
3722 struct omp_for_data
*fd
, gimple
*inner_stmt
)
3724 tree n
, s0
, e0
, e
, t
;
3725 tree trip_var
, trip_init
, trip_main
, trip_back
, nthreads
, threadid
;
3726 tree type
, itype
, vmain
, vback
, vextra
;
3727 basic_block entry_bb
, exit_bb
, body_bb
, seq_start_bb
, iter_part_bb
;
3728 basic_block trip_update_bb
= NULL
, cont_bb
, collapse_bb
= NULL
, fin_bb
;
3729 gimple_stmt_iterator gsi
;
3731 bool broken_loop
= region
->cont
== NULL
;
3732 tree
*counts
= NULL
;
3735 itype
= type
= TREE_TYPE (fd
->loop
.v
);
3736 if (POINTER_TYPE_P (type
))
3737 itype
= signed_type_for (type
);
3739 entry_bb
= region
->entry
;
3740 se
= split_block (entry_bb
, last_stmt (entry_bb
));
3742 iter_part_bb
= se
->dest
;
3743 cont_bb
= region
->cont
;
3744 gcc_assert (EDGE_COUNT (iter_part_bb
->succs
) == 2);
3745 fin_bb
= BRANCH_EDGE (iter_part_bb
)->dest
;
3746 gcc_assert (broken_loop
3747 || fin_bb
== FALLTHRU_EDGE (cont_bb
)->dest
);
3748 seq_start_bb
= split_edge (FALLTHRU_EDGE (iter_part_bb
));
3749 body_bb
= single_succ (seq_start_bb
);
3752 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== body_bb
3753 || single_succ (BRANCH_EDGE (cont_bb
)->dest
) == body_bb
);
3754 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
3755 trip_update_bb
= split_edge (FALLTHRU_EDGE (cont_bb
));
3757 exit_bb
= region
->exit
;
3759 /* Trip and adjustment setup goes in ENTRY_BB. */
3760 gsi
= gsi_last_bb (entry_bb
);
3761 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
3763 if (fd
->collapse
> 1)
3765 int first_zero_iter
= -1, dummy
= -1;
3766 basic_block l2_dom_bb
= NULL
, dummy_bb
= NULL
;
3768 counts
= XALLOCAVEC (tree
, fd
->collapse
);
3769 expand_omp_for_init_counts (fd
, &gsi
, entry_bb
, counts
,
3770 fin_bb
, first_zero_iter
,
3771 dummy_bb
, dummy
, l2_dom_bb
);
3774 else if (gimple_omp_for_combined_into_p (fd
->for_stmt
))
3775 t
= integer_one_node
;
3777 t
= fold_binary (fd
->loop
.cond_code
, boolean_type_node
,
3778 fold_convert (type
, fd
->loop
.n1
),
3779 fold_convert (type
, fd
->loop
.n2
));
3780 if (fd
->collapse
== 1
3781 && TYPE_UNSIGNED (type
)
3782 && (t
== NULL_TREE
|| !integer_onep (t
)))
3784 n1
= fold_convert (type
, unshare_expr (fd
->loop
.n1
));
3785 n1
= force_gimple_operand_gsi (&gsi
, n1
, true, NULL_TREE
,
3786 true, GSI_SAME_STMT
);
3787 n2
= fold_convert (type
, unshare_expr (fd
->loop
.n2
));
3788 n2
= force_gimple_operand_gsi (&gsi
, n2
, true, NULL_TREE
,
3789 true, GSI_SAME_STMT
);
3790 gcond
*cond_stmt
= gimple_build_cond (fd
->loop
.cond_code
, n1
, n2
,
3791 NULL_TREE
, NULL_TREE
);
3792 gsi_insert_before (&gsi
, cond_stmt
, GSI_SAME_STMT
);
3793 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt
),
3794 expand_omp_regimplify_p
, NULL
, NULL
)
3795 || walk_tree (gimple_cond_rhs_ptr (cond_stmt
),
3796 expand_omp_regimplify_p
, NULL
, NULL
))
3798 gsi
= gsi_for_stmt (cond_stmt
);
3799 gimple_regimplify_operands (cond_stmt
, &gsi
);
3801 se
= split_block (entry_bb
, cond_stmt
);
3802 se
->flags
= EDGE_TRUE_VALUE
;
3803 entry_bb
= se
->dest
;
3804 se
->probability
= REG_BR_PROB_BASE
- (REG_BR_PROB_BASE
/ 2000 - 1);
3805 se
= make_edge (se
->src
, fin_bb
, EDGE_FALSE_VALUE
);
3806 se
->probability
= REG_BR_PROB_BASE
/ 2000 - 1;
3807 if (gimple_in_ssa_p (cfun
))
3809 int dest_idx
= find_edge (iter_part_bb
, fin_bb
)->dest_idx
;
3810 for (gphi_iterator gpi
= gsi_start_phis (fin_bb
);
3811 !gsi_end_p (gpi
); gsi_next (&gpi
))
3813 gphi
*phi
= gpi
.phi ();
3814 add_phi_arg (phi
, gimple_phi_arg_def (phi
, dest_idx
),
3815 se
, UNKNOWN_LOCATION
);
3818 gsi
= gsi_last_bb (entry_bb
);
3821 switch (gimple_omp_for_kind (fd
->for_stmt
))
3823 case GF_OMP_FOR_KIND_FOR
:
3824 nthreads
= builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS
);
3825 threadid
= builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM
);
3827 case GF_OMP_FOR_KIND_DISTRIBUTE
:
3828 nthreads
= builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS
);
3829 threadid
= builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM
);
3834 nthreads
= build_call_expr (nthreads
, 0);
3835 nthreads
= fold_convert (itype
, nthreads
);
3836 nthreads
= force_gimple_operand_gsi (&gsi
, nthreads
, true, NULL_TREE
,
3837 true, GSI_SAME_STMT
);
3838 threadid
= build_call_expr (threadid
, 0);
3839 threadid
= fold_convert (itype
, threadid
);
3840 threadid
= force_gimple_operand_gsi (&gsi
, threadid
, true, NULL_TREE
,
3841 true, GSI_SAME_STMT
);
3845 step
= fd
->loop
.step
;
3846 if (gimple_omp_for_combined_into_p (fd
->for_stmt
))
3848 tree innerc
= omp_find_clause (gimple_omp_for_clauses (fd
->for_stmt
),
3849 OMP_CLAUSE__LOOPTEMP_
);
3850 gcc_assert (innerc
);
3851 n1
= OMP_CLAUSE_DECL (innerc
);
3852 innerc
= omp_find_clause (OMP_CLAUSE_CHAIN (innerc
),
3853 OMP_CLAUSE__LOOPTEMP_
);
3854 gcc_assert (innerc
);
3855 n2
= OMP_CLAUSE_DECL (innerc
);
3857 n1
= force_gimple_operand_gsi (&gsi
, fold_convert (type
, n1
),
3858 true, NULL_TREE
, true, GSI_SAME_STMT
);
3859 n2
= force_gimple_operand_gsi (&gsi
, fold_convert (itype
, n2
),
3860 true, NULL_TREE
, true, GSI_SAME_STMT
);
3861 step
= force_gimple_operand_gsi (&gsi
, fold_convert (itype
, step
),
3862 true, NULL_TREE
, true, GSI_SAME_STMT
);
3863 tree chunk_size
= fold_convert (itype
, fd
->chunk_size
);
3864 chunk_size
= omp_adjust_chunk_size (chunk_size
, fd
->simd_schedule
);
3866 = force_gimple_operand_gsi (&gsi
, chunk_size
, true, NULL_TREE
, true,
3869 t
= build_int_cst (itype
, (fd
->loop
.cond_code
== LT_EXPR
? -1 : 1));
3870 t
= fold_build2 (PLUS_EXPR
, itype
, step
, t
);
3871 t
= fold_build2 (PLUS_EXPR
, itype
, t
, n2
);
3872 t
= fold_build2 (MINUS_EXPR
, itype
, t
, fold_convert (itype
, n1
));
3873 if (TYPE_UNSIGNED (itype
) && fd
->loop
.cond_code
== GT_EXPR
)
3874 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
,
3875 fold_build1 (NEGATE_EXPR
, itype
, t
),
3876 fold_build1 (NEGATE_EXPR
, itype
, step
));
3878 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
, t
, step
);
3879 t
= fold_convert (itype
, t
);
3880 n
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3881 true, GSI_SAME_STMT
);
3883 trip_var
= create_tmp_reg (itype
, ".trip");
3884 if (gimple_in_ssa_p (cfun
))
3886 trip_init
= make_ssa_name (trip_var
);
3887 trip_main
= make_ssa_name (trip_var
);
3888 trip_back
= make_ssa_name (trip_var
);
3892 trip_init
= trip_var
;
3893 trip_main
= trip_var
;
3894 trip_back
= trip_var
;
3897 gassign
*assign_stmt
3898 = gimple_build_assign (trip_init
, build_int_cst (itype
, 0));
3899 gsi_insert_before (&gsi
, assign_stmt
, GSI_SAME_STMT
);
3901 t
= fold_build2 (MULT_EXPR
, itype
, threadid
, chunk_size
);
3902 t
= fold_build2 (MULT_EXPR
, itype
, t
, step
);
3903 if (POINTER_TYPE_P (type
))
3904 t
= fold_build_pointer_plus (n1
, t
);
3906 t
= fold_build2 (PLUS_EXPR
, type
, t
, n1
);
3907 vextra
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3908 true, GSI_SAME_STMT
);
3910 /* Remove the GIMPLE_OMP_FOR. */
3911 gsi_remove (&gsi
, true);
3913 gimple_stmt_iterator gsif
= gsi
;
3915 /* Iteration space partitioning goes in ITER_PART_BB. */
3916 gsi
= gsi_last_bb (iter_part_bb
);
3918 t
= fold_build2 (MULT_EXPR
, itype
, trip_main
, nthreads
);
3919 t
= fold_build2 (PLUS_EXPR
, itype
, t
, threadid
);
3920 t
= fold_build2 (MULT_EXPR
, itype
, t
, chunk_size
);
3921 s0
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3922 false, GSI_CONTINUE_LINKING
);
3924 t
= fold_build2 (PLUS_EXPR
, itype
, s0
, chunk_size
);
3925 t
= fold_build2 (MIN_EXPR
, itype
, t
, n
);
3926 e0
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3927 false, GSI_CONTINUE_LINKING
);
3929 t
= build2 (LT_EXPR
, boolean_type_node
, s0
, n
);
3930 gsi_insert_after (&gsi
, gimple_build_cond_empty (t
), GSI_CONTINUE_LINKING
);
3932 /* Setup code for sequential iteration goes in SEQ_START_BB. */
3933 gsi
= gsi_start_bb (seq_start_bb
);
3935 tree startvar
= fd
->loop
.v
;
3936 tree endvar
= NULL_TREE
;
3938 if (gimple_omp_for_combined_p (fd
->for_stmt
))
3940 tree clauses
= gimple_code (inner_stmt
) == GIMPLE_OMP_PARALLEL
3941 ? gimple_omp_parallel_clauses (inner_stmt
)
3942 : gimple_omp_for_clauses (inner_stmt
);
3943 tree innerc
= omp_find_clause (clauses
, OMP_CLAUSE__LOOPTEMP_
);
3944 gcc_assert (innerc
);
3945 startvar
= OMP_CLAUSE_DECL (innerc
);
3946 innerc
= omp_find_clause (OMP_CLAUSE_CHAIN (innerc
),
3947 OMP_CLAUSE__LOOPTEMP_
);
3948 gcc_assert (innerc
);
3949 endvar
= OMP_CLAUSE_DECL (innerc
);
3950 if (fd
->collapse
> 1 && TREE_CODE (fd
->loop
.n2
) != INTEGER_CST
3951 && gimple_omp_for_kind (fd
->for_stmt
) == GF_OMP_FOR_KIND_DISTRIBUTE
)
3954 for (i
= 1; i
< fd
->collapse
; i
++)
3956 innerc
= omp_find_clause (OMP_CLAUSE_CHAIN (innerc
),
3957 OMP_CLAUSE__LOOPTEMP_
);
3958 gcc_assert (innerc
);
3960 innerc
= omp_find_clause (OMP_CLAUSE_CHAIN (innerc
),
3961 OMP_CLAUSE__LOOPTEMP_
);
3964 /* If needed (distribute parallel for with lastprivate),
3965 propagate down the total number of iterations. */
3966 tree t
= fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (innerc
)),
3968 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
, false,
3969 GSI_CONTINUE_LINKING
);
3970 assign_stmt
= gimple_build_assign (OMP_CLAUSE_DECL (innerc
), t
);
3971 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
3976 t
= fold_convert (itype
, s0
);
3977 t
= fold_build2 (MULT_EXPR
, itype
, t
, step
);
3978 if (POINTER_TYPE_P (type
))
3979 t
= fold_build_pointer_plus (n1
, t
);
3981 t
= fold_build2 (PLUS_EXPR
, type
, t
, n1
);
3982 t
= fold_convert (TREE_TYPE (startvar
), t
);
3983 t
= force_gimple_operand_gsi (&gsi
, t
,
3985 && TREE_ADDRESSABLE (startvar
),
3986 NULL_TREE
, false, GSI_CONTINUE_LINKING
);
3987 assign_stmt
= gimple_build_assign (startvar
, t
);
3988 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
3990 t
= fold_convert (itype
, e0
);
3991 t
= fold_build2 (MULT_EXPR
, itype
, t
, step
);
3992 if (POINTER_TYPE_P (type
))
3993 t
= fold_build_pointer_plus (n1
, t
);
3995 t
= fold_build2 (PLUS_EXPR
, type
, t
, n1
);
3996 t
= fold_convert (TREE_TYPE (startvar
), t
);
3997 e
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3998 false, GSI_CONTINUE_LINKING
);
4001 assign_stmt
= gimple_build_assign (endvar
, e
);
4002 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
4003 if (useless_type_conversion_p (TREE_TYPE (fd
->loop
.v
), TREE_TYPE (e
)))
4004 assign_stmt
= gimple_build_assign (fd
->loop
.v
, e
);
4006 assign_stmt
= gimple_build_assign (fd
->loop
.v
, NOP_EXPR
, e
);
4007 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
4009 /* Handle linear clause adjustments. */
4010 tree itercnt
= NULL_TREE
, itercntbias
= NULL_TREE
;
4011 if (gimple_omp_for_kind (fd
->for_stmt
) == GF_OMP_FOR_KIND_FOR
)
4012 for (tree c
= gimple_omp_for_clauses (fd
->for_stmt
);
4013 c
; c
= OMP_CLAUSE_CHAIN (c
))
4014 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LINEAR
4015 && !OMP_CLAUSE_LINEAR_NO_COPYIN (c
))
4017 tree d
= OMP_CLAUSE_DECL (c
);
4018 bool is_ref
= omp_is_reference (d
);
4019 tree t
= d
, a
, dest
;
4021 t
= build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c
), t
);
4022 tree type
= TREE_TYPE (t
);
4023 if (POINTER_TYPE_P (type
))
4025 dest
= unshare_expr (t
);
4026 tree v
= create_tmp_var (TREE_TYPE (t
), NULL
);
4027 expand_omp_build_assign (&gsif
, v
, t
);
4028 if (itercnt
== NULL_TREE
)
4030 if (gimple_omp_for_combined_into_p (fd
->for_stmt
))
4033 = fold_build2 (MINUS_EXPR
, itype
, fold_convert (itype
, n1
),
4034 fold_convert (itype
, fd
->loop
.n1
));
4035 itercntbias
= fold_build2 (EXACT_DIV_EXPR
, itype
,
4038 = force_gimple_operand_gsi (&gsif
, itercntbias
, true,
4041 itercnt
= fold_build2 (PLUS_EXPR
, itype
, itercntbias
, s0
);
4042 itercnt
= force_gimple_operand_gsi (&gsi
, itercnt
, true,
4044 GSI_CONTINUE_LINKING
);
4049 a
= fold_build2 (MULT_EXPR
, type
,
4050 fold_convert (type
, itercnt
),
4051 fold_convert (type
, OMP_CLAUSE_LINEAR_STEP (c
)));
4052 t
= fold_build2 (type
== TREE_TYPE (t
) ? PLUS_EXPR
4053 : POINTER_PLUS_EXPR
, TREE_TYPE (t
), v
, a
);
4054 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
4055 false, GSI_CONTINUE_LINKING
);
4056 assign_stmt
= gimple_build_assign (dest
, t
);
4057 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
4059 if (fd
->collapse
> 1)
4060 expand_omp_for_init_vars (fd
, &gsi
, counts
, inner_stmt
, startvar
);
4064 /* The code controlling the sequential loop goes in CONT_BB,
4065 replacing the GIMPLE_OMP_CONTINUE. */
4066 gsi
= gsi_last_bb (cont_bb
);
4067 gomp_continue
*cont_stmt
= as_a
<gomp_continue
*> (gsi_stmt (gsi
));
4068 vmain
= gimple_omp_continue_control_use (cont_stmt
);
4069 vback
= gimple_omp_continue_control_def (cont_stmt
);
4071 if (!gimple_omp_for_combined_p (fd
->for_stmt
))
4073 if (POINTER_TYPE_P (type
))
4074 t
= fold_build_pointer_plus (vmain
, step
);
4076 t
= fold_build2 (PLUS_EXPR
, type
, vmain
, step
);
4077 if (DECL_P (vback
) && TREE_ADDRESSABLE (vback
))
4078 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
4079 true, GSI_SAME_STMT
);
4080 assign_stmt
= gimple_build_assign (vback
, t
);
4081 gsi_insert_before (&gsi
, assign_stmt
, GSI_SAME_STMT
);
4083 if (tree_int_cst_equal (fd
->chunk_size
, integer_one_node
))
4084 t
= build2 (EQ_EXPR
, boolean_type_node
,
4085 build_int_cst (itype
, 0),
4086 build_int_cst (itype
, 1));
4088 t
= build2 (fd
->loop
.cond_code
, boolean_type_node
,
4089 DECL_P (vback
) && TREE_ADDRESSABLE (vback
)
4091 gsi_insert_before (&gsi
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
4094 /* Remove GIMPLE_OMP_CONTINUE. */
4095 gsi_remove (&gsi
, true);
4097 if (fd
->collapse
> 1 && !gimple_omp_for_combined_p (fd
->for_stmt
))
4098 collapse_bb
= extract_omp_for_update_vars (fd
, cont_bb
, body_bb
);
4100 /* Trip update code goes into TRIP_UPDATE_BB. */
4101 gsi
= gsi_start_bb (trip_update_bb
);
4103 t
= build_int_cst (itype
, 1);
4104 t
= build2 (PLUS_EXPR
, itype
, trip_main
, t
);
4105 assign_stmt
= gimple_build_assign (trip_back
, t
);
4106 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
4109 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4110 gsi
= gsi_last_bb (exit_bb
);
4111 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi
)))
4113 t
= gimple_omp_return_lhs (gsi_stmt (gsi
));
4114 gsi_insert_after (&gsi
, omp_build_barrier (t
), GSI_SAME_STMT
);
4116 gsi_remove (&gsi
, true);
4118 /* Connect the new blocks. */
4119 find_edge (iter_part_bb
, seq_start_bb
)->flags
= EDGE_TRUE_VALUE
;
4120 find_edge (iter_part_bb
, fin_bb
)->flags
= EDGE_FALSE_VALUE
;
4124 se
= find_edge (cont_bb
, body_bb
);
4127 se
= BRANCH_EDGE (cont_bb
);
4128 gcc_assert (single_succ (se
->dest
) == body_bb
);
4130 if (gimple_omp_for_combined_p (fd
->for_stmt
))
4135 else if (fd
->collapse
> 1)
4138 se
= make_edge (cont_bb
, collapse_bb
, EDGE_TRUE_VALUE
);
4141 se
->flags
= EDGE_TRUE_VALUE
;
4142 find_edge (cont_bb
, trip_update_bb
)->flags
4143 = se
? EDGE_FALSE_VALUE
: EDGE_FALLTHRU
;
4145 redirect_edge_and_branch (single_succ_edge (trip_update_bb
),
4149 if (gimple_in_ssa_p (cfun
))
4157 gcc_assert (fd
->collapse
== 1 && !broken_loop
);
4159 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4160 remove arguments of the phi nodes in fin_bb. We need to create
4161 appropriate phi nodes in iter_part_bb instead. */
4162 se
= find_edge (iter_part_bb
, fin_bb
);
4163 re
= single_succ_edge (trip_update_bb
);
4164 vec
<edge_var_map
> *head
= redirect_edge_var_map_vector (re
);
4165 ene
= single_succ_edge (entry_bb
);
4167 psi
= gsi_start_phis (fin_bb
);
4168 for (i
= 0; !gsi_end_p (psi
) && head
->iterate (i
, &vm
);
4169 gsi_next (&psi
), ++i
)
4172 source_location locus
;
4175 t
= gimple_phi_result (phi
);
4176 gcc_assert (t
== redirect_edge_var_map_result (vm
));
4178 if (!single_pred_p (fin_bb
))
4179 t
= copy_ssa_name (t
, phi
);
4181 nphi
= create_phi_node (t
, iter_part_bb
);
4183 t
= PHI_ARG_DEF_FROM_EDGE (phi
, se
);
4184 locus
= gimple_phi_arg_location_from_edge (phi
, se
);
4186 /* A special case -- fd->loop.v is not yet computed in
4187 iter_part_bb, we need to use vextra instead. */
4188 if (t
== fd
->loop
.v
)
4190 add_phi_arg (nphi
, t
, ene
, locus
);
4191 locus
= redirect_edge_var_map_location (vm
);
4192 tree back_arg
= redirect_edge_var_map_def (vm
);
4193 add_phi_arg (nphi
, back_arg
, re
, locus
);
4194 edge ce
= find_edge (cont_bb
, body_bb
);
4197 ce
= BRANCH_EDGE (cont_bb
);
4198 gcc_assert (single_succ (ce
->dest
) == body_bb
);
4199 ce
= single_succ_edge (ce
->dest
);
4201 gphi
*inner_loop_phi
= find_phi_with_arg_on_edge (back_arg
, ce
);
4202 gcc_assert (inner_loop_phi
!= NULL
);
4203 add_phi_arg (inner_loop_phi
, gimple_phi_result (nphi
),
4204 find_edge (seq_start_bb
, body_bb
), locus
);
4206 if (!single_pred_p (fin_bb
))
4207 add_phi_arg (phi
, gimple_phi_result (nphi
), se
, locus
);
4209 gcc_assert (gsi_end_p (psi
) && (head
== NULL
|| i
== head
->length ()));
4210 redirect_edge_var_map_clear (re
);
4211 if (single_pred_p (fin_bb
))
4214 psi
= gsi_start_phis (fin_bb
);
4215 if (gsi_end_p (psi
))
4217 remove_phi_node (&psi
, false);
4220 /* Make phi node for trip. */
4221 phi
= create_phi_node (trip_main
, iter_part_bb
);
4222 add_phi_arg (phi
, trip_back
, single_succ_edge (trip_update_bb
),
4224 add_phi_arg (phi
, trip_init
, single_succ_edge (entry_bb
),
4229 set_immediate_dominator (CDI_DOMINATORS
, trip_update_bb
, cont_bb
);
4230 set_immediate_dominator (CDI_DOMINATORS
, iter_part_bb
,
4231 recompute_dominator (CDI_DOMINATORS
, iter_part_bb
));
4232 set_immediate_dominator (CDI_DOMINATORS
, fin_bb
,
4233 recompute_dominator (CDI_DOMINATORS
, fin_bb
));
4234 set_immediate_dominator (CDI_DOMINATORS
, seq_start_bb
,
4235 recompute_dominator (CDI_DOMINATORS
, seq_start_bb
));
4236 set_immediate_dominator (CDI_DOMINATORS
, body_bb
,
4237 recompute_dominator (CDI_DOMINATORS
, body_bb
));
4241 struct loop
*loop
= body_bb
->loop_father
;
4242 struct loop
*trip_loop
= alloc_loop ();
4243 trip_loop
->header
= iter_part_bb
;
4244 trip_loop
->latch
= trip_update_bb
;
4245 add_loop (trip_loop
, iter_part_bb
->loop_father
);
4247 if (loop
!= entry_bb
->loop_father
)
4249 gcc_assert (loop
->header
== body_bb
);
4250 gcc_assert (loop
->latch
== region
->cont
4251 || single_pred (loop
->latch
) == region
->cont
);
4252 trip_loop
->inner
= loop
;
4256 if (!gimple_omp_for_combined_p (fd
->for_stmt
))
4258 loop
= alloc_loop ();
4259 loop
->header
= body_bb
;
4260 if (collapse_bb
== NULL
)
4261 loop
->latch
= cont_bb
;
4262 add_loop (loop
, trip_loop
);
/* A subroutine of expand_omp_for.  Generate code for _Cilk_for loop.
   Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">" or "!=", we generate pseudocode

	for (ind_var = low; ind_var < high; ind_var++)
	  {
	    V = n1 + (ind_var * STEP)

	    BODY;
	  }

   In the above pseudocode, low and high are function parameters of the
   child function.  In the function below, we are inserting a temp.
   variable that will be making a call to two OMP functions that will not be
   found in the body of _Cilk_for (since OMP_FOR cannot be mixed
   with _Cilk_for).  These functions are replaced with low and high
   by the function that handles taskreg.  */
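/* Illustrative sketch only (the names below are hypothetical, not the
   libcilkrts API): conceptually, the runtime entry point that eventually
   receives the outlined child behaves like

     typedef void (*cilk_for_body_fn) (void *data, long low, long high);

     static void
     run_cilk_for (cilk_for_body_fn fn, void *data, long count, long grain)
     {
       // The real __libcilkrts_cilk_for_{32,64} splits [0, count) into
       // grains and runs them in parallel with work stealing; a serial
       // stand-in would simply hand the child the whole range.
       fn (data, 0, count);
     }

   The child function expanded here is such a body: it iterates ind_var
   from __low to __high and recomputes V from ind_var on each iteration.  */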
4289 expand_cilk_for (struct omp_region
*region
, struct omp_for_data
*fd
)
4291 bool broken_loop
= region
->cont
== NULL
;
4292 basic_block entry_bb
= region
->entry
;
4293 basic_block cont_bb
= region
->cont
;
4295 gcc_assert (EDGE_COUNT (entry_bb
->succs
) == 2);
4296 gcc_assert (broken_loop
4297 || BRANCH_EDGE (entry_bb
)->dest
== FALLTHRU_EDGE (cont_bb
)->dest
);
4298 basic_block l0_bb
= FALLTHRU_EDGE (entry_bb
)->dest
;
4299 basic_block l1_bb
, l2_bb
;
4303 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== l0_bb
);
4304 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
4305 l1_bb
= split_block (cont_bb
, last_stmt (cont_bb
))->dest
;
4306 l2_bb
= BRANCH_EDGE (entry_bb
)->dest
;
4310 BRANCH_EDGE (entry_bb
)->flags
&= ~EDGE_ABNORMAL
;
4311 l1_bb
= split_edge (BRANCH_EDGE (entry_bb
));
4312 l2_bb
= single_succ (l1_bb
);
4314 basic_block exit_bb
= region
->exit
;
4315 basic_block l2_dom_bb
= NULL
;
4317 gimple_stmt_iterator gsi
= gsi_last_bb (entry_bb
);
  /* Below statements until the "tree high_val = ..." are pseudo statements
     used to pass information to be used by expand_omp_taskreg.
     low_val and high_val will be replaced by the __low and __high
     parameter from the child function.

     The call_exprs part is a place-holder, it is mainly used
     to distinctly identify to the top-level part that this is
     where we should put low and high (reasoning given in header
     comment).  */
4329 gomp_parallel
*par_stmt
4330 = as_a
<gomp_parallel
*> (last_stmt (region
->outer
->entry
));
4331 tree child_fndecl
= gimple_omp_parallel_child_fn (par_stmt
);
4332 tree t
, low_val
= NULL_TREE
, high_val
= NULL_TREE
;
4333 for (t
= DECL_ARGUMENTS (child_fndecl
); t
; t
= TREE_CHAIN (t
))
4335 if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t
)), "__high"))
4337 else if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t
)), "__low"))
4340 gcc_assert (low_val
&& high_val
);
4342 tree type
= TREE_TYPE (low_val
);
4343 tree ind_var
= create_tmp_reg (type
, "__cilk_ind_var");
4344 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
4346 /* Not needed in SSA form right now. */
4347 gcc_assert (!gimple_in_ssa_p (cfun
));
4348 if (l2_dom_bb
== NULL
)
4354 gimple
*stmt
= gimple_build_assign (ind_var
, n1
);
4356 /* Replace the GIMPLE_OMP_FOR statement. */
4357 gsi_replace (&gsi
, stmt
, true);
4361 /* Code to control the increment goes in the CONT_BB. */
4362 gsi
= gsi_last_bb (cont_bb
);
4363 stmt
= gsi_stmt (gsi
);
4364 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_CONTINUE
);
4365 stmt
= gimple_build_assign (ind_var
, PLUS_EXPR
, ind_var
,
4366 build_one_cst (type
));
4368 /* Replace GIMPLE_OMP_CONTINUE. */
4369 gsi_replace (&gsi
, stmt
, true);
4372 /* Emit the condition in L1_BB. */
4373 gsi
= gsi_after_labels (l1_bb
);
4374 t
= fold_build2 (MULT_EXPR
, TREE_TYPE (fd
->loop
.step
),
4375 fold_convert (TREE_TYPE (fd
->loop
.step
), ind_var
),
4377 if (POINTER_TYPE_P (TREE_TYPE (fd
->loop
.n1
)))
4378 t
= fold_build2 (POINTER_PLUS_EXPR
, TREE_TYPE (fd
->loop
.n1
),
4379 fd
->loop
.n1
, fold_convert (sizetype
, t
));
4381 t
= fold_build2 (PLUS_EXPR
, TREE_TYPE (fd
->loop
.n1
),
4382 fd
->loop
.n1
, fold_convert (TREE_TYPE (fd
->loop
.n1
), t
));
4383 t
= fold_convert (TREE_TYPE (fd
->loop
.v
), t
);
4384 expand_omp_build_assign (&gsi
, fd
->loop
.v
, t
);
4386 /* The condition is always '<' since the runtime will fill in the low
4388 stmt
= gimple_build_cond (LT_EXPR
, ind_var
, n2
, NULL_TREE
, NULL_TREE
);
4389 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
4391 /* Remove GIMPLE_OMP_RETURN. */
4392 gsi
= gsi_last_bb (exit_bb
);
4393 gsi_remove (&gsi
, true);
4395 /* Connect the new blocks. */
4396 remove_edge (FALLTHRU_EDGE (entry_bb
));
4401 remove_edge (BRANCH_EDGE (entry_bb
));
4402 make_edge (entry_bb
, l1_bb
, EDGE_FALLTHRU
);
4404 e
= BRANCH_EDGE (l1_bb
);
4405 ne
= FALLTHRU_EDGE (l1_bb
);
4406 e
->flags
= EDGE_TRUE_VALUE
;
4410 single_succ_edge (entry_bb
)->flags
= EDGE_FALLTHRU
;
4412 ne
= single_succ_edge (l1_bb
);
4413 e
= make_edge (l1_bb
, l0_bb
, EDGE_TRUE_VALUE
);
4416 ne
->flags
= EDGE_FALSE_VALUE
;
4417 e
->probability
= REG_BR_PROB_BASE
* 7 / 8;
4418 ne
->probability
= REG_BR_PROB_BASE
/ 8;
4420 set_immediate_dominator (CDI_DOMINATORS
, l1_bb
, entry_bb
);
4421 set_immediate_dominator (CDI_DOMINATORS
, l2_bb
, l2_dom_bb
);
4422 set_immediate_dominator (CDI_DOMINATORS
, l0_bb
, l1_bb
);
4426 struct loop
*loop
= alloc_loop ();
4427 loop
->header
= l1_bb
;
4428 loop
->latch
= cont_bb
;
4429 add_loop (loop
, l1_bb
->loop_father
);
4430 loop
->safelen
= INT_MAX
;
4433 /* Pick the correct library function based on the precision of the
4434 induction variable type. */
4435 tree lib_fun
= NULL_TREE
;
4436 if (TYPE_PRECISION (type
) == 32)
4437 lib_fun
= cilk_for_32_fndecl
;
4438 else if (TYPE_PRECISION (type
) == 64)
4439 lib_fun
= cilk_for_64_fndecl
;
4443 gcc_assert (fd
->sched_kind
== OMP_CLAUSE_SCHEDULE_CILKFOR
);
4445 /* WS_ARGS contains the library function flavor to call:
4446 __libcilkrts_cilk_for_64 or __libcilkrts_cilk_for_32), and the
4447 user-defined grain value. If the user does not define one, then zero
4448 is passed in by the parser. */
4449 vec_alloc (region
->ws_args
, 2);
4450 region
->ws_args
->quick_push (lib_fun
);
4451 region
->ws_args
->quick_push (fd
->chunk_size
);
/* A subroutine of expand_omp_for.  Generate code for a simd non-worksharing
   loop.  Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	V = N1;
    L0:
	BODY;
	V += STEP;
	if (V cond N2) goto L0; else goto L2;
    L2:

   For collapsed loops, given parameters:

	for (V1 = N11; V1 cond1 N12; V1 += STEP1)
	  for (V2 = N21; V2 cond2 N22; V2 += STEP2)
	    for (V3 = N31; V3 cond3 N32; V3 += STEP3)
	      BODY;

   we generate pseudocode

	count3 = (adj + N32 - N31) / STEP3;
	count2 = (adj + N22 - N21) / STEP2;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
	V = 0;
    L0:
	BODY;
	V += 1;
	V3 += STEP3;
	V2 += (V3 cond3 N32) ? 0 : STEP2;
	V3 = (V3 cond3 N32) ? V3 : N31;
	V1 += (V2 cond2 N22) ? 0 : STEP1;
	V2 = (V2 cond2 N22) ? V2 : N21;
	if (V < count) goto L0; else goto L2;
    L2:
*/
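/* Illustration only (not generated code): the conditional updates in the
   collapsed pseudocode above keep the per-dimension indices in step with
   the single linear counter V by wrapping the innermost index first.  A
   serial C equivalent for a two-level collapse, assuming both conditions
   are "<", would be:

     long count = count1 * count2;
     long v1 = N11, v2 = N21;
     for (long v = 0; v < count; v++)
       {
	 body (v1, v2);
	 v2 += STEP2;
	 if (!(v2 < N22))	// inner index ran off its range
	   {
	     v2 = N21;		// wrap it around ...
	     v1 += STEP1;	// ... and advance the outer index
	   }
       }
*/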
4515 expand_omp_simd (struct omp_region
*region
, struct omp_for_data
*fd
)
4518 basic_block entry_bb
, cont_bb
, exit_bb
, l0_bb
, l1_bb
, l2_bb
, l2_dom_bb
;
4519 gimple_stmt_iterator gsi
;
4522 bool broken_loop
= region
->cont
== NULL
;
4524 tree
*counts
= NULL
;
4526 int safelen_int
= INT_MAX
;
4527 tree safelen
= omp_find_clause (gimple_omp_for_clauses (fd
->for_stmt
),
4528 OMP_CLAUSE_SAFELEN
);
4529 tree simduid
= omp_find_clause (gimple_omp_for_clauses (fd
->for_stmt
),
4530 OMP_CLAUSE__SIMDUID_
);
4535 safelen
= OMP_CLAUSE_SAFELEN_EXPR (safelen
);
4536 if (TREE_CODE (safelen
) != INTEGER_CST
)
4538 else if (tree_fits_uhwi_p (safelen
) && tree_to_uhwi (safelen
) < INT_MAX
)
4539 safelen_int
= tree_to_uhwi (safelen
);
4540 if (safelen_int
== 1)
4543 type
= TREE_TYPE (fd
->loop
.v
);
4544 entry_bb
= region
->entry
;
4545 cont_bb
= region
->cont
;
4546 gcc_assert (EDGE_COUNT (entry_bb
->succs
) == 2);
4547 gcc_assert (broken_loop
4548 || BRANCH_EDGE (entry_bb
)->dest
== FALLTHRU_EDGE (cont_bb
)->dest
);
4549 l0_bb
= FALLTHRU_EDGE (entry_bb
)->dest
;
4552 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== l0_bb
);
4553 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
4554 l1_bb
= split_block (cont_bb
, last_stmt (cont_bb
))->dest
;
4555 l2_bb
= BRANCH_EDGE (entry_bb
)->dest
;
4559 BRANCH_EDGE (entry_bb
)->flags
&= ~EDGE_ABNORMAL
;
4560 l1_bb
= split_edge (BRANCH_EDGE (entry_bb
));
4561 l2_bb
= single_succ (l1_bb
);
4563 exit_bb
= region
->exit
;
4566 gsi
= gsi_last_bb (entry_bb
);
4568 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
4569 /* Not needed in SSA form right now. */
4570 gcc_assert (!gimple_in_ssa_p (cfun
));
4571 if (fd
->collapse
> 1)
4573 int first_zero_iter
= -1, dummy
= -1;
4574 basic_block zero_iter_bb
= l2_bb
, dummy_bb
= NULL
;
4576 counts
= XALLOCAVEC (tree
, fd
->collapse
);
4577 expand_omp_for_init_counts (fd
, &gsi
, entry_bb
, counts
,
4578 zero_iter_bb
, first_zero_iter
,
4579 dummy_bb
, dummy
, l2_dom_bb
);
4581 if (l2_dom_bb
== NULL
)
4586 if (gimple_omp_for_combined_into_p (fd
->for_stmt
))
4588 tree innerc
= omp_find_clause (gimple_omp_for_clauses (fd
->for_stmt
),
4589 OMP_CLAUSE__LOOPTEMP_
);
4590 gcc_assert (innerc
);
4591 n1
= OMP_CLAUSE_DECL (innerc
);
4592 innerc
= omp_find_clause (OMP_CLAUSE_CHAIN (innerc
),
4593 OMP_CLAUSE__LOOPTEMP_
);
4594 gcc_assert (innerc
);
4595 n2
= OMP_CLAUSE_DECL (innerc
);
4597 tree step
= fd
->loop
.step
;
4599 bool is_simt
= omp_find_clause (gimple_omp_for_clauses (fd
->for_stmt
),
4603 cfun
->curr_properties
&= ~PROP_gimple_lomp_dev
;
4604 is_simt
= safelen_int
> 1;
4606 tree simt_lane
= NULL_TREE
, simt_maxlane
= NULL_TREE
;
4609 simt_lane
= create_tmp_var (unsigned_type_node
);
4610 gimple
*g
= gimple_build_call_internal (IFN_GOMP_SIMT_LANE
, 0);
4611 gimple_call_set_lhs (g
, simt_lane
);
4612 gsi_insert_before (&gsi
, g
, GSI_SAME_STMT
);
4613 tree offset
= fold_build2 (MULT_EXPR
, TREE_TYPE (step
), step
,
4614 fold_convert (TREE_TYPE (step
), simt_lane
));
4615 n1
= fold_convert (type
, n1
);
4616 if (POINTER_TYPE_P (type
))
4617 n1
= fold_build_pointer_plus (n1
, offset
);
4619 n1
= fold_build2 (PLUS_EXPR
, type
, n1
, fold_convert (type
, offset
));
4621 /* Collapsed loops not handled for SIMT yet: limit to one lane only. */
4622 if (fd
->collapse
> 1)
4623 simt_maxlane
= build_one_cst (unsigned_type_node
);
4624 else if (safelen_int
< omp_max_simt_vf ())
4625 simt_maxlane
= build_int_cst (unsigned_type_node
, safelen_int
);
4627 = build_call_expr_internal_loc (UNKNOWN_LOCATION
, IFN_GOMP_SIMT_VF
,
4628 unsigned_type_node
, 0);
4630 vf
= fold_build2 (MIN_EXPR
, unsigned_type_node
, vf
, simt_maxlane
);
4631 vf
= fold_convert (TREE_TYPE (step
), vf
);
4632 step
= fold_build2 (MULT_EXPR
, TREE_TYPE (step
), step
, vf
);
4635 expand_omp_build_assign (&gsi
, fd
->loop
.v
, fold_convert (type
, n1
));
4636 if (fd
->collapse
> 1)
4638 if (gimple_omp_for_combined_into_p (fd
->for_stmt
))
4641 expand_omp_for_init_vars (fd
, &gsi
, counts
, NULL
, n1
);
4645 for (i
= 0; i
< fd
->collapse
; i
++)
4647 tree itype
= TREE_TYPE (fd
->loops
[i
].v
);
4648 if (POINTER_TYPE_P (itype
))
4649 itype
= signed_type_for (itype
);
4650 t
= fold_convert (TREE_TYPE (fd
->loops
[i
].v
), fd
->loops
[i
].n1
);
4651 expand_omp_build_assign (&gsi
, fd
->loops
[i
].v
, t
);
4655 /* Remove the GIMPLE_OMP_FOR statement. */
4656 gsi_remove (&gsi
, true);
4660 /* Code to control the increment goes in the CONT_BB. */
4661 gsi
= gsi_last_bb (cont_bb
);
4662 stmt
= gsi_stmt (gsi
);
4663 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_CONTINUE
);
4665 if (POINTER_TYPE_P (type
))
4666 t
= fold_build_pointer_plus (fd
->loop
.v
, step
);
4668 t
= fold_build2 (PLUS_EXPR
, type
, fd
->loop
.v
, step
);
4669 expand_omp_build_assign (&gsi
, fd
->loop
.v
, t
);
4671 if (fd
->collapse
> 1)
4673 i
= fd
->collapse
- 1;
4674 if (POINTER_TYPE_P (TREE_TYPE (fd
->loops
[i
].v
)))
4676 t
= fold_convert (sizetype
, fd
->loops
[i
].step
);
4677 t
= fold_build_pointer_plus (fd
->loops
[i
].v
, t
);
4681 t
= fold_convert (TREE_TYPE (fd
->loops
[i
].v
),
4683 t
= fold_build2 (PLUS_EXPR
, TREE_TYPE (fd
->loops
[i
].v
),
4686 expand_omp_build_assign (&gsi
, fd
->loops
[i
].v
, t
);
4688 for (i
= fd
->collapse
- 1; i
> 0; i
--)
4690 tree itype
= TREE_TYPE (fd
->loops
[i
].v
);
4691 tree itype2
= TREE_TYPE (fd
->loops
[i
- 1].v
);
4692 if (POINTER_TYPE_P (itype2
))
4693 itype2
= signed_type_for (itype2
);
4694 t
= build3 (COND_EXPR
, itype2
,
4695 build2 (fd
->loops
[i
].cond_code
, boolean_type_node
,
4697 fold_convert (itype
, fd
->loops
[i
].n2
)),
4698 build_int_cst (itype2
, 0),
4699 fold_convert (itype2
, fd
->loops
[i
- 1].step
));
4700 if (POINTER_TYPE_P (TREE_TYPE (fd
->loops
[i
- 1].v
)))
4701 t
= fold_build_pointer_plus (fd
->loops
[i
- 1].v
, t
);
4703 t
= fold_build2 (PLUS_EXPR
, itype2
, fd
->loops
[i
- 1].v
, t
);
4704 expand_omp_build_assign (&gsi
, fd
->loops
[i
- 1].v
, t
);
4706 t
= build3 (COND_EXPR
, itype
,
4707 build2 (fd
->loops
[i
].cond_code
, boolean_type_node
,
4709 fold_convert (itype
, fd
->loops
[i
].n2
)),
4711 fold_convert (itype
, fd
->loops
[i
].n1
));
4712 expand_omp_build_assign (&gsi
, fd
->loops
[i
].v
, t
);
4716 /* Remove GIMPLE_OMP_CONTINUE. */
4717 gsi_remove (&gsi
, true);
4720 /* Emit the condition in L1_BB. */
4721 gsi
= gsi_start_bb (l1_bb
);
4723 t
= fold_convert (type
, n2
);
4724 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
4725 false, GSI_CONTINUE_LINKING
);
4726 tree v
= fd
->loop
.v
;
4727 if (DECL_P (v
) && TREE_ADDRESSABLE (v
))
4728 v
= force_gimple_operand_gsi (&gsi
, v
, true, NULL_TREE
,
4729 false, GSI_CONTINUE_LINKING
);
4730 t
= build2 (fd
->loop
.cond_code
, boolean_type_node
, v
, t
);
4731 cond_stmt
= gimple_build_cond_empty (t
);
4732 gsi_insert_after (&gsi
, cond_stmt
, GSI_CONTINUE_LINKING
);
4733 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt
), expand_omp_regimplify_p
,
4735 || walk_tree (gimple_cond_rhs_ptr (cond_stmt
), expand_omp_regimplify_p
,
4738 gsi
= gsi_for_stmt (cond_stmt
);
4739 gimple_regimplify_operands (cond_stmt
, &gsi
);
4742 /* Add 'V -= STEP * (SIMT_VF - 1)' after the loop. */
4745 gsi
= gsi_start_bb (l2_bb
);
4746 step
= fold_build2 (MINUS_EXPR
, TREE_TYPE (step
), fd
->loop
.step
, step
);
4747 if (POINTER_TYPE_P (type
))
4748 t
= fold_build_pointer_plus (fd
->loop
.v
, step
);
4750 t
= fold_build2 (PLUS_EXPR
, type
, fd
->loop
.v
, step
);
4751 expand_omp_build_assign (&gsi
, fd
->loop
.v
, t
);
4754 /* Remove GIMPLE_OMP_RETURN. */
4755 gsi
= gsi_last_bb (exit_bb
);
4756 gsi_remove (&gsi
, true);
4758 /* Connect the new blocks. */
4759 remove_edge (FALLTHRU_EDGE (entry_bb
));
4763 remove_edge (BRANCH_EDGE (entry_bb
));
4764 make_edge (entry_bb
, l1_bb
, EDGE_FALLTHRU
);
4766 e
= BRANCH_EDGE (l1_bb
);
4767 ne
= FALLTHRU_EDGE (l1_bb
);
4768 e
->flags
= EDGE_TRUE_VALUE
;
4772 single_succ_edge (entry_bb
)->flags
= EDGE_FALLTHRU
;
4774 ne
= single_succ_edge (l1_bb
);
4775 e
= make_edge (l1_bb
, l0_bb
, EDGE_TRUE_VALUE
);
4778 ne
->flags
= EDGE_FALSE_VALUE
;
4779 e
->probability
= REG_BR_PROB_BASE
* 7 / 8;
4780 ne
->probability
= REG_BR_PROB_BASE
/ 8;
4782 set_immediate_dominator (CDI_DOMINATORS
, l1_bb
, entry_bb
);
4783 set_immediate_dominator (CDI_DOMINATORS
, l0_bb
, l1_bb
);
4787 cond_stmt
= gimple_build_cond (LT_EXPR
, simt_lane
, simt_maxlane
,
4788 NULL_TREE
, NULL_TREE
);
4789 gsi
= gsi_last_bb (entry_bb
);
4790 gsi_insert_after (&gsi
, cond_stmt
, GSI_NEW_STMT
);
4791 make_edge (entry_bb
, l2_bb
, EDGE_FALSE_VALUE
);
4792 FALLTHRU_EDGE (entry_bb
)->flags
= EDGE_TRUE_VALUE
;
4793 FALLTHRU_EDGE (entry_bb
)->probability
= REG_BR_PROB_BASE
* 7 / 8;
4794 BRANCH_EDGE (entry_bb
)->probability
= REG_BR_PROB_BASE
/ 8;
4795 l2_dom_bb
= entry_bb
;
4797 set_immediate_dominator (CDI_DOMINATORS
, l2_bb
, l2_dom_bb
);
4801 struct loop
*loop
= alloc_loop ();
4802 loop
->header
= l1_bb
;
4803 loop
->latch
= cont_bb
;
4804 add_loop (loop
, l1_bb
->loop_father
);
4805 loop
->safelen
= safelen_int
;
4808 loop
->simduid
= OMP_CLAUSE__SIMDUID__DECL (simduid
);
4809 cfun
->has_simduid_loops
= true;
4811 /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
4813 if ((flag_tree_loop_vectorize
4814 || (!global_options_set
.x_flag_tree_loop_vectorize
4815 && !global_options_set
.x_flag_tree_vectorize
))
4816 && flag_tree_loop_optimize
4817 && loop
->safelen
> 1)
4819 loop
->force_vectorize
= true;
4820 cfun
->has_force_vectorize_loops
= true;
4824 cfun
->has_simduid_loops
= true;
/* Taskloop construct is represented after gimplification with
   two GIMPLE_OMP_FOR constructs with GIMPLE_OMP_TASK sandwiched
   in between them.  This routine expands the outer GIMPLE_OMP_FOR,
   which should just compute all the needed loop temporaries
   for GIMPLE_OMP_TASK.  */
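/* Schematically (illustration only), the gimplified nest this pair of
   routines works on is

     GIMPLE_OMP_FOR (outer)     <- expanded here; only computes temporaries
       GIMPLE_OMP_TASK          <- becomes the GOMP_taskloop{,_ull} call
	 GIMPLE_OMP_FOR (inner) <- each task runs one contiguous sub-range

   so the code below simply evaluates the start, end and, when a
   lastprivate needs it, the total iteration count into the _looptemp_
   clause temporaries that the task and the inner loop then read.  */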
4834 expand_omp_taskloop_for_outer (struct omp_region
*region
,
4835 struct omp_for_data
*fd
,
4838 tree type
, bias
= NULL_TREE
;
4839 basic_block entry_bb
, cont_bb
, exit_bb
;
4840 gimple_stmt_iterator gsi
;
4841 gassign
*assign_stmt
;
4842 tree
*counts
= NULL
;
4845 gcc_assert (inner_stmt
);
4846 gcc_assert (region
->cont
);
4847 gcc_assert (gimple_code (inner_stmt
) == GIMPLE_OMP_TASK
4848 && gimple_omp_task_taskloop_p (inner_stmt
));
4849 type
= TREE_TYPE (fd
->loop
.v
);
4851 /* See if we need to bias by LLONG_MIN. */
4852 if (fd
->iter_type
== long_long_unsigned_type_node
4853 && TREE_CODE (type
) == INTEGER_TYPE
4854 && !TYPE_UNSIGNED (type
))
4858 if (fd
->loop
.cond_code
== LT_EXPR
)
4861 n2
= fold_build2 (PLUS_EXPR
, type
, fd
->loop
.n2
, fd
->loop
.step
);
4865 n1
= fold_build2 (MINUS_EXPR
, type
, fd
->loop
.n2
, fd
->loop
.step
);
4868 if (TREE_CODE (n1
) != INTEGER_CST
4869 || TREE_CODE (n2
) != INTEGER_CST
4870 || ((tree_int_cst_sgn (n1
) < 0) ^ (tree_int_cst_sgn (n2
) < 0)))
4871 bias
= fold_convert (fd
->iter_type
, TYPE_MIN_VALUE (type
));
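  /* Illustration only: the bias is needed because GOMP_taskloop_ull hands
     ranges around as unsigned long long while the iteration variable here
     is signed.  A signed range that crosses zero, e.g. -5 .. 5, would not
     stay ordered if its bounds were simply reinterpreted as unsigned, so
     both bounds are shifted by TYPE_MIN_VALUE (LLONG_MIN for long long),
     which maps the signed domain monotonically onto 0 .. ULLONG_MAX; the
     inner-loop expansion undoes the shift when converting the per-task
     range back to the original type.  */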
4874 entry_bb
= region
->entry
;
4875 cont_bb
= region
->cont
;
4876 gcc_assert (EDGE_COUNT (entry_bb
->succs
) == 2);
4877 gcc_assert (BRANCH_EDGE (entry_bb
)->dest
== FALLTHRU_EDGE (cont_bb
)->dest
);
4878 exit_bb
= region
->exit
;
4880 gsi
= gsi_last_bb (entry_bb
);
4881 gimple
*for_stmt
= gsi_stmt (gsi
);
4882 gcc_assert (gimple_code (for_stmt
) == GIMPLE_OMP_FOR
);
4883 if (fd
->collapse
> 1)
4885 int first_zero_iter
= -1, dummy
= -1;
4886 basic_block zero_iter_bb
= NULL
, dummy_bb
= NULL
, l2_dom_bb
= NULL
;
4888 counts
= XALLOCAVEC (tree
, fd
->collapse
);
4889 expand_omp_for_init_counts (fd
, &gsi
, entry_bb
, counts
,
4890 zero_iter_bb
, first_zero_iter
,
4891 dummy_bb
, dummy
, l2_dom_bb
);
4895 /* Some counts[i] vars might be uninitialized if
4896 some loop has zero iterations. But the body shouldn't
4897 be executed in that case, so just avoid uninit warnings. */
4898 for (i
= first_zero_iter
; i
< fd
->collapse
; i
++)
4899 if (SSA_VAR_P (counts
[i
]))
4900 TREE_NO_WARNING (counts
[i
]) = 1;
4902 edge e
= split_block (entry_bb
, gsi_stmt (gsi
));
4904 make_edge (zero_iter_bb
, entry_bb
, EDGE_FALLTHRU
);
4905 gsi
= gsi_last_bb (entry_bb
);
4906 set_immediate_dominator (CDI_DOMINATORS
, entry_bb
,
4907 get_immediate_dominator (CDI_DOMINATORS
,
4915 if (POINTER_TYPE_P (TREE_TYPE (t0
))
4916 && TYPE_PRECISION (TREE_TYPE (t0
))
4917 != TYPE_PRECISION (fd
->iter_type
))
4919 /* Avoid casting pointers to integer of a different size. */
4920 tree itype
= signed_type_for (type
);
4921 t1
= fold_convert (fd
->iter_type
, fold_convert (itype
, t1
));
4922 t0
= fold_convert (fd
->iter_type
, fold_convert (itype
, t0
));
4926 t1
= fold_convert (fd
->iter_type
, t1
);
4927 t0
= fold_convert (fd
->iter_type
, t0
);
4931 t1
= fold_build2 (PLUS_EXPR
, fd
->iter_type
, t1
, bias
);
4932 t0
= fold_build2 (PLUS_EXPR
, fd
->iter_type
, t0
, bias
);
4935 tree innerc
= omp_find_clause (gimple_omp_task_clauses (inner_stmt
),
4936 OMP_CLAUSE__LOOPTEMP_
);
4937 gcc_assert (innerc
);
4938 tree startvar
= OMP_CLAUSE_DECL (innerc
);
4939 innerc
= omp_find_clause (OMP_CLAUSE_CHAIN (innerc
), OMP_CLAUSE__LOOPTEMP_
);
4940 gcc_assert (innerc
);
4941 tree endvar
= OMP_CLAUSE_DECL (innerc
);
4942 if (fd
->collapse
> 1 && TREE_CODE (fd
->loop
.n2
) != INTEGER_CST
)
4944 gcc_assert (innerc
);
4945 for (i
= 1; i
< fd
->collapse
; i
++)
4947 innerc
= omp_find_clause (OMP_CLAUSE_CHAIN (innerc
),
4948 OMP_CLAUSE__LOOPTEMP_
);
4949 gcc_assert (innerc
);
4951 innerc
= omp_find_clause (OMP_CLAUSE_CHAIN (innerc
),
4952 OMP_CLAUSE__LOOPTEMP_
);
4955 /* If needed (inner taskloop has lastprivate clause), propagate
4956 down the total number of iterations. */
4957 tree t
= force_gimple_operand_gsi (&gsi
, fd
->loop
.n2
, false,
4959 GSI_CONTINUE_LINKING
);
4960 assign_stmt
= gimple_build_assign (OMP_CLAUSE_DECL (innerc
), t
);
4961 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
4965 t0
= force_gimple_operand_gsi (&gsi
, t0
, false, NULL_TREE
, false,
4966 GSI_CONTINUE_LINKING
);
4967 assign_stmt
= gimple_build_assign (startvar
, t0
);
4968 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
4970 t1
= force_gimple_operand_gsi (&gsi
, t1
, false, NULL_TREE
, false,
4971 GSI_CONTINUE_LINKING
);
4972 assign_stmt
= gimple_build_assign (endvar
, t1
);
4973 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
4974 if (fd
->collapse
> 1)
4975 expand_omp_for_init_vars (fd
, &gsi
, counts
, inner_stmt
, startvar
);
4977 /* Remove the GIMPLE_OMP_FOR statement. */
4978 gsi
= gsi_for_stmt (for_stmt
);
4979 gsi_remove (&gsi
, true);
4981 gsi
= gsi_last_bb (cont_bb
);
4982 gsi_remove (&gsi
, true);
4984 gsi
= gsi_last_bb (exit_bb
);
4985 gsi_remove (&gsi
, true);
4987 FALLTHRU_EDGE (entry_bb
)->probability
= REG_BR_PROB_BASE
;
4988 remove_edge (BRANCH_EDGE (entry_bb
));
4989 FALLTHRU_EDGE (cont_bb
)->probability
= REG_BR_PROB_BASE
;
4990 remove_edge (BRANCH_EDGE (cont_bb
));
4991 set_immediate_dominator (CDI_DOMINATORS
, exit_bb
, cont_bb
);
4992 set_immediate_dominator (CDI_DOMINATORS
, region
->entry
,
4993 recompute_dominator (CDI_DOMINATORS
, region
->entry
));
/* Taskloop construct is represented after gimplification with
   two GIMPLE_OMP_FOR constructs with GIMPLE_OMP_TASK sandwiched
   in between them.  This routine expands the inner GIMPLE_OMP_FOR.
   GOMP_taskloop{,_ull} function arranges for each task to be given just
   a single range of iterations.  */
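/* Illustration only (names are placeholders): after GOMP_taskloop{,_ull}
   has carved up the iteration space, the work each task performs is
   morally equivalent to

     static void
     task_body (long start, long end)	// one contiguous range per task
     {
       for (long v = start; v < end; v += STEP)
	 body (v);
     }

   with start/end delivered through the first two _looptemp_ clause
   temporaries (possibly biased, see above), which is why the expansion
   below needs no trip-count or thread-id computation of its own.  */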
5003 expand_omp_taskloop_for_inner (struct omp_region
*region
,
5004 struct omp_for_data
*fd
,
5007 tree e
, t
, type
, itype
, vmain
, vback
, bias
= NULL_TREE
;
5008 basic_block entry_bb
, exit_bb
, body_bb
, cont_bb
, collapse_bb
= NULL
;
5010 gimple_stmt_iterator gsi
;
5012 bool broken_loop
= region
->cont
== NULL
;
5013 tree
*counts
= NULL
;
5016 itype
= type
= TREE_TYPE (fd
->loop
.v
);
5017 if (POINTER_TYPE_P (type
))
5018 itype
= signed_type_for (type
);
5020 /* See if we need to bias by LLONG_MIN. */
5021 if (fd
->iter_type
== long_long_unsigned_type_node
5022 && TREE_CODE (type
) == INTEGER_TYPE
5023 && !TYPE_UNSIGNED (type
))
5027 if (fd
->loop
.cond_code
== LT_EXPR
)
5030 n2
= fold_build2 (PLUS_EXPR
, type
, fd
->loop
.n2
, fd
->loop
.step
);
5034 n1
= fold_build2 (MINUS_EXPR
, type
, fd
->loop
.n2
, fd
->loop
.step
);
5037 if (TREE_CODE (n1
) != INTEGER_CST
5038 || TREE_CODE (n2
) != INTEGER_CST
5039 || ((tree_int_cst_sgn (n1
) < 0) ^ (tree_int_cst_sgn (n2
) < 0)))
5040 bias
= fold_convert (fd
->iter_type
, TYPE_MIN_VALUE (type
));
5043 entry_bb
= region
->entry
;
5044 cont_bb
= region
->cont
;
5045 gcc_assert (EDGE_COUNT (entry_bb
->succs
) == 2);
5046 fin_bb
= BRANCH_EDGE (entry_bb
)->dest
;
5047 gcc_assert (broken_loop
5048 || (fin_bb
== FALLTHRU_EDGE (cont_bb
)->dest
));
5049 body_bb
= FALLTHRU_EDGE (entry_bb
)->dest
;
5052 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== body_bb
);
5053 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
5055 exit_bb
= region
->exit
;
5057 /* Iteration space partitioning goes in ENTRY_BB. */
5058 gsi
= gsi_last_bb (entry_bb
);
5059 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
5061 if (fd
->collapse
> 1)
5063 int first_zero_iter
= -1, dummy
= -1;
5064 basic_block l2_dom_bb
= NULL
, dummy_bb
= NULL
;
5066 counts
= XALLOCAVEC (tree
, fd
->collapse
);
5067 expand_omp_for_init_counts (fd
, &gsi
, entry_bb
, counts
,
5068 fin_bb
, first_zero_iter
,
5069 dummy_bb
, dummy
, l2_dom_bb
);
5073 t
= integer_one_node
;
5075 step
= fd
->loop
.step
;
5076 tree innerc
= omp_find_clause (gimple_omp_for_clauses (fd
->for_stmt
),
5077 OMP_CLAUSE__LOOPTEMP_
);
5078 gcc_assert (innerc
);
5079 n1
= OMP_CLAUSE_DECL (innerc
);
5080 innerc
= omp_find_clause (OMP_CLAUSE_CHAIN (innerc
), OMP_CLAUSE__LOOPTEMP_
);
5081 gcc_assert (innerc
);
5082 n2
= OMP_CLAUSE_DECL (innerc
);
5085 n1
= fold_build2 (PLUS_EXPR
, fd
->iter_type
, n1
, bias
);
5086 n2
= fold_build2 (PLUS_EXPR
, fd
->iter_type
, n2
, bias
);
5088 n1
= force_gimple_operand_gsi (&gsi
, fold_convert (type
, n1
),
5089 true, NULL_TREE
, true, GSI_SAME_STMT
);
5090 n2
= force_gimple_operand_gsi (&gsi
, fold_convert (itype
, n2
),
5091 true, NULL_TREE
, true, GSI_SAME_STMT
);
5092 step
= force_gimple_operand_gsi (&gsi
, fold_convert (itype
, step
),
5093 true, NULL_TREE
, true, GSI_SAME_STMT
);
5095 tree startvar
= fd
->loop
.v
;
5096 tree endvar
= NULL_TREE
;
5098 if (gimple_omp_for_combined_p (fd
->for_stmt
))
5100 tree clauses
= gimple_omp_for_clauses (inner_stmt
);
5101 tree innerc
= omp_find_clause (clauses
, OMP_CLAUSE__LOOPTEMP_
);
5102 gcc_assert (innerc
);
5103 startvar
= OMP_CLAUSE_DECL (innerc
);
5104 innerc
= omp_find_clause (OMP_CLAUSE_CHAIN (innerc
),
5105 OMP_CLAUSE__LOOPTEMP_
);
5106 gcc_assert (innerc
);
5107 endvar
= OMP_CLAUSE_DECL (innerc
);
5109 t
= fold_convert (TREE_TYPE (startvar
), n1
);
5110 t
= force_gimple_operand_gsi (&gsi
, t
,
5112 && TREE_ADDRESSABLE (startvar
),
5113 NULL_TREE
, false, GSI_CONTINUE_LINKING
);
5114 gimple
*assign_stmt
= gimple_build_assign (startvar
, t
);
5115 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
5117 t
= fold_convert (TREE_TYPE (startvar
), n2
);
5118 e
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
5119 false, GSI_CONTINUE_LINKING
);
5122 assign_stmt
= gimple_build_assign (endvar
, e
);
5123 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
5124 if (useless_type_conversion_p (TREE_TYPE (fd
->loop
.v
), TREE_TYPE (e
)))
5125 assign_stmt
= gimple_build_assign (fd
->loop
.v
, e
);
5127 assign_stmt
= gimple_build_assign (fd
->loop
.v
, NOP_EXPR
, e
);
5128 gsi_insert_after (&gsi
, assign_stmt
, GSI_CONTINUE_LINKING
);
5130 if (fd
->collapse
> 1)
5131 expand_omp_for_init_vars (fd
, &gsi
, counts
, inner_stmt
, startvar
);
5135 /* The code controlling the sequential loop replaces the
5136 GIMPLE_OMP_CONTINUE. */
5137 gsi
= gsi_last_bb (cont_bb
);
5138 gomp_continue
*cont_stmt
= as_a
<gomp_continue
*> (gsi_stmt (gsi
));
5139 gcc_assert (gimple_code (cont_stmt
) == GIMPLE_OMP_CONTINUE
);
5140 vmain
= gimple_omp_continue_control_use (cont_stmt
);
5141 vback
= gimple_omp_continue_control_def (cont_stmt
);
5143 if (!gimple_omp_for_combined_p (fd
->for_stmt
))
5145 if (POINTER_TYPE_P (type
))
5146 t
= fold_build_pointer_plus (vmain
, step
);
5148 t
= fold_build2 (PLUS_EXPR
, type
, vmain
, step
);
5149 t
= force_gimple_operand_gsi (&gsi
, t
,
5151 && TREE_ADDRESSABLE (vback
),
5152 NULL_TREE
, true, GSI_SAME_STMT
);
5153 assign_stmt
= gimple_build_assign (vback
, t
);
5154 gsi_insert_before (&gsi
, assign_stmt
, GSI_SAME_STMT
);
5156 t
= build2 (fd
->loop
.cond_code
, boolean_type_node
,
5157 DECL_P (vback
) && TREE_ADDRESSABLE (vback
)
5159 gsi_insert_before (&gsi
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
5162 /* Remove the GIMPLE_OMP_CONTINUE statement. */
5163 gsi_remove (&gsi
, true);
5165 if (fd
->collapse
> 1 && !gimple_omp_for_combined_p (fd
->for_stmt
))
5166 collapse_bb
= extract_omp_for_update_vars (fd
, cont_bb
, body_bb
);
5169 /* Remove the GIMPLE_OMP_FOR statement. */
5170 gsi
= gsi_for_stmt (fd
->for_stmt
);
5171 gsi_remove (&gsi
, true);
5173 /* Remove the GIMPLE_OMP_RETURN statement. */
5174 gsi
= gsi_last_bb (exit_bb
);
5175 gsi_remove (&gsi
, true);
5177 FALLTHRU_EDGE (entry_bb
)->probability
= REG_BR_PROB_BASE
;
5179 remove_edge (BRANCH_EDGE (entry_bb
));
5182 remove_edge_and_dominated_blocks (BRANCH_EDGE (entry_bb
));
5183 region
->outer
->cont
= NULL
;
5186 /* Connect all the blocks. */
5189 ep
= find_edge (cont_bb
, body_bb
);
5190 if (gimple_omp_for_combined_p (fd
->for_stmt
))
5195 else if (fd
->collapse
> 1)
5198 ep
= make_edge (cont_bb
, collapse_bb
, EDGE_TRUE_VALUE
);
5201 ep
->flags
= EDGE_TRUE_VALUE
;
5202 find_edge (cont_bb
, fin_bb
)->flags
5203 = ep
? EDGE_FALSE_VALUE
: EDGE_FALLTHRU
;
5206 set_immediate_dominator (CDI_DOMINATORS
, body_bb
,
5207 recompute_dominator (CDI_DOMINATORS
, body_bb
));
5209 set_immediate_dominator (CDI_DOMINATORS
, fin_bb
,
5210 recompute_dominator (CDI_DOMINATORS
, fin_bb
));
5212 if (!broken_loop
&& !gimple_omp_for_combined_p (fd
->for_stmt
))
5214 struct loop
*loop
= alloc_loop ();
5215 loop
->header
= body_bb
;
5216 if (collapse_bb
== NULL
)
5217 loop
->latch
= cont_bb
;
5218 add_loop (loop
, body_bb
->loop_father
);
/* A subroutine of expand_omp_for.  Generate code for an OpenACC
   partitioned loop.  The lowering here is abstracted, in that the
   loop parameters are passed through internal functions, which are
   further lowered by oacc_device_lower, once we get to the target
   compiler.  The loop is of the form:

   for (V = B; V LTGT E; V += S) {BODY}

   where LTGT is < or >.  We may have a specified chunking size, CHUNKING
   (constant 0 for no chunking) and we will have a GWV partitioning
   mask, specifying dimensions over which the loop is to be
   partitioned (see note below).  We generate code that looks like:

   <entry_bb> [incoming FALL->body, BRANCH->exit]
     typedef signedintify (typeof (V)) T;  // underlying signed integral type
     T range = E - B;
     T chunk_no = 0;
     T DIR = LTGT == '<' ? +1 : -1;
     T chunk_max = GOACC_LOOP_CHUNK (dir, range, S, CHUNK_SIZE, GWV);
     T step = GOACC_LOOP_STEP (dir, range, S, CHUNK_SIZE, GWV);

   <head_bb> [created by splitting end of entry_bb]
     T offset = GOACC_LOOP_OFFSET (dir, range, S, CHUNK_SIZE, GWV, chunk_no);
     T bound = GOACC_LOOP_BOUND (dir, range, S, CHUNK_SIZE, GWV, offset);
     if (!(offset LTGT bound)) goto bottom_bb;

   <body_bb> [incoming]
     V = B + offset;
     {BODY}

   <cont_bb> [incoming, may == body_bb FALL->exit_bb, BRANCH->body_bb]
     offset += step;
     if (offset LTGT bound) goto body_bb; [*]

   <bottom_bb> [created by splitting start of exit_bb] insert BRANCH->head_bb
     chunk_no++;
     if (chunk < chunk_max) goto head_bb;

   <exit_bb> [incoming]
     V = B + ((range -/+ 1) / S +/- 1) * S [*]

   [*] Needed if V live at end of loop

   Note: CHUNKING & GWV mask are specified explicitly here.  This is a
   transition, and will be specified by a more general mechanism shortly.
 */
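/* Illustration only (ignoring the partitioning mask): the block diagram
   above corresponds to the following serial skeleton, where the
   goacc_loop_* calls stand for the IFN_GOACC_LOOP_* internal functions
   that oacc_device_lower later resolves per target:

     T chunk_max = goacc_loop_chunks (dir, range, S, CHUNK_SIZE, GWV);
     T step = goacc_loop_step (dir, range, S, CHUNK_SIZE, GWV);
     for (T chunk_no = 0; chunk_no < chunk_max; chunk_no++)  // head/bottom
       {
	 T offset = goacc_loop_offset (dir, range, S, CHUNK_SIZE, GWV,
				       chunk_no);
	 T bound = goacc_loop_bound (dir, range, S, CHUNK_SIZE, GWV, offset);
	 for (; offset LTGT bound; offset += step)	      // body/cont
	   {
	     V = B + offset;
	     BODY;
	   }
       }
*/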
5270 expand_oacc_for (struct omp_region
*region
, struct omp_for_data
*fd
)
5272 tree v
= fd
->loop
.v
;
5273 enum tree_code cond_code
= fd
->loop
.cond_code
;
5274 enum tree_code plus_code
= PLUS_EXPR
;
5276 tree chunk_size
= integer_minus_one_node
;
5277 tree gwv
= integer_zero_node
;
5278 tree iter_type
= TREE_TYPE (v
);
5279 tree diff_type
= iter_type
;
5280 tree plus_type
= iter_type
;
5281 struct oacc_collapse
*counts
= NULL
;
5283 gcc_checking_assert (gimple_omp_for_kind (fd
->for_stmt
)
5284 == GF_OMP_FOR_KIND_OACC_LOOP
);
5285 gcc_assert (!gimple_omp_for_combined_into_p (fd
->for_stmt
));
5286 gcc_assert (cond_code
== LT_EXPR
|| cond_code
== GT_EXPR
);
5288 if (POINTER_TYPE_P (iter_type
))
5290 plus_code
= POINTER_PLUS_EXPR
;
5291 plus_type
= sizetype
;
5293 if (POINTER_TYPE_P (diff_type
) || TYPE_UNSIGNED (diff_type
))
5294 diff_type
= signed_type_for (diff_type
);
5296 basic_block entry_bb
= region
->entry
; /* BB ending in OMP_FOR */
5297 basic_block exit_bb
= region
->exit
; /* BB ending in OMP_RETURN */
5298 basic_block cont_bb
= region
->cont
; /* BB ending in OMP_CONTINUE */
5299 basic_block bottom_bb
= NULL
;
  /* entry_bb has two successors; the branch edge is to the exit
     block, fallthrough edge to body.  */
5303 gcc_assert (EDGE_COUNT (entry_bb
->succs
) == 2
5304 && BRANCH_EDGE (entry_bb
)->dest
== exit_bb
);
5306 /* If cont_bb non-NULL, it has 2 successors. The branch successor is
5307 body_bb, or to a block whose only successor is the body_bb. Its
5308 fallthrough successor is the final block (same as the branch
5309 successor of the entry_bb). */
5312 basic_block body_bb
= FALLTHRU_EDGE (entry_bb
)->dest
;
5313 basic_block bed
= BRANCH_EDGE (cont_bb
)->dest
;
5315 gcc_assert (FALLTHRU_EDGE (cont_bb
)->dest
== exit_bb
);
5316 gcc_assert (bed
== body_bb
|| single_succ_edge (bed
)->dest
== body_bb
);
5319 gcc_assert (!gimple_in_ssa_p (cfun
));
5321 /* The exit block only has entry_bb and cont_bb as predecessors. */
5322 gcc_assert (EDGE_COUNT (exit_bb
->preds
) == 1 + (cont_bb
!= NULL
));
5325 tree chunk_max
= NULL_TREE
;
5327 tree step
= create_tmp_var (diff_type
, ".step");
5328 bool up
= cond_code
== LT_EXPR
;
5329 tree dir
= build_int_cst (diff_type
, up
? +1 : -1);
5330 bool chunking
= !gimple_in_ssa_p (cfun
);;
5333 /* SSA instances. */
5334 tree offset_incr
= NULL_TREE
;
5335 tree offset_init
= NULL_TREE
;
5337 gimple_stmt_iterator gsi
;
5343 edge split
, be
, fte
;
5345 /* Split the end of entry_bb to create head_bb. */
5346 split
= split_block (entry_bb
, last_stmt (entry_bb
));
5347 basic_block head_bb
= split
->dest
;
5348 entry_bb
= split
->src
;
5350 /* Chunk setup goes at end of entry_bb, replacing the omp_for. */
5351 gsi
= gsi_last_bb (entry_bb
);
5352 gomp_for
*for_stmt
= as_a
<gomp_for
*> (gsi_stmt (gsi
));
5353 loc
= gimple_location (for_stmt
);
5355 if (gimple_in_ssa_p (cfun
))
5357 offset_init
= gimple_omp_for_index (for_stmt
, 0);
5358 gcc_assert (integer_zerop (fd
->loop
.n1
));
5359 /* The SSA parallelizer does gang parallelism. */
5360 gwv
= build_int_cst (integer_type_node
, GOMP_DIM_MASK (GOMP_DIM_GANG
));
5363 if (fd
->collapse
> 1)
5365 counts
= XALLOCAVEC (struct oacc_collapse
, fd
->collapse
);
5366 tree total
= expand_oacc_collapse_init (fd
, &gsi
, counts
,
5367 TREE_TYPE (fd
->loop
.n2
));
5369 if (SSA_VAR_P (fd
->loop
.n2
))
5371 total
= force_gimple_operand_gsi (&gsi
, total
, false, NULL_TREE
,
5372 true, GSI_SAME_STMT
);
5373 ass
= gimple_build_assign (fd
->loop
.n2
, total
);
5374 gsi_insert_before (&gsi
, ass
, GSI_SAME_STMT
);
5379 tree b
= fd
->loop
.n1
;
5380 tree e
= fd
->loop
.n2
;
5381 tree s
= fd
->loop
.step
;
5383 b
= force_gimple_operand_gsi (&gsi
, b
, true, NULL_TREE
, true, GSI_SAME_STMT
);
5384 e
= force_gimple_operand_gsi (&gsi
, e
, true, NULL_TREE
, true, GSI_SAME_STMT
);
5386 /* Convert the step, avoiding possible unsigned->signed overflow. */
5387 negating
= !up
&& TYPE_UNSIGNED (TREE_TYPE (s
));
5389 s
= fold_build1 (NEGATE_EXPR
, TREE_TYPE (s
), s
);
5390 s
= fold_convert (diff_type
, s
);
5392 s
= fold_build1 (NEGATE_EXPR
, diff_type
, s
);
5393 s
= force_gimple_operand_gsi (&gsi
, s
, true, NULL_TREE
, true, GSI_SAME_STMT
);
5396 chunk_size
= integer_zero_node
;
5397 expr
= fold_convert (diff_type
, chunk_size
);
5398 chunk_size
= force_gimple_operand_gsi (&gsi
, expr
, true,
5399 NULL_TREE
, true, GSI_SAME_STMT
);
5400 /* Determine the range, avoiding possible unsigned->signed overflow. */
5401 negating
= !up
&& TYPE_UNSIGNED (iter_type
);
5402 expr
= fold_build2 (MINUS_EXPR
, plus_type
,
5403 fold_convert (plus_type
, negating
? b
: e
),
5404 fold_convert (plus_type
, negating
? e
: b
));
5405 expr
= fold_convert (diff_type
, expr
);
5407 expr
= fold_build1 (NEGATE_EXPR
, diff_type
, expr
);
5408 tree range
= force_gimple_operand_gsi (&gsi
, expr
, true,
5409 NULL_TREE
, true, GSI_SAME_STMT
);
5411 chunk_no
= build_int_cst (diff_type
, 0);
5414 gcc_assert (!gimple_in_ssa_p (cfun
));
5417 chunk_max
= create_tmp_var (diff_type
, ".chunk_max");
5418 chunk_no
= create_tmp_var (diff_type
, ".chunk_no");
5420 ass
= gimple_build_assign (chunk_no
, expr
);
5421 gsi_insert_before (&gsi
, ass
, GSI_SAME_STMT
);
5423 call
= gimple_build_call_internal (IFN_GOACC_LOOP
, 6,
5424 build_int_cst (integer_type_node
,
5425 IFN_GOACC_LOOP_CHUNKS
),
5426 dir
, range
, s
, chunk_size
, gwv
);
5427 gimple_call_set_lhs (call
, chunk_max
);
5428 gimple_set_location (call
, loc
);
5429 gsi_insert_before (&gsi
, call
, GSI_SAME_STMT
);
5432 chunk_size
= chunk_no
;
5434 call
= gimple_build_call_internal (IFN_GOACC_LOOP
, 6,
5435 build_int_cst (integer_type_node
,
5436 IFN_GOACC_LOOP_STEP
),
5437 dir
, range
, s
, chunk_size
, gwv
);
5438 gimple_call_set_lhs (call
, step
);
5439 gimple_set_location (call
, loc
);
5440 gsi_insert_before (&gsi
, call
, GSI_SAME_STMT
);
5442 /* Remove the GIMPLE_OMP_FOR. */
5443 gsi_remove (&gsi
, true);
5445 /* Fixup edges from head_bb. */
5446 be
= BRANCH_EDGE (head_bb
);
5447 fte
= FALLTHRU_EDGE (head_bb
);
5448 be
->flags
|= EDGE_FALSE_VALUE
;
5449 fte
->flags
^= EDGE_FALLTHRU
| EDGE_TRUE_VALUE
;
5451 basic_block body_bb
= fte
->dest
;
5453 if (gimple_in_ssa_p (cfun
))
5455 gsi
= gsi_last_bb (cont_bb
);
5456 gomp_continue
*cont_stmt
= as_a
<gomp_continue
*> (gsi_stmt (gsi
));
5458 offset
= gimple_omp_continue_control_use (cont_stmt
);
5459 offset_incr
= gimple_omp_continue_control_def (cont_stmt
);
5463 offset
= create_tmp_var (diff_type
, ".offset");
5464 offset_init
= offset_incr
= offset
;
5466 bound
= create_tmp_var (TREE_TYPE (offset
), ".bound");
5468 /* Loop offset & bound go into head_bb. */
5469 gsi
= gsi_start_bb (head_bb
);
5471 call
= gimple_build_call_internal (IFN_GOACC_LOOP
, 7,
5472 build_int_cst (integer_type_node
,
5473 IFN_GOACC_LOOP_OFFSET
),
5475 chunk_size
, gwv
, chunk_no
);
5476 gimple_call_set_lhs (call
, offset_init
);
5477 gimple_set_location (call
, loc
);
5478 gsi_insert_after (&gsi
, call
, GSI_CONTINUE_LINKING
);
5480 call
= gimple_build_call_internal (IFN_GOACC_LOOP
, 7,
5481 build_int_cst (integer_type_node
,
5482 IFN_GOACC_LOOP_BOUND
),
5484 chunk_size
, gwv
, offset_init
);
5485 gimple_call_set_lhs (call
, bound
);
5486 gimple_set_location (call
, loc
);
5487 gsi_insert_after (&gsi
, call
, GSI_CONTINUE_LINKING
);
5489 expr
= build2 (cond_code
, boolean_type_node
, offset_init
, bound
);
5490 gsi_insert_after (&gsi
, gimple_build_cond_empty (expr
),
5491 GSI_CONTINUE_LINKING
);
5493 /* V assignment goes into body_bb. */
5494 if (!gimple_in_ssa_p (cfun
))
5496 gsi
= gsi_start_bb (body_bb
);
5498 expr
= build2 (plus_code
, iter_type
, b
,
5499 fold_convert (plus_type
, offset
));
5500 expr
= force_gimple_operand_gsi (&gsi
, expr
, false, NULL_TREE
,
5501 true, GSI_SAME_STMT
);
5502 ass
= gimple_build_assign (v
, expr
);
5503 gsi_insert_before (&gsi
, ass
, GSI_SAME_STMT
);
5504 if (fd
->collapse
> 1)
5505 expand_oacc_collapse_vars (fd
, &gsi
, counts
, v
);
5508 /* Loop increment goes into cont_bb. If this is not a loop, we
5509 will have spawned threads as if it was, and each one will
5510 execute one iteration. The specification is not explicit about
5511 whether such constructs are ill-formed or not, and they can
5512 occur, especially when noreturn routines are involved. */
5515 gsi
= gsi_last_bb (cont_bb
);
5516 gomp_continue
*cont_stmt
= as_a
<gomp_continue
*> (gsi_stmt (gsi
));
5517 loc
= gimple_location (cont_stmt
);
5519 /* Increment offset. */
5520 if (gimple_in_ssa_p (cfun
))
5521 expr
= build2 (plus_code
, iter_type
, offset
,
5522 fold_convert (plus_type
, step
));
5524 expr
= build2 (PLUS_EXPR
, diff_type
, offset
, step
);
5525 expr
= force_gimple_operand_gsi (&gsi
, expr
, false, NULL_TREE
,
5526 true, GSI_SAME_STMT
);
5527 ass
= gimple_build_assign (offset_incr
, expr
);
5528 gsi_insert_before (&gsi
, ass
, GSI_SAME_STMT
);
5529 expr
= build2 (cond_code
, boolean_type_node
, offset_incr
, bound
);
5530 gsi_insert_before (&gsi
, gimple_build_cond_empty (expr
), GSI_SAME_STMT
);
5532 /* Remove the GIMPLE_OMP_CONTINUE. */
5533 gsi_remove (&gsi
, true);
5535 /* Fixup edges from cont_bb. */
5536 be
= BRANCH_EDGE (cont_bb
);
5537 fte
= FALLTHRU_EDGE (cont_bb
);
5538 be
->flags
|= EDGE_TRUE_VALUE
;
5539 fte
->flags
^= EDGE_FALLTHRU
| EDGE_FALSE_VALUE
;
5543 /* Split the beginning of exit_bb to make bottom_bb. We
5544 need to insert a nop at the start, because splitting is
5545 after a stmt, not before. */
5546 gsi
= gsi_start_bb (exit_bb
);
5547 stmt
= gimple_build_nop ();
5548 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
5549 split
= split_block (exit_bb
, stmt
);
5550 bottom_bb
= split
->src
;
5551 exit_bb
= split
->dest
;
5552 gsi
= gsi_last_bb (bottom_bb
);
5554 /* Chunk increment and test goes into bottom_bb. */
5555 expr
= build2 (PLUS_EXPR
, diff_type
, chunk_no
,
5556 build_int_cst (diff_type
, 1));
5557 ass
= gimple_build_assign (chunk_no
, expr
);
5558 gsi_insert_after (&gsi
, ass
, GSI_CONTINUE_LINKING
);
5560 /* Chunk test at end of bottom_bb. */
5561 expr
= build2 (LT_EXPR
, boolean_type_node
, chunk_no
, chunk_max
);
5562 gsi_insert_after (&gsi
, gimple_build_cond_empty (expr
),
5563 GSI_CONTINUE_LINKING
);
5565 /* Fixup edges from bottom_bb. */
5566 split
->flags
^= EDGE_FALLTHRU
| EDGE_FALSE_VALUE
;
5567 make_edge (bottom_bb
, head_bb
, EDGE_TRUE_VALUE
);
5571 gsi
= gsi_last_bb (exit_bb
);
5572 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_RETURN
);
5573 loc
= gimple_location (gsi_stmt (gsi
));
5575 if (!gimple_in_ssa_p (cfun
))
5577 /* Insert the final value of V, in case it is live. This is the
5578 value for the only thread that survives past the join. */
5579 expr
= fold_build2 (MINUS_EXPR
, diff_type
, range
, dir
);
5580 expr
= fold_build2 (PLUS_EXPR
, diff_type
, expr
, s
);
5581 expr
= fold_build2 (TRUNC_DIV_EXPR
, diff_type
, expr
, s
);
5582 expr
= fold_build2 (MULT_EXPR
, diff_type
, expr
, s
);
5583 expr
= build2 (plus_code
, iter_type
, b
, fold_convert (plus_type
, expr
));
5584 expr
= force_gimple_operand_gsi (&gsi
, expr
, false, NULL_TREE
,
5585 true, GSI_SAME_STMT
);
5586 ass
= gimple_build_assign (v
, expr
);
5587 gsi_insert_before (&gsi
, ass
, GSI_SAME_STMT
);
5590 /* Remove the OMP_RETURN. */
5591 gsi_remove (&gsi
, true);
5595 /* We now have one or two nested loops. Update the loop
5597 struct loop
*parent
= entry_bb
->loop_father
;
5598 struct loop
*body
= body_bb
->loop_father
;
5602 struct loop
*chunk_loop
= alloc_loop ();
5603 chunk_loop
->header
= head_bb
;
5604 chunk_loop
->latch
= bottom_bb
;
5605 add_loop (chunk_loop
, parent
);
5606 parent
= chunk_loop
;
5608 else if (parent
!= body
)
5610 gcc_assert (body
->header
== body_bb
);
5611 gcc_assert (body
->latch
== cont_bb
5612 || single_pred (body
->latch
) == cont_bb
);
5618 struct loop
*body_loop
= alloc_loop ();
5619 body_loop
->header
= body_bb
;
5620 body_loop
->latch
= cont_bb
;
5621 add_loop (body_loop
, parent
);
/* Expand the OMP loop defined by REGION.  */

static void
expand_omp_for (struct omp_region *region, gimple *inner_stmt)
{
  struct omp_for_data fd;
  struct omp_for_data_loop *loops;

  loops
    = (struct omp_for_data_loop *)
      alloca (gimple_omp_for_collapse (last_stmt (region->entry))
	      * sizeof (struct omp_for_data_loop));
  omp_extract_for_data (as_a <gomp_for *> (last_stmt (region->entry)),
			&fd, loops);
  region->sched_kind = fd.sched_kind;
  region->sched_modifiers = fd.sched_modifiers;

  gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
  BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  if (region->cont)
    {
      gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
      BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
      FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
    }
  else
    /* If there isn't a continue then this is a degenerate case where
       the introduction of abnormal edges during lowering will prevent
       original loops from being detected.  Fix that up.  */
    loops_state_set (LOOPS_NEED_FIXUP);

  if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_SIMD)
    expand_omp_simd (region, &fd);
  else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
    expand_cilk_for (region, &fd);
  else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
    {
      gcc_assert (!inner_stmt);
      expand_oacc_for (region, &fd);
    }
  else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_TASKLOOP)
    {
      if (gimple_omp_for_combined_into_p (fd.for_stmt))
	expand_omp_taskloop_for_inner (region, &fd, inner_stmt);
      else
	expand_omp_taskloop_for_outer (region, &fd, inner_stmt);
    }
  else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
	   && !fd.have_ordered)
    {
      if (fd.chunk_size == NULL)
	expand_omp_for_static_nochunk (region, &fd, inner_stmt);
      else
	expand_omp_for_static_chunk (region, &fd, inner_stmt);
    }
  else
    {
      int fn_index, start_ix, next_ix;

      gcc_assert (gimple_omp_for_kind (fd.for_stmt)
		  == GF_OMP_FOR_KIND_FOR);
      if (fd.chunk_size == NULL
	  && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
	fd.chunk_size = integer_zero_node;
      gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
      switch (fd.sched_kind)
	{
	case OMP_CLAUSE_SCHEDULE_RUNTIME:
	  fn_index = 3;
	  break;
	case OMP_CLAUSE_SCHEDULE_DYNAMIC:
	case OMP_CLAUSE_SCHEDULE_GUIDED:
	  if ((fd.sched_modifiers & OMP_CLAUSE_SCHEDULE_NONMONOTONIC)
	      && !fd.ordered
	      && !fd.have_ordered)
	    {
	      fn_index = 3 + fd.sched_kind;
	      break;
	    }
	  /* FALLTHRU */
	default:
	  fn_index = fd.sched_kind;
	  break;
	}
      if (!fd.ordered)
	fn_index += fd.have_ordered * 6;
      if (fd.ordered)
	start_ix = ((int)BUILT_IN_GOMP_LOOP_DOACROSS_STATIC_START) + fn_index;
      else
	start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
      next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
      if (fd.iter_type == long_long_unsigned_type_node)
	{
	  start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
		       - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
	  next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
		      - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
	}
      expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
			      (enum built_in_function) next_ix, inner_stmt);
    }

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_only_virtuals);
}
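
/* Illustration only, not part of the pass itself: for a non-static schedule
   the generic path selected above drives the loop through paired libgomp
   entry points, roughly

     if (GOMP_loop_dynamic_start (lb, ub, incr, chunk, &istart, &iend))
       do
	 {
	   for (i = istart; i < iend; i += incr)
	     body;
	 }
       while (GOMP_loop_dynamic_next (&istart, &iend));
     GOMP_loop_end ();

   where the _dynamic_ functions stand in for whatever START_IX/NEXT_IX
   computed above; other schedules substitute the matching _static_, _guided_
   or _runtime_ variants.  */
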
5733 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
5735 v = GOMP_sections_start (n);
5752 v = GOMP_sections_next ();
5757 If this is a combined parallel sections, replace the call to
5758 GOMP_sections_start with call to GOMP_sections_next. */
5761 expand_omp_sections (struct omp_region
*region
)
5763 tree t
, u
, vin
= NULL
, vmain
, vnext
, l2
;
5765 basic_block entry_bb
, l0_bb
, l1_bb
, l2_bb
, default_bb
;
5766 gimple_stmt_iterator si
, switch_si
;
5767 gomp_sections
*sections_stmt
;
5769 gomp_continue
*cont
;
5772 struct omp_region
*inner
;
5774 bool exit_reachable
= region
->cont
!= NULL
;
5776 gcc_assert (region
->exit
!= NULL
);
5777 entry_bb
= region
->entry
;
5778 l0_bb
= single_succ (entry_bb
);
5779 l1_bb
= region
->cont
;
5780 l2_bb
= region
->exit
;
5781 if (single_pred_p (l2_bb
) && single_pred (l2_bb
) == l0_bb
)
5782 l2
= gimple_block_label (l2_bb
);
5785 /* This can happen if there are reductions. */
5786 len
= EDGE_COUNT (l0_bb
->succs
);
5787 gcc_assert (len
> 0);
5788 e
= EDGE_SUCC (l0_bb
, len
- 1);
5789 si
= gsi_last_bb (e
->dest
);
5792 || gimple_code (gsi_stmt (si
)) != GIMPLE_OMP_SECTION
)
5793 l2
= gimple_block_label (e
->dest
);
5795 FOR_EACH_EDGE (e
, ei
, l0_bb
->succs
)
5797 si
= gsi_last_bb (e
->dest
);
5799 || gimple_code (gsi_stmt (si
)) != GIMPLE_OMP_SECTION
)
5801 l2
= gimple_block_label (e
->dest
);
5807 default_bb
= create_empty_bb (l1_bb
->prev_bb
);
5809 default_bb
= create_empty_bb (l0_bb
);
5811 /* We will build a switch() with enough cases for all the
5812 GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
5813 and a default case to abort if something goes wrong. */
5814 len
= EDGE_COUNT (l0_bb
->succs
);
5816 /* Use vec::quick_push on label_vec throughout, since we know the size
5818 auto_vec
<tree
> label_vec (len
);
5820 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
5821 GIMPLE_OMP_SECTIONS statement. */
5822 si
= gsi_last_bb (entry_bb
);
5823 sections_stmt
= as_a
<gomp_sections
*> (gsi_stmt (si
));
5824 gcc_assert (gimple_code (sections_stmt
) == GIMPLE_OMP_SECTIONS
);
5825 vin
= gimple_omp_sections_control (sections_stmt
);
5826 if (!is_combined_parallel (region
))
5828 /* If we are not inside a combined parallel+sections region,
5829 call GOMP_sections_start. */
5830 t
= build_int_cst (unsigned_type_node
, len
- 1);
5831 u
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START
);
5832 stmt
= gimple_build_call (u
, 1, t
);
5836 /* Otherwise, call GOMP_sections_next. */
5837 u
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT
);
5838 stmt
= gimple_build_call (u
, 0);
5840 gimple_call_set_lhs (stmt
, vin
);
5841 gsi_insert_after (&si
, stmt
, GSI_SAME_STMT
);
5842 gsi_remove (&si
, true);
5844 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
5846 switch_si
= gsi_last_bb (l0_bb
);
5847 gcc_assert (gimple_code (gsi_stmt (switch_si
)) == GIMPLE_OMP_SECTIONS_SWITCH
);
5850 cont
= as_a
<gomp_continue
*> (last_stmt (l1_bb
));
5851 gcc_assert (gimple_code (cont
) == GIMPLE_OMP_CONTINUE
);
5852 vmain
= gimple_omp_continue_control_use (cont
);
5853 vnext
= gimple_omp_continue_control_def (cont
);
5861 t
= build_case_label (build_int_cst (unsigned_type_node
, 0), NULL
, l2
);
5862 label_vec
.quick_push (t
);
5865 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
5866 for (inner
= region
->inner
, casei
= 1;
5868 inner
= inner
->next
, i
++, casei
++)
5870 basic_block s_entry_bb
, s_exit_bb
;
5872 /* Skip optional reduction region. */
5873 if (inner
->type
== GIMPLE_OMP_ATOMIC_LOAD
)
5880 s_entry_bb
= inner
->entry
;
5881 s_exit_bb
= inner
->exit
;
5883 t
= gimple_block_label (s_entry_bb
);
5884 u
= build_int_cst (unsigned_type_node
, casei
);
5885 u
= build_case_label (u
, NULL
, t
);
5886 label_vec
.quick_push (u
);
5888 si
= gsi_last_bb (s_entry_bb
);
5889 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_SECTION
);
5890 gcc_assert (i
< len
|| gimple_omp_section_last_p (gsi_stmt (si
)));
5891 gsi_remove (&si
, true);
5892 single_succ_edge (s_entry_bb
)->flags
= EDGE_FALLTHRU
;
5894 if (s_exit_bb
== NULL
)
5897 si
= gsi_last_bb (s_exit_bb
);
5898 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_RETURN
);
5899 gsi_remove (&si
, true);
5901 single_succ_edge (s_exit_bb
)->flags
= EDGE_FALLTHRU
;
5904 /* Error handling code goes in DEFAULT_BB. */
5905 t
= gimple_block_label (default_bb
);
5906 u
= build_case_label (NULL
, NULL
, t
);
5907 make_edge (l0_bb
, default_bb
, 0);
5908 add_bb_to_loop (default_bb
, current_loops
->tree_root
);
5910 stmt
= gimple_build_switch (vmain
, u
, label_vec
);
5911 gsi_insert_after (&switch_si
, stmt
, GSI_SAME_STMT
);
5912 gsi_remove (&switch_si
, true);
5914 si
= gsi_start_bb (default_bb
);
5915 stmt
= gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP
), 0);
5916 gsi_insert_after (&si
, stmt
, GSI_CONTINUE_LINKING
);
5922 /* Code to get the next section goes in L1_BB. */
5923 si
= gsi_last_bb (l1_bb
);
5924 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_CONTINUE
);
5926 bfn_decl
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT
);
5927 stmt
= gimple_build_call (bfn_decl
, 0);
5928 gimple_call_set_lhs (stmt
, vnext
);
5929 gsi_insert_after (&si
, stmt
, GSI_SAME_STMT
);
5930 gsi_remove (&si
, true);
5932 single_succ_edge (l1_bb
)->flags
= EDGE_FALLTHRU
;
5935 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
5936 si
= gsi_last_bb (l2_bb
);
5937 if (gimple_omp_return_nowait_p (gsi_stmt (si
)))
5938 t
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT
);
5939 else if (gimple_omp_return_lhs (gsi_stmt (si
)))
5940 t
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL
);
5942 t
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END
);
5943 stmt
= gimple_build_call (t
, 0);
5944 if (gimple_omp_return_lhs (gsi_stmt (si
)))
5945 gimple_call_set_lhs (stmt
, gimple_omp_return_lhs (gsi_stmt (si
)));
5946 gsi_insert_after (&si
, stmt
, GSI_SAME_STMT
);
5947 gsi_remove (&si
, true);
5949 set_immediate_dominator (CDI_DOMINATORS
, default_bb
, l0_bb
);
/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.  */

static void
expand_omp_single (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
    {
      tree t = gimple_omp_return_lhs (gsi_stmt (si));
      gsi_insert_after (&si, omp_build_barrier (t), GSI_SAME_STMT);
    }
  gsi_remove (&si, true);
  single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}
/* Generic expansion for OpenMP synchronization directives: master,
   ordered and critical.  All we need to do here is remove the entry
   and exit markers for REGION.  */

static void
expand_omp_synch (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  if (exit_bb)
    {
      si = gsi_last_bb (exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
    }
}
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a normal volatile load.  */

static bool
expand_omp_atomic_load (basic_block load_bb, tree addr,
			tree loaded_val, int index)
{
  enum built_in_function tmpbase;
  gimple_stmt_iterator gsi;
  basic_block store_bb;
  location_t loc;
  gimple *stmt;
  tree decl, call, type, itype;

  gsi = gsi_last_bb (load_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
  loc = gimple_location (stmt);

  /* ??? If the target does not implement atomic_load_optab[mode], and mode
     is smaller than word size, then expand_atomic_load assumes that the load
     is atomic.  We could avoid the builtin entirely in this case.  */

  tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;

  type = TREE_TYPE (loaded_val);
  itype = TREE_TYPE (TREE_TYPE (decl));

  call = build_call_expr_loc (loc, decl, 2, addr,
			      build_int_cst (NULL,
					     gimple_omp_atomic_seq_cst_p (stmt)
					     ? MEMMODEL_SEQ_CST
					     : MEMMODEL_RELAXED));
  if (!useless_type_conversion_p (type, itype))
    call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
  call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);

  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  store_bb = single_succ (load_bb);
  gsi = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
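
/* Illustration only: for a 4-byte integral type (index == 2) the expansion
   above replaces the GIMPLE_OMP_ATOMIC_LOAD/STORE pair with roughly

     loaded_val = __atomic_load_4 (addr, MEMMODEL_RELAXED);

   using MEMMODEL_SEQ_CST instead when the directive is marked seq_cst.  */
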
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a normal volatile store.  */

static bool
expand_omp_atomic_store (basic_block load_bb, tree addr,
			 tree loaded_val, tree stored_val, int index)
{
  enum built_in_function tmpbase;
  gimple_stmt_iterator gsi;
  basic_block store_bb = single_succ (load_bb);
  location_t loc;
  gimple *stmt;
  tree decl, call, type, itype;
  machine_mode imode;
  bool exchange;

  gsi = gsi_last_bb (load_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);

  /* If the load value is needed, then this isn't a store but an exchange.  */
  exchange = gimple_omp_atomic_need_value_p (stmt);

  gsi = gsi_last_bb (store_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
  loc = gimple_location (stmt);

  /* ??? If the target does not implement atomic_store_optab[mode], and mode
     is smaller than word size, then expand_atomic_store assumes that the store
     is atomic.  We could avoid the builtin entirely in this case.  */

  tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
  tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;

  type = TREE_TYPE (stored_val);

  /* Dig out the type of the function's second argument.  */
  itype = TREE_TYPE (decl);
  itype = TYPE_ARG_TYPES (itype);
  itype = TREE_CHAIN (itype);
  itype = TREE_VALUE (itype);
  imode = TYPE_MODE (itype);

  if (exchange && !can_atomic_exchange_p (imode, true))
    return false;

  if (!useless_type_conversion_p (itype, type))
    stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
  call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
			      build_int_cst (NULL,
					     gimple_omp_atomic_seq_cst_p (stmt)
					     ? MEMMODEL_SEQ_CST
					     : MEMMODEL_RELAXED));
  if (exchange)
    {
      if (!useless_type_conversion_p (type, itype))
	call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
      call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
    }

  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above.  */
  gsi = gsi_last_bb (load_bb);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
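
/* Illustration only: for a 4-byte type the store form above becomes roughly

     __atomic_store_4 (addr, stored_val, MEMMODEL_RELAXED);

   and, when the old value is needed (an exchange), roughly

     loaded_val = __atomic_exchange_4 (addr, stored_val, MEMMODEL_RELAXED);

   again with MEMMODEL_SEQ_CST when the directive is marked seq_cst.  */
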
6142 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
6143 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
6144 size of the data type, and thus usable to find the index of the builtin
6145 decl. Returns false if the expression is not of the proper form. */
6148 expand_omp_atomic_fetch_op (basic_block load_bb
,
6149 tree addr
, tree loaded_val
,
6150 tree stored_val
, int index
)
6152 enum built_in_function oldbase
, newbase
, tmpbase
;
6153 tree decl
, itype
, call
;
6155 basic_block store_bb
= single_succ (load_bb
);
6156 gimple_stmt_iterator gsi
;
6159 enum tree_code code
;
6160 bool need_old
, need_new
;
6164 /* We expect to find the following sequences:
6167 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
6170 val = tmp OP something; (or: something OP tmp)
6171 GIMPLE_OMP_STORE (val)
6173 ???FIXME: Allow a more flexible sequence.
6174 Perhaps use data flow to pick the statements.
6178 gsi
= gsi_after_labels (store_bb
);
6179 stmt
= gsi_stmt (gsi
);
6180 loc
= gimple_location (stmt
);
6181 if (!is_gimple_assign (stmt
))
6184 if (gimple_code (gsi_stmt (gsi
)) != GIMPLE_OMP_ATOMIC_STORE
)
6186 need_new
= gimple_omp_atomic_need_value_p (gsi_stmt (gsi
));
6187 need_old
= gimple_omp_atomic_need_value_p (last_stmt (load_bb
));
6188 seq_cst
= gimple_omp_atomic_seq_cst_p (last_stmt (load_bb
));
6189 gcc_checking_assert (!need_old
|| !need_new
);
6191 if (!operand_equal_p (gimple_assign_lhs (stmt
), stored_val
, 0))
6194 /* Check for one of the supported fetch-op operations. */
6195 code
= gimple_assign_rhs_code (stmt
);
6199 case POINTER_PLUS_EXPR
:
6200 oldbase
= BUILT_IN_ATOMIC_FETCH_ADD_N
;
6201 newbase
= BUILT_IN_ATOMIC_ADD_FETCH_N
;
6204 oldbase
= BUILT_IN_ATOMIC_FETCH_SUB_N
;
6205 newbase
= BUILT_IN_ATOMIC_SUB_FETCH_N
;
6208 oldbase
= BUILT_IN_ATOMIC_FETCH_AND_N
;
6209 newbase
= BUILT_IN_ATOMIC_AND_FETCH_N
;
6212 oldbase
= BUILT_IN_ATOMIC_FETCH_OR_N
;
6213 newbase
= BUILT_IN_ATOMIC_OR_FETCH_N
;
6216 oldbase
= BUILT_IN_ATOMIC_FETCH_XOR_N
;
6217 newbase
= BUILT_IN_ATOMIC_XOR_FETCH_N
;
6223 /* Make sure the expression is of the proper form. */
6224 if (operand_equal_p (gimple_assign_rhs1 (stmt
), loaded_val
, 0))
6225 rhs
= gimple_assign_rhs2 (stmt
);
6226 else if (commutative_tree_code (gimple_assign_rhs_code (stmt
))
6227 && operand_equal_p (gimple_assign_rhs2 (stmt
), loaded_val
, 0))
6228 rhs
= gimple_assign_rhs1 (stmt
);
6232 tmpbase
= ((enum built_in_function
)
6233 ((need_new
? newbase
: oldbase
) + index
+ 1));
6234 decl
= builtin_decl_explicit (tmpbase
);
6235 if (decl
== NULL_TREE
)
6237 itype
= TREE_TYPE (TREE_TYPE (decl
));
6238 imode
= TYPE_MODE (itype
);
  /* We could test all of the various optabs involved, but the fact of the
     matter is that (with the exception of i486 vs i586 and xadd) all targets
     that support any atomic operation optab also implement compare-and-swap.
     Let optabs.c take care of expanding any compare-and-swap loop.  */
6244 if (!can_compare_and_swap_p (imode
, true) || !can_atomic_load_p (imode
))
6247 gsi
= gsi_last_bb (load_bb
);
6248 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_ATOMIC_LOAD
);
6250 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
6251 It only requires that the operation happen atomically. Thus we can
6252 use the RELAXED memory model. */
6253 call
= build_call_expr_loc (loc
, decl
, 3, addr
,
6254 fold_convert_loc (loc
, itype
, rhs
),
6255 build_int_cst (NULL
,
6256 seq_cst
? MEMMODEL_SEQ_CST
6257 : MEMMODEL_RELAXED
));
6259 if (need_old
|| need_new
)
6261 lhs
= need_old
? loaded_val
: stored_val
;
6262 call
= fold_convert_loc (loc
, TREE_TYPE (lhs
), call
);
6263 call
= build2_loc (loc
, MODIFY_EXPR
, void_type_node
, lhs
, call
);
6266 call
= fold_convert_loc (loc
, void_type_node
, call
);
6267 force_gimple_operand_gsi (&gsi
, call
, true, NULL_TREE
, true, GSI_SAME_STMT
);
6268 gsi_remove (&gsi
, true);
6270 gsi
= gsi_last_bb (store_bb
);
6271 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_ATOMIC_STORE
);
6272 gsi_remove (&gsi
, true);
6273 gsi
= gsi_last_bb (store_bb
);
6274 stmt
= gsi_stmt (gsi
);
6275 gsi_remove (&gsi
, true);
6277 if (gimple_in_ssa_p (cfun
))
6279 release_defs (stmt
);
6280 update_ssa (TODO_update_ssa_no_phi
);
6286 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
6290 newval = rhs; // with oldval replacing *addr in rhs
6291 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
6292 if (oldval != newval)
6295 INDEX is log2 of the size of the data type, and thus usable to find the
6296 index of the builtin decl. */
6299 expand_omp_atomic_pipeline (basic_block load_bb
, basic_block store_bb
,
6300 tree addr
, tree loaded_val
, tree stored_val
,
6303 tree loadedi
, storedi
, initial
, new_storedi
, old_vali
;
6304 tree type
, itype
, cmpxchg
, iaddr
;
6305 gimple_stmt_iterator si
;
6306 basic_block loop_header
= single_succ (load_bb
);
6309 enum built_in_function fncode
;
6311 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
6312 order to use the RELAXED memory model effectively. */
6313 fncode
= (enum built_in_function
)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
6315 cmpxchg
= builtin_decl_explicit (fncode
);
6316 if (cmpxchg
== NULL_TREE
)
6318 type
= TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr
)));
6319 itype
= TREE_TYPE (TREE_TYPE (cmpxchg
));
6321 if (!can_compare_and_swap_p (TYPE_MODE (itype
), true)
6322 || !can_atomic_load_p (TYPE_MODE (itype
)))
6325 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
6326 si
= gsi_last_bb (load_bb
);
6327 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_ATOMIC_LOAD
);
6329 /* For floating-point values, we'll need to view-convert them to integers
6330 so that we can perform the atomic compare and swap. Simplify the
6331 following code by always setting up the "i"ntegral variables. */
6332 if (!INTEGRAL_TYPE_P (type
) && !POINTER_TYPE_P (type
))
6336 iaddr
= create_tmp_reg (build_pointer_type_for_mode (itype
, ptr_mode
,
6339 = force_gimple_operand_gsi (&si
,
6340 fold_convert (TREE_TYPE (iaddr
), addr
),
6341 false, NULL_TREE
, true, GSI_SAME_STMT
);
6342 stmt
= gimple_build_assign (iaddr
, iaddr_val
);
6343 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
6344 loadedi
= create_tmp_var (itype
);
6345 if (gimple_in_ssa_p (cfun
))
6346 loadedi
= make_ssa_name (loadedi
);
6351 loadedi
= loaded_val
;
6354 fncode
= (enum built_in_function
) (BUILT_IN_ATOMIC_LOAD_N
+ index
+ 1);
6355 tree loaddecl
= builtin_decl_explicit (fncode
);
6358 = fold_convert (TREE_TYPE (TREE_TYPE (iaddr
)),
6359 build_call_expr (loaddecl
, 2, iaddr
,
6360 build_int_cst (NULL_TREE
,
6361 MEMMODEL_RELAXED
)));
6363 initial
= build2 (MEM_REF
, TREE_TYPE (TREE_TYPE (iaddr
)), iaddr
,
6364 build_int_cst (TREE_TYPE (iaddr
), 0));
6367 = force_gimple_operand_gsi (&si
, initial
, true, NULL_TREE
, true,
6370 /* Move the value to the LOADEDI temporary. */
6371 if (gimple_in_ssa_p (cfun
))
6373 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header
)));
6374 phi
= create_phi_node (loadedi
, loop_header
);
6375 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi
, single_succ_edge (load_bb
)),
6379 gsi_insert_before (&si
,
6380 gimple_build_assign (loadedi
, initial
),
6382 if (loadedi
!= loaded_val
)
6384 gimple_stmt_iterator gsi2
;
6387 x
= build1 (VIEW_CONVERT_EXPR
, type
, loadedi
);
6388 gsi2
= gsi_start_bb (loop_header
);
6389 if (gimple_in_ssa_p (cfun
))
6392 x
= force_gimple_operand_gsi (&gsi2
, x
, true, NULL_TREE
,
6393 true, GSI_SAME_STMT
);
6394 stmt
= gimple_build_assign (loaded_val
, x
);
6395 gsi_insert_before (&gsi2
, stmt
, GSI_SAME_STMT
);
6399 x
= build2 (MODIFY_EXPR
, TREE_TYPE (loaded_val
), loaded_val
, x
);
6400 force_gimple_operand_gsi (&gsi2
, x
, true, NULL_TREE
,
6401 true, GSI_SAME_STMT
);
6404 gsi_remove (&si
, true);
6406 si
= gsi_last_bb (store_bb
);
6407 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_ATOMIC_STORE
);
6410 storedi
= stored_val
;
6413 = force_gimple_operand_gsi (&si
,
6414 build1 (VIEW_CONVERT_EXPR
, itype
,
6415 stored_val
), true, NULL_TREE
, true,
6418 /* Build the compare&swap statement. */
6419 new_storedi
= build_call_expr (cmpxchg
, 3, iaddr
, loadedi
, storedi
);
6420 new_storedi
= force_gimple_operand_gsi (&si
,
6421 fold_convert (TREE_TYPE (loadedi
),
6424 true, GSI_SAME_STMT
);
6426 if (gimple_in_ssa_p (cfun
))
6430 old_vali
= create_tmp_var (TREE_TYPE (loadedi
));
6431 stmt
= gimple_build_assign (old_vali
, loadedi
);
6432 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
6434 stmt
= gimple_build_assign (loadedi
, new_storedi
);
6435 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
6438 /* Note that we always perform the comparison as an integer, even for
6439 floating point. This allows the atomic operation to properly
6440 succeed even with NaNs and -0.0. */
6441 tree ne
= build2 (NE_EXPR
, boolean_type_node
, new_storedi
, old_vali
);
6442 stmt
= gimple_build_cond_empty (ne
);
6443 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
6446 e
= single_succ_edge (store_bb
);
6447 e
->flags
&= ~EDGE_FALLTHRU
;
6448 e
->flags
|= EDGE_FALSE_VALUE
;
6450 e
= make_edge (store_bb
, loop_header
, EDGE_TRUE_VALUE
);
6452 /* Copy the new value to loadedi (we already did that before the condition
6453 if we are not in SSA). */
6454 if (gimple_in_ssa_p (cfun
))
6456 phi
= gimple_seq_first_stmt (phi_nodes (loop_header
));
6457 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi
, e
), new_storedi
);
6460 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
6461 gsi_remove (&si
, true);
6463 struct loop
*loop
= alloc_loop ();
6464 loop
->header
= loop_header
;
6465 loop
->latch
= store_bb
;
6466 add_loop (loop
, loop_header
->loop_father
);
6468 if (gimple_in_ssa_p (cfun
))
6469 update_ssa (TODO_update_ssa_no_phi
);
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

			GOMP_atomic_start ();
			*addr = rhs;
			GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.
   LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
   expanding.  STORED_VAL is the operand of the matching
   GIMPLE_OMP_ATOMIC_STORE.

   We replace
   GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
   loaded_val = *addr;

   and replace
   GIMPLE_OMP_ATOMIC_STORE (stored_val) with
   *addr = stored_val;  */

static void
expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
			 tree addr, tree loaded_val, tree stored_val)
{
  gimple_stmt_iterator si;
  gassign *stmt;
  tree t;

  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);

  stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
			      stored_val);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&si, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);
}
/* Expand a GIMPLE_OMP_ATOMIC statement.  We try to expand
   using expand_omp_atomic_fetch_op.  If it failed, we try to
   call expand_omp_atomic_pipeline, and if it fails too, the
   ultimate fallback is wrapping the operation in a mutex
   (expand_omp_atomic_mutex).  REGION is the atomic region built
   by build_omp_regions_1().  */

static void
expand_omp_atomic (struct omp_region *region)
{
  basic_block load_bb = region->entry, store_bb = region->exit;
  gomp_atomic_load *load = as_a <gomp_atomic_load *> (last_stmt (load_bb));
  gomp_atomic_store *store = as_a <gomp_atomic_store *> (last_stmt (store_bb));
  tree loaded_val = gimple_omp_atomic_load_lhs (load);
  tree addr = gimple_omp_atomic_load_rhs (load);
  tree stored_val = gimple_omp_atomic_store_val (store);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  HOST_WIDE_INT index;

  /* Make sure the type is one of the supported sizes.  */
  index = tree_to_uhwi (TYPE_SIZE_UNIT (type));
  index = exact_log2 (index);
  if (index >= 0 && index <= 4)
    {
      unsigned int align = TYPE_ALIGN_UNIT (type);

      /* __sync builtins require strict data alignment.  */
      if (exact_log2 (align) >= index)
	{
	  /* Atomic load.  */
	  if (loaded_val == stored_val
	      && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
		  || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
	      && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
	      && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
	    return;

	  /* Atomic store.  */
	  if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
	       || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
	      && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
	      && store_bb == single_succ (load_bb)
	      && first_stmt (store_bb) == store
	      && expand_omp_atomic_store (load_bb, addr, loaded_val,
					  stored_val, index))
	    return;

	  /* When possible, use specialized atomic update functions.  */
	  if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
	      && store_bb == single_succ (load_bb)
	      && expand_omp_atomic_fetch_op (load_bb, addr,
					     loaded_val, stored_val, index))
	    return;

	  /* If we don't have specialized __sync builtins, try and implement
	     as a compare and swap loop.  */
	  if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
					  loaded_val, stored_val, index))
	    return;
	}
    }

  /* The ultimate fallback is wrapping the operation in a mutex.  */
  expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
}
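
/* Illustration only: for, e.g., "#pragma omp atomic" incrementing a plain
   int, the first strategy that succeeds is the fetch-op path, which produces
   roughly

     __atomic_fetch_add_4 (&x, 1, MEMMODEL_RELAXED);

   whereas an unsupported type falls all the way through to the
   GOMP_atomic_start ()/GOMP_atomic_end () mutex form above.  */
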
/* Mark the loops inside the kernels region starting at REGION_ENTRY and
   ending at REGION_EXIT.  */

static void
mark_loops_in_oacc_kernels_region (basic_block region_entry,
				   basic_block region_exit)
{
  struct loop *outer = region_entry->loop_father;
  gcc_assert (region_exit == NULL || outer == region_exit->loop_father);

  /* Don't parallelize the kernels region if it contains more than one outer
     loop.  */
  unsigned int nr_outer_loops = 0;
  struct loop *single_outer = NULL;
  for (struct loop *loop = outer->inner; loop != NULL; loop = loop->next)
    {
      gcc_assert (loop_outer (loop) == outer);

      if (!dominated_by_p (CDI_DOMINATORS, loop->header, region_entry))
	continue;

      if (region_exit != NULL
	  && dominated_by_p (CDI_DOMINATORS, loop->header, region_exit))
	continue;

      nr_outer_loops++;
      single_outer = loop;
    }
  if (nr_outer_loops != 1)
    return;

  for (struct loop *loop = single_outer->inner;
       loop != NULL;
       loop = loop->inner)
    if (loop->next)
      return;

  /* Mark the loops in the region.  */
  for (struct loop *loop = single_outer; loop != NULL; loop = loop->inner)
    loop->in_oacc_kernels_region = true;
}
/* Types used to pass grid and workgroup sizes to kernel invocation.  */

struct GTY(()) grid_launch_attributes_trees
{
  tree kernel_dim_array_type;
  tree kernel_lattrs_dimnum_decl;
  tree kernel_lattrs_grid_decl;
  tree kernel_lattrs_group_decl;
  tree kernel_launch_attributes_type;
};

static GTY(()) struct grid_launch_attributes_trees *grid_attr_trees;

/* Create types used to pass kernel launch attributes to target.  */

static void
grid_create_kernel_launch_attr_types (void)
{
  if (grid_attr_trees)
    return;
  grid_attr_trees = ggc_alloc <grid_launch_attributes_trees> ();

  tree dim_arr_index_type
    = build_index_type (build_int_cst (integer_type_node, 2));
  grid_attr_trees->kernel_dim_array_type
    = build_array_type (uint32_type_node, dim_arr_index_type);

  grid_attr_trees->kernel_launch_attributes_type = make_node (RECORD_TYPE);
  grid_attr_trees->kernel_lattrs_dimnum_decl
    = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("ndim"),
		  uint32_type_node);
  DECL_CHAIN (grid_attr_trees->kernel_lattrs_dimnum_decl) = NULL_TREE;

  grid_attr_trees->kernel_lattrs_grid_decl
    = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("grid_size"),
		  grid_attr_trees->kernel_dim_array_type);
  DECL_CHAIN (grid_attr_trees->kernel_lattrs_grid_decl)
    = grid_attr_trees->kernel_lattrs_dimnum_decl;
  grid_attr_trees->kernel_lattrs_group_decl
    = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("group_size"),
		  grid_attr_trees->kernel_dim_array_type);
  DECL_CHAIN (grid_attr_trees->kernel_lattrs_group_decl)
    = grid_attr_trees->kernel_lattrs_grid_decl;
  finish_builtin_struct (grid_attr_trees->kernel_launch_attributes_type,
			 "__gomp_kernel_launch_attributes",
			 grid_attr_trees->kernel_lattrs_group_decl, NULL_TREE);
}
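
/* Illustration only: the record built above corresponds roughly to

     struct __gomp_kernel_launch_attributes
     {
       uint32_t ndim;
       uint32_t grid_size[3];
       uint32_t group_size[3];
     };

   which is the layout passed under GOMP_TARGET_ARG_HSA_KERNEL_ATTRIBUTES by
   get_target_arguments below.  */
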
/* Insert before the current statement in GSI a store of VALUE to INDEX of
   array (of type kernel_dim_array_type) FLD_DECL of RANGE_VAR.  VALUE must be
   of type uint32_type_node.  */

static void
grid_insert_store_range_dim (gimple_stmt_iterator *gsi, tree range_var,
			     tree fld_decl, int index, tree value)
{
  tree ref = build4 (ARRAY_REF, uint32_type_node,
		     build3 (COMPONENT_REF,
			     grid_attr_trees->kernel_dim_array_type,
			     range_var, fld_decl, NULL_TREE),
		     build_int_cst (integer_type_node, index),
		     NULL_TREE, NULL_TREE);
  gsi_insert_before (gsi, gimple_build_assign (ref, value), GSI_SAME_STMT);
}
/* Return a tree representation of a pointer to a structure with grid and
   work-group size information.  Statements filling that information will be
   inserted before GSI, TGT_STMT is the target statement which has the
   necessary information in it.  */

static tree
grid_get_kernel_launch_attributes (gimple_stmt_iterator *gsi,
				   gomp_target *tgt_stmt)
{
  grid_create_kernel_launch_attr_types ();
  tree lattrs = create_tmp_var (grid_attr_trees->kernel_launch_attributes_type,
				"__kernel_launch_attrs");

  unsigned max_dim = 0;
  for (tree clause = gimple_omp_target_clauses (tgt_stmt);
       clause;
       clause = OMP_CLAUSE_CHAIN (clause))
    {
      if (OMP_CLAUSE_CODE (clause) != OMP_CLAUSE__GRIDDIM_)
	continue;

      unsigned dim = OMP_CLAUSE__GRIDDIM__DIMENSION (clause);
      max_dim = MAX (dim, max_dim);

      grid_insert_store_range_dim (gsi, lattrs,
				   grid_attr_trees->kernel_lattrs_grid_decl,
				   dim, OMP_CLAUSE__GRIDDIM__SIZE (clause));
      grid_insert_store_range_dim (gsi, lattrs,
				   grid_attr_trees->kernel_lattrs_group_decl,
				   dim, OMP_CLAUSE__GRIDDIM__GROUP (clause));
    }

  tree dimref = build3 (COMPONENT_REF, uint32_type_node, lattrs,
			grid_attr_trees->kernel_lattrs_dimnum_decl, NULL_TREE);
  gcc_checking_assert (max_dim <= 2);
  tree dimensions = build_int_cstu (uint32_type_node, max_dim + 1);
  gsi_insert_before (gsi, gimple_build_assign (dimref, dimensions),
		     GSI_SAME_STMT);
  TREE_ADDRESSABLE (lattrs) = 1;
  return build_fold_addr_expr (lattrs);
}
/* Build target argument identifier from the DEVICE identifier, value
   identifier ID and whether the element also has a SUBSEQUENT_PARAM.  */

static tree
get_target_argument_identifier_1 (int device, bool subseqent_param, int id)
{
  tree t = build_int_cst (integer_type_node, device);
  if (subseqent_param)
    t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t,
		     build_int_cst (integer_type_node,
				    GOMP_TARGET_ARG_SUBSEQUENT_PARAM));
  t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t,
		   build_int_cst (integer_type_node, id));
  return t;
}

/* Like above but return it in type that can be directly stored as an element
   of the argument array.  */

static tree
get_target_argument_identifier (int device, bool subseqent_param, int id)
{
  tree t = get_target_argument_identifier_1 (device, subseqent_param, id);
  return fold_convert (ptr_type_node, t);
}

/* Return a target argument consisting of DEVICE identifier, value identifier
   ID, and the actual VALUE.  */

static tree
get_target_argument_value (gimple_stmt_iterator *gsi, int device, int id,
			   tree value)
{
  tree t = fold_build2 (LSHIFT_EXPR, integer_type_node,
			fold_convert (integer_type_node, value),
			build_int_cst (unsigned_type_node,
				       GOMP_TARGET_ARG_VALUE_SHIFT));
  t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t,
		   get_target_argument_identifier_1 (device, false, id));
  t = fold_convert (ptr_type_node, t);
  return force_gimple_operand_gsi (gsi, t, true, NULL, true, GSI_SAME_STMT);
}

/* If VALUE is an integer constant greater than -2^15 and smaller than 2^15,
   push one argument to ARGS with both the DEVICE, ID and VALUE embedded in it,
   otherwise push an identifier (with DEVICE and ID) and the VALUE in two
   separate arguments.  */

static void
push_target_argument_according_to_value (gimple_stmt_iterator *gsi, int device,
					 int id, tree value, vec <tree> *args)
{
  if (tree_fits_shwi_p (value)
      && tree_to_shwi (value) > -(1 << 15)
      && tree_to_shwi (value) < (1 << 15))
    args->quick_push (get_target_argument_value (gsi, device, id, value));
  else
    {
      args->quick_push (get_target_argument_identifier (device, true, id));
      value = fold_convert (ptr_type_node, value);
      value = force_gimple_operand_gsi (gsi, value, true, NULL, true,
					GSI_SAME_STMT);
      args->quick_push (value);
    }
}
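
/* Illustration only: a small constant, such as a num_teams of 4, is encoded
   in a single pointer-sized element, roughly

     (4 << GOMP_TARGET_ARG_VALUE_SHIFT)
       | GOMP_TARGET_ARG_DEVICE_ALL | GOMP_TARGET_ARG_NUM_TEAMS

   while a non-constant or out-of-range value takes two consecutive elements:
   an identifier tagged with GOMP_TARGET_ARG_SUBSEQUENT_PARAM, followed by the
   value itself.  */
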
/* Create an array of arguments that is then passed to GOMP_target.  */

static tree
get_target_arguments (gimple_stmt_iterator *gsi, gomp_target *tgt_stmt)
{
  auto_vec <tree, 6> args;
  tree clauses = gimple_omp_target_clauses (tgt_stmt);
  tree t, c = omp_find_clause (clauses, OMP_CLAUSE_NUM_TEAMS);
  if (c)
    t = OMP_CLAUSE_NUM_TEAMS_EXPR (c);
  else
    t = integer_minus_one_node;
  push_target_argument_according_to_value (gsi, GOMP_TARGET_ARG_DEVICE_ALL,
					   GOMP_TARGET_ARG_NUM_TEAMS, t, &args);

  c = omp_find_clause (clauses, OMP_CLAUSE_THREAD_LIMIT);
  if (c)
    t = OMP_CLAUSE_THREAD_LIMIT_EXPR (c);
  else
    t = integer_minus_one_node;
  push_target_argument_according_to_value (gsi, GOMP_TARGET_ARG_DEVICE_ALL,
					   GOMP_TARGET_ARG_THREAD_LIMIT, t,
					   &args);

  /* Add HSA-specific grid sizes, if available.  */
  if (omp_find_clause (gimple_omp_target_clauses (tgt_stmt),
		       OMP_CLAUSE__GRIDDIM_))
    {
      int id = GOMP_TARGET_ARG_HSA_KERNEL_ATTRIBUTES;
      t = get_target_argument_identifier (GOMP_DEVICE_HSA, true, id);
      args.quick_push (t);
      args.quick_push (grid_get_kernel_launch_attributes (gsi, tgt_stmt));
    }

  /* Produce more, perhaps device specific, arguments here.  */

  tree argarray = create_tmp_var (build_array_type_nelts (ptr_type_node,
							  args.length () + 1),
				  ".omp_target_args");
  for (unsigned i = 0; i < args.length (); i++)
    {
      tree ref = build4 (ARRAY_REF, ptr_type_node, argarray,
			 build_int_cst (integer_type_node, i),
			 NULL_TREE, NULL_TREE);
      gsi_insert_before (gsi, gimple_build_assign (ref, args[i]),
			 GSI_SAME_STMT);
    }
  tree ref = build4 (ARRAY_REF, ptr_type_node, argarray,
		     build_int_cst (integer_type_node, args.length ()),
		     NULL_TREE, NULL_TREE);
  gsi_insert_before (gsi, gimple_build_assign (ref, null_pointer_node),
		     GSI_SAME_STMT);
  TREE_ADDRESSABLE (argarray) = 1;
  return build_fold_addr_expr (argarray);
}
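
/* Illustration only: for a typical "#pragma omp target" without clauses the
   array built above therefore looks roughly like

     .omp_target_args[0] = DEVICE_ALL | NUM_TEAMS id    | (-1 << VALUE_SHIFT)
     .omp_target_args[1] = DEVICE_ALL | THREAD_LIMIT id | (-1 << VALUE_SHIFT)
     .omp_target_args[2] = NULL   (terminator)

   with the optional HSA attribute pair inserted before the terminating NULL
   when _griddim_ clauses are present.  */
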
6871 /* Expand the GIMPLE_OMP_TARGET starting at REGION. */
6874 expand_omp_target (struct omp_region
*region
)
6876 basic_block entry_bb
, exit_bb
, new_bb
;
6877 struct function
*child_cfun
;
6878 tree child_fn
, block
, t
;
6879 gimple_stmt_iterator gsi
;
6880 gomp_target
*entry_stmt
;
6883 bool offloaded
, data_region
;
6885 entry_stmt
= as_a
<gomp_target
*> (last_stmt (region
->entry
));
6886 new_bb
= region
->entry
;
6888 offloaded
= is_gimple_omp_offloaded (entry_stmt
);
6889 switch (gimple_omp_target_kind (entry_stmt
))
6891 case GF_OMP_TARGET_KIND_REGION
:
6892 case GF_OMP_TARGET_KIND_UPDATE
:
6893 case GF_OMP_TARGET_KIND_ENTER_DATA
:
6894 case GF_OMP_TARGET_KIND_EXIT_DATA
:
6895 case GF_OMP_TARGET_KIND_OACC_PARALLEL
:
6896 case GF_OMP_TARGET_KIND_OACC_KERNELS
:
6897 case GF_OMP_TARGET_KIND_OACC_UPDATE
:
6898 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA
:
6899 case GF_OMP_TARGET_KIND_OACC_DECLARE
:
6900 data_region
= false;
6902 case GF_OMP_TARGET_KIND_DATA
:
6903 case GF_OMP_TARGET_KIND_OACC_DATA
:
6904 case GF_OMP_TARGET_KIND_OACC_HOST_DATA
:
6911 child_fn
= NULL_TREE
;
6915 child_fn
= gimple_omp_target_child_fn (entry_stmt
);
6916 child_cfun
= DECL_STRUCT_FUNCTION (child_fn
);
6919 /* Supported by expand_omp_taskreg, but not here. */
6920 if (child_cfun
!= NULL
)
6921 gcc_checking_assert (!child_cfun
->cfg
);
6922 gcc_checking_assert (!gimple_in_ssa_p (cfun
));
6924 entry_bb
= region
->entry
;
6925 exit_bb
= region
->exit
;
6927 if (gimple_omp_target_kind (entry_stmt
) == GF_OMP_TARGET_KIND_OACC_KERNELS
)
6928 mark_loops_in_oacc_kernels_region (region
->entry
, region
->exit
);
6932 unsigned srcidx
, dstidx
, num
;
6934 /* If the offloading region needs data sent from the parent
6935 function, then the very first statement (except possible
6936 tree profile counter updates) of the offloading body
6937 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
6938 &.OMP_DATA_O is passed as an argument to the child function,
6939 we need to replace it with the argument as seen by the child
6942 In most cases, this will end up being the identity assignment
6943 .OMP_DATA_I = .OMP_DATA_I. However, if the offloading body had
6944 a function call that has been inlined, the original PARM_DECL
6945 .OMP_DATA_I may have been converted into a different local
6946 variable. In which case, we need to keep the assignment. */
6947 tree data_arg
= gimple_omp_target_data_arg (entry_stmt
);
6950 basic_block entry_succ_bb
= single_succ (entry_bb
);
6951 gimple_stmt_iterator gsi
;
6953 gimple
*tgtcopy_stmt
= NULL
;
6954 tree sender
= TREE_VEC_ELT (data_arg
, 0);
6956 for (gsi
= gsi_start_bb (entry_succ_bb
); ; gsi_next (&gsi
))
6958 gcc_assert (!gsi_end_p (gsi
));
6959 stmt
= gsi_stmt (gsi
);
6960 if (gimple_code (stmt
) != GIMPLE_ASSIGN
)
6963 if (gimple_num_ops (stmt
) == 2)
6965 tree arg
= gimple_assign_rhs1 (stmt
);
6967 /* We're ignoring the subcode because we're
6968 effectively doing a STRIP_NOPS. */
6970 if (TREE_CODE (arg
) == ADDR_EXPR
6971 && TREE_OPERAND (arg
, 0) == sender
)
6973 tgtcopy_stmt
= stmt
;
6979 gcc_assert (tgtcopy_stmt
!= NULL
);
6980 arg
= DECL_ARGUMENTS (child_fn
);
6982 gcc_assert (gimple_assign_lhs (tgtcopy_stmt
) == arg
);
6983 gsi_remove (&gsi
, true);
6986 /* Declare local variables needed in CHILD_CFUN. */
6987 block
= DECL_INITIAL (child_fn
);
6988 BLOCK_VARS (block
) = vec2chain (child_cfun
->local_decls
);
6989 /* The gimplifier could record temporaries in the offloading block
6990 rather than in containing function's local_decls chain,
6991 which would mean cgraph missed finalizing them. Do it now. */
6992 for (t
= BLOCK_VARS (block
); t
; t
= DECL_CHAIN (t
))
6993 if (VAR_P (t
) && TREE_STATIC (t
) && !DECL_EXTERNAL (t
))
6994 varpool_node::finalize_decl (t
);
6995 DECL_SAVED_TREE (child_fn
) = NULL
;
6996 /* We'll create a CFG for child_fn, so no gimple body is needed. */
6997 gimple_set_body (child_fn
, NULL
);
6998 TREE_USED (block
) = 1;
7000 /* Reset DECL_CONTEXT on function arguments. */
7001 for (t
= DECL_ARGUMENTS (child_fn
); t
; t
= DECL_CHAIN (t
))
7002 DECL_CONTEXT (t
) = child_fn
;
7004 /* Split ENTRY_BB at GIMPLE_*,
7005 so that it can be moved to the child function. */
7006 gsi
= gsi_last_bb (entry_bb
);
7007 stmt
= gsi_stmt (gsi
);
7009 && gimple_code (stmt
) == gimple_code (entry_stmt
));
7010 e
= split_block (entry_bb
, stmt
);
7011 gsi_remove (&gsi
, true);
7013 single_succ_edge (entry_bb
)->flags
= EDGE_FALLTHRU
;
7015 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
7018 gsi
= gsi_last_bb (exit_bb
);
7019 gcc_assert (!gsi_end_p (gsi
)
7020 && gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_RETURN
);
7021 stmt
= gimple_build_return (NULL
);
7022 gsi_insert_after (&gsi
, stmt
, GSI_SAME_STMT
);
7023 gsi_remove (&gsi
, true);
7026 /* Make sure to generate early debug for the function before
7027 outlining anything. */
7028 if (! gimple_in_ssa_p (cfun
))
7029 (*debug_hooks
->early_global_decl
) (cfun
->decl
);
7031 /* Move the offloading region into CHILD_CFUN. */
7033 block
= gimple_block (entry_stmt
);
7035 new_bb
= move_sese_region_to_fn (child_cfun
, entry_bb
, exit_bb
, block
);
7037 single_succ_edge (new_bb
)->flags
= EDGE_FALLTHRU
;
7038 /* When the OMP expansion process cannot guarantee an up-to-date
7039 loop tree arrange for the child function to fixup loops. */
7040 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP
))
7041 child_cfun
->x_current_loops
->state
|= LOOPS_NEED_FIXUP
;
7043 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
7044 num
= vec_safe_length (child_cfun
->local_decls
);
7045 for (srcidx
= 0, dstidx
= 0; srcidx
< num
; srcidx
++)
7047 t
= (*child_cfun
->local_decls
)[srcidx
];
7048 if (DECL_CONTEXT (t
) == cfun
->decl
)
7050 if (srcidx
!= dstidx
)
7051 (*child_cfun
->local_decls
)[dstidx
] = t
;
7055 vec_safe_truncate (child_cfun
->local_decls
, dstidx
);
7057 /* Inform the callgraph about the new function. */
7058 child_cfun
->curr_properties
= cfun
->curr_properties
;
7059 child_cfun
->has_simduid_loops
|= cfun
->has_simduid_loops
;
7060 child_cfun
->has_force_vectorize_loops
|= cfun
->has_force_vectorize_loops
;
7061 cgraph_node
*node
= cgraph_node::get_create (child_fn
);
7062 node
->parallelized_function
= 1;
7063 cgraph_node::add_new_function (child_fn
, true);
7065 /* Add the new function to the offload table. */
7066 if (ENABLE_OFFLOADING
)
7067 vec_safe_push (offload_funcs
, child_fn
);
7069 bool need_asm
= DECL_ASSEMBLER_NAME_SET_P (current_function_decl
)
7070 && !DECL_ASSEMBLER_NAME_SET_P (child_fn
);
7072 /* Fix the callgraph edges for child_cfun. Those for cfun will be
7073 fixed in a following pass. */
7074 push_cfun (child_cfun
);
7076 assign_assembler_name_if_needed (child_fn
);
7077 cgraph_edge::rebuild_edges ();
7079 /* Some EH regions might become dead, see PR34608. If
7080 pass_cleanup_cfg isn't the first pass to happen with the
7081 new child, these dead EH edges might cause problems.
7082 Clean them up now. */
7083 if (flag_exceptions
)
7086 bool changed
= false;
7088 FOR_EACH_BB_FN (bb
, cfun
)
7089 changed
|= gimple_purge_dead_eh_edges (bb
);
7091 cleanup_tree_cfg ();
7093 if (flag_checking
&& !loops_state_satisfies_p (LOOPS_NEED_FIXUP
))
7094 verify_loop_structure ();
7097 if (dump_file
&& !gimple_in_ssa_p (cfun
))
7099 omp_any_child_fn_dumped
= true;
7100 dump_function_header (dump_file
, child_fn
, dump_flags
);
7101 dump_function_to_file (child_fn
, dump_file
, dump_flags
);
7105 /* Emit a library call to launch the offloading region, or do data
7107 tree t1
, t2
, t3
, t4
, device
, cond
, depend
, c
, clauses
;
7108 enum built_in_function start_ix
;
7109 location_t clause_loc
;
7110 unsigned int flags_i
= 0;
7111 bool oacc_kernels_p
= false;
7113 switch (gimple_omp_target_kind (entry_stmt
))
7115 case GF_OMP_TARGET_KIND_REGION
:
7116 start_ix
= BUILT_IN_GOMP_TARGET
;
7118 case GF_OMP_TARGET_KIND_DATA
:
7119 start_ix
= BUILT_IN_GOMP_TARGET_DATA
;
7121 case GF_OMP_TARGET_KIND_UPDATE
:
7122 start_ix
= BUILT_IN_GOMP_TARGET_UPDATE
;
7124 case GF_OMP_TARGET_KIND_ENTER_DATA
:
7125 start_ix
= BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA
;
7127 case GF_OMP_TARGET_KIND_EXIT_DATA
:
7128 start_ix
= BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA
;
7129 flags_i
|= GOMP_TARGET_FLAG_EXIT_DATA
;
7131 case GF_OMP_TARGET_KIND_OACC_KERNELS
:
7132 oacc_kernels_p
= true;
7134 case GF_OMP_TARGET_KIND_OACC_PARALLEL
:
7135 start_ix
= BUILT_IN_GOACC_PARALLEL
;
7137 case GF_OMP_TARGET_KIND_OACC_DATA
:
7138 case GF_OMP_TARGET_KIND_OACC_HOST_DATA
:
7139 start_ix
= BUILT_IN_GOACC_DATA_START
;
7141 case GF_OMP_TARGET_KIND_OACC_UPDATE
:
7142 start_ix
= BUILT_IN_GOACC_UPDATE
;
7144 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA
:
7145 start_ix
= BUILT_IN_GOACC_ENTER_EXIT_DATA
;
7147 case GF_OMP_TARGET_KIND_OACC_DECLARE
:
7148 start_ix
= BUILT_IN_GOACC_DECLARE
;
7154 clauses
= gimple_omp_target_clauses (entry_stmt
);
7156 /* By default, the value of DEVICE is GOMP_DEVICE_ICV (let runtime
7157 library choose) and there is no conditional. */
7159 device
= build_int_cst (integer_type_node
, GOMP_DEVICE_ICV
);
7161 c
= omp_find_clause (clauses
, OMP_CLAUSE_IF
);
7163 cond
= OMP_CLAUSE_IF_EXPR (c
);
7165 c
= omp_find_clause (clauses
, OMP_CLAUSE_DEVICE
);
7168 /* Even if we pass it to all library function calls, it is currently only
7169 defined/used for the OpenMP target ones. */
7170 gcc_checking_assert (start_ix
== BUILT_IN_GOMP_TARGET
7171 || start_ix
== BUILT_IN_GOMP_TARGET_DATA
7172 || start_ix
== BUILT_IN_GOMP_TARGET_UPDATE
7173 || start_ix
== BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA
);
7175 device
= OMP_CLAUSE_DEVICE_ID (c
);
7176 clause_loc
= OMP_CLAUSE_LOCATION (c
);
7179 clause_loc
= gimple_location (entry_stmt
);
7181 c
= omp_find_clause (clauses
, OMP_CLAUSE_NOWAIT
);
7183 flags_i
|= GOMP_TARGET_FLAG_NOWAIT
;
7185 /* Ensure 'device' is of the correct type. */
7186 device
= fold_convert_loc (clause_loc
, integer_type_node
, device
);
7188 /* If we found the clause 'if (cond)', build
7189 (cond ? device : GOMP_DEVICE_HOST_FALLBACK). */
7192 cond
= gimple_boolify (cond
);
7194 basic_block cond_bb
, then_bb
, else_bb
;
7198 tmp_var
= create_tmp_var (TREE_TYPE (device
));
7200 e
= split_block_after_labels (new_bb
);
7203 gsi
= gsi_last_bb (new_bb
);
7205 e
= split_block (new_bb
, gsi_stmt (gsi
));
7211 then_bb
= create_empty_bb (cond_bb
);
7212 else_bb
= create_empty_bb (then_bb
);
7213 set_immediate_dominator (CDI_DOMINATORS
, then_bb
, cond_bb
);
7214 set_immediate_dominator (CDI_DOMINATORS
, else_bb
, cond_bb
);
7216 stmt
= gimple_build_cond_empty (cond
);
7217 gsi
= gsi_last_bb (cond_bb
);
7218 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
7220 gsi
= gsi_start_bb (then_bb
);
7221 stmt
= gimple_build_assign (tmp_var
, device
);
7222 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
7224 gsi
= gsi_start_bb (else_bb
);
7225 stmt
= gimple_build_assign (tmp_var
,
7226 build_int_cst (integer_type_node
,
7227 GOMP_DEVICE_HOST_FALLBACK
));
7228 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
7230 make_edge (cond_bb
, then_bb
, EDGE_TRUE_VALUE
);
7231 make_edge (cond_bb
, else_bb
, EDGE_FALSE_VALUE
);
7232 add_bb_to_loop (then_bb
, cond_bb
->loop_father
);
7233 add_bb_to_loop (else_bb
, cond_bb
->loop_father
);
7234 make_edge (then_bb
, new_bb
, EDGE_FALLTHRU
);
7235 make_edge (else_bb
, new_bb
, EDGE_FALLTHRU
);
7238 gsi
= gsi_last_bb (new_bb
);
7242 gsi
= gsi_last_bb (new_bb
);
7243 device
= force_gimple_operand_gsi (&gsi
, device
, true, NULL_TREE
,
7244 true, GSI_SAME_STMT
);
7247 t
= gimple_omp_target_data_arg (entry_stmt
);
7250 t1
= size_zero_node
;
7251 t2
= build_zero_cst (ptr_type_node
);
7257 t1
= TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t
, 1))));
7258 t1
= size_binop (PLUS_EXPR
, t1
, size_int (1));
7259 t2
= build_fold_addr_expr (TREE_VEC_ELT (t
, 0));
7260 t3
= build_fold_addr_expr (TREE_VEC_ELT (t
, 1));
7261 t4
= build_fold_addr_expr (TREE_VEC_ELT (t
, 2));
7265 bool tagging
= false;
7266 /* The maximum number used by any start_ix, without varargs. */
7267 auto_vec
<tree
, 11> args
;
7268 args
.quick_push (device
);
7270 args
.quick_push (build_fold_addr_expr (child_fn
));
7271 args
.quick_push (t1
);
7272 args
.quick_push (t2
);
7273 args
.quick_push (t3
);
7274 args
.quick_push (t4
);
7277 case BUILT_IN_GOACC_DATA_START
:
7278 case BUILT_IN_GOACC_DECLARE
:
7279 case BUILT_IN_GOMP_TARGET_DATA
:
7281 case BUILT_IN_GOMP_TARGET
:
7282 case BUILT_IN_GOMP_TARGET_UPDATE
:
7283 case BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA
:
7284 args
.quick_push (build_int_cst (unsigned_type_node
, flags_i
));
7285 c
= omp_find_clause (clauses
, OMP_CLAUSE_DEPEND
);
7287 depend
= OMP_CLAUSE_DECL (c
);
7289 depend
= build_int_cst (ptr_type_node
, 0);
7290 args
.quick_push (depend
);
7291 if (start_ix
== BUILT_IN_GOMP_TARGET
)
7292 args
.quick_push (get_target_arguments (&gsi
, entry_stmt
));
7294 case BUILT_IN_GOACC_PARALLEL
:
7296 oacc_set_fn_attrib (child_fn
, clauses
, oacc_kernels_p
, &args
);
7300 case BUILT_IN_GOACC_ENTER_EXIT_DATA
:
7301 case BUILT_IN_GOACC_UPDATE
:
7303 tree t_async
= NULL_TREE
;
7305 /* If present, use the value specified by the respective
7306 clause, making sure that is of the correct type. */
7307 c
= omp_find_clause (clauses
, OMP_CLAUSE_ASYNC
);
7309 t_async
= fold_convert_loc (OMP_CLAUSE_LOCATION (c
),
7311 OMP_CLAUSE_ASYNC_EXPR (c
));
7313 /* Default values for t_async. */
7314 t_async
= fold_convert_loc (gimple_location (entry_stmt
),
7316 build_int_cst (integer_type_node
,
7318 if (tagging
&& t_async
)
7320 unsigned HOST_WIDE_INT i_async
= GOMP_LAUNCH_OP_MAX
;
7322 if (TREE_CODE (t_async
) == INTEGER_CST
)
7324 /* See if we can pack the async arg in to the tag's
7326 i_async
= TREE_INT_CST_LOW (t_async
);
7327 if (i_async
< GOMP_LAUNCH_OP_MAX
)
7328 t_async
= NULL_TREE
;
7330 i_async
= GOMP_LAUNCH_OP_MAX
;
7332 args
.safe_push (oacc_launch_pack (GOMP_LAUNCH_ASYNC
, NULL_TREE
,
7336 args
.safe_push (t_async
);
7338 /* Save the argument index, and ... */
7339 unsigned t_wait_idx
= args
.length ();
7340 unsigned num_waits
= 0;
7341 c
= omp_find_clause (clauses
, OMP_CLAUSE_WAIT
);
7343 /* ... push a placeholder. */
7344 args
.safe_push (integer_zero_node
);
7346 for (; c
; c
= OMP_CLAUSE_CHAIN (c
))
7347 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_WAIT
)
7349 args
.safe_push (fold_convert_loc (OMP_CLAUSE_LOCATION (c
),
7351 OMP_CLAUSE_WAIT_EXPR (c
)));
7355 if (!tagging
|| num_waits
)
7359 /* Now that we know the number, update the placeholder. */
7361 len
= oacc_launch_pack (GOMP_LAUNCH_WAIT
, NULL_TREE
, num_waits
);
7363 len
= build_int_cst (integer_type_node
, num_waits
);
7364 len
= fold_convert_loc (gimple_location (entry_stmt
),
7365 unsigned_type_node
, len
);
7366 args
[t_wait_idx
] = len
;
7374 /* Push terminal marker - zero. */
7375 args
.safe_push (oacc_launch_pack (0, NULL_TREE
, 0));
7377 g
= gimple_build_call_vec (builtin_decl_explicit (start_ix
), args
);
7378 gimple_set_location (g
, gimple_location (entry_stmt
));
7379 gsi_insert_before (&gsi
, g
, GSI_SAME_STMT
);
7383 gcc_assert (g
&& gimple_code (g
) == GIMPLE_OMP_TARGET
);
7384 gsi_remove (&gsi
, true);
7386 if (data_region
&& region
->exit
)
7388 gsi
= gsi_last_bb (region
->exit
);
7390 gcc_assert (g
&& gimple_code (g
) == GIMPLE_OMP_RETURN
);
7391 gsi_remove (&gsi
, true);
/* Expand KFOR loop as a HSA gridified kernel, i.e. as a body only with
   iteration variable derived from the thread number.  INTRA_GROUP means this
   is an expansion of a loop iterating over work-items within a separate
   iteration over groups.  */
static void
grid_expand_omp_for_loop (struct omp_region *kfor, bool intra_group)
{
  gimple_stmt_iterator gsi;
  gomp_for *for_stmt = as_a <gomp_for *> (last_stmt (kfor->entry));
  gcc_checking_assert (gimple_omp_for_kind (for_stmt)
                       == GF_OMP_FOR_KIND_GRID_LOOP);
  size_t collapse = gimple_omp_for_collapse (for_stmt);
  struct omp_for_data_loop *loops
    = XALLOCAVEC (struct omp_for_data_loop,
                  gimple_omp_for_collapse (for_stmt));
  struct omp_for_data fd;

  remove_edge (BRANCH_EDGE (kfor->entry));
  basic_block body_bb = FALLTHRU_EDGE (kfor->entry)->dest;

  gcc_assert (kfor->cont);
  omp_extract_for_data (for_stmt, &fd, loops);

  gsi = gsi_start_bb (body_bb);

  for (size_t dim = 0; dim < collapse; dim++)
    {
      tree type, itype;
      itype = type = TREE_TYPE (fd.loops[dim].v);
      if (POINTER_TYPE_P (type))
        itype = signed_type_for (type);

      tree n1 = fd.loops[dim].n1;
      tree step = fd.loops[dim].step;
      n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
                                     true, NULL_TREE, true, GSI_SAME_STMT);
      step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
                                       true, NULL_TREE, true, GSI_SAME_STMT);
      tree threadid;
      if (gimple_omp_for_grid_group_iter (for_stmt))
        {
          gcc_checking_assert (!intra_group);
          threadid = build_call_expr (builtin_decl_explicit
                                      (BUILT_IN_HSA_WORKGROUPID), 1,
                                      build_int_cstu (unsigned_type_node, dim));
        }
      else if (intra_group)
        threadid = build_call_expr (builtin_decl_explicit
                                    (BUILT_IN_HSA_WORKITEMID), 1,
                                    build_int_cstu (unsigned_type_node, dim));
      else
        threadid = build_call_expr (builtin_decl_explicit
                                    (BUILT_IN_HSA_WORKITEMABSID), 1,
                                    build_int_cstu (unsigned_type_node, dim));
      threadid = fold_convert (itype, threadid);
      threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE,
                                           true, GSI_SAME_STMT);

      tree startvar = fd.loops[dim].v;
      tree t = fold_build2 (MULT_EXPR, itype, threadid, step);
      if (POINTER_TYPE_P (type))
        t = fold_build_pointer_plus (n1, t);
      else
        t = fold_build2 (PLUS_EXPR, type, t, n1);
      t = fold_convert (type, t);
      t = force_gimple_operand_gsi (&gsi, t,
                                    DECL_P (startvar)
                                    && TREE_ADDRESSABLE (startvar),
                                    NULL_TREE, true, GSI_SAME_STMT);
      gassign *assign_stmt = gimple_build_assign (startvar, t);
      gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
    }

  /* Remove the omp for statement.  */
  gsi = gsi_last_bb (kfor->entry);
  gsi_remove (&gsi, true);

  /* Remove the GIMPLE_OMP_CONTINUE statement.  */
  gsi = gsi_last_bb (kfor->cont);
  gcc_assert (!gsi_end_p (gsi)
              && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_CONTINUE);
  gsi_remove (&gsi, true);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, if necessary.  */
  gsi = gsi_last_bb (kfor->exit);
  gcc_assert (!gsi_end_p (gsi)
              && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  if (intra_group)
    gsi_insert_before (&gsi, omp_build_barrier (NULL_TREE), GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Fixup the much simpler CFG.  */
  remove_edge (find_edge (kfor->cont, body_bb));

  if (kfor->cont != body_bb)
    set_immediate_dominator (CDI_DOMINATORS, kfor->cont, body_bb);
  set_immediate_dominator (CDI_DOMINATORS, kfor->exit, kfor->cont);
}
/* Structure passed to grid_remap_kernel_arg_accesses so that it can remap
   argument decls.  */

struct grid_arg_decl_map
{
  tree old_arg;
  tree new_arg;
};

/* Invoked through walk_gimple_op, will remap all PARM_DECLs to the ones
   pertaining to kernel function.  */

static tree
grid_remap_kernel_arg_accesses (tree *tp, int *walk_subtrees, void *data)
{
  tree t = *tp;
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct grid_arg_decl_map *adm = (struct grid_arg_decl_map *) wi->info;
  if (t == adm->old_arg)
    *tp = adm->new_arg;
  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}
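/* The callback above is driven from grid_expand_target_grid_body below: every
   statement of the new kernel body is visited with walk_gimple_op, with the
   walk_stmt_info's info field pointing at a grid_arg_decl_map that pairs the
   original PARM_DECL with its copy in the kernel fndecl.  */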
/* If TARGET region contains a kernel body for loop, remove its region from the
   TARGET and expand it in HSA gridified kernel fashion.  */
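/* In outline: the GIMPLE_OMP_GRID_BODY region is detached from the target
   region, its grid loops are flattened by grid_expand_omp_for_loop, the
   resulting single-entry single-exit region is moved into a fresh copy of the
   target child function (named with a "kernel" suffix via
   clone_function_name), and that copy is registered with the HSA back end,
   paired with the original host-side child function.  */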
static void
grid_expand_target_grid_body (struct omp_region *target)
{
  if (!hsa_gen_requested_p ())
    return;

  gomp_target *tgt_stmt = as_a <gomp_target *> (last_stmt (target->entry));
  struct omp_region **pp;

  for (pp = &target->inner; *pp; pp = &(*pp)->next)
    if ((*pp)->type == GIMPLE_OMP_GRID_BODY)
      break;

  struct omp_region *gpukernel = *pp;

  tree orig_child_fndecl = gimple_omp_target_child_fn (tgt_stmt);
  if (!gpukernel)
    {
      /* HSA cannot handle OACC stuff.  */
      if (gimple_omp_target_kind (tgt_stmt) != GF_OMP_TARGET_KIND_REGION)
        return;
      gcc_checking_assert (orig_child_fndecl);
      gcc_assert (!omp_find_clause (gimple_omp_target_clauses (tgt_stmt),
                                    OMP_CLAUSE__GRIDDIM_));
      cgraph_node *n = cgraph_node::get (orig_child_fndecl);

      hsa_register_kernel (n);
      return;
    }

  gcc_assert (omp_find_clause (gimple_omp_target_clauses (tgt_stmt),
                               OMP_CLAUSE__GRIDDIM_));
  tree inside_block
    = gimple_block (first_stmt (single_succ (gpukernel->entry)));
  *pp = gpukernel->next;
  for (pp = &gpukernel->inner; *pp; pp = &(*pp)->next)
    if ((*pp)->type == GIMPLE_OMP_FOR)
      break;

  struct omp_region *kfor = *pp;
  gcc_assert (kfor);
  gomp_for *for_stmt = as_a <gomp_for *> (last_stmt (kfor->entry));
  gcc_assert (gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_GRID_LOOP);
  *pp = kfor->next;
  if (kfor->inner)
    {
      if (gimple_omp_for_grid_group_iter (for_stmt))
        {
          struct omp_region **next_pp;
          for (pp = &kfor->inner; *pp; pp = next_pp)
            {
              next_pp = &(*pp)->next;
              if ((*pp)->type != GIMPLE_OMP_FOR)
                continue;
              gomp_for *inner = as_a <gomp_for *> (last_stmt ((*pp)->entry));
              gcc_assert (gimple_omp_for_kind (inner)
                          == GF_OMP_FOR_KIND_GRID_LOOP);
              grid_expand_omp_for_loop (*pp, true);
              *pp = (*pp)->next;
              next_pp = pp;
            }
        }
      expand_omp (kfor->inner);
    }
  if (gpukernel->inner)
    expand_omp (gpukernel->inner);

  tree kern_fndecl = copy_node (orig_child_fndecl);
  DECL_NAME (kern_fndecl) = clone_function_name (kern_fndecl, "kernel");
  SET_DECL_ASSEMBLER_NAME (kern_fndecl, DECL_NAME (kern_fndecl));
  tree tgtblock = gimple_block (tgt_stmt);
  tree fniniblock = make_node (BLOCK);
  BLOCK_ABSTRACT_ORIGIN (fniniblock) = tgtblock;
  BLOCK_SOURCE_LOCATION (fniniblock) = BLOCK_SOURCE_LOCATION (tgtblock);
  BLOCK_SOURCE_END_LOCATION (fniniblock) = BLOCK_SOURCE_END_LOCATION (tgtblock);
  BLOCK_SUPERCONTEXT (fniniblock) = kern_fndecl;
  DECL_INITIAL (kern_fndecl) = fniniblock;
  push_struct_function (kern_fndecl);
  cfun->function_end_locus = gimple_location (tgt_stmt);
  init_tree_ssa (cfun);
  pop_cfun ();

  /* Make sure to generate early debug for the function before
     outlining anything.  */
  if (! gimple_in_ssa_p (cfun))
    (*debug_hooks->early_global_decl) (cfun->decl);

  tree old_parm_decl = DECL_ARGUMENTS (kern_fndecl);
  gcc_assert (!DECL_CHAIN (old_parm_decl));
  tree new_parm_decl = copy_node (DECL_ARGUMENTS (kern_fndecl));
  DECL_CONTEXT (new_parm_decl) = kern_fndecl;
  DECL_ARGUMENTS (kern_fndecl) = new_parm_decl;
  gcc_assert (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (kern_fndecl))));
  DECL_RESULT (kern_fndecl) = copy_node (DECL_RESULT (kern_fndecl));
  DECL_CONTEXT (DECL_RESULT (kern_fndecl)) = kern_fndecl;
  struct function *kern_cfun = DECL_STRUCT_FUNCTION (kern_fndecl);
  kern_cfun->curr_properties = cfun->curr_properties;

  grid_expand_omp_for_loop (kfor, false);

  /* Remove the omp for statement.  */
  gimple_stmt_iterator gsi = gsi_last_bb (gpukernel->entry);
  gsi_remove (&gsi, true);
  /* Replace the GIMPLE_OMP_RETURN at the end of the kernel region with a real
     return.  */
  gsi = gsi_last_bb (gpukernel->exit);
  gcc_assert (!gsi_end_p (gsi)
              && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  gimple *ret_stmt = gimple_build_return (NULL);
  gsi_insert_after (&gsi, ret_stmt, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Statements in the first BB in the target construct have been produced by
     target lowering and must be copied inside the GPUKERNEL, with the two
     exceptions of the first OMP statement and the OMP_DATA assignment
     statement.  */
  gsi = gsi_start_bb (single_succ (gpukernel->entry));
  tree data_arg = gimple_omp_target_data_arg (tgt_stmt);
  tree sender = data_arg ? TREE_VEC_ELT (data_arg, 0) : NULL;
  for (gimple_stmt_iterator tsi = gsi_start_bb (single_succ (target->entry));
       !gsi_end_p (tsi); gsi_next (&tsi))
    {
      gimple *stmt = gsi_stmt (tsi);
      if (is_gimple_omp (stmt))
        break;
      if (sender
          && is_gimple_assign (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (stmt), 0) == sender)
        continue;
      gimple *copy = gimple_copy (stmt);
      gsi_insert_before (&gsi, copy, GSI_SAME_STMT);
      gimple_set_block (copy, fniniblock);
    }

  move_sese_region_to_fn (kern_cfun, single_succ (gpukernel->entry),
                          gpukernel->exit, inside_block);

  cgraph_node *kcn = cgraph_node::get_create (kern_fndecl);
  kcn->mark_force_output ();
  cgraph_node *orig_child = cgraph_node::get (orig_child_fndecl);

  hsa_register_kernel (kcn, orig_child);

  cgraph_node::add_new_function (kern_fndecl, true);
  push_cfun (kern_cfun);
  cgraph_edge::rebuild_edges ();

  /* Re-map any mention of the PARM_DECL of the original function to the
     PARM_DECL of the new one.

     TODO: It would be great if lowering produced references into the GPU
     kernel decl straight away and we did not have to do this.  */
  struct grid_arg_decl_map adm;
  adm.old_arg = old_parm_decl;
  adm.new_arg = new_parm_decl;
  basic_block bb;
  FOR_EACH_BB_FN (bb, kern_cfun)
    {
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);
          struct walk_stmt_info wi;
          memset (&wi, 0, sizeof (wi));
          wi.info = &adm;
          walk_gimple_op (stmt, grid_remap_kernel_arg_accesses, &wi);
        }
    }
  pop_cfun ();
}
/* Expand the parallel region tree rooted at REGION.  Expansion
   proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to
   be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
   internal dependencies in their body.  */
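/* For example, for

     #pragma omp parallel
     #pragma omp for
     for (...) ...

   the inner GIMPLE_OMP_FOR region is expanded before the enclosing
   GIMPLE_OMP_PARALLEL is outlined into its child function, so the outlined
   body already consists of plain control flow and runtime calls.  */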
static void
expand_omp (struct omp_region *region)
{
  omp_any_child_fn_dumped = false;
  while (region)
    {
      location_t saved_location;
      gimple *inner_stmt = NULL;

      /* First, determine whether this is a combined parallel+workshare
         region.  */
      if (region->type == GIMPLE_OMP_PARALLEL)
        determine_parallel_type (region);
      else if (region->type == GIMPLE_OMP_TARGET)
        grid_expand_target_grid_body (region);

      if (region->type == GIMPLE_OMP_FOR
          && gimple_omp_for_combined_p (last_stmt (region->entry)))
        inner_stmt = last_stmt (region->inner->entry);

      if (region->inner)
        expand_omp (region->inner);

      saved_location = input_location;
      if (gimple_has_location (last_stmt (region->entry)))
        input_location = gimple_location (last_stmt (region->entry));

      switch (region->type)
        {
        case GIMPLE_OMP_PARALLEL:
        case GIMPLE_OMP_TASK:
          expand_omp_taskreg (region);
          break;

        case GIMPLE_OMP_FOR:
          expand_omp_for (region, inner_stmt);
          break;

        case GIMPLE_OMP_SECTIONS:
          expand_omp_sections (region);
          break;

        case GIMPLE_OMP_SECTION:
          /* Individual omp sections are handled together with their
             parent GIMPLE_OMP_SECTIONS region.  */
          break;

        case GIMPLE_OMP_SINGLE:
          expand_omp_single (region);
          break;

        case GIMPLE_OMP_ORDERED:
          {
            gomp_ordered *ord_stmt
              = as_a <gomp_ordered *> (last_stmt (region->entry));
            if (omp_find_clause (gimple_omp_ordered_clauses (ord_stmt),
                                 OMP_CLAUSE_DEPEND))
              {
                /* We'll expand these when expanding corresponding
                   worksharing region with ordered(n) clause.  */
                gcc_assert (region->outer
                            && region->outer->type == GIMPLE_OMP_FOR);
                region->ord_stmt = ord_stmt;
                break;
              }
          }
          /* FALLTHRU */
        case GIMPLE_OMP_MASTER:
        case GIMPLE_OMP_TASKGROUP:
        case GIMPLE_OMP_CRITICAL:
        case GIMPLE_OMP_TEAMS:
          expand_omp_synch (region);
          break;

        case GIMPLE_OMP_ATOMIC_LOAD:
          expand_omp_atomic (region);
          break;

        case GIMPLE_OMP_TARGET:
          expand_omp_target (region);
          break;

        default:
          gcc_unreachable ();
        }

      input_location = saved_location;
      region = region->next;
    }
  if (omp_any_child_fn_dumped)
    {
      if (dump_file)
        dump_function_header (dump_file, current_function_decl, dump_flags);
      omp_any_child_fn_dumped = false;
    }
}
/* Helper for build_omp_regions.  Scan the dominator tree starting at
   block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
   true, the function ends once a single tree is built (otherwise, whole
   forest of OMP constructs may be built).  */
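/* For instance, a function body containing

     #pragma omp parallel
     {
       #pragma omp for
       for (...) ...
     }

   produces a GIMPLE_OMP_PARALLEL region whose inner chain holds the
   GIMPLE_OMP_FOR region; the matching GIMPLE_OMP_RETURN statements mark the
   exit block of each region.  */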
static void
build_omp_regions_1 (basic_block bb, struct omp_region *parent,
                     bool single_tree)
{
  gimple_stmt_iterator gsi;
  gimple *stmt;
  basic_block son;

  gsi = gsi_last_bb (bb);
  if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
    {
      struct omp_region *region;
      enum gimple_code code;

      stmt = gsi_stmt (gsi);
      code = gimple_code (stmt);
      if (code == GIMPLE_OMP_RETURN)
        {
          /* STMT is the return point out of region PARENT.  Mark it
             as the exit point and make PARENT the immediately
             enclosing region.  */
          gcc_assert (parent);
          region = parent;
          region->exit = bb;
          parent = parent->outer;
        }
      else if (code == GIMPLE_OMP_ATOMIC_STORE)
        {
          /* GIMPLE_OMP_ATOMIC_STORE is analogous to
             GIMPLE_OMP_RETURN, but matches with
             GIMPLE_OMP_ATOMIC_LOAD.  */
          gcc_assert (parent);
          gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
          region = parent;
          region->exit = bb;
          parent = parent->outer;
        }
      else if (code == GIMPLE_OMP_CONTINUE)
        {
          gcc_assert (parent);
          parent->cont = bb;
        }
      else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
        {
          /* GIMPLE_OMP_SECTIONS_SWITCH is part of
             GIMPLE_OMP_SECTIONS, and we do nothing for it.  */
        }
      else
        {
          region = new_omp_region (bb, code, parent);
          /* Otherwise...  */
          if (code == GIMPLE_OMP_TARGET)
            {
              switch (gimple_omp_target_kind (stmt))
                {
                case GF_OMP_TARGET_KIND_REGION:
                case GF_OMP_TARGET_KIND_DATA:
                case GF_OMP_TARGET_KIND_OACC_PARALLEL:
                case GF_OMP_TARGET_KIND_OACC_KERNELS:
                case GF_OMP_TARGET_KIND_OACC_DATA:
                case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
                  break;
                case GF_OMP_TARGET_KIND_UPDATE:
                case GF_OMP_TARGET_KIND_ENTER_DATA:
                case GF_OMP_TARGET_KIND_EXIT_DATA:
                case GF_OMP_TARGET_KIND_OACC_UPDATE:
                case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
                case GF_OMP_TARGET_KIND_OACC_DECLARE:
                  /* ..., other than for those stand-alone directives...  */
                  region = NULL;
                  break;
                default:
                  gcc_unreachable ();
                }
            }
          else if (code == GIMPLE_OMP_ORDERED
                   && omp_find_clause (gimple_omp_ordered_clauses
                                         (as_a <gomp_ordered *> (stmt)),
                                       OMP_CLAUSE_DEPEND))
            /* #pragma omp ordered depend is also just a stand-alone
               directive.  */
            region = NULL;
          /* ..., this directive becomes the parent for a new region.  */
          if (region)
            parent = region;
        }
    }

  if (single_tree && !parent)
    return;

  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    build_omp_regions_1 (son, parent, single_tree);
}
/* Builds the tree of OMP regions rooted at ROOT, storing it to
   root_omp_region.  */

static void
build_omp_regions_root (basic_block root)
{
  gcc_assert (root_omp_region == NULL);
  build_omp_regions_1 (root, NULL, true);
  gcc_assert (root_omp_region != NULL);
}
/* Expands omp construct (and its subconstructs) starting in HEAD.  */

void
omp_expand_local (basic_block head)
{
  build_omp_regions_root (head);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);
  expand_omp (root_omp_region);

  omp_free_regions ();
}
/* Scan the CFG and build a tree of OMP regions.  The root of the region
   tree is stored in root_omp_region.  */

static void
build_omp_regions (void)
{
  gcc_assert (root_omp_region == NULL);
  calculate_dominance_info (CDI_DOMINATORS);
  build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
}
/* Main entry point for expanding OMP-GIMPLE into runtime calls.  */

static unsigned int
execute_expand_omp (void)
{
  build_omp_regions ();

  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);

  expand_omp (root_omp_region);

  if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    verify_loop_structure ();
  cleanup_tree_cfg ();

  omp_free_regions ();

  return 0;
}
/* OMP expansion -- the default pass, run before creation of SSA form.  */

namespace {

const pass_data pass_data_expand_omp =
{
  GIMPLE_PASS, /* type */
  "ompexp", /* name */
  OPTGROUP_OPENMP, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_gimple_any, /* properties_required */
  PROP_gimple_eomp, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_expand_omp : public gimple_opt_pass
{
public:
  pass_expand_omp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_expand_omp, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      bool gate = ((flag_cilkplus != 0 || flag_openacc != 0 || flag_openmp != 0
                    || flag_openmp_simd != 0)
                   && !seen_error ());

      /* This pass always runs, to provide PROP_gimple_eomp.
         But often, there is nothing to do.  */
      if (!gate)
        return 0;

      return execute_expand_omp ();
    }

}; // class pass_expand_omp

} // anon namespace

gimple_opt_pass *
make_pass_expand_omp (gcc::context *ctxt)
{
  return new pass_expand_omp (ctxt);
}
namespace {

const pass_data pass_data_expand_omp_ssa =
{
  GIMPLE_PASS, /* type */
  "ompexpssa", /* name */
  OPTGROUP_OPENMP, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_cfg | PROP_ssa, /* properties_required */
  PROP_gimple_eomp, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_cleanup_cfg | TODO_rebuild_alias, /* todo_flags_finish */
};

class pass_expand_omp_ssa : public gimple_opt_pass
{
public:
  pass_expand_omp_ssa (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_expand_omp_ssa, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *fun)
    {
      return !(fun->curr_properties & PROP_gimple_eomp);
    }
  virtual unsigned int execute (function *) { return execute_expand_omp (); }
  opt_pass * clone () { return new pass_expand_omp_ssa (m_ctxt); }

}; // class pass_expand_omp_ssa

} // anon namespace

gimple_opt_pass *
make_pass_expand_omp_ssa (gcc::context *ctxt)
{
  return new pass_expand_omp_ssa (ctxt);
}
/* Called from tree-cfg.c::make_edges to create cfg edges for all relevant
   GIMPLE_OMP_* statements in basic block BB.  Return true if a fallthru
   edge to the following block should be created.  */
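/* For a GIMPLE_OMP_FOR region, for instance, the GIMPLE_OMP_CONTINUE block
   below gets an abnormal loopback edge to the first body block, and the
   GIMPLE_OMP_FOR block gets an abnormal edge past the continue block for the
   case in which the loop body is not executed at all.  */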
bool
omp_make_gimple_edges (basic_block bb, struct omp_region **region,
                       int *region_idx)
{
  gimple *last = last_stmt (bb);
  enum gimple_code code = gimple_code (last);
  struct omp_region *cur_region = *region;
  bool fallthru = false;

  switch (code)
    {
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_TEAMS:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_TASKGROUP:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_GRID_BODY:
      cur_region = new_omp_region (bb, code, cur_region);
      fallthru = true;
      break;

    case GIMPLE_OMP_ORDERED:
      cur_region = new_omp_region (bb, code, cur_region);
      fallthru = true;
      if (omp_find_clause (gimple_omp_ordered_clauses
                             (as_a <gomp_ordered *> (last)),
                           OMP_CLAUSE_DEPEND))
        cur_region = cur_region->outer;
      break;

    case GIMPLE_OMP_TARGET:
      cur_region = new_omp_region (bb, code, cur_region);
      fallthru = true;
      switch (gimple_omp_target_kind (last))
        {
        case GF_OMP_TARGET_KIND_REGION:
        case GF_OMP_TARGET_KIND_DATA:
        case GF_OMP_TARGET_KIND_OACC_PARALLEL:
        case GF_OMP_TARGET_KIND_OACC_KERNELS:
        case GF_OMP_TARGET_KIND_OACC_DATA:
        case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
          break;
        case GF_OMP_TARGET_KIND_UPDATE:
        case GF_OMP_TARGET_KIND_ENTER_DATA:
        case GF_OMP_TARGET_KIND_EXIT_DATA:
        case GF_OMP_TARGET_KIND_OACC_UPDATE:
        case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
        case GF_OMP_TARGET_KIND_OACC_DECLARE:
          cur_region = cur_region->outer;
          break;
        default:
          gcc_unreachable ();
        }
      break;

    case GIMPLE_OMP_SECTIONS:
      cur_region = new_omp_region (bb, code, cur_region);
      fallthru = true;
      break;

    case GIMPLE_OMP_SECTIONS_SWITCH:
      fallthru = false;
      break;

    case GIMPLE_OMP_ATOMIC_LOAD:
    case GIMPLE_OMP_ATOMIC_STORE:
      fallthru = true;
      break;

    case GIMPLE_OMP_RETURN:
      /* In the case of a GIMPLE_OMP_SECTION, the edge will go
         somewhere other than the next block.  This will be
         created later.  */
      cur_region->exit = bb;
      if (cur_region->type == GIMPLE_OMP_TASK)
        /* Add an edge corresponding to not scheduling the task
           immediately.  */
        make_edge (cur_region->entry, bb, EDGE_ABNORMAL);
      fallthru = cur_region->type != GIMPLE_OMP_SECTION;
      cur_region = cur_region->outer;
      break;

    case GIMPLE_OMP_CONTINUE:
      cur_region->cont = bb;
      switch (cur_region->type)
        {
        case GIMPLE_OMP_FOR:
          /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
             succs edges as abnormal to prevent splitting
             them.  */
          single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
          /* Make the loopback edge.  */
          make_edge (bb, single_succ (cur_region->entry),
                     EDGE_ABNORMAL);

          /* Create an edge from GIMPLE_OMP_FOR to exit, which
             corresponds to the case that the body of the loop
             is not executed at all.  */
          make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
          make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
          fallthru = false;
          break;

        case GIMPLE_OMP_SECTIONS:
          /* Wire up the edges into and out of the nested sections.  */
          {
            basic_block switch_bb = single_succ (cur_region->entry);

            struct omp_region *i;
            for (i = cur_region->inner; i; i = i->next)
              {
                gcc_assert (i->type == GIMPLE_OMP_SECTION);
                make_edge (switch_bb, i->entry, 0);
                make_edge (i->exit, bb, EDGE_FALLTHRU);
              }

            /* Make the loopback edge to the block with
               GIMPLE_OMP_SECTIONS_SWITCH.  */
            make_edge (bb, switch_bb, 0);

            /* Make the edge from the switch to exit.  */
            make_edge (switch_bb, bb->next_bb, 0);
            fallthru = false;
          }
          break;

        case GIMPLE_OMP_TASK:
          fallthru = true;
          break;

        default:
          gcc_unreachable ();
        }
      break;

    default:
      gcc_unreachable ();
    }

  if (*region != cur_region)
    {
      *region = cur_region;
      if (cur_region)
        *region_idx = cur_region->entry->index;
      else
        *region_idx = 0;
    }

  return fallthru;
}
#include "gt-omp-expand.h"