/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-gimple.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "splay-tree.h"
/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
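/* For orientation, a rough sketch of the end result.  The identifiers
   and exact call sequence below are illustrative only, not taken from
   a real dump:

	#pragma omp parallel num_threads (4)
	  body;

   is outlined into a child function and lowered to something like

	.omp_data_o.field = shared_var;
	__builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 4);
	foo._omp_fn.0 (&.omp_data_o);
	__builtin_GOMP_parallel_end ();  */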
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  tree stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;
/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  tree v, n1, n2, step, chunk_size, for_stmt;
  enum tree_code cond_code;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
};
static splay_tree all_contexts;
static int parallel_nesting_level;
struct omp_region *root_omp_region;
static void scan_omp (tree *, omp_context *);
static void lower_omp (tree *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
/* Find an OpenMP clause of type KIND within CLAUSES.  */

static tree
find_omp_clause (tree clauses, enum tree_code kind)
{
  for (; clauses; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return TREE_CODE (ctx->stmt) == OMP_PARALLEL;
}
/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}
/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (tree for_stmt, struct omp_for_data *fd)
{
  tree t, var;

  fd->for_stmt = for_stmt;

  t = OMP_FOR_INIT (for_stmt);
  gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
  fd->v = GIMPLE_STMT_OPERAND (t, 0);
  gcc_assert (SSA_VAR_P (fd->v));
  gcc_assert (TREE_CODE (TREE_TYPE (fd->v)) == INTEGER_TYPE);
  var = TREE_CODE (fd->v) == SSA_NAME ? SSA_NAME_VAR (fd->v) : fd->v;
  fd->n1 = GIMPLE_STMT_OPERAND (t, 1);

  t = OMP_FOR_COND (for_stmt);
  fd->cond_code = TREE_CODE (t);
  gcc_assert (TREE_OPERAND (t, 0) == var);
  fd->n2 = TREE_OPERAND (t, 1);
  switch (fd->cond_code)
    {
    case LT_EXPR:
    case GT_EXPR:
      break;
    case LE_EXPR:
      fd->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
			    build_int_cst (TREE_TYPE (fd->n2), 1));
      fd->cond_code = LT_EXPR;
      break;
    case GE_EXPR:
      fd->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
			    build_int_cst (TREE_TYPE (fd->n2), 1));
      fd->cond_code = GT_EXPR;
      break;
    default:
      gcc_unreachable ();
    }

  t = OMP_FOR_INCR (fd->for_stmt);
  gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
  gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == var);
  t = GIMPLE_STMT_OPERAND (t, 1);
  gcc_assert (TREE_OPERAND (t, 0) == var);
  switch (TREE_CODE (t))
    {
    case PLUS_EXPR:
      fd->step = TREE_OPERAND (t, 1);
      break;
    case MINUS_EXPR:
      fd->step = TREE_OPERAND (t, 1);
      fd->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (fd->step), fd->step);
      break;
    default:
      gcc_unreachable ();
    }

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;

  for (t = OMP_FOR_CLAUSES (for_stmt); t; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      default:
	break;
      }

  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }
}
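/* Illustrative example (not from a real dump): for a loop written as

	#pragma omp for schedule (dynamic)
	for (i = 0; i <= n; i++)

   the fields above end up as v = i, n1 = 0, n2 = n + 1 with
   cond_code = LT_EXPR and step = 1; sched_kind is
   OMP_CLAUSE_SCHEDULE_DYNAMIC and chunk_size is defaulted to 1 by the
   code above.  A loop counting down with >= is normalized to GT_EXPR
   the same way.  */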
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   OMP_FOR regions.  In some cases, these arguments are computed out
   of variables passed in from the parent to the child via 'struct
   .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)

   which is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */
static bool
workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  tree par_stmt, ws_stmt;

  par_stmt = last_stmt (par_entry_bb);
  ws_stmt = last_stmt (ws_entry_bb);

  if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
    return true;

  gcc_assert (TREE_CODE (ws_stmt) == OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd);

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.n1)
      || !is_gimple_min_invariant (fd.n2)
      || !is_gimple_min_invariant (fd.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}
/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static tree
get_ws_args_for (tree ws_stmt)
{
  tree t;

  if (TREE_CODE (ws_stmt) == OMP_FOR)
    {
      struct omp_for_data fd;
      tree ws_args;

      extract_omp_for_data (ws_stmt, &fd);

      ws_args = NULL_TREE;
      if (fd.chunk_size)
	{
	  t = fold_convert (long_integer_type_node, fd.chunk_size);
	  ws_args = tree_cons (NULL, t, ws_args);
	}

      t = fold_convert (long_integer_type_node, fd.step);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.n2);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.n1);
      ws_args = tree_cons (NULL, t, ws_args);

      return ws_args;
    }
  else if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 OMP_SECTIONS_SWITCH statement, except for the one to the exit
	 of the sections region.  */
      basic_block bb = single_succ (bb_for_stmt (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      t = tree_cons (NULL, t, NULL);
      return t;
    }

  gcc_unreachable ();
}
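/* Sketch of the resulting argument list (illustrative only): for
   "#pragma omp parallel for schedule (dynamic, 8)" iterating over
   [0, n) with step 1, the list built above is roughly (0, n, 1, 8),
   i.e. n1, n2, step and chunk size converted to long, ready to be
   appended to the GOMP_parallel_loop_*_start call made by
   expand_parallel_call.  */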
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != OMP_PARALLEL
      || (region->inner->type != OMP_FOR
	  && region->inner->type != OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
      && (OMP_PARALLEL_COMBINED (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      tree ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = OMP_FOR_CLAUSES (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}
/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}
/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}
/* Return true if DECL should be copied by pointer.  SHARED_P is true
   if DECL is to be shared.  */

static bool
use_pointer_for_field (const_tree decl, bool shared_p)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_p)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;
    }

  return false;
}
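/* A hypothetical example of the distinction made above: a shared
   "int i" that is never addressable can be copied into and out of the
   .omp_data_s record, while "int a[10]" (an aggregate) or a
   static/addressable variable only has its address stored in the
   record, and both sides access the original storage through that
   pointer.  */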
/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  DECL_SOURCE_LOCATION (copy) = DECL_SOURCE_LOCATION (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}
/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  TREE_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}
/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_fold_indirect_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_fold_indirect_ref (x);

  return x;
}
/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_fold_indirect_ref (x);
    }
  else if (is_parallel_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, false);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_fold_indirect_ref (x);

  return x;
}
/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_field (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
		 ctx->sender_decl, field, NULL);
}
/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, omp_context *ctx)
{
  tree field, type;

  gcc_assert (!splay_tree_lookup (ctx->field_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);

  field = build_decl (FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;

  insert_field_into_struct (ctx->record_type, field);

  splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		     (splay_tree_value) field);
}

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}
/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}
/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label ();
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_parallel_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}
/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   tree_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    fprintf (file, "%*sbb %d: OMP_CONTINUE\n", indent, "",
	     region->cont->index);

  if (region->exit)
    fprintf (file, "%*sbb %d: OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}
/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum tree_code type, struct omp_region *parent)
{
  struct omp_region *region = xcalloc (1, sizeof (*region));

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}
/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;

  for (r = root_omp_region; r; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}
/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (tree stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node (current_function_decl);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_region = -1;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}
/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t; t = TREE_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f; f = TREE_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f; f = TREE_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  TREE_CHAIN (new_f) = new_fields;
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}
/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool by_ref;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
      case OMP_CLAUSE_PRIVATE:
	decl = OMP_CLAUSE_DECL (c);
	if (!is_variable_sized (decl))
	  install_var_local (decl, ctx);
	break;

      case OMP_CLAUSE_SHARED:
	gcc_assert (is_parallel_ctx (ctx));
	decl = OMP_CLAUSE_DECL (c);
	gcc_assert (!is_variable_sized (decl));
	by_ref = use_pointer_for_field (decl, true);
	/* Global variables don't need to be copied,
	   the receiver side will use them directly.  */
	if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	  break;
	if (! TREE_READONLY (decl)
	    || TREE_ADDRESSABLE (decl)
	    || by_ref
	    || is_reference (decl))
	  {
	    install_var_field (decl, by_ref, ctx);
	    install_var_local (decl, ctx);
	    break;
	  }
	/* We don't need to copy const scalar vars back.  */
	OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	/* FALLTHRU */

      case OMP_CLAUSE_LASTPRIVATE:
	/* Let the corresponding firstprivate clause create
	   the variable.  */
	if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	  break;
	/* FALLTHRU */

      case OMP_CLAUSE_FIRSTPRIVATE:
      case OMP_CLAUSE_REDUCTION:
	decl = OMP_CLAUSE_DECL (c);
	if (is_variable_sized (decl))
	  ;
	else if (is_parallel_ctx (ctx)
		 && ! is_global_var (maybe_lookup_decl_in_outer_ctx (decl,
								     ctx)))
	  {
	    by_ref = use_pointer_for_field (decl, false);
	    install_var_field (decl, by_ref, ctx);
	  }
	install_var_local (decl, ctx);
	break;

      case OMP_CLAUSE_COPYPRIVATE:
	if (ctx->outer)
	  scan_omp (&OMP_CLAUSE_DECL (c), ctx->outer);
	/* FALLTHRU */

      case OMP_CLAUSE_COPYIN:
	decl = OMP_CLAUSE_DECL (c);
	by_ref = use_pointer_for_field (decl, false);
	install_var_field (decl, by_ref, ctx);
	break;

      case OMP_CLAUSE_DEFAULT:
	ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	break;

      case OMP_CLAUSE_IF:
      case OMP_CLAUSE_NUM_THREADS:
      case OMP_CLAUSE_SCHEDULE:
	if (ctx->outer)
	  scan_omp (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	break;

      case OMP_CLAUSE_NOWAIT:
      case OMP_CLAUSE_ORDERED:
	break;

      default:
	gcc_unreachable ();
      }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
      case OMP_CLAUSE_LASTPRIVATE:
	/* Let the corresponding firstprivate clause create
	   the variable.  */
	if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	  break;
	/* FALLTHRU */

      case OMP_CLAUSE_PRIVATE:
      case OMP_CLAUSE_FIRSTPRIVATE:
      case OMP_CLAUSE_REDUCTION:
	decl = OMP_CLAUSE_DECL (c);
	if (is_variable_sized (decl))
	  install_var_local (decl, ctx);
	fixup_remapped_decl (decl, ctx,
			     OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			     && OMP_CLAUSE_PRIVATE_DEBUG (c));
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	    && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	  scan_array_reductions = true;
	break;

      case OMP_CLAUSE_SHARED:
	decl = OMP_CLAUSE_DECL (c);
	if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	  fixup_remapped_decl (decl, ctx, false);
	break;

      case OMP_CLAUSE_COPYPRIVATE:
      case OMP_CLAUSE_COPYIN:
      case OMP_CLAUSE_DEFAULT:
      case OMP_CLAUSE_IF:
      case OMP_CLAUSE_NUM_THREADS:
      case OMP_CLAUSE_SCHEDULE:
      case OMP_CLAUSE_NOWAIT:
      case OMP_CLAUSE_ORDERED:
	break;

      default:
	gcc_unreachable ();
      }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (&OMP_CLAUSE_REDUCTION_INIT (c), ctx);
	  scan_omp (&OMP_CLAUSE_REDUCTION_MERGE (c), ctx);
	}
}
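/* For instance (illustrative only), scanning

	#pragma omp parallel shared (n) firstprivate (x) private (t)

   installs fields for n and x in the .omp_data_s record being built
   for the child function, plus local replacement decls for all three
   variables; t gets no field because private values are never
   communicated between parent and child.  */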
/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (void)
{
  tree name = DECL_ASSEMBLER_NAME (current_function_decl);
  size_t len = IDENTIFIER_LENGTH (name);
  char *tmp_name, *prefix;

  prefix = alloca (len + sizeof ("_omp_fn"));
  memcpy (prefix, IDENTIFIER_POINTER (name), len);
  strcpy (prefix + len, "_omp_fn");
#ifndef NO_DOT_IN_LABEL
  prefix[len] = '.';
#elif !defined NO_DOLLAR_IN_LABEL
  prefix[len] = '$';
#endif
  ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
  return get_identifier (tmp_name);
}
/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name ();
  type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (FUNCTION_DECL, name, type);
  decl = lang_hooks.decls.pushdecl (decl);

  ctx->cb.dst_fn = decl;

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;

  t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  DECL_ARGUMENTS (decl) = t;
  ctx->receiver_decl = t;

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  DECL_SOURCE_LOCATION (decl) = EXPR_LOCATION (ctx->stmt);
  cfun->function_end_locus = EXPR_LOCATION (ctx->stmt);
  pop_cfun ();
}
/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (tree *stmt_p, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (OMP_PARALLEL_BODY (*stmt_p))
      && find_omp_clause (OMP_CLAUSES (*stmt_p), OMP_CLAUSE_COPYIN) == NULL)
    {
      *stmt_p = build_empty_stmt ();
      return;
    }

  ctx = new_omp_context (*stmt_p, outer_ctx);
  if (parallel_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx);
  OMP_PARALLEL_FN (*stmt_p) = ctx->cb.dst_fn;

  scan_sharing_clauses (OMP_PARALLEL_CLAUSES (*stmt_p), ctx);
  scan_omp (&OMP_PARALLEL_BODY (*stmt_p), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}
/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (tree *stmt_p, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree stmt = *stmt_p;

  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (OMP_FOR_CLAUSES (stmt), ctx);

  scan_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
  scan_omp (&OMP_FOR_INIT (stmt), ctx);
  scan_omp (&OMP_FOR_COND (stmt), ctx);
  scan_omp (&OMP_FOR_INCR (stmt), ctx);
  scan_omp (&OMP_FOR_BODY (stmt), ctx);
}
/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (tree *stmt_p, omp_context *outer_ctx)
{
  tree stmt = *stmt_p;
  omp_context *ctx;

  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (OMP_SECTIONS_CLAUSES (stmt), ctx);
  scan_omp (&OMP_SECTIONS_BODY (stmt), ctx);
}
/* Scan an OpenMP single directive.  */

static void
scan_omp_single (tree *stmt_p, omp_context *outer_ctx)
{
  tree stmt = *stmt_p;
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (OMP_SINGLE_CLAUSES (stmt), ctx);
  scan_omp (&OMP_SINGLE_BODY (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}
/* Check OpenMP nesting restrictions.  */

static void
check_omp_nesting_restrictions (tree t, omp_context *ctx)
{
  switch (TREE_CODE (t))
    {
    case OMP_FOR:
    case OMP_SECTIONS:
    case OMP_SINGLE:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (TREE_CODE (ctx->stmt))
	  {
	  case OMP_FOR:
	  case OMP_SECTIONS:
	  case OMP_SINGLE:
	  case OMP_ORDERED:
	  case OMP_MASTER:
	  case OMP_CRITICAL:
	    warning (0, "work-sharing region may not be closely nested inside "
			"of work-sharing, critical, ordered or master region");
	    return;
	  case OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (TREE_CODE (ctx->stmt))
	  {
	  case OMP_FOR:
	  case OMP_SECTIONS:
	  case OMP_SINGLE:
	    warning (0, "master region may not be closely nested inside "
			"of work-sharing region");
	    return;
	  case OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (TREE_CODE (ctx->stmt))
	  {
	  case OMP_CRITICAL:
	    warning (0, "ordered region may not be closely nested inside "
			"of critical region");
	    return;
	  case OMP_FOR:
	    if (find_omp_clause (OMP_CLAUSES (ctx->stmt),
				 OMP_CLAUSE_ORDERED) == NULL)
	      warning (0, "ordered region must be closely nested inside "
			  "a loop region with an ordered clause");
	    return;
	  case OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
	if (TREE_CODE (ctx->stmt) == OMP_CRITICAL
	    && OMP_CRITICAL_NAME (t) == OMP_CRITICAL_NAME (ctx->stmt))
	  {
	    warning (0, "critical region may not be nested inside a critical "
			"region with the same name");
	    return;
	  }
      break;
    default:
      break;
    }
}
/* Callback for walk_stmts used to scan for OpenMP directives at TP.  */

static tree
scan_omp_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  omp_context *ctx = wi->info;
  tree t = *tp;

  if (EXPR_HAS_LOCATION (t))
    input_location = EXPR_LOCATION (t);

  /* Check the OpenMP nesting restrictions.  */
  if (OMP_DIRECTIVE_P (t) && ctx != NULL)
    check_omp_nesting_restrictions (t, ctx);

  *walk_subtrees = 0;
  switch (TREE_CODE (t))
    {
    case OMP_PARALLEL:
      parallel_nesting_level++;
      scan_omp_parallel (tp, ctx);
      parallel_nesting_level--;
      break;

    case OMP_FOR:
      scan_omp_for (tp, ctx);
      break;

    case OMP_SECTIONS:
      scan_omp_sections (tp, ctx);
      break;

    case OMP_SINGLE:
      scan_omp_single (tp, ctx);
      break;

    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_ORDERED:
    case OMP_CRITICAL:
      ctx = new_omp_context (*tp, ctx);
      scan_omp (&OMP_BODY (*tp), ctx);
      break;

    case BIND_EXPR:
      {
	tree var;
	*walk_subtrees = 1;

	for (var = BIND_EXPR_VARS (t); var; var = TREE_CHAIN (var))
	  insert_decl_map (&ctx->cb, var, var);
      }
      break;

    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
	*tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
	*tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
	*walk_subtrees = 1;
      break;
    }

  return NULL_TREE;
}
/* Scan all the statements starting at STMT_P.  CTX contains context
   information about the OpenMP directives and clauses found during
   the scan.  */

static void
scan_omp (tree *stmt_p, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.callback = scan_omp_1;
  wi.info = ctx;
  wi.want_bind_expr = (ctx != NULL);
  wi.want_locations = true;

  saved_location = input_location;
  walk_stmts (&wi, stmt_p);
  input_location = saved_location;
}
/* Re-gimplification and code generation routines.  */

/* Build a call to GOMP_barrier.  */

static tree
build_omp_barrier (void)
{
  return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
}

/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (tree stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}
/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs showed for clarity):

	#omp parallel shared(iD.1562)		-> outer parallel
	  iD.1562 = iD.1562 + 1;

	  #omp parallel shared (iD.1562)	-> inner parallel
	    iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

	outer parallel	.omp_data_s.1.i -> iD.1562
	inner parallel	.omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

	.omp_data_o.1.i = iD.1562;
	#omp parallel shared(iD.1562)		-> outer parallel
	  .omp_data_i.1 = &.omp_data_o.1
	  .omp_data_i.1->i = .omp_data_i.1->i + 1;

	  .omp_data_o.2.i = iD.1562;		-> **
	  #omp parallel shared(iD.1562)		-> inner parallel
	    .omp_data_i.2 = &.omp_data_o.2
	    .omp_data_i.2->i = .omp_data_i.2->i - 1;

   ** This is a problem.  The symbol iD.1562 cannot be referenced
   inside the body of the outer parallel region.  But since we are
   emitting this copy operation while expanding the inner parallel
   directive, we need to access the CTX structure of the outer
   parallel directive to get the correct mapping:

	.omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ? t : decl;
}
/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}
/* Construct the initialization value for reduction CLAUSE.  */

tree
omp_reduction_init (tree clause, tree type)
{
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return fold_convert (type, integer_zero_node);

    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert (type, integer_one_node);

    case BIT_AND_EXPR:
      return fold_convert (type, integer_minus_one_node);

    case MAX_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max, min;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    {
	      real_inf (&max);
	      real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
	    }
	  else
	    real_maxval (&min, 1, TYPE_MODE (type));
	  return build_real (type, min);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MIN_VALUE (type);
	}

    case MIN_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    real_inf (&max);
	  else
	    real_maxval (&max, 0, TYPE_MODE (type));
	  return build_real (type, max);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MAX_VALUE (type);
	}

    default:
      gcc_unreachable ();
    }
}
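/* Summary of the identity values chosen above (illustrative):
   +, -, |, ^ and logical or start at 0; *, logical and start at 1;
   & starts at ~0; max starts at the smallest value of the type (or
   -Inf for floats honoring infinities); min starts at the largest
   value (or +Inf).  */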
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
   private variables.  Initialization statements go in ILIST, while calls
   to destructors go in DLIST.  */

static void
lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
			 omp_context *ctx)
{
  tree_stmt_iterator diter;
  tree c, dtor, copyin_seq, x, ptr;
  bool copyin_by_ref = false;
  bool lastprivate_firstprivate = false;
  int pass;

  *dlist = alloc_stmt_list ();
  diter = tsi_start (*dlist);
  copyin_seq = NULL;

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.  */
  for (pass = 0; pass < 2; ++pass)
    {
      for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
	{
	  enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
	  tree var, new_var;
	  bool by_ref;

	  switch (c_kind)
	    {
	    case OMP_CLAUSE_PRIVATE:
	      if (OMP_CLAUSE_PRIVATE_DEBUG (c))
		continue;
	      break;
	    case OMP_CLAUSE_SHARED:
	      if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
		{
		  gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
		  continue;
		}
	      break;
	    case OMP_CLAUSE_FIRSTPRIVATE:
	    case OMP_CLAUSE_COPYIN:
	    case OMP_CLAUSE_REDUCTION:
	      break;
	    case OMP_CLAUSE_LASTPRIVATE:
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		{
		  lastprivate_firstprivate = true;
		  if (pass != 0)
		    continue;
		}
	      break;
	    default:
	      continue;
	    }

	  new_var = var = OMP_CLAUSE_DECL (c);
	  if (c_kind != OMP_CLAUSE_COPYIN)
	    new_var = lookup_decl (var, ctx);

	  if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
	    {
	      if (pass != 0)
		continue;
	    }
	  else if (is_variable_sized (var))
	    {
	      /* For variable sized types, we need to allocate the
		 actual storage here.  Call alloca and store the
		 result in the pointer decl that we created elsewhere.  */
	      if (pass == 0)
		continue;

	      ptr = DECL_VALUE_EXPR (new_var);
	      gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
	      ptr = TREE_OPERAND (ptr, 0);
	      gcc_assert (DECL_P (ptr));

	      x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
	      x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
	      x = fold_convert (TREE_TYPE (ptr), x);
	      x = build_gimple_modify_stmt (ptr, x);
	      gimplify_and_add (x, ilist);
	    }
	  else if (is_reference (var))
	    {
	      /* For references that are being privatized for Fortran,
		 allocate new backing storage for the new pointer
		 variable.  This allows us to avoid changing all the
		 code that expects a pointer to something that expects
		 a direct variable.  Note that this doesn't apply to
		 C++, since reference types are disallowed in data
		 sharing clauses there, except for NRV optimized
		 return values.  */
	      if (pass == 0)
		continue;

	      x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
	      if (TREE_CONSTANT (x))
		{
		  const char *name = NULL;
		  if (DECL_NAME (var))
		    name = IDENTIFIER_POINTER (DECL_NAME (new_var));

		  x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
					  name);
		  gimple_add_tmp_var (x);
		  x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
		}
	      else
		{
		  x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
		  x = fold_convert (TREE_TYPE (new_var), x);
		}

	      x = build_gimple_modify_stmt (new_var, x);
	      gimplify_and_add (x, ilist);

	      new_var = build_fold_indirect_ref (new_var);
	    }
	  else if (c_kind == OMP_CLAUSE_REDUCTION
		   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    {
	      if (pass == 0)
		continue;
	    }
	  else if (pass != 0)
	    continue;

	  switch (OMP_CLAUSE_CODE (c))
	    {
	    case OMP_CLAUSE_SHARED:
	      /* Shared global vars are just accessed directly.  */
	      if (is_global_var (new_var))
		break;
	      /* Set up the DECL_VALUE_EXPR for shared variables now.  This
		 needs to be delayed until after fixup_child_record_type so
		 that we get the correct type during the dereference.  */
	      by_ref = use_pointer_for_field (var, true);
	      x = build_receiver_ref (var, by_ref, ctx);
	      SET_DECL_VALUE_EXPR (new_var, x);
	      DECL_HAS_VALUE_EXPR_P (new_var) = 1;

	      /* ??? If VAR is not passed by reference, and the variable
		 hasn't been initialized yet, then we'll get a warning for
		 the store into the omp_data_s structure.  Ideally, we'd be
		 able to notice this and not store anything at all, but
		 we're generating code too early.  Suppress the warning.  */
	      if (!by_ref)
		TREE_NO_WARNING (var) = 1;
	      break;

	    case OMP_CLAUSE_LASTPRIVATE:
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		break;
	      /* FALLTHRU */

	    case OMP_CLAUSE_PRIVATE:
	      x = lang_hooks.decls.omp_clause_default_ctor (c, new_var);
	      if (x)
		gimplify_and_add (x, ilist);
	      /* FALLTHRU */

	    do_dtor:
	      x = lang_hooks.decls.omp_clause_dtor (c, new_var);
	      if (x)
		{
		  dtor = x;
		  gimplify_stmt (&dtor);
		  tsi_link_before (&diter, dtor, TSI_SAME_STMT);
		}
	      break;

	    case OMP_CLAUSE_FIRSTPRIVATE:
	      x = build_outer_var_ref (var, ctx);
	      x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
	      gimplify_and_add (x, ilist);
	      goto do_dtor;

	    case OMP_CLAUSE_COPYIN:
	      by_ref = use_pointer_for_field (var, false);
	      x = build_receiver_ref (var, by_ref, ctx);
	      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
	      append_to_statement_list (x, &copyin_seq);
	      copyin_by_ref |= by_ref;
	      break;

	    case OMP_CLAUSE_REDUCTION:
	      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
		{
		  gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c), ilist);
		  OMP_CLAUSE_REDUCTION_INIT (c) = NULL;
		}
	      else
		{
		  x = omp_reduction_init (c, TREE_TYPE (new_var));
		  gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
		  x = build_gimple_modify_stmt (new_var, x);
		  gimplify_and_add (x, ilist);
		}
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}
    }

  /* The copyin sequence is not to be executed by the main thread, since
     that would result in self-copies.  Perhaps not visible to scalars,
     but it certainly is to C++ operator=.  */
  if (copyin_seq)
    {
      x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
      x = build2 (NE_EXPR, boolean_type_node, x,
		  build_int_cst (TREE_TYPE (x), 0));
      x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
      gimplify_and_add (x, ilist);
    }

  /* If any copyin variable is passed by reference, we must ensure the
     master thread doesn't modify it before it is copied over in all
     threads.  Similarly for variables in both firstprivate and
     lastprivate clauses we need to ensure the lastprivate copying
     happens after firstprivate copying in all threads.  */
  if (copyin_by_ref || lastprivate_firstprivate)
    gimplify_and_add (build_omp_barrier (), ilist);
}
/* Generate code to implement the LASTPRIVATE clauses.  This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.  */

static void
lower_lastprivate_clauses (tree clauses, tree predicate, tree *stmt_list,
			   omp_context *ctx)
{
  tree sub_list, x, c;

  /* Early exit if there are no lastprivate clauses.  */
  clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
  if (clauses == NULL)
    {
      /* If this was a workshare clause, see if it had been combined
	 with its parallel.  In that case, look for the clauses on the
	 parallel statement itself.  */
      if (is_parallel_ctx (ctx))
	return;

      ctx = ctx->outer;
      if (ctx == NULL || !is_parallel_ctx (ctx))
	return;

      clauses = find_omp_clause (OMP_PARALLEL_CLAUSES (ctx->stmt),
				 OMP_CLAUSE_LASTPRIVATE);
      if (clauses == NULL)
	return;
    }

  sub_list = alloc_stmt_list ();

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, new_var;

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LASTPRIVATE)
	continue;

      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);

      x = build_outer_var_ref (var, ctx);
      if (is_reference (var))
	new_var = build_fold_indirect_ref (new_var);
      x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
      append_to_statement_list (x, &sub_list);
    }

  if (predicate)
    x = build3 (COND_EXPR, void_type_node, predicate, sub_list, NULL);
  else
    x = sub_list;

  gimplify_and_add (x, stmt_list);
}
1908 lower_reduction_clauses (tree clauses
, tree
*stmt_list
, omp_context
*ctx
)
1910 tree sub_list
= NULL
, x
, c
;
1913 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
1914 update in that case, otherwise use a lock. */
1915 for (c
= clauses
; c
&& count
< 2; c
= OMP_CLAUSE_CHAIN (c
))
1916 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_REDUCTION
)
1918 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
1920 /* Never use OMP_ATOMIC for array reductions. */
1930 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
1932 tree var
, ref
, new_var
;
1933 enum tree_code code
;
1935 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_REDUCTION
)
1938 var
= OMP_CLAUSE_DECL (c
);
1939 new_var
= lookup_decl (var
, ctx
);
1940 if (is_reference (var
))
1941 new_var
= build_fold_indirect_ref (new_var
);
1942 ref
= build_outer_var_ref (var
, ctx
);
1943 code
= OMP_CLAUSE_REDUCTION_CODE (c
);
1945 /* reduction(-:var) sums up the partial results, so it acts
1946 identically to reduction(+:var). */
1947 if (code
== MINUS_EXPR
)
1952 tree addr
= build_fold_addr_expr (ref
);
1954 addr
= save_expr (addr
);
1955 ref
= build1 (INDIRECT_REF
, TREE_TYPE (TREE_TYPE (addr
)), addr
);
1956 x
= fold_build2 (code
, TREE_TYPE (ref
), ref
, new_var
);
1957 x
= build2 (OMP_ATOMIC
, void_type_node
, addr
, x
);
1958 gimplify_and_add (x
, stmt_list
);
1962 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
1964 tree placeholder
= OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
);
1966 if (is_reference (var
))
1967 ref
= build_fold_addr_expr (ref
);
1968 SET_DECL_VALUE_EXPR (placeholder
, ref
);
1969 DECL_HAS_VALUE_EXPR_P (placeholder
) = 1;
1970 gimplify_and_add (OMP_CLAUSE_REDUCTION_MERGE (c
), &sub_list
);
1971 OMP_CLAUSE_REDUCTION_MERGE (c
) = NULL
;
1972 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
) = NULL
;
1976 x
= build2 (code
, TREE_TYPE (ref
), ref
, new_var
);
1977 ref
= build_outer_var_ref (var
, ctx
);
1978 x
= build_gimple_modify_stmt (ref
, x
);
1979 append_to_statement_list (x
, &sub_list
);
1983 x
= build_call_expr (built_in_decls
[BUILT_IN_GOMP_ATOMIC_START
], 0);
1984 gimplify_and_add (x
, stmt_list
);
1986 gimplify_and_add (sub_list
, stmt_list
);
1988 x
= build_call_expr (built_in_decls
[BUILT_IN_GOMP_ATOMIC_END
], 0);
1989 gimplify_and_add (x
, stmt_list
);
1993 /* Generate code to implement the COPYPRIVATE clauses. */
1996 lower_copyprivate_clauses (tree clauses
, tree
*slist
, tree
*rlist
,
2001 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
2006 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_COPYPRIVATE
)
2009 var
= OMP_CLAUSE_DECL (c
);
2010 by_ref
= use_pointer_for_field (var
, false);
2012 ref
= build_sender_ref (var
, ctx
);
2013 x
= lookup_decl_in_outer_ctx (var
, ctx
);
2014 x
= by_ref
? build_fold_addr_expr (x
) : x
;
2015 x
= build_gimple_modify_stmt (ref
, x
);
2016 gimplify_and_add (x
, slist
);
2018 ref
= build_receiver_ref (var
, by_ref
, ctx
);
2019 if (is_reference (var
))
2021 ref
= build_fold_indirect_ref (ref
);
2022 var
= build_fold_indirect_ref (var
);
2024 x
= lang_hooks
.decls
.omp_clause_assign_op (c
, var
, ref
);
2025 gimplify_and_add (x
, rlist
);
2030 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2031 and REDUCTION from the sender (aka parent) side. */
2034 lower_send_clauses (tree clauses
, tree
*ilist
, tree
*olist
, omp_context
*ctx
)
2038 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
2040 tree val
, ref
, x
, var
;
2041 bool by_ref
, do_in
= false, do_out
= false;
2043 switch (OMP_CLAUSE_CODE (c
))
2045 case OMP_CLAUSE_FIRSTPRIVATE
:
2046 case OMP_CLAUSE_COPYIN
:
2047 case OMP_CLAUSE_LASTPRIVATE
:
2048 case OMP_CLAUSE_REDUCTION
:
2054 val
= OMP_CLAUSE_DECL (c
);
2055 var
= lookup_decl_in_outer_ctx (val
, ctx
);
2057 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_COPYIN
2058 && is_global_var (var
))
2060 if (is_variable_sized (val
))
2062 by_ref
= use_pointer_for_field (val
, false);
2064 switch (OMP_CLAUSE_CODE (c
))
2066 case OMP_CLAUSE_FIRSTPRIVATE
:
2067 case OMP_CLAUSE_COPYIN
:
2071 case OMP_CLAUSE_LASTPRIVATE
:
2072 if (by_ref
|| is_reference (val
))
2074 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c
))
2082 case OMP_CLAUSE_REDUCTION
:
2084 do_out
= !(by_ref
|| is_reference (val
));
2093 ref
= build_sender_ref (val
, ctx
);
2094 x
= by_ref
? build_fold_addr_expr (var
) : var
;
2095 x
= build_gimple_modify_stmt (ref
, x
);
2096 gimplify_and_add (x
, ilist
);
2101 ref
= build_sender_ref (val
, ctx
);
2102 x
= build_gimple_modify_stmt (var
, ref
);
2103 gimplify_and_add (x
, olist
);
/* Generate code to implement SHARED from the sender (aka parent) side.
   This is trickier, since OMP_PARALLEL_CLAUSES doesn't list things that
   got automatically shared.  */

static void
lower_send_shared_vars (tree *ilist, tree *olist, omp_context *ctx)
{
  tree var, ovar, nvar, f, x;

  if (ctx->record_type == NULL)
    return;

  for (f = TYPE_FIELDS (ctx->record_type); f; f = TREE_CHAIN (f))
    {
      ovar = DECL_ABSTRACT_ORIGIN (f);
      nvar = maybe_lookup_decl (ovar, ctx);
      if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
	continue;

      /* If CTX is a nested parallel directive, find the immediately
	 enclosing parallel or workshare construct that contains a
	 mapping for OVAR.  */
      var = lookup_decl_in_outer_ctx (ovar, ctx);

      if (use_pointer_for_field (ovar, true))
	{
	  x = build_sender_ref (ovar, ctx);
	  var = build_fold_addr_expr (var);
	  x = build_gimple_modify_stmt (x, var);
	  gimplify_and_add (x, ilist);
	}
      else
	{
	  x = build_sender_ref (ovar, ctx);
	  x = build_gimple_modify_stmt (x, var);
	  gimplify_and_add (x, ilist);

	  x = build_sender_ref (ovar, ctx);
	  x = build_gimple_modify_stmt (var, x);
	  gimplify_and_add (x, olist);
	}
    }
}
2152 /* Build the function calls to GOMP_parallel_start etc to actually
2153 generate the parallel operation. REGION is the parallel region
2154 being expanded. BB is the block where to insert the code. WS_ARGS
2155 will be set if this is a call to a combined parallel+workshare
2156 construct, it contains the list of additional arguments needed by
2157 the workshare construct. */
static void
expand_parallel_call (struct omp_region *region, basic_block bb,
                      tree entry_stmt, tree ws_args)
{
  tree t, t1, t2, val, cond, c, clauses;
  block_stmt_iterator si;
  int start_ix;

  clauses = OMP_PARALLEL_CLAUSES (entry_stmt);

  /* Determine what flavor of GOMP_parallel_start we will be
     emitting.  */
  start_ix = BUILT_IN_GOMP_PARALLEL_START;
  if (is_combined_parallel (region))
    {
      switch (region->inner->type)
        {
        case OMP_FOR:
          start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
                     + region->inner->sched_kind;
          break;
        case OMP_SECTIONS:
          start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
          break;
        default:
          gcc_unreachable ();
        }
    }

  /* By default, the value of NUM_THREADS is zero (selected at run time)
     and there is no conditional.  */
  cond = NULL_TREE;
  val = build_int_cst (unsigned_type_node, 0);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);

  c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
  if (c)
    val = OMP_CLAUSE_NUM_THREADS_EXPR (c);

  /* Ensure 'val' is of the correct type.  */
  val = fold_convert (unsigned_type_node, val);

  /* If we found the clause 'if (cond)', build either
     (cond != 0) or (cond ? val : 1u).  */
  if (cond)
    {
      block_stmt_iterator si;

      cond = gimple_boolify (cond);

      if (integer_zerop (val))
        val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
                           build_int_cst (TREE_TYPE (cond), 0));
      else
        {
          basic_block cond_bb, then_bb, else_bb;
          edge e, e_then, e_else;
          tree t, tmp_then, tmp_else, tmp_join, tmp_var;

          tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
          if (gimple_in_ssa_p (cfun))
            {
              tmp_then = make_ssa_name (tmp_var, NULL_TREE);
              tmp_else = make_ssa_name (tmp_var, NULL_TREE);
              tmp_join = make_ssa_name (tmp_var, NULL_TREE);
            }
          else
            tmp_then = tmp_else = tmp_join = tmp_var;

          e = split_block (bb, NULL);
          cond_bb = e->src;
          bb = e->dest;
          remove_edge (e);

          then_bb = create_empty_bb (cond_bb);
          else_bb = create_empty_bb (then_bb);
          set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
          set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);

          t = build3 (COND_EXPR, void_type_node,
                      cond, NULL_TREE, NULL_TREE);

          si = bsi_start (cond_bb);
          bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);

          si = bsi_start (then_bb);
          t = build_gimple_modify_stmt (tmp_then, val);
          if (gimple_in_ssa_p (cfun))
            SSA_NAME_DEF_STMT (tmp_then) = t;
          bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);

          si = bsi_start (else_bb);
          t = build_gimple_modify_stmt (tmp_else,
                                        build_int_cst (unsigned_type_node, 1));
          if (gimple_in_ssa_p (cfun))
            SSA_NAME_DEF_STMT (tmp_else) = t;
          bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);

          make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
          make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
          e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
          e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);

          if (gimple_in_ssa_p (cfun))
            {
              tree phi = create_phi_node (tmp_join, bb);
              SSA_NAME_DEF_STMT (tmp_join) = phi;
              add_phi_arg (phi, tmp_then, e_then);
              add_phi_arg (phi, tmp_else, e_else);
            }

          val = tmp_join;
        }

      si = bsi_start (bb);
      val = force_gimple_operand_bsi (&si, val, true, NULL_TREE,
                                      false, BSI_CONTINUE_LINKING);
    }

  si = bsi_last (bb);
  t = OMP_PARALLEL_DATA_ARG (entry_stmt);
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  t2 = build_fold_addr_expr (OMP_PARALLEL_FN (entry_stmt));

  if (ws_args)
    {
      tree args = tree_cons (NULL, t2,
                             tree_cons (NULL, t1,
                                        tree_cons (NULL, val, ws_args)));
      t = build_function_call_expr (built_in_decls[start_ix], args);
    }
  else
    t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);

  force_gimple_operand_bsi (&si, t, true, NULL_TREE,
                            false, BSI_CONTINUE_LINKING);

  t = OMP_PARALLEL_DATA_ARG (entry_stmt);
  if (t == NULL)
    t = null_pointer_node;
  else
    t = build_fold_addr_expr (t);
  t = build_call_expr (OMP_PARALLEL_FN (entry_stmt), 1, t);
  force_gimple_operand_bsi (&si, t, true, NULL_TREE,
                            false, BSI_CONTINUE_LINKING);

  t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
  force_gimple_operand_bsi (&si, t, true, NULL_TREE,
                            false, BSI_CONTINUE_LINKING);
}
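
/* Illustration (not part of the pass): assuming the standard libgomp entry
   points, a directive such as

       #pragma omp parallel if (flag) num_threads (4)
         body ();

   is expanded into approximately the following calls, where __omp_fn.0 is
   the outlined child function and .omp_data_o the marshalled shared data
   (both names are the usual GCC-internal conventions, used here only for
   illustration):

       GOMP_parallel_start (__omp_fn.0, &.omp_data_o, flag ? 4 : 1);
       __omp_fn.0 (&.omp_data_o);
       GOMP_parallel_end ();

   The calling thread takes part in the team, which is why the child
   function is also invoked directly between the start and end calls.  */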

/* If exceptions are enabled, wrap *STMT_P in a MUST_NOT_THROW catch
   handler.  This prevents programs from violating the structured
   block semantics with throws.  */

static void
maybe_catch_exception (tree *stmt_p)
{
  tree f, t;

  if (!flag_exceptions)
    return;

  if (lang_protect_cleanup_actions)
    t = lang_protect_cleanup_actions ();
  else
    t = build_call_expr (built_in_decls[BUILT_IN_TRAP], 0);
  f = build2 (EH_FILTER_EXPR, void_type_node, NULL, NULL);
  EH_FILTER_MUST_NOT_THROW (f) = 1;
  gimplify_and_add (t, &EH_FILTER_FAILURE (f));

  t = build2 (TRY_CATCH_EXPR, void_type_node, *stmt_p, NULL);
  append_to_statement_list (f, &TREE_OPERAND (t, 1));

  *stmt_p = NULL;
  append_to_statement_list (t, stmt_p);
}

/* Chain all the DECLs in LIST by their TREE_CHAIN fields.  */

static tree
list2chain (tree list)
{
  tree t;

  for (t = list; t; t = TREE_CHAIN (t))
    {
      tree var = TREE_VALUE (t);
      if (TREE_CHAIN (t))
        TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
      else
        TREE_CHAIN (var) = NULL_TREE;
    }

  return list ? TREE_VALUE (list) : NULL_TREE;
}

/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the OMP_PARALLEL that
   left a barrier at the end of the OMP_PARALLEL region can now be
   removed.  */

static void
remove_exit_barrier (struct omp_region *region)
{
  block_stmt_iterator si;
  basic_block exit_bb;
  edge_iterator ei;
  edge e;
  tree t;

  exit_bb = region->exit;

  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
  if (! exit_bb)
    return;

  /* The last insn in the block will be the parallel's OMP_RETURN.  The
     workshare's OMP_RETURN will be in a preceding block.  The kinds of
     statements that can appear in between are extremely limited -- no
     memory operations at all.  Here, we allow nothing at all, so the
     only thing we allow to precede this OMP_RETURN is a label.  */
  si = bsi_last (exit_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
  bsi_prev (&si);
  if (!bsi_end_p (si) && TREE_CODE (bsi_stmt (si)) != LABEL_EXPR)
    return;

  FOR_EACH_EDGE (e, ei, exit_bb->preds)
    {
      si = bsi_last (e->src);
      if (bsi_end_p (si))
        continue;
      t = bsi_stmt (si);
      if (TREE_CODE (t) == OMP_RETURN)
        OMP_RETURN_NOWAIT (t) = 1;
    }
}

static void
remove_exit_barriers (struct omp_region *region)
{
  if (region->type == OMP_PARALLEL)
    remove_exit_barrier (region);

  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
        {
          region = region->next;
          remove_exit_barriers (region);
        }
    }
}
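
/* Illustration (not part of the pass): in a fragment such as

       #pragma omp parallel
         {
           #pragma omp for
             for (i = 0; i < n; i++)
               a[i] = f (i);
         }

   the worksharing loop ends with an implicit barrier and the parallel
   region ends with another one immediately after it, so the loop's
   OMP_RETURN can be marked nowait and only the barrier implied by
   GOMP_parallel_end remains.  This is a sketch of the intent, assuming
   nothing between the two returns touches memory.  */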

/* Optimize omp_get_thread_num () and omp_get_num_threads ()
   calls.  These can't be declared as const functions, but
   within one parallel body they are constant, so they can be
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  */

static void
optimize_omp_library_calls (void)
{
  basic_block bb;
  block_stmt_iterator bsi;
  tree thr_num_id
    = DECL_ASSEMBLER_NAME (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM]);
  tree num_thr_id
    = DECL_ASSEMBLER_NAME (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS]);

  FOR_EACH_BB (bb)
    for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
      {
        tree stmt = bsi_stmt (bsi);
        tree call = get_call_expr_in (stmt);
        tree decl;

        if (call
            && (decl = get_callee_fndecl (call))
            && DECL_EXTERNAL (decl)
            && TREE_PUBLIC (decl)
            && DECL_INITIAL (decl) == NULL)
          {
            tree built_in;

            if (DECL_NAME (decl) == thr_num_id)
              built_in = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
            else if (DECL_NAME (decl) == num_thr_id)
              built_in = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS];
            else
              continue;

            if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
                || call_expr_nargs (call) != 0)
              continue;

            if (flag_exceptions && !TREE_NOTHROW (decl))
              continue;

            if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
                || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
                   != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
              continue;

            CALL_EXPR_FN (call) = build_fold_addr_expr (built_in);
          }
      }
}

/* Expand the OpenMP parallel directive starting at REGION.  */

static void
expand_omp_parallel (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t, ws_args;
  block_stmt_iterator si;
  tree entry_stmt;
  edge e;

  entry_stmt = last_stmt (region->entry);
  child_fn = OMP_PARALLEL_FN (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  /* If this function has been already instrumented, make sure
     the child function isn't instrumented again.  */
  child_cfun->after_tree_profile = cfun->after_tree_profile;

  entry_bb = region->entry;
  exit_bb = region->exit;

  if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL_TREE;

  if (child_cfun->cfg)
    {
      /* Due to inlining, it may happen that we have already outlined
         the region, in which case all we need to do is make the
         sub-graph unreachable and emit the parallel call.  */
      edge entry_succ_e, exit_succ_e;
      block_stmt_iterator si;

      entry_succ_e = single_succ_edge (entry_bb);

      si = bsi_last (entry_bb);
      gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_PARALLEL);
      bsi_remove (&si, true);

      new_bb = entry_bb;
      if (exit_bb)
        {
          exit_succ_e = single_succ_edge (exit_bb);
          make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
        }
      remove_edge_and_dominated_blocks (entry_succ_e);
    }
  else
    {
      /* If the parallel region needs data sent from the parent
         function, then the very first statement (except possible
         tree profile counter updates) of the parallel body
         is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
         &.OMP_DATA_O is passed as an argument to the child function,
         we need to replace it with the argument as seen by the child
         function.

         In most cases, this will end up being the identity assignment
         .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
         a function call that has been inlined, the original PARM_DECL
         .OMP_DATA_I may have been converted into a different local
         variable.  In which case, we need to keep the assignment.  */
      if (OMP_PARALLEL_DATA_ARG (entry_stmt))
        {
          basic_block entry_succ_bb = single_succ (entry_bb);
          block_stmt_iterator si;
          tree parcopy_stmt = NULL_TREE, arg, narg;

          for (si = bsi_start (entry_succ_bb); ; bsi_next (&si))
            {
              tree stmt;

              gcc_assert (!bsi_end_p (si));
              stmt = bsi_stmt (si);
              if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
                continue;

              arg = GIMPLE_STMT_OPERAND (stmt, 1);
              if (TREE_CODE (arg) == ADDR_EXPR
                  && TREE_OPERAND (arg, 0)
                     == OMP_PARALLEL_DATA_ARG (entry_stmt))
                {
                  parcopy_stmt = stmt;
                  break;
                }
            }

          gcc_assert (parcopy_stmt != NULL_TREE);
          arg = DECL_ARGUMENTS (child_fn);

          if (!gimple_in_ssa_p (cfun))
            {
              if (GIMPLE_STMT_OPERAND (parcopy_stmt, 0) == arg)
                bsi_remove (&si, true);
              else
                GIMPLE_STMT_OPERAND (parcopy_stmt, 1) = arg;
            }
          else
            {
              /* If we are in ssa form, we must load the value from the default
                 definition of the argument.  That should not be defined now,
                 since the argument is not used uninitialized.  */
              gcc_assert (gimple_default_def (cfun, arg) == NULL);
              narg = make_ssa_name (arg, build_empty_stmt ());
              set_default_def (arg, narg);
              GIMPLE_STMT_OPERAND (parcopy_stmt, 1) = narg;
              update_stmt (parcopy_stmt);
            }
        }

      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = list2chain (child_cfun->unexpanded_var_list);
      DECL_SAVED_TREE (child_fn) = bb_stmt_list (single_succ (entry_bb));

      /* Reset DECL_CONTEXT on function arguments.  */
      for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
        DECL_CONTEXT (t) = child_fn;

      /* Split ENTRY_BB at OMP_PARALLEL so that it can be moved to the
         child function.  */
      si = bsi_last (entry_bb);
      t = bsi_stmt (si);
      gcc_assert (t && TREE_CODE (t) == OMP_PARALLEL);
      bsi_remove (&si, true);
      e = split_block (entry_bb, t);
      entry_bb = e->dest;
      single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

      /* Convert OMP_RETURN into a RETURN_EXPR.  */
      if (exit_bb)
        {
          si = bsi_last (exit_bb);
          gcc_assert (!bsi_end_p (si)
                      && TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
          t = build1 (RETURN_EXPR, void_type_node, NULL);
          bsi_insert_after (&si, t, BSI_SAME_STMT);
          bsi_remove (&si, true);
        }

      /* Move the parallel region into CHILD_CFUN.  */
      if (gimple_in_ssa_p (cfun))
        {
          push_cfun (child_cfun);
          init_ssa_operands ();
          cfun->gimple_df->in_ssa_p = true;
          pop_cfun ();
        }
      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb);
      if (exit_bb)
        single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;

      /* Inform the callgraph about the new function.  */
      DECL_STRUCT_FUNCTION (child_fn)->curr_properties
        = cfun->curr_properties;
      cgraph_add_new_function (child_fn, true);

      /* Fix the callgraph edges for child_cfun.  Those for cfun will be
         fixed in a following pass.  */
      push_cfun (child_cfun);
      optimize_omp_library_calls ();
      rebuild_cgraph_edges ();

      /* Some EH regions might become dead, see PR34608.  If
         pass_cleanup_cfg isn't the first pass to happen with the
         new child, these dead EH edges might cause problems.
         Clean them up now.  */
      if (flag_exceptions)
        {
          basic_block bb;
          tree save_current = current_function_decl;
          bool changed = false;

          current_function_decl = child_fn;
          FOR_EACH_BB (bb)
            changed |= tree_purge_dead_eh_edges (bb);
          if (changed)
            cleanup_tree_cfg ();
          current_function_decl = save_current;
        }
      pop_cfun ();
    }

  /* Emit a library call to launch the children threads.  */
  expand_parallel_call (region, new_bb, entry_stmt, ws_args);
  update_ssa (TODO_update_ssa_only_virtuals);
}

/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with any schedule.  Given parameters:

        for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

        more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
        if (more) goto L0; else goto L3;
    L0:
        V = istart0;
        iend = iend0;
    L1:
        BODY;
        V += STEP;
        if (V cond iend) goto L1; else goto L2;
    L2:
        if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

    If this is a combined omp parallel loop, instead of the call to
    GOMP_loop_foo_start, we call GOMP_loop_foo_next.  */

static void
expand_omp_for_generic (struct omp_region *region,
                        struct omp_for_data *fd,
                        enum built_in_function start_fn,
                        enum built_in_function next_fn)
{
  tree type, istart0, iend0, iend, phi;
  tree t, vmain, vback;
  basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb;
  basic_block l2_bb = NULL, l3_bb = NULL;
  block_stmt_iterator si;
  bool in_combined_parallel = is_combined_parallel (region);
  bool broken_loop = region->cont == NULL;
  edge e, ne;

  gcc_assert (!broken_loop || !in_combined_parallel);

  type = TREE_TYPE (fd->v);

  istart0 = create_tmp_var (long_integer_type_node, ".istart0");
  iend0 = create_tmp_var (long_integer_type_node, ".iend0");
  TREE_ADDRESSABLE (istart0) = 1;
  TREE_ADDRESSABLE (iend0) = 1;
  if (gimple_in_ssa_p (cfun))
    {
      add_referenced_var (istart0);
      add_referenced_var (iend0);
    }

  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (broken_loop
              || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  l1_bb = single_succ (l0_bb);
  if (!broken_loop)
    {
      l2_bb = create_empty_bb (cont_bb);
      gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
      gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
    }
  l3_bb = BRANCH_EDGE (entry_bb)->dest;
  exit_bb = region->exit;

  si = bsi_last (entry_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
  if (in_combined_parallel)
    {
      /* In a combined parallel loop, emit a call to
         GOMP_loop_foo_next.  */
      t = build_call_expr (built_in_decls[next_fn], 2,
                           build_fold_addr_expr (istart0),
                           build_fold_addr_expr (iend0));
    }
  else
    {
      tree t0, t1, t2, t3, t4;
      /* If this is not a combined parallel loop, emit a call to
         GOMP_loop_foo_start in ENTRY_BB.  */
      t4 = build_fold_addr_expr (iend0);
      t3 = build_fold_addr_expr (istart0);
      t2 = fold_convert (long_integer_type_node, fd->step);
      t1 = fold_convert (long_integer_type_node, fd->n2);
      t0 = fold_convert (long_integer_type_node, fd->n1);
      if (fd->chunk_size)
        {
          t = fold_convert (long_integer_type_node, fd->chunk_size);
          t = build_call_expr (built_in_decls[start_fn], 6,
                               t0, t1, t2, t, t3, t4);
        }
      else
        t = build_call_expr (built_in_decls[start_fn], 5,
                             t0, t1, t2, t3, t4);
    }
  t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
                                true, BSI_SAME_STMT);
  t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
  bsi_insert_after (&si, t, BSI_SAME_STMT);

  /* V may be used outside of the loop (e.g., to handle lastprivate clause).
     If this is the case, its value is undefined if the loop is not entered
     at all.  To handle this case, set its initial value to N1.  */
  if (gimple_in_ssa_p (cfun))
    {
      e = find_edge (entry_bb, l3_bb);
      for (phi = phi_nodes (l3_bb); phi; phi = PHI_CHAIN (phi))
        if (PHI_ARG_DEF_FROM_EDGE (phi, e) == fd->v)
          SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), fd->n1);
    }
  else
    {
      t = build_gimple_modify_stmt (fd->v, fd->n1);
      bsi_insert_before (&si, t, BSI_SAME_STMT);
    }

  /* Remove the OMP_FOR statement.  */
  bsi_remove (&si, true);

  /* Iteration setup for sequential loop goes in L0_BB.  */
  si = bsi_start (l0_bb);
  t = fold_convert (type, istart0);
  t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
                                false, BSI_CONTINUE_LINKING);
  t = build_gimple_modify_stmt (fd->v, t);
  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
  if (gimple_in_ssa_p (cfun))
    SSA_NAME_DEF_STMT (fd->v) = t;

  t = fold_convert (type, iend0);
  iend = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
                                   false, BSI_CONTINUE_LINKING);

  if (!broken_loop)
    {
      /* Code to control the increment and predicate for the sequential
         loop goes in the CONT_BB.  */
      si = bsi_last (cont_bb);
      t = bsi_stmt (si);
      gcc_assert (TREE_CODE (t) == OMP_CONTINUE);
      vmain = TREE_OPERAND (t, 1);
      vback = TREE_OPERAND (t, 0);

      t = fold_build2 (PLUS_EXPR, type, vmain, fd->step);
      t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
                                    true, BSI_SAME_STMT);
      t = build_gimple_modify_stmt (vback, t);
      bsi_insert_before (&si, t, BSI_SAME_STMT);
      if (gimple_in_ssa_p (cfun))
        SSA_NAME_DEF_STMT (vback) = t;

      t = build2 (fd->cond_code, boolean_type_node, vback, iend);
      t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
      bsi_insert_before (&si, t, BSI_SAME_STMT);

      /* Remove OMP_CONTINUE.  */
      bsi_remove (&si, true);

      /* Emit code to get the next parallel iteration in L2_BB.  */
      si = bsi_start (l2_bb);

      t = build_call_expr (built_in_decls[next_fn], 2,
                           build_fold_addr_expr (istart0),
                           build_fold_addr_expr (iend0));
      t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
                                    false, BSI_CONTINUE_LINKING);
      t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
      bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
    }

  /* Add the loop cleanup function.  */
  si = bsi_last (exit_bb);
  if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
    t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
  else
    t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
  t = build_call_expr (t, 0);
  bsi_insert_after (&si, t, BSI_SAME_STMT);
  bsi_remove (&si, true);

  /* Connect the new blocks.  */
  find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;

  if (!broken_loop)
    {
      e = find_edge (cont_bb, l3_bb);
      ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);

      for (phi = phi_nodes (l3_bb); phi; phi = PHI_CHAIN (phi))
        SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
                 PHI_ARG_DEF_FROM_EDGE (phi, e));
      remove_edge (e);

      find_edge (cont_bb, l1_bb)->flags = EDGE_TRUE_VALUE;
      make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
      make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);

      set_immediate_dominator (CDI_DOMINATORS, l2_bb,
                               recompute_dominator (CDI_DOMINATORS, l2_bb));
      set_immediate_dominator (CDI_DOMINATORS, l3_bb,
                               recompute_dominator (CDI_DOMINATORS, l3_bb));
      set_immediate_dominator (CDI_DOMINATORS, l0_bb,
                               recompute_dominator (CDI_DOMINATORS, l0_bb));
      set_immediate_dominator (CDI_DOMINATORS, l1_bb,
                               recompute_dominator (CDI_DOMINATORS, l1_bb));
    }
}
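
/* Illustration (not part of the pass): for a loop such as

       #pragma omp for schedule(dynamic, 4)
         for (i = 0; i < n; i++)
           body (i);

   the generated control flow corresponds roughly to this C sketch, using
   the libgomp iteration functions named in the pseudocode above (variable
   names are illustrative only):

       long istart0, iend0;
       if (GOMP_loop_dynamic_start (0, n, 1, 4, &istart0, &iend0))
         do
           {
             long v = istart0, iend = iend0;
             do
               body (v);
             while (++v < iend);
           }
         while (GOMP_loop_dynamic_next (&istart0, &iend0));
       GOMP_loop_end ();  */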

/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and no specified chunk size.  Given
   parameters:

        for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

        if (cond is <)
          adj = STEP - 1;
        else
          adj = STEP + 1;
        n = (adj + N2 - N1) / STEP;
        q = n / nthreads;
        q += (q * nthreads != n);
        s0 = q * threadid;
        e0 = min(s0 + q, n);
        V = s0 * STEP + N1;
        if (s0 >= e0) goto L2; else goto L0;
    L0:
        e = e0 * STEP + N1;
    L1:
        BODY;
        V += STEP;
        if (V cond e) goto L1;
    L2:
*/

static void
expand_omp_for_static_nochunk (struct omp_region *region,
                               struct omp_for_data *fd)
{
  tree n, q, s0, e0, e, t, nthreads, threadid;
  tree type, vmain, vback;
  basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
  basic_block fin_bb;
  block_stmt_iterator si;

  type = TREE_TYPE (fd->v);

  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  exit_bb = region->exit;

  /* Iteration space partitioning goes in ENTRY_BB.  */
  si = bsi_last (entry_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
  t = fold_convert (type, t);
  nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
                                       true, BSI_SAME_STMT);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
  t = fold_convert (type, t);
  threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
                                       true, BSI_SAME_STMT);

  fd->n1 = force_gimple_operand_bsi (&si,
                                     fold_convert (type, fd->n1),
                                     true, NULL_TREE,
                                     true, BSI_SAME_STMT);

  fd->n2 = force_gimple_operand_bsi (&si,
                                     fold_convert (type, fd->n2),
                                     true, NULL_TREE,
                                     true, BSI_SAME_STMT);

  fd->step = force_gimple_operand_bsi (&si,
                                       fold_convert (type, fd->step),
                                       true, NULL_TREE,
                                       true, BSI_SAME_STMT);

  t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, type, fd->step, t);
  t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
  t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
  t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
  t = fold_convert (type, t);
  n = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);

  t = fold_build2 (TRUNC_DIV_EXPR, type, n, nthreads);
  q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);

  t = fold_build2 (MULT_EXPR, type, q, nthreads);
  t = fold_build2 (NE_EXPR, type, t, n);
  t = fold_build2 (PLUS_EXPR, type, q, t);
  q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);

  t = build2 (MULT_EXPR, type, q, threadid);
  s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);

  t = fold_build2 (PLUS_EXPR, type, s0, q);
  t = fold_build2 (MIN_EXPR, type, t, n);
  e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);

  t = fold_convert (type, s0);
  t = fold_build2 (MULT_EXPR, type, t, fd->step);
  t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
  t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
                                true, BSI_SAME_STMT);
  t = build_gimple_modify_stmt (fd->v, t);
  bsi_insert_before (&si, t, BSI_SAME_STMT);
  if (gimple_in_ssa_p (cfun))
    SSA_NAME_DEF_STMT (fd->v) = t;

  t = build2 (GE_EXPR, boolean_type_node, s0, e0);
  t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
  bsi_insert_before (&si, t, BSI_SAME_STMT);

  /* Remove the OMP_FOR statement.  */
  bsi_remove (&si, true);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  si = bsi_start (seq_start_bb);

  t = fold_convert (type, e0);
  t = fold_build2 (MULT_EXPR, type, t, fd->step);
  t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
  e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
                                false, BSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop replaces the OMP_CONTINUE.  */
  si = bsi_last (cont_bb);
  t = bsi_stmt (si);
  gcc_assert (TREE_CODE (t) == OMP_CONTINUE);
  vmain = TREE_OPERAND (t, 1);
  vback = TREE_OPERAND (t, 0);

  t = fold_build2 (PLUS_EXPR, type, vmain, fd->step);
  t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
                                true, BSI_SAME_STMT);
  t = build_gimple_modify_stmt (vback, t);
  bsi_insert_before (&si, t, BSI_SAME_STMT);
  if (gimple_in_ssa_p (cfun))
    SSA_NAME_DEF_STMT (vback) = t;

  t = build2 (fd->cond_code, boolean_type_node, vback, e);
  t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
  bsi_insert_before (&si, t, BSI_SAME_STMT);

  /* Remove the OMP_CONTINUE statement.  */
  bsi_remove (&si, true);

  /* Replace the OMP_RETURN with a barrier, or nothing.  */
  si = bsi_last (exit_bb);
  if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
    force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
                              false, BSI_SAME_STMT);
  bsi_remove (&si, true);

  /* Connect all the blocks.  */
  find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
  find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
                           recompute_dominator (CDI_DOMINATORS, body_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
                           recompute_dominator (CDI_DOMINATORS, fin_bb));
}
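
/* Illustration (not part of the pass): the partitioning above gives each
   thread one contiguous block of the iteration space.  A minimal sketch of
   the arithmetic, with illustrative variable names:

       n = (N2 - N1 + STEP - 1) / STEP;      // trip count for "<" loops
       q = n / nthreads;
       q += (q * nthreads != n);             // round up
       s0 = q * threadid;
       e0 = s0 + q < n ? s0 + q : n;

   For example, with n = 10 and nthreads = 4, q becomes 3 and the threads
   receive the logical iteration ranges [0,3), [3,6), [6,9) and [9,10).  */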

/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and a specified chunk size.  Given
   parameters:

        for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

        if (cond is <)
          adj = STEP - 1;
        else
          adj = STEP + 1;
        n = (adj + N2 - N1) / STEP;
        trip = 0;
        V = threadid * CHUNK * STEP + N1;  -- this extra definition of V is
                                              here so that V is defined
                                              if the loop is not entered
    L0:
        s0 = (trip * nthreads + threadid) * CHUNK;
        e0 = min(s0 + CHUNK, n);
        if (s0 < n) goto L1; else goto L4;
    L1:
        V = s0 * STEP + N1;
        e = e0 * STEP + N1;
    L2:
        BODY;
        V += STEP;
        if (V cond e) goto L2; else goto L3;
    L3:
        trip += 1;
        goto L0;
    L4:
*/

static void
expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
{
  tree n, s0, e0, e, t, phi, nphi, args;
  tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
  tree type, cont, v_main, v_back, v_extra;
  basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
  basic_block trip_update_bb, cont_bb, fin_bb;
  block_stmt_iterator si;
  edge se, re, ene;

  type = TREE_TYPE (fd->v);

  entry_bb = region->entry;
  se = split_block (entry_bb, last_stmt (entry_bb));
  entry_bb = se->src;
  iter_part_bb = se->dest;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
              == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
  exit_bb = region->exit;

  /* Trip and adjustment setup goes in ENTRY_BB.  */
  si = bsi_last (entry_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
  t = fold_convert (type, t);
  nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
                                       true, BSI_SAME_STMT);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
  t = fold_convert (type, t);
  threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
                                       true, BSI_SAME_STMT);

  fd->n1 = force_gimple_operand_bsi (&si, fold_convert (type, fd->n1),
                                     true, NULL_TREE,
                                     true, BSI_SAME_STMT);
  fd->n2 = force_gimple_operand_bsi (&si, fold_convert (type, fd->n2),
                                     true, NULL_TREE,
                                     true, BSI_SAME_STMT);
  fd->step = force_gimple_operand_bsi (&si, fold_convert (type, fd->step),
                                       true, NULL_TREE,
                                       true, BSI_SAME_STMT);
  fd->chunk_size
    = force_gimple_operand_bsi (&si, fold_convert (type,
                                                   fd->chunk_size),
                                true, NULL_TREE,
                                true, BSI_SAME_STMT);

  t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, type, fd->step, t);
  t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
  t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
  t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
  t = fold_convert (type, t);
  n = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
                                true, BSI_SAME_STMT);

  trip_var = create_tmp_var (type, ".trip");
  if (gimple_in_ssa_p (cfun))
    {
      add_referenced_var (trip_var);
      trip_init = make_ssa_name (trip_var, NULL_TREE);
      trip_main = make_ssa_name (trip_var, NULL_TREE);
      trip_back = make_ssa_name (trip_var, NULL_TREE);
    }
  else
    {
      trip_init = trip_var;
      trip_main = trip_var;
      trip_back = trip_var;
    }

  t = build_gimple_modify_stmt (trip_init, build_int_cst (type, 0));
  bsi_insert_before (&si, t, BSI_SAME_STMT);
  if (gimple_in_ssa_p (cfun))
    SSA_NAME_DEF_STMT (trip_init) = t;

  t = fold_build2 (MULT_EXPR, type, threadid, fd->chunk_size);
  t = fold_build2 (MULT_EXPR, type, t, fd->step);
  t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
  v_extra = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
                                      true, BSI_SAME_STMT);

  /* Remove the OMP_FOR.  */
  bsi_remove (&si, true);

  /* Iteration space partitioning goes in ITER_PART_BB.  */
  si = bsi_last (iter_part_bb);

  t = fold_build2 (MULT_EXPR, type, trip_main, nthreads);
  t = fold_build2 (PLUS_EXPR, type, t, threadid);
  t = fold_build2 (MULT_EXPR, type, t, fd->chunk_size);
  s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
                                 false, BSI_CONTINUE_LINKING);

  t = fold_build2 (PLUS_EXPR, type, s0, fd->chunk_size);
  t = fold_build2 (MIN_EXPR, type, t, n);
  e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
                                 false, BSI_CONTINUE_LINKING);

  t = build2 (LT_EXPR, boolean_type_node, s0, n);
  t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  si = bsi_start (seq_start_bb);

  t = fold_convert (type, s0);
  t = fold_build2 (MULT_EXPR, type, t, fd->step);
  t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
  t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
                                false, BSI_CONTINUE_LINKING);
  t = build_gimple_modify_stmt (fd->v, t);
  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
  if (gimple_in_ssa_p (cfun))
    SSA_NAME_DEF_STMT (fd->v) = t;

  t = fold_convert (type, e0);
  t = fold_build2 (MULT_EXPR, type, t, fd->step);
  t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
  e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
                                false, BSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop goes in CONT_BB,
     replacing the OMP_CONTINUE.  */
  si = bsi_last (cont_bb);
  cont = bsi_stmt (si);
  gcc_assert (TREE_CODE (cont) == OMP_CONTINUE);
  v_main = TREE_OPERAND (cont, 1);
  v_back = TREE_OPERAND (cont, 0);

  t = build2 (PLUS_EXPR, type, v_main, fd->step);
  t = build_gimple_modify_stmt (v_back, t);
  bsi_insert_before (&si, t, BSI_SAME_STMT);
  if (gimple_in_ssa_p (cfun))
    SSA_NAME_DEF_STMT (v_back) = t;

  t = build2 (fd->cond_code, boolean_type_node, v_back, e);
  t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
  bsi_insert_before (&si, t, BSI_SAME_STMT);

  /* Remove OMP_CONTINUE.  */
  bsi_remove (&si, true);

  /* Trip update code goes into TRIP_UPDATE_BB.  */
  si = bsi_start (trip_update_bb);

  t = build_int_cst (type, 1);
  t = build2 (PLUS_EXPR, type, trip_main, t);
  t = build_gimple_modify_stmt (trip_back, t);
  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
  if (gimple_in_ssa_p (cfun))
    SSA_NAME_DEF_STMT (trip_back) = t;

  /* Replace the OMP_RETURN with a barrier, or nothing.  */
  si = bsi_last (exit_bb);
  if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
    force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
                              false, BSI_SAME_STMT);
  bsi_remove (&si, true);

  /* Connect the new blocks.  */
  find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;

  redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);

  if (gimple_in_ssa_p (cfun))
    {
      /* When we redirect the edge from trip_update_bb to iter_part_bb, we
         remove arguments of the phi nodes in fin_bb.  We need to create
         appropriate phi nodes in iter_part_bb instead.  */
      se = single_pred_edge (fin_bb);
      re = single_succ_edge (trip_update_bb);
      ene = single_succ_edge (entry_bb);

      args = PENDING_STMT (re);
      PENDING_STMT (re) = NULL_TREE;
      for (phi = phi_nodes (fin_bb);
           phi && args;
           phi = PHI_CHAIN (phi), args = TREE_CHAIN (args))
        {
          t = PHI_RESULT (phi);
          gcc_assert (t == TREE_PURPOSE (args));
          nphi = create_phi_node (t, iter_part_bb);
          SSA_NAME_DEF_STMT (t) = nphi;

          t = PHI_ARG_DEF_FROM_EDGE (phi, se);
          /* A special case -- fd->v is not yet computed in iter_part_bb, we
             need to use v_extra instead.  */
          if (t == fd->v)
            t = v_extra;
          add_phi_arg (nphi, t, ene);
          add_phi_arg (nphi, TREE_VALUE (args), re);
        }
      gcc_assert (!phi && !args);
      while ((phi = phi_nodes (fin_bb)) != NULL_TREE)
        remove_phi_node (phi, NULL_TREE, false);

      /* Make phi node for trip.  */
      phi = create_phi_node (trip_main, iter_part_bb);
      SSA_NAME_DEF_STMT (trip_main) = phi;
      add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
      add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
    }

  set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
  set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
                           recompute_dominator (CDI_DOMINATORS, iter_part_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
                           recompute_dominator (CDI_DOMINATORS, fin_bb));
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
                           recompute_dominator (CDI_DOMINATORS, seq_start_bb));
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
                           recompute_dominator (CDI_DOMINATORS, body_bb));
}
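
/* Illustration (not part of the pass): with a specified chunk size the
   iteration space is dealt out round-robin, one chunk per thread per
   "trip".  A sketch with illustrative names:

       for (trip = 0; ; trip++)
         {
           s0 = (trip * nthreads + threadid) * CHUNK;
           if (s0 >= n)
             break;
           e0 = s0 + CHUNK < n ? s0 + CHUNK : n;
           for (i = s0; i < e0; i++)
             body (N1 + i * STEP);
         }

   For example, with n = 10, CHUNK = 2 and nthreads = 2, thread 0 runs the
   logical iterations [0,2), [4,6), [8,10) and thread 1 runs [2,4), [6,8).  */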

/* Expand the OpenMP loop defined by REGION.  */

static void
expand_omp_for (struct omp_region *region)
{
  struct omp_for_data fd;

  extract_omp_for_data (last_stmt (region->entry), &fd);
  region->sched_kind = fd.sched_kind;

  gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
  BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  if (region->cont)
    {
      gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
      BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
      FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
    }

  if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
      && !fd.have_ordered
      && region->cont != NULL)
    {
      if (fd.chunk_size == NULL)
        expand_omp_for_static_nochunk (region, &fd);
      else
        expand_omp_for_static_chunk (region, &fd);
    }
  else
    {
      int fn_index = fd.sched_kind + fd.have_ordered * 4;
      int start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
      int next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
      expand_omp_for_generic (region, &fd, start_ix, next_ix);
    }

  update_ssa (TODO_update_ssa_only_virtuals);
}

/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

        v = GOMP_sections_start (n);
    L0:
        switch (v)
          {
          case 0:
            goto L2;
          case 1:
            section 1;
            goto L1;
          case 2:
            ...
          case n:
            ...
          default:
            abort ();
          }
    L1:
        v = GOMP_sections_next ();
        goto L0;
    L2:
        reduction;

    If this is a combined parallel sections, replace the call to
    GOMP_sections_start with call to GOMP_sections_next.  */

static void
expand_omp_sections (struct omp_region *region)
{
  tree label_vec, l1, l2, t, u, sections_stmt, vin, vmain, vnext, cont;
  unsigned i, casei, len;
  basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
  block_stmt_iterator si;
  struct omp_region *inner;
  bool exit_reachable = region->cont != NULL;

  gcc_assert (exit_reachable == (region->exit != NULL));
  entry_bb = region->entry;
  l0_bb = single_succ (entry_bb);
  l1_bb = region->cont;
  l2_bb = region->exit;
  if (exit_reachable)
    {
      gcc_assert (single_pred (l2_bb) == l0_bb);
      default_bb = create_empty_bb (l1_bb->prev_bb);
      l1 = tree_block_label (l1_bb);
      l2 = tree_block_label (l2_bb);
    }
  else
    {
      default_bb = create_empty_bb (l0_bb);
      l1 = NULL_TREE;
      l2 = tree_block_label (default_bb);
    }

  /* We will build a switch() with enough cases for all the
     OMP_SECTION regions, a '0' case to handle the end of more work
     and a default case to abort if something goes wrong.  */
  len = EDGE_COUNT (l0_bb->succs);
  label_vec = make_tree_vec (len + 1);

  /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
     OMP_SECTIONS statement.  */
  si = bsi_last (entry_bb);
  sections_stmt = bsi_stmt (si);
  gcc_assert (TREE_CODE (sections_stmt) == OMP_SECTIONS);
  vin = OMP_SECTIONS_CONTROL (sections_stmt);
  if (!is_combined_parallel (region))
    {
      /* If we are not inside a combined parallel+sections region,
         call GOMP_sections_start.  */
      t = build_int_cst (unsigned_type_node,
                         exit_reachable ? len - 1 : len);
      u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
      t = build_call_expr (u, 1, t);
    }
  else
    {
      /* Otherwise, call GOMP_sections_next.  */
      u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
      t = build_call_expr (u, 0);
    }
  t = build_gimple_modify_stmt (vin, t);
  bsi_insert_after (&si, t, BSI_SAME_STMT);
  if (gimple_in_ssa_p (cfun))
    SSA_NAME_DEF_STMT (vin) = t;
  bsi_remove (&si, true);

  /* The switch() statement replacing OMP_SECTIONS_SWITCH goes in L0_BB.  */
  si = bsi_last (l0_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTIONS_SWITCH);
  if (exit_reachable)
    {
      cont = last_stmt (l1_bb);
      gcc_assert (TREE_CODE (cont) == OMP_CONTINUE);
      vmain = TREE_OPERAND (cont, 1);
      vnext = TREE_OPERAND (cont, 0);
    }
  else
    {
      vmain = vin;
      vnext = NULL_TREE;
    }

  t = build3 (SWITCH_EXPR, void_type_node, vmain, NULL, label_vec);
  bsi_insert_after (&si, t, BSI_SAME_STMT);
  bsi_remove (&si, true);

  i = 0;
  if (exit_reachable)
    {
      t = build3 (CASE_LABEL_EXPR, void_type_node,
                  build_int_cst (unsigned_type_node, 0), NULL, l2);
      TREE_VEC_ELT (label_vec, 0) = t;
      i++;
    }

  /* Convert each OMP_SECTION into a CASE_LABEL_EXPR.  */
  for (inner = region->inner, casei = 1;
       inner;
       inner = inner->next, i++, casei++)
    {
      basic_block s_entry_bb, s_exit_bb;

      s_entry_bb = inner->entry;
      s_exit_bb = inner->exit;

      t = tree_block_label (s_entry_bb);
      u = build_int_cst (unsigned_type_node, casei);
      u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
      TREE_VEC_ELT (label_vec, i) = u;

      si = bsi_last (s_entry_bb);
      gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTION);
      gcc_assert (i < len || OMP_SECTION_LAST (bsi_stmt (si)));
      bsi_remove (&si, true);
      single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;

      if (s_exit_bb == NULL)
        continue;

      si = bsi_last (s_exit_bb);
      gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
      bsi_remove (&si, true);

      single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
    }

  /* Error handling code goes in DEFAULT_BB.  */
  t = tree_block_label (default_bb);
  u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
  TREE_VEC_ELT (label_vec, len) = u;
  make_edge (l0_bb, default_bb, 0);

  si = bsi_start (default_bb);
  t = build_call_expr (built_in_decls[BUILT_IN_TRAP], 0);
  bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);

  if (exit_reachable)
    {
      /* Code to get the next section goes in L1_BB.  */
      si = bsi_last (l1_bb);
      gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);

      t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
      t = build_gimple_modify_stmt (vnext, t);
      bsi_insert_after (&si, t, BSI_SAME_STMT);
      if (gimple_in_ssa_p (cfun))
        SSA_NAME_DEF_STMT (vnext) = t;
      bsi_remove (&si, true);

      single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;

      /* Cleanup function replaces OMP_RETURN in EXIT_BB.  */
      si = bsi_last (l2_bb);
      if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
        t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
      else
        t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
      t = build_call_expr (t, 0);
      bsi_insert_after (&si, t, BSI_SAME_STMT);
      bsi_remove (&si, true);
    }

  set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}
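
/* Illustration (not part of the pass): assuming the libgomp entry points
   named above, a directive with two sections behaves roughly like:

       for (v = GOMP_sections_start (2); v; v = GOMP_sections_next ())
         switch (v)
           {
           case 1: section_1 (); break;
           case 2: section_2 (); break;
           default: __builtin_trap ();
           }
       GOMP_sections_end ();

   Each call hands back the number of the next unassigned section, or 0
   once all sections have been given out.  */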

/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.  */

static void
expand_omp_single (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  block_stmt_iterator si;
  bool need_barrier = false;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = bsi_last (entry_bb);
  /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
     be removed.  We need to ensure that the thread that entered the single
     does not exit before the data is copied out by the other threads.  */
  if (find_omp_clause (OMP_SINGLE_CLAUSES (bsi_stmt (si)),
                       OMP_CLAUSE_COPYPRIVATE))
    need_barrier = true;
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE);
  bsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  si = bsi_last (exit_bb);
  if (!OMP_RETURN_NOWAIT (bsi_stmt (si)) || need_barrier)
    force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
                              false, BSI_SAME_STMT);
  bsi_remove (&si, true);
  single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}

/* Generic expansion for OpenMP synchronization directives: master,
   ordered and critical.  All we need to do here is remove the entry
   and exit markers for REGION.  */

static void
expand_omp_synch (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  block_stmt_iterator si;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = bsi_last (entry_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE
              || TREE_CODE (bsi_stmt (si)) == OMP_MASTER
              || TREE_CODE (bsi_stmt (si)) == OMP_ORDERED
              || TREE_CODE (bsi_stmt (si)) == OMP_CRITICAL);
  bsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  if (exit_bb)
    {
      si = bsi_last (exit_bb);
      gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
      bsi_remove (&si, true);
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
    }
}

/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a __sync_fetch_and_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns false if the expression is not of the proper form.  */

static bool
expand_omp_atomic_fetch_op (basic_block load_bb,
                            tree addr, tree loaded_val,
                            tree stored_val, int index)
{
  enum built_in_function base;
  tree decl, itype, call;
  enum insn_code *optab;
  tree rhs;
  basic_block store_bb = single_succ (load_bb);
  block_stmt_iterator bsi;
  tree stmt;

  /* We expect to find the following sequences:

   load_bb:
       OMP_ATOMIC_LOAD (tmp, mem)

   store_bb:
       val = tmp OP something; (or: something OP tmp)
       OMP_STORE (val)

  ???FIXME: Allow a more flexible sequence.
  Perhaps use data flow to pick the statements.  */

  bsi = bsi_after_labels (store_bb);
  stmt = bsi_stmt (bsi);
  if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
    return false;
  bsi_next (&bsi);
  if (TREE_CODE (bsi_stmt (bsi)) != OMP_ATOMIC_STORE)
    return false;

  if (!operand_equal_p (GIMPLE_STMT_OPERAND (stmt, 0), stored_val, 0))
    return false;

  rhs = GIMPLE_STMT_OPERAND (stmt, 1);

  /* Check for one of the supported fetch-op operations.  */
  switch (TREE_CODE (rhs))
    {
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      base = BUILT_IN_FETCH_AND_ADD_N;
      optab = sync_add_optab;
      break;
    case MINUS_EXPR:
      base = BUILT_IN_FETCH_AND_SUB_N;
      optab = sync_add_optab;
      break;
    case BIT_AND_EXPR:
      base = BUILT_IN_FETCH_AND_AND_N;
      optab = sync_and_optab;
      break;
    case BIT_IOR_EXPR:
      base = BUILT_IN_FETCH_AND_OR_N;
      optab = sync_ior_optab;
      break;
    case BIT_XOR_EXPR:
      base = BUILT_IN_FETCH_AND_XOR_N;
      optab = sync_xor_optab;
      break;
    default:
      return false;
    }
  /* Make sure the expression is of the proper form.  */
  if (operand_equal_p (TREE_OPERAND (rhs, 0), loaded_val, 0))
    rhs = TREE_OPERAND (rhs, 1);
  else if (commutative_tree_code (TREE_CODE (rhs))
           && operand_equal_p (TREE_OPERAND (rhs, 1), loaded_val, 0))
    rhs = TREE_OPERAND (rhs, 0);
  else
    return false;

  decl = built_in_decls[base + index + 1];
  itype = TREE_TYPE (TREE_TYPE (decl));

  if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
    return false;

  bsi = bsi_last (load_bb);
  gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
  call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
  force_gimple_operand_bsi (&bsi, call, true, NULL_TREE, true, BSI_SAME_STMT);
  bsi_remove (&bsi, true);

  bsi = bsi_last (store_bb);
  gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
  bsi_remove (&bsi, true);
  bsi = bsi_last (store_bb);
  bsi_remove (&bsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
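
/* Illustration (not part of the pass): when the right-hand side has the
   required shape, an update such as

       #pragma omp atomic
         counter += 5;

   can be emitted as a single call to the matching __sync builtin,
   e.g. __sync_fetch_and_add (&counter, 5), instead of a compare-and-swap
   loop or a mutex.  */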

/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      oldval = *addr;
      repeat:
        newval = rhs;    // with oldval replacing *addr in rhs
        oldval = __sync_val_compare_and_swap (addr, oldval, newval);
        if (oldval != newval)
          goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */

static bool
expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
                            tree addr, tree loaded_val, tree stored_val,
                            int index)
{
  tree loadedi, storedi, initial, new_stored, new_storedi, old_vali;
  tree type, itype, cmpxchg, iaddr;
  block_stmt_iterator bsi;
  basic_block loop_header = single_succ (load_bb);
  tree phi, x;
  edge e;

  cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
  type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  itype = TREE_TYPE (TREE_TYPE (cmpxchg));

  if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
    return false;

  /* Load the initial value, replacing the OMP_ATOMIC_LOAD.  */
  bsi = bsi_last (load_bb);
  gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
  initial = force_gimple_operand_bsi (&bsi, build_fold_indirect_ref (addr),
                                      true, NULL_TREE, true, BSI_SAME_STMT);
  /* Move the value to the LOADED_VAL temporary.  */
  if (gimple_in_ssa_p (cfun))
    {
      gcc_assert (phi_nodes (loop_header) == NULL_TREE);
      phi = create_phi_node (loaded_val, loop_header);
      SSA_NAME_DEF_STMT (loaded_val) = phi;
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
               initial);
    }
  else
    bsi_insert_before (&bsi,
                       build_gimple_modify_stmt (loaded_val, initial),
                       BSI_SAME_STMT);
  bsi_remove (&bsi, true);

  bsi = bsi_last (store_bb);
  gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);

  /* For floating-point values, we'll need to view-convert them to integers
     so that we can perform the atomic compare and swap.  Simplify the
     following code by always setting up the "i"ntegral variables.  */
  if (INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
    {
      loadedi = loaded_val;
      storedi = stored_val;
      iaddr = addr;
    }
  else
    {
      loadedi = force_gimple_operand_bsi (&bsi,
                                          build1 (VIEW_CONVERT_EXPR, itype,
                                                  loaded_val), true,
                                          NULL_TREE, true, BSI_SAME_STMT);
      storedi =
        force_gimple_operand_bsi (&bsi,
                                  build1 (VIEW_CONVERT_EXPR, itype,
                                          stored_val), true, NULL_TREE, true,
                                  BSI_SAME_STMT);
      iaddr = fold_convert (build_pointer_type (itype), addr);
    }

  /* Build the compare&swap statement.  */
  new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
  new_storedi = force_gimple_operand_bsi (&bsi,
                                          fold_convert (itype, new_storedi),
                                          true, NULL_TREE,
                                          true, BSI_SAME_STMT);
  if (storedi == stored_val)
    new_stored = new_storedi;
  else
    new_stored = force_gimple_operand_bsi (&bsi,
                                           build1 (VIEW_CONVERT_EXPR, type,
                                                   new_storedi), true,
                                           NULL_TREE, true, BSI_SAME_STMT);

  if (gimple_in_ssa_p (cfun))
    old_vali = loadedi;
  else
    {
      old_vali = create_tmp_var (itype, NULL);
      x = build_gimple_modify_stmt (old_vali, loadedi);
      bsi_insert_before (&bsi, x, BSI_SAME_STMT);

      x = build_gimple_modify_stmt (loaded_val, new_stored);
      bsi_insert_before (&bsi, x, BSI_SAME_STMT);
    }

  /* Note that we always perform the comparison as an integer, even for
     floating point.  This allows the atomic operation to properly
     succeed even with NaNs and -0.0.  */
  x = build3 (COND_EXPR, void_type_node,
              build2 (NE_EXPR, boolean_type_node,
                      new_storedi, old_vali), NULL_TREE, NULL_TREE);
  bsi_insert_before (&bsi, x, BSI_SAME_STMT);

  /* Update cfg.  */
  e = single_succ_edge (store_bb);
  e->flags &= ~EDGE_FALLTHRU;
  e->flags |= EDGE_FALSE_VALUE;

  e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);

  /* Copy the new value to loaded_val (we already did that before the condition
     if we are not in SSA).  */
  if (gimple_in_ssa_p (cfun))
    {
      phi = phi_nodes (loop_header);
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_stored);
    }

  /* Remove OMP_ATOMIC_STORE.  */
  bsi_remove (&bsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
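
/* Illustration (not part of the pass): for types with no fetch-and-op
   builtin (e.g. float), the update is retried until the compare-and-swap
   succeeds.  A sketch of the emitted loop for "x += y" on a float x,
   reinterpreting the bits as a 32-bit integer (names illustrative only):

       unsigned int old, cur, bits;
       cur = *(unsigned int *) &x;
       do
         {
           float tmp;
           old = cur;
           memcpy (&tmp, &old, sizeof tmp);
           tmp += y;
           memcpy (&bits, &tmp, sizeof bits);
           cur = __sync_val_compare_and_swap ((unsigned int *) &x, old, bits);
         }
       while (cur != old);

   The comparison is done on the integer view so that NaNs and -0.0 do not
   cause spurious retries.  */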

/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

                        GOMP_atomic_start ();
                        *addr = rhs;
                        GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.
   LOADED_VAL and ADDR are the operands of OMP_ATOMIC_LOAD we're expanding.
   STORED_VAL is the operand of the matching OMP_ATOMIC_STORE.

   We replace
   OMP_ATOMIC_LOAD (loaded_val, addr) with
   loaded_val = *addr;

   and replace
   OMP_ATOMIC_STORE (stored_val) with
   *addr = stored_val;  */

static bool
expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
                         tree addr, tree loaded_val, tree stored_val)
{
  block_stmt_iterator bsi;
  tree t;

  bsi = bsi_last (load_bb);
  gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
  t = build_function_call_expr (t, 0);
  force_gimple_operand_bsi (&bsi, t, true, NULL_TREE, true, BSI_SAME_STMT);

  t = build_gimple_modify_stmt (loaded_val, build_fold_indirect_ref (addr));
  if (gimple_in_ssa_p (cfun))
    SSA_NAME_DEF_STMT (loaded_val) = t;
  bsi_insert_before (&bsi, t, BSI_SAME_STMT);
  bsi_remove (&bsi, true);

  bsi = bsi_last (store_bb);
  gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);

  t = build_gimple_modify_stmt (build_fold_indirect_ref (unshare_expr (addr)),
                                stored_val);
  bsi_insert_before (&bsi, t, BSI_SAME_STMT);

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
  t = build_function_call_expr (t, 0);
  force_gimple_operand_bsi (&bsi, t, true, NULL_TREE, true, BSI_SAME_STMT);
  bsi_remove (&bsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);
  return true;
}
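
/* Illustration (not part of the pass): the mutex fallback brackets the
   update with the global libgomp lock, roughly

       GOMP_atomic_start ();
       *addr = *addr OP operand;
       GOMP_atomic_end ();

   which is why it only stays consistent with other accesses that also go
   through #pragma omp atomic.  */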

/* Expand an OMP_ATOMIC statement.  We try to expand
   using expand_omp_atomic_fetch_op.  If it failed, we try to
   call expand_omp_atomic_pipeline, and if it fails too, the
   ultimate fallback is wrapping the operation in a mutex
   (expand_omp_atomic_mutex).  REGION is the atomic region built
   by build_omp_regions_1().  */

static void
expand_omp_atomic (struct omp_region *region)
{
  basic_block load_bb = region->entry, store_bb = region->exit;
  tree load = last_stmt (load_bb), store = last_stmt (store_bb);
  tree loaded_val = TREE_OPERAND (load, 0);
  tree addr = TREE_OPERAND (load, 1);
  tree stored_val = TREE_OPERAND (store, 0);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  HOST_WIDE_INT index;

  /* Make sure the type is one of the supported sizes.  */
  index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
  index = exact_log2 (index);
  if (index >= 0 && index <= 4)
    {
      unsigned int align = TYPE_ALIGN_UNIT (type);

      /* __sync builtins require strict data alignment.  */
      if (exact_log2 (align) >= index)
        {
          /* When possible, use specialized atomic update functions.  */
          if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
              && store_bb == single_succ (load_bb))
            {
              if (expand_omp_atomic_fetch_op (load_bb, addr,
                                              loaded_val, stored_val, index))
                return;
            }

          /* If we don't have specialized __sync builtins, try and implement
             as a compare and swap loop.  */
          if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
                                          loaded_val, stored_val, index))
            return;
        }
    }

  /* The ultimate fallback is wrapping the operation in a mutex.  */
  expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
}

/* Expand the parallel region tree rooted at REGION.  Expansion
   proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to
   be created (e.g., OMP_PARALLEL) can be expanded without having any
   internal dependencies in their body.  */

static void
expand_omp (struct omp_region *region)
{
  while (region)
    {
      /* First, determine whether this is a combined parallel+workshare
         region.  */
      if (region->type == OMP_PARALLEL)
        determine_parallel_type (region);

      if (region->inner)
        expand_omp (region->inner);

      switch (region->type)
        {
        case OMP_PARALLEL:
          expand_omp_parallel (region);
          break;

        case OMP_FOR:
          expand_omp_for (region);
          break;

        case OMP_SECTIONS:
          expand_omp_sections (region);
          break;

        case OMP_SECTION:
          /* Individual omp sections are handled together with their
             parent OMP_SECTIONS region.  */
          break;

        case OMP_SINGLE:
          expand_omp_single (region);
          break;

        case OMP_MASTER:
        case OMP_ORDERED:
        case OMP_CRITICAL:
          expand_omp_synch (region);
          break;

        case OMP_ATOMIC_LOAD:
          expand_omp_atomic (region);
          break;

        default:
          gcc_unreachable ();
        }

      region = region->next;
    }
}

/* Helper for build_omp_regions.  Scan the dominator tree starting at
   block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
   true, the function ends once a single tree is built (otherwise, whole
   forest of OMP constructs may be built).  */

static void
build_omp_regions_1 (basic_block bb, struct omp_region *parent,
                     bool single_tree)
{
  block_stmt_iterator si;
  tree stmt;
  basic_block son;

  si = bsi_last (bb);
  if (!bsi_end_p (si) && OMP_DIRECTIVE_P (bsi_stmt (si)))
    {
      struct omp_region *region;
      enum tree_code code;

      stmt = bsi_stmt (si);
      code = TREE_CODE (stmt);
      if (code == OMP_RETURN)
        {
          /* STMT is the return point out of region PARENT.  Mark it
             as the exit point and make PARENT the immediately
             enclosing region.  */
          gcc_assert (parent);
          region = parent;
          region->exit = bb;
          parent = parent->outer;
        }
      else if (code == OMP_ATOMIC_STORE)
        {
          /* OMP_ATOMIC_STORE is analogous to OMP_RETURN, but matches with
             OMP_ATOMIC_LOAD.  */
          gcc_assert (parent);
          gcc_assert (parent->type == OMP_ATOMIC_LOAD);
          region = parent;
          region->exit = bb;
          parent = parent->outer;
        }
      else if (code == OMP_CONTINUE)
        {
          gcc_assert (parent);
          parent->cont = bb;
        }
      else if (code == OMP_SECTIONS_SWITCH)
        {
          /* OMP_SECTIONS_SWITCH is part of OMP_SECTIONS, and we do nothing for
             it.  */ ;
        }
      else
        {
          /* Otherwise, this directive becomes the parent for a new
             region.  */
          region = new_omp_region (bb, code, parent);
          parent = region;
        }
    }

  if (single_tree && !parent)
    return;

  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    build_omp_regions_1 (son, parent, single_tree);
}

/* Builds the tree of OMP regions rooted at ROOT, storing it to
   root_omp_region.  */

static void
build_omp_regions_root (basic_block root)
{
  gcc_assert (root_omp_region == NULL);
  build_omp_regions_1 (root, NULL, true);
  gcc_assert (root_omp_region != NULL);
}

/* Expands omp construct (and its subconstructs) starting in HEAD.  */

void
omp_expand_local (basic_block head)
{
  build_omp_regions_root (head);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);
  expand_omp (root_omp_region);

  free_omp_regions ();
}

/* Scan the CFG and build a tree of OMP regions.  Return the root of
   the OMP region tree.  */

static void
build_omp_regions (void)
{
  gcc_assert (root_omp_region == NULL);
  calculate_dominance_info (CDI_DOMINATORS);
  build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
}

/* Main entry point for expanding OMP-GIMPLE into runtime calls.  */

static unsigned int
execute_expand_omp (void)
{
  build_omp_regions ();

  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);

  expand_omp (root_omp_region);

  cleanup_tree_cfg ();

  free_omp_regions ();

  return 0;
}

/* OMP expansion in SSA form.  For testing purposes only.  */

static bool
gate_expand_omp_ssa (void)
{
  return flag_openmp_ssa && flag_openmp != 0 && errorcount == 0;
}

struct tree_opt_pass pass_expand_omp_ssa =
{
  "ompexpssa",                          /* name */
  gate_expand_omp_ssa,                  /* gate */
  execute_expand_omp,                   /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  0,                                    /* tv_id */
  PROP_gimple_any,                      /* properties_required */
  PROP_gimple_lomp,                     /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
  0                                     /* letter */
};

/* OMP expansion -- the default pass, run before creation of SSA form.  */

static bool
gate_expand_omp (void)
{
  return ((!flag_openmp_ssa || !optimize)
          && flag_openmp != 0 && errorcount == 0);
}

struct tree_opt_pass pass_expand_omp =
{
  "ompexp",                             /* name */
  gate_expand_omp,                      /* gate */
  execute_expand_omp,                   /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  0,                                    /* tv_id */
  PROP_gimple_any,                      /* properties_required */
  PROP_gimple_lomp,                     /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
  0                                     /* letter */
};
/* Routines to lower OpenMP directives into OMP-GIMPLE.  */

/* Lower the OpenMP sections directive in *STMT_P.  */

static void
lower_omp_sections (tree *stmt_p, omp_context *ctx)
{
  tree new_stmt, stmt, body, bind, block, ilist, olist, new_body, control;
  tree t, dlist;
  tree_stmt_iterator tsi;
  unsigned i, len;

  stmt = *stmt_p;

  push_gimplify_context ();

  dlist = NULL;
  ilist = NULL;
  lower_rec_input_clauses (OMP_SECTIONS_CLAUSES (stmt), &ilist, &dlist, ctx);

  tsi = tsi_start (OMP_SECTIONS_BODY (stmt));
  for (len = 0; !tsi_end_p (tsi); len++, tsi_next (&tsi))
    continue;

  tsi = tsi_start (OMP_SECTIONS_BODY (stmt));
  body = alloc_stmt_list ();
  for (i = 0; i < len; i++, tsi_next (&tsi))
    {
      omp_context *sctx;
      tree sec_start, sec_end;

      sec_start = tsi_stmt (tsi);
      sctx = maybe_lookup_ctx (sec_start);
      gcc_assert (sctx);

      append_to_statement_list (sec_start, &body);

      lower_omp (&OMP_SECTION_BODY (sec_start), sctx);
      append_to_statement_list (OMP_SECTION_BODY (sec_start), &body);
      OMP_SECTION_BODY (sec_start) = NULL;

      if (i == len - 1)
	{
	  tree l = alloc_stmt_list ();
	  lower_lastprivate_clauses (OMP_SECTIONS_CLAUSES (stmt), NULL,
				     &l, ctx);
	  append_to_statement_list (l, &body);
	  OMP_SECTION_LAST (sec_start) = 1;
	}

      sec_end = make_node (OMP_RETURN);
      append_to_statement_list (sec_end, &body);
    }

  block = make_node (BLOCK);
  bind = build3 (BIND_EXPR, void_type_node, NULL, body, block);

  olist = NULL_TREE;
  lower_reduction_clauses (OMP_SECTIONS_CLAUSES (stmt), &olist, ctx);

  pop_gimplify_context (NULL_TREE);
  record_vars_into (ctx->block_vars, ctx->cb.dst_fn);

  new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (new_stmt) = 1;

  new_body = alloc_stmt_list ();
  append_to_statement_list (ilist, &new_body);
  append_to_statement_list (stmt, &new_body);
  append_to_statement_list (make_node (OMP_SECTIONS_SWITCH), &new_body);
  append_to_statement_list (bind, &new_body);

  control = create_tmp_var (unsigned_type_node, ".section");
  t = build2 (OMP_CONTINUE, void_type_node, control, control);
  OMP_SECTIONS_CONTROL (stmt) = control;
  append_to_statement_list (t, &new_body);

  append_to_statement_list (olist, &new_body);
  append_to_statement_list (dlist, &new_body);

  maybe_catch_exception (&new_body);

  t = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SECTIONS_CLAUSES (stmt),
					     OMP_CLAUSE_NOWAIT);
  append_to_statement_list (t, &new_body);

  BIND_EXPR_BODY (new_stmt) = new_body;
  OMP_SECTIONS_BODY (stmt) = NULL;

  *stmt_p = new_stmt;
}
/* A subroutine of lower_omp_single.  Expand the simple form of
   an OMP_SINGLE, without a copyprivate clause:

	if (GOMP_single_start ())
	  BODY;
	[ GOMP_barrier (); ]	-> unless 'nowait' is present.

   FIXME.  It may be better to delay expanding the logic of this until
   pass_expand_omp.  The expanded logic may make the job more difficult
   for a synchronization analysis pass.  */

static void
lower_omp_single_simple (tree single_stmt, tree *pre_p)
{
  tree t;

  t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_START], 0);
  t = build3 (COND_EXPR, void_type_node, t,
	      OMP_SINGLE_BODY (single_stmt), NULL);
  gimplify_and_add (t, pre_p);
}
/* A subroutine of lower_omp_single.  Expand the simple form of
   an OMP_SINGLE, with a copyprivate clause:

	#pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

      {
	if ((copyout_p = GOMP_single_copy_start ()) == NULL)
	  {
	    BODY;
	    copyout.a = a;
	    copyout.b = b;
	    copyout.c = c;
	    GOMP_single_copy_end (&copyout);
	  }
	else
	  {
	    a = copyout_p->a;
	    b = copyout_p->b;
	    c = copyout_p->c;
	  }
	GOMP_barrier ();
      }

   FIXME.  It may be better to delay expanding the logic of this until
   pass_expand_omp.  The expanded logic may make the job more difficult
   for a synchronization analysis pass.  */

static void
lower_omp_single_copy (tree single_stmt, tree *pre_p, omp_context *ctx)
{
  tree ptr_type, t, l0, l1, l2, copyin_seq;

  ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");

  ptr_type = build_pointer_type (ctx->record_type);
  ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");

  l0 = create_artificial_label ();
  l1 = create_artificial_label ();
  l2 = create_artificial_label ();

  t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
  t = fold_convert (ptr_type, t);
  t = build_gimple_modify_stmt (ctx->receiver_decl, t);
  gimplify_and_add (t, pre_p);

  t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
	      build_int_cst (ptr_type, 0));
  t = build3 (COND_EXPR, void_type_node, t,
	      build_and_jump (&l0), build_and_jump (&l1));
  gimplify_and_add (t, pre_p);

  t = build1 (LABEL_EXPR, void_type_node, l0);
  gimplify_and_add (t, pre_p);

  append_to_statement_list (OMP_SINGLE_BODY (single_stmt), pre_p);

  copyin_seq = NULL;
  lower_copyprivate_clauses (OMP_SINGLE_CLAUSES (single_stmt), pre_p,
			     &copyin_seq, ctx);

  t = build_fold_addr_expr (ctx->sender_decl);
  t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
  gimplify_and_add (t, pre_p);

  t = build_and_jump (&l2);
  gimplify_and_add (t, pre_p);

  t = build1 (LABEL_EXPR, void_type_node, l1);
  gimplify_and_add (t, pre_p);

  append_to_statement_list (copyin_seq, pre_p);

  t = build1 (LABEL_EXPR, void_type_node, l2);
  gimplify_and_add (t, pre_p);
}
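
/* Illustrative example (editorial, not from the original source): a
   typical use of the copyprivate form handled above broadcasts a value
   computed by the single thread to every thread's private copy:

	int seed;
	#pragma omp parallel private (seed)
	{
	  #pragma omp single copyprivate (seed)
	    seed = compute_seed ();	/* hypothetical helper */
	  use (seed);			/* hypothetical; every thread sees
					   the same value here */
	}

   The .omp_copy_o/.omp_copy_i temporaries created above play the roles
   of 'copyout' and 'copyout_p' in the sketch before this function.  */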
/* Expand code for an OpenMP single directive.  */

static void
lower_omp_single (tree *stmt_p, omp_context *ctx)
{
  tree t, bind, block, single_stmt = *stmt_p, dlist;

  push_gimplify_context ();

  block = make_node (BLOCK);
  *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  TREE_SIDE_EFFECTS (bind) = 1;

  lower_rec_input_clauses (OMP_SINGLE_CLAUSES (single_stmt),
			   &BIND_EXPR_BODY (bind), &dlist, ctx);
  lower_omp (&OMP_SINGLE_BODY (single_stmt), ctx);

  append_to_statement_list (single_stmt, &BIND_EXPR_BODY (bind));

  if (ctx->record_type)
    lower_omp_single_copy (single_stmt, &BIND_EXPR_BODY (bind), ctx);
  else
    lower_omp_single_simple (single_stmt, &BIND_EXPR_BODY (bind));

  OMP_SINGLE_BODY (single_stmt) = NULL;

  append_to_statement_list (dlist, &BIND_EXPR_BODY (bind));

  maybe_catch_exception (&BIND_EXPR_BODY (bind));

  t = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SINGLE_CLAUSES (single_stmt),
					     OMP_CLAUSE_NOWAIT);
  append_to_statement_list (t, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);

  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}
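
/* Editorial note (not part of the original source): for OMP_SINGLE the
   scanning phase only builds ctx->record_type when a copyprivate clause
   requires data to be communicated between threads, so the test above
   effectively selects the copyprivate expansion; every other 'single'
   takes the plain GOMP_single_start form.  */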
/* Expand code for an OpenMP master directive.  */

static void
lower_omp_master (tree *stmt_p, omp_context *ctx)
{
  tree bind, block, stmt = *stmt_p, lab = NULL, x;

  push_gimplify_context ();

  block = make_node (BLOCK);
  *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  TREE_SIDE_EFFECTS (bind) = 1;

  append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));

  x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
  x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
  x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  lower_omp (&OMP_MASTER_BODY (stmt), ctx);
  maybe_catch_exception (&OMP_MASTER_BODY (stmt));
  append_to_statement_list (OMP_MASTER_BODY (stmt), &BIND_EXPR_BODY (bind));
  OMP_MASTER_BODY (stmt) = NULL;

  x = build1 (LABEL_EXPR, void_type_node, lab);
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  x = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (x) = 1;
  append_to_statement_list (x, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);

  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}
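
/* Editorial sketch (not part of the original source): the lowering
   above guards the body with the thread number, so

	#pragma omp master
	  body;

   becomes, approximately,

	if (omp_get_thread_num () == 0)
	  body;
	OMP_RETURN [nowait]

   No barrier is implied by 'master'; the OMP_RETURN is only the region
   marker consumed later by pass_expand_omp.  */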
/* Expand code for an OpenMP ordered directive.  */

static void
lower_omp_ordered (tree *stmt_p, omp_context *ctx)
{
  tree bind, block, stmt = *stmt_p, x;

  push_gimplify_context ();

  block = make_node (BLOCK);
  *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  TREE_SIDE_EFFECTS (bind) = 1;

  append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));

  x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  lower_omp (&OMP_ORDERED_BODY (stmt), ctx);
  maybe_catch_exception (&OMP_ORDERED_BODY (stmt));
  append_to_statement_list (OMP_ORDERED_BODY (stmt), &BIND_EXPR_BODY (bind));
  OMP_ORDERED_BODY (stmt) = NULL;

  x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  x = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (x) = 1;
  append_to_statement_list (x, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);

  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}
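
/* Editorial sketch (not part of the original source): the calls built
   above bracket the body, so

	#pragma omp ordered
	  body;

   becomes, approximately,

	GOMP_ordered_start ();
	body;
	GOMP_ordered_end ();
	OMP_RETURN [nowait]

   with the runtime serializing the bodies in iteration order within an
   enclosing loop that carries the 'ordered' clause.  */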
/* Gimplify an OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case,
   it requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.  */

static GTY((param1_is (tree), param2_is (tree)))
  splay_tree critical_name_mutexes;

static void
lower_omp_critical (tree *stmt_p, omp_context *ctx)
{
  tree bind, block, stmt = *stmt_p;
  tree t, lock, unlock, name;

  name = OMP_CRITICAL_NAME (stmt);
  if (name)
    {
      tree decl;
      splay_tree_node n;

      if (!critical_name_mutexes)
	critical_name_mutexes
	  = splay_tree_new_ggc (splay_tree_compare_pointers);

      n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
      if (n == NULL)
	{
	  char *new_str;

	  decl = create_tmp_var_raw (ptr_type_node, NULL);

	  new_str = ACONCAT ((".gomp_critical_user_",
			      IDENTIFIER_POINTER (name), NULL));
	  DECL_NAME (decl) = get_identifier (new_str);
	  TREE_PUBLIC (decl) = 1;
	  TREE_STATIC (decl) = 1;
	  DECL_COMMON (decl) = 1;
	  DECL_ARTIFICIAL (decl) = 1;
	  DECL_IGNORED_P (decl) = 1;
	  varpool_finalize_decl (decl);

	  splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
			     (splay_tree_value) decl);
	}
      else
	decl = (tree) n->value;

      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
      lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));

      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
      unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
    }
  else
    {
      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
      lock = build_call_expr (lock, 0);

      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
      unlock = build_call_expr (unlock, 0);
    }

  push_gimplify_context ();

  block = make_node (BLOCK);
  *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  TREE_SIDE_EFFECTS (bind) = 1;

  append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));

  gimplify_and_add (lock, &BIND_EXPR_BODY (bind));

  lower_omp (&OMP_CRITICAL_BODY (stmt), ctx);
  maybe_catch_exception (&OMP_CRITICAL_BODY (stmt));
  append_to_statement_list (OMP_CRITICAL_BODY (stmt), &BIND_EXPR_BODY (bind));
  OMP_CRITICAL_BODY (stmt) = NULL;

  gimplify_and_add (unlock, &BIND_EXPR_BODY (bind));

  t = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (t) = 1;
  append_to_statement_list (t, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);
  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}
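
/* Editorial sketch (not part of the original source): for

	#pragma omp critical (lock_name)
	  body;

   the code above produces, approximately,

	GOMP_critical_name_start (&.gomp_critical_user_lock_name);
	body;
	GOMP_critical_name_end (&.gomp_critical_user_lock_name);

   where .gomp_critical_user_lock_name is the common, public,
   pointer-sized variable created on first use so that all translation
   units naming the same critical section share one mutex.  The unnamed
   form uses GOMP_critical_start ()/GOMP_critical_end () instead.  */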
/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST, iterator initialization is appended to
   *BODY_P.  */

static void
lower_omp_for_lastprivate (struct omp_for_data *fd, tree *body_p,
			   tree *dlist, struct omp_context *ctx)
{
  tree clauses, cond, stmts, vinit, t;
  enum tree_code cond_code;

  cond_code = fd->cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let VRP
     type optimizations deduce the value and remove a copy.  */
  if (host_integerp (fd->step, 0))
    {
      HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->step);
      if (step == 1 || step == -1)
	cond_code = EQ_EXPR;
    }

  cond = build2 (cond_code, boolean_type_node, fd->v, fd->n2);

  clauses = OMP_FOR_CLAUSES (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
  if (stmts != NULL)
    {
      append_to_statement_list (stmts, dlist);

      /* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
      vinit = fd->n1;
      if (cond_code == EQ_EXPR
	  && host_integerp (fd->n2, 0)
	  && ! integer_zerop (fd->n2))
	vinit = build_int_cst (TREE_TYPE (fd->v), 0);

      /* Initialize the iterator variable, so that threads that don't execute
	 any iterations don't execute the lastprivate clauses by accident.  */
      t = build_gimple_modify_stmt (fd->v, vinit);
      gimplify_and_add (t, body_p);
    }
}
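
/* Editorial sketch (not part of the original source): for a loop such
   as

	#pragma omp for lastprivate (x)
	for (i = 0; i < n; i++)
	  x = f (i);			/* f is only illustrative */

   the code generated above amounts to

	i = <n1 or 0>;			<- appended to *BODY_P
	... worksharing loop ...
	if (i >= n)			<- or i == n when the step is +-1
	  <copy the private x back to the shared x>;	<- appended to *DLIST

   so only the thread that ran the sequentially last iteration performs
   the lastprivate copy-out.  */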
/* Lower code for an OpenMP loop directive.  */

static void
lower_omp_for (tree *stmt_p, omp_context *ctx)
{
  tree t, stmt, ilist, dlist, new_stmt, *body_p, *rhs_p;
  struct omp_for_data fd;

  stmt = *stmt_p;

  push_gimplify_context ();

  lower_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
  lower_omp (&OMP_FOR_BODY (stmt), ctx);

  /* Move declaration of temporaries in the loop body before we make
     it go away.  */
  if (TREE_CODE (OMP_FOR_BODY (stmt)) == BIND_EXPR)
    record_vars_into (BIND_EXPR_VARS (OMP_FOR_BODY (stmt)), ctx->cb.dst_fn);

  new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (new_stmt) = 1;
  body_p = &BIND_EXPR_BODY (new_stmt);

  /* The pre-body and input clauses go before the lowered OMP_FOR.  */
  ilist = NULL;
  dlist = NULL;
  append_to_statement_list (OMP_FOR_PRE_BODY (stmt), body_p);
  lower_rec_input_clauses (OMP_FOR_CLAUSES (stmt), body_p, &dlist, ctx);

  /* Lower the header expressions.  At this point, we can assume that
     the header is of the form:

	#pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)

     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
     using the .omp_data_s mapping, if needed.  */
  rhs_p = &GIMPLE_STMT_OPERAND (OMP_FOR_INIT (stmt), 1);
  if (!is_gimple_min_invariant (*rhs_p))
    *rhs_p = get_formal_tmp_var (*rhs_p, body_p);

  rhs_p = &TREE_OPERAND (OMP_FOR_COND (stmt), 1);
  if (!is_gimple_min_invariant (*rhs_p))
    *rhs_p = get_formal_tmp_var (*rhs_p, body_p);

  rhs_p = &TREE_OPERAND (GIMPLE_STMT_OPERAND (OMP_FOR_INCR (stmt), 1), 1);
  if (!is_gimple_min_invariant (*rhs_p))
    *rhs_p = get_formal_tmp_var (*rhs_p, body_p);

  /* Once lowered, extract the bounds and clauses.  */
  extract_omp_for_data (stmt, &fd);

  lower_omp_for_lastprivate (&fd, body_p, &dlist, ctx);

  append_to_statement_list (stmt, body_p);

  append_to_statement_list (OMP_FOR_BODY (stmt), body_p);

  t = build2 (OMP_CONTINUE, void_type_node, fd.v, fd.v);
  append_to_statement_list (t, body_p);

  /* After the loop, add exit clauses.  */
  lower_reduction_clauses (OMP_FOR_CLAUSES (stmt), body_p, ctx);
  append_to_statement_list (dlist, body_p);

  maybe_catch_exception (body_p);

  /* Region exit marker goes at the end of the loop body.  */
  t = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (t) = fd.have_nowait;
  append_to_statement_list (t, body_p);

  pop_gimplify_context (NULL_TREE);
  record_vars_into (ctx->block_vars, ctx->cb.dst_fn);

  OMP_FOR_BODY (stmt) = NULL_TREE;
  OMP_FOR_PRE_BODY (stmt) = NULL_TREE;

  *stmt_p = new_stmt;
}
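
/* Editorial sketch (not part of the original source): the BIND_EXPR
   assembled above for

	#pragma omp for <clauses>
	for (V = N1; V cond N2; V = V + STEP)
	  body;

   contains, in order,

	<pre-body and input clause setup>
	<lastprivate initialization of V, if needed>
	OMP_FOR <clauses>
	body
	OMP_CONTINUE (V, V)
	<reduction and destructor code>
	OMP_RETURN [nowait]

   The actual iteration code and the GOMP_loop_* scheduling calls are
   generated from these markers later, by pass_expand_omp.  */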
/* Callback for walk_stmts.  Check if *TP only contains OMP_FOR
   or OMP_SECTIONS.  */

static tree
check_combined_parallel (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  int *info = wi->info;

  *walk_subtrees = 0;
  switch (TREE_CODE (*tp))
    {
    case OMP_FOR:
    case OMP_SECTIONS:
      *info = *info == 0 ? 1 : -1;
      break;
    default:
      *info = -1;
      break;
    }

  return NULL;
}
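
/* Editorial note (not part of the original source): this callback is
   used by lower_omp_parallel below to detect bodies that consist of
   exactly one worksharing construct, e.g.

	#pragma omp parallel
	  {
	    #pragma omp for
	    for (...) ...
	  }

   Such a region is marked OMP_PARALLEL_COMBINED so that pass_expand_omp
   can use the combined parallel + workshare runtime entry points
   instead of a generic GOMP_parallel_start.  */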
/* Lower the OpenMP parallel directive in *STMT_P.  CTX holds context
   information for the directive.  */

static void
lower_omp_parallel (tree *stmt_p, omp_context *ctx)
{
  tree clauses, par_bind, par_body, new_body, bind;
  tree olist, ilist, par_olist, par_ilist;
  tree stmt, child_fn, t;

  stmt = *stmt_p;

  clauses = OMP_PARALLEL_CLAUSES (stmt);
  par_bind = OMP_PARALLEL_BODY (stmt);
  par_body = BIND_EXPR_BODY (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (!OMP_PARALLEL_COMBINED (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.callback = check_combined_parallel;
      wi.info = &ws_num;
      wi.val_only = true;
      walk_stmts (&wi, &par_bind);
      if (ws_num == 1)
	OMP_PARALLEL_COMBINED (stmt) = 1;
    }

  push_gimplify_context ();

  par_olist = NULL_TREE;
  par_ilist = NULL_TREE;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (&par_body, ctx);
  lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (BIND_EXPR_VARS (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_data_o");
      OMP_PARALLEL_DATA_ARG (stmt) = ctx->sender_decl;
    }

  olist = NULL_TREE;
  ilist = NULL_TREE;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside OMP_PARALLEL_BODY.  */
  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  append_to_statement_list (ilist, &BIND_EXPR_BODY (bind));

  new_body = alloc_stmt_list ();

  if (ctx->record_type)
    {
      t = build_fold_addr_expr (ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
      t = build_gimple_modify_stmt (ctx->receiver_decl, t);
      append_to_statement_list (t, &new_body);
    }

  append_to_statement_list (par_ilist, &new_body);
  append_to_statement_list (par_body, &new_body);
  append_to_statement_list (par_olist, &new_body);
  maybe_catch_exception (&new_body);
  t = make_node (OMP_RETURN);
  append_to_statement_list (t, &new_body);
  OMP_PARALLEL_BODY (stmt) = new_body;

  append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
  append_to_statement_list (olist, &BIND_EXPR_BODY (bind));
  *stmt_p = bind;

  pop_gimplify_context (NULL_TREE);
}
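
/* Editorial sketch (not part of the original source): when the parallel
   body accesses shared or firstprivate variables, the pieces sequenced
   above communicate through one record:

	<.omp_data_o.a = a; ...>		<- lower_send_* output
	OMP_PARALLEL <clauses, data arg = .omp_data_o>
	  {
	    .omp_data_i = &.omp_data_o;		<- rewritten to the child
	    ... body uses .omp_data_i->a ...	   function's parameter when
	    OMP_RETURN				   the body is outlined
	  }
	<copy-back and destructor code>

   The outlining of the body into ctx->cb.dst_fn and the calls to
   GOMP_parallel_start/GOMP_parallel_end happen later, in
   pass_expand_omp.  */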
/* Pass *TP back through the gimplifier within the context determined by WI.
   This handles replacement of DECL_VALUE_EXPR, as well as adjusting the
   flags on ADDR_EXPR.  */

static void
lower_regimplify (tree *tp, struct walk_stmt_info *wi)
{
  enum gimplify_status gs;
  tree pre = NULL;

  if (wi->is_lhs)
    gs = gimplify_expr (tp, &pre, NULL, is_gimple_lvalue, fb_lvalue);
  else if (wi->val_only)
    gs = gimplify_expr (tp, &pre, NULL, is_gimple_val, fb_rvalue);
  else
    gs = gimplify_expr (tp, &pre, NULL, is_gimple_formal_tmp_var, fb_rvalue);
  gcc_assert (gs == GS_ALL_DONE);

  if (pre)
    tsi_link_before (&wi->tsi, pre, TSI_SAME_STMT);
}
/* Copy EXP into a temporary.  Insert the initialization statement before TSI.  */

static tree
init_tmp_var (tree exp, tree_stmt_iterator *tsi)
{
  tree t, stmt;

  t = create_tmp_var (TREE_TYPE (exp), NULL);
  DECL_GIMPLE_REG_P (t) = 1;
  stmt = build_gimple_modify_stmt (t, exp);
  SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi)));
  tsi_link_before (tsi, stmt, TSI_SAME_STMT);

  return t;
}
/* Similarly, but copy from the temporary and insert the statement
   after the iterator.  */

static tree
save_tmp_var (tree exp, tree_stmt_iterator *tsi)
{
  tree t, stmt;

  t = create_tmp_var (TREE_TYPE (exp), NULL);
  DECL_GIMPLE_REG_P (t) = 1;
  stmt = build_gimple_modify_stmt (exp, t);
  SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi)));
  tsi_link_after (tsi, stmt, TSI_SAME_STMT);

  return t;
}
/* Callback for walk_stmts.  Lower the OpenMP directive pointed by TP.  */

static tree
lower_omp_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  omp_context *ctx = wi->info;
  tree t = *tp;

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (errorcount && OMP_DIRECTIVE_P (*tp))
    {
      *tp = build_empty_stmt ();
      return NULL_TREE;
    }

  *walk_subtrees = 0;
  switch (TREE_CODE (*tp))
    {
    case OMP_PARALLEL:
      ctx = maybe_lookup_ctx (t);
      lower_omp_parallel (tp, ctx);
      break;

    case OMP_FOR:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_for (tp, ctx);
      break;

    case OMP_SECTIONS:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_sections (tp, ctx);
      break;

    case OMP_SINGLE:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_single (tp, ctx);
      break;

    case OMP_MASTER:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_master (tp, ctx);
      break;

    case OMP_ORDERED:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_ordered (tp, ctx);
      break;

    case OMP_CRITICAL:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_critical (tp, ctx);
      break;

    case VAR_DECL:
      if (ctx && DECL_HAS_VALUE_EXPR_P (t))
	{
	  lower_regimplify (&t, wi);
	  if (wi->val_only)
	    {
	      if (wi->is_lhs)
		t = save_tmp_var (t, &wi->tsi);
	      else
		t = init_tmp_var (t, &wi->tsi);
	    }
	  *tp = t;
	}
      break;

    case ADDR_EXPR:
      if (ctx)
	lower_regimplify (tp, wi);
      break;

    case ARRAY_REF:
    case ARRAY_RANGE_REF:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case COMPONENT_REF:
    case VIEW_CONVERT_EXPR:
      if (ctx)
	lower_regimplify (tp, wi);
      break;

    case INDIRECT_REF:
      if (ctx)
	{
	  wi->is_lhs = false;
	  wi->val_only = true;
	  lower_regimplify (&TREE_OPERAND (t, 0), wi);
	}
      break;

    default:
      if (!TYPE_P (t) && !DECL_P (t))
	*walk_subtrees = 1;
      break;
    }

  return NULL_TREE;
}
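
/* Editorial note (not part of the original source): the VAR_DECL case
   above is what makes the data-sharing remapping transparent to the
   body.  Variables remapped during scanning carry a DECL_VALUE_EXPR
   such as '.omp_data_i->a'; lower_regimplify materializes that
   indirection, and init_tmp_var/save_tmp_var load the value into (or
   store it from) a fresh temporary when the use site needs a bare
   rvalue or lvalue.  */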
static void
lower_omp (tree *stmt_p, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.callback = lower_omp_1;
  wi.info = ctx;
  wi.val_only = true;
  wi.want_locations = true;

  walk_stmts (&wi, stmt_p);
}
/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
				 delete_omp_context);

  scan_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
  gcc_assert (parallel_nesting_level == 0);

  if (all_contexts->root)
    lower_omp (&DECL_SAVED_TREE (current_function_decl), NULL);

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }

  return 0;
}
static bool
gate_lower_omp (void)
{
  return flag_openmp != 0;
}

struct tree_opt_pass pass_lower_omp =
{
  "omplower",			/* name */
  gate_lower_omp,		/* gate */
  execute_lower_omp,		/* execute */
  NULL,				/* sub */
  NULL,				/* next */
  0,				/* static_pass_number */
  0,				/* tv_id */
  PROP_gimple_any,		/* properties_required */
  PROP_gimple_lomp,		/* properties_provided */
  0,				/* properties_destroyed */
  0,				/* todo_flags_start */
  TODO_dump_func,		/* todo_flags_finish */
  0				/* letter */
};
/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */

static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (tree *stmt_p, tree branch_ctx, tree label_ctx)
{
  bool exit_p = true;

  if ((label_ctx ? TREE_VALUE (label_ctx) : NULL) == branch_ctx)
    return false;

  /* Try to avoid confusing the user by producing an error message
     with the correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
	{
	  if (TREE_VALUE (label_ctx) == branch_ctx)
	    {
	      exit_p = false;
	      break;
	    }
	  label_ctx = TREE_CHAIN (label_ctx);
	}
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");

  *stmt_p = build_empty_stmt ();
  return true;
}
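
/* Illustrative example (editorial, not from the original source): the
   kind of code rejected by diagnose_sb_0 is a branch that enters or
   leaves an OpenMP structured block:

	  goto inside;			/* "invalid entry" error */
	#pragma omp parallel
	  {
	  inside:
	    ...
	    goto outside;		/* "invalid exit" error */
	  }
	outside:
	  ...

   The offending statement is replaced with an empty statement so that
   later passes never see the malformed control flow.  */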
/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where in the tree each label is found.  */

static tree
diagnose_sb_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  tree context = (tree) wi->info;
  tree inner_context;
  tree t = *tp;

  *walk_subtrees = 0;
  switch (TREE_CODE (t))
    {
    case OMP_PARALLEL:
    case OMP_SECTIONS:
    case OMP_SINGLE:
      walk_tree (&OMP_CLAUSES (t), diagnose_sb_1, wi, NULL);
      /* FALLTHRU */
    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_ORDERED:
    case OMP_CRITICAL:
      /* The minimal context here is just a tree of statements.  */
      inner_context = tree_cons (NULL, t, context);
      wi->info = inner_context;
      walk_stmts (wi, &OMP_BODY (t));
      wi->info = context;
      break;

    case OMP_FOR:
      walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_1, wi, NULL);
      inner_context = tree_cons (NULL, t, context);
      wi->info = inner_context;
      walk_tree (&OMP_FOR_INIT (t), diagnose_sb_1, wi, NULL);
      walk_tree (&OMP_FOR_COND (t), diagnose_sb_1, wi, NULL);
      walk_tree (&OMP_FOR_INCR (t), diagnose_sb_1, wi, NULL);
      walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
      walk_stmts (wi, &OMP_FOR_BODY (t));
      wi->info = context;
      break;

    case LABEL_EXPR:
      splay_tree_insert (all_labels, (splay_tree_key) LABEL_EXPR_LABEL (t),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  tree context = (tree) wi->info;
  splay_tree_node n;
  tree t = *tp;

  *walk_subtrees = 0;
  switch (TREE_CODE (t))
    {
    case OMP_PARALLEL:
    case OMP_SECTIONS:
    case OMP_SINGLE:
      walk_tree (&OMP_CLAUSES (t), diagnose_sb_2, wi, NULL);
      /* FALLTHRU */
    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_ORDERED:
    case OMP_CRITICAL:
      wi->info = t;
      walk_stmts (wi, &OMP_BODY (t));
      wi->info = context;
      break;

    case OMP_FOR:
      walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_2, wi, NULL);
      wi->info = t;
      walk_tree (&OMP_FOR_INIT (t), diagnose_sb_2, wi, NULL);
      walk_tree (&OMP_FOR_COND (t), diagnose_sb_2, wi, NULL);
      walk_tree (&OMP_FOR_INCR (t), diagnose_sb_2, wi, NULL);
      walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
      walk_stmts (wi, &OMP_FOR_BODY (t));
      wi->info = context;
      break;

    case GOTO_EXPR:
      {
	tree lab = GOTO_DESTINATION (t);
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (tp, context, n ? (tree) n->value : NULL_TREE);
      }
      break;

    case SWITCH_EXPR:
      {
	tree vec = SWITCH_LABELS (t);
	int i, len = TREE_VEC_LENGTH (vec);
	for (i = 0; i < len; ++i)
	  {
	    tree lab = CASE_LABEL (TREE_VEC_ELT (vec, i));
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    if (diagnose_sb_0 (tp, context, (tree) n->value))
	      break;
	  }
      }
      break;

    case RETURN_EXPR:
      diagnose_sb_0 (tp, context, NULL_TREE);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
void
diagnose_omp_structured_block_errors (tree fndecl)
{
  tree save_current = current_function_decl;
  struct walk_stmt_info wi;

  current_function_decl = fndecl;

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  wi.callback = diagnose_sb_1;
  walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));

  memset (&wi, 0, sizeof (wi));
  wi.callback = diagnose_sb_2;
  wi.want_locations = true;
  wi.want_return_expr = true;
  walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));

  splay_tree_delete (all_labels);
  all_labels = NULL;

  current_function_decl = save_current;
}

#include "gt-omp-low.h"