/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "tree-gimple.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "splay-tree.h"

/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */

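/* Illustrative sketch only (the symbol names below are made up for the
   example, following the naming used elsewhere in this file): a directive
   such as

	#pragma omp parallel shared (a)
	  body;

   ends up, after both phases and pass_expand_omp, as roughly

	.omp_data_o.a = a;
	__builtin_GOMP_parallel_start (bar.omp_fn.0, &.omp_data_o, 0);
	bar.omp_fn.0 (&.omp_data_o);
	__builtin_GOMP_parallel_end ();

   where bar.omp_fn.0 is the outlined child function and .omp_data_o is the
   sender copy of the data-sharing record built by this pass.  */
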
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  tree stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  tree v, n1, n2, step, chunk_size, for_stmt;
  enum tree_code cond_code;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
};

static splay_tree all_contexts;
static int parallel_nesting_level;
struct omp_region *root_omp_region;

static void scan_omp (tree *, omp_context *);
static void lower_omp (tree *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

static tree
find_omp_clause (tree clauses, enum tree_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return TREE_CODE (ctx->stmt) == OMP_PARALLEL;
}

/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}

/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (tree for_stmt, struct omp_for_data *fd)
{
  tree t, var;

  fd->for_stmt = for_stmt;

  t = OMP_FOR_INIT (for_stmt);
  gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
  fd->v = GIMPLE_STMT_OPERAND (t, 0);
  gcc_assert (SSA_VAR_P (fd->v));
  gcc_assert (TREE_CODE (TREE_TYPE (fd->v)) == INTEGER_TYPE);
  var = TREE_CODE (fd->v) == SSA_NAME ? SSA_NAME_VAR (fd->v) : fd->v;
  fd->n1 = GIMPLE_STMT_OPERAND (t, 1);

  t = OMP_FOR_COND (for_stmt);
  fd->cond_code = TREE_CODE (t);
  gcc_assert (TREE_OPERAND (t, 0) == var);
  fd->n2 = TREE_OPERAND (t, 1);
  switch (fd->cond_code)
    {
    case LT_EXPR:
    case GT_EXPR:
      break;
    case LE_EXPR:
      fd->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
			    build_int_cst (TREE_TYPE (fd->n2), 1));
      fd->cond_code = LT_EXPR;
      break;
    case GE_EXPR:
      fd->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
			    build_int_cst (TREE_TYPE (fd->n2), 1));
      fd->cond_code = GT_EXPR;
      break;
    default:
      gcc_unreachable ();
    }

  t = OMP_FOR_INCR (fd->for_stmt);
  gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
  gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == var);
  t = GIMPLE_STMT_OPERAND (t, 1);
  gcc_assert (TREE_OPERAND (t, 0) == var);
  switch (TREE_CODE (t))
    {
    case PLUS_EXPR:
      fd->step = TREE_OPERAND (t, 1);
      break;
    case MINUS_EXPR:
      fd->step = TREE_OPERAND (t, 1);
      fd->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (fd->step), fd->step);
      break;
    default:
      gcc_unreachable ();
    }

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;

  for (t = OMP_FOR_CLAUSES (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      default:
	break;
      }

  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }
}

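/* Illustrative example (not taken from a testcase): given an already
   gimplified loop header

	for (i = 0; i <= N; i = i + 1)

   the code above records v = i, n1 = 0, step = 1, and because the
   controlling comparison is LE_EXPR it rewrites n2 to N + 1 and
   cond_code to LT_EXPR, so later expansion only has to handle the
   strict < and > forms.  */
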
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   OMP_FOR regions.  In some cases, these arguments are computed out
   of variables passed in from the parent to the child via 'struct
   .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)

   is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  tree par_stmt, ws_stmt;

  par_stmt = last_stmt (par_entry_bb);
  ws_stmt = last_stmt (ws_entry_bb);

  if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
    return true;

  gcc_assert (TREE_CODE (ws_stmt) == OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd);

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through these cases.  */
  if (!is_gimple_min_invariant (fd.n1)
      || !is_gimple_min_invariant (fd.n2)
      || !is_gimple_min_invariant (fd.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}

/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static tree
get_ws_args_for (tree ws_stmt)
{
  tree t;

  if (TREE_CODE (ws_stmt) == OMP_FOR)
    {
      struct omp_for_data fd;
      tree ws_args;

      extract_omp_for_data (ws_stmt, &fd);

      ws_args = NULL_TREE;
      if (fd.chunk_size)
	{
	  t = fold_convert (long_integer_type_node, fd.chunk_size);
	  ws_args = tree_cons (NULL, t, ws_args);
	}

      t = fold_convert (long_integer_type_node, fd.step);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.n2);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.n1);
      ws_args = tree_cons (NULL, t, ws_args);

      return ws_args;
    }
  else if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 OMP_SECTIONS_SWITCH statement, except for the one to the exit
	 of the sections region.  */
      basic_block bb = single_succ (bb_for_stmt (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      t = tree_cons (NULL, t, NULL);
      return t;
    }

  gcc_unreachable ();
}

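/* Illustrative note: for a combined parallel loop such as

	#pragma omp parallel for schedule (dynamic, 4)
	for (i = 1; i < N; i++)

   the list built above is (n1, n2, step, chunk), i.e. (1, N, 1, 4) once
   converted to long, which expand_parallel_call then appends after the
   child function, data pointer and thread count arguments of the
   GOMP_parallel_loop_*_start call.  */
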
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != OMP_PARALLEL
      || (region->inner->type != OMP_FOR
	  && region->inner->type != OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
      && (OMP_PARALLEL_COMBINED (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      tree ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = OMP_FOR_CLAUSES (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}

/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_P is true
   if DECL is to be shared.  */

static bool
use_pointer_for_field (const_tree decl, bool shared_p)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_p)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;
    }

  return false;
}

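/* For example (illustrative only): a shared local "int i" whose address is
   never taken is copied into the .omp_data_s record by value, whereas a
   shared "struct buf b" or any addressable variable is passed by pointer
   so that every thread operates on the original object.  */
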
/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  TREE_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_fold_indirect_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_fold_indirect_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_fold_indirect_ref (x);
    }
  else if (is_parallel_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, false);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_fold_indirect_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_field (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
		 ctx->sender_decl, field, NULL);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, omp_context *ctx)
{
  tree field, type;

  gcc_assert (!splay_tree_lookup (ctx->field_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);

  field = build_decl (FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;

  insert_field_into_struct (ctx->record_type, field);

  splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		     (splay_tree_value) field);
}

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label ();
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_parallel_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}

/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   tree_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    fprintf (file, "%*sbb %d: OMP_CONTINUE\n", indent, "",
	     region->cont->index);

  if (region->exit)
    fprintf (file, "%*sbb %d: OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}

/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum tree_code type, struct omp_region *parent)
{
  struct omp_region *region = xcalloc (1, sizeof (*region));

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}

/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (tree stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node (current_function_decl);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_region = -1;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  TREE_CHAIN (new_f) = new_fields;
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;
  bool by_ref;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  gcc_assert (is_parallel_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!is_variable_sized (decl));
	  by_ref = use_pointer_for_field (decl, true);
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    ;
	  else if (is_parallel_ctx (ctx)
		   && ! is_global_var (maybe_lookup_decl_in_outer_ctx (decl,
								       ctx)))
	    {
	      by_ref = use_pointer_for_field (decl, false);
	      install_var_field (decl, by_ref, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	  if (ctx->outer)
	    scan_omp (&OMP_CLAUSE_DECL (c), ctx->outer);
	  /* FALLTHRU */

	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, false);
	  install_var_field (decl, by_ref, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	  if (ctx->outer)
	    scan_omp (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (&OMP_CLAUSE_REDUCTION_INIT (c), ctx);
	  scan_omp (&OMP_CLAUSE_REDUCTION_MERGE (c), ctx);
	}
}

/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (void)
{
  tree name = DECL_ASSEMBLER_NAME (current_function_decl);
  size_t len = IDENTIFIER_LENGTH (name);
  char *tmp_name, *prefix;

  prefix = alloca (len + sizeof ("_omp_fn"));
  memcpy (prefix, IDENTIFIER_POINTER (name), len);
  strcpy (prefix + len, "_omp_fn");
#ifndef NO_DOT_IN_LABEL
  prefix[len] = '.';
#elif !defined NO_DOLLAR_IN_LABEL
  prefix[len] = '$';
#endif
  ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
  return get_identifier (tmp_name);
}

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name ();
  type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (FUNCTION_DECL, name, type);
  decl = lang_hooks.decls.pushdecl (decl);

  ctx->cb.dst_fn = decl;

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;

  t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  DECL_ARGUMENTS (decl) = t;
  ctx->receiver_decl = t;

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  DECL_SOURCE_LOCATION (decl) = EXPR_LOCATION (ctx->stmt);
  cfun->function_end_locus = EXPR_LOCATION (ctx->stmt);
  pop_cfun ();
}

/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (tree *stmt_p, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (OMP_PARALLEL_BODY (*stmt_p))
      && find_omp_clause (OMP_CLAUSES (*stmt_p), OMP_CLAUSE_COPYIN) == NULL)
    {
      *stmt_p = build_empty_stmt ();
      return;
    }

  ctx = new_omp_context (*stmt_p, outer_ctx);
  if (parallel_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx);
  OMP_PARALLEL_FN (*stmt_p) = ctx->cb.dst_fn;

  scan_sharing_clauses (OMP_PARALLEL_CLAUSES (*stmt_p), ctx);
  scan_omp (&OMP_PARALLEL_BODY (*stmt_p), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}

/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (tree *stmt_p, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree stmt;

  stmt = *stmt_p;
  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (OMP_FOR_CLAUSES (stmt), ctx);

  scan_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
  scan_omp (&OMP_FOR_INIT (stmt), ctx);
  scan_omp (&OMP_FOR_COND (stmt), ctx);
  scan_omp (&OMP_FOR_INCR (stmt), ctx);
  scan_omp (&OMP_FOR_BODY (stmt), ctx);
}

/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (tree *stmt_p, omp_context *outer_ctx)
{
  tree stmt;
  omp_context *ctx;

  stmt = *stmt_p;
  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (OMP_SECTIONS_CLAUSES (stmt), ctx);
  scan_omp (&OMP_SECTIONS_BODY (stmt), ctx);
}

/* Scan an OpenMP single directive.  */

static void
scan_omp_single (tree *stmt_p, omp_context *outer_ctx)
{
  tree stmt = *stmt_p;
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (OMP_SINGLE_CLAUSES (stmt), ctx);
  scan_omp (&OMP_SINGLE_BODY (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}

/* Check OpenMP nesting restrictions.  */
static void
check_omp_nesting_restrictions (tree t, omp_context *ctx)
{
  switch (TREE_CODE (t))
    {
    case OMP_FOR:
    case OMP_SECTIONS:
    case OMP_SINGLE:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (TREE_CODE (ctx->stmt))
	  {
	  case OMP_FOR:
	  case OMP_SECTIONS:
	  case OMP_SINGLE:
	  case OMP_ORDERED:
	  case OMP_MASTER:
	  case OMP_CRITICAL:
	    warning (0, "work-sharing region may not be closely nested inside "
			"of work-sharing, critical, ordered or master region");
	    return;
	  case OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (TREE_CODE (ctx->stmt))
	  {
	  case OMP_FOR:
	  case OMP_SECTIONS:
	  case OMP_SINGLE:
	    warning (0, "master region may not be closely nested inside "
			"of work-sharing region");
	    return;
	  case OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (TREE_CODE (ctx->stmt))
	  {
	  case OMP_CRITICAL:
	    warning (0, "ordered region may not be closely nested inside "
			"of critical region");
	    return;
	  case OMP_FOR:
	    if (find_omp_clause (OMP_CLAUSES (ctx->stmt),
				 OMP_CLAUSE_ORDERED) == NULL)
	      warning (0, "ordered region must be closely nested inside "
			  "a loop region with an ordered clause");
	    return;
	  case OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
	if (TREE_CODE (ctx->stmt) == OMP_CRITICAL
	    && OMP_CRITICAL_NAME (t) == OMP_CRITICAL_NAME (ctx->stmt))
	  {
	    warning (0, "critical region may not be nested inside a critical "
			"region with the same name");
	    return;
	  }
      break;
    default:
      break;
    }
}

/* Callback for walk_stmts used to scan for OpenMP directives at TP.  */

static tree
scan_omp_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  omp_context *ctx = wi->info;
  tree t = *tp;

  if (EXPR_HAS_LOCATION (t))
    input_location = EXPR_LOCATION (t);

  /* Check the OpenMP nesting restrictions.  */
  if (OMP_DIRECTIVE_P (t) && ctx != NULL)
    check_omp_nesting_restrictions (t, ctx);

  *walk_subtrees = 0;
  switch (TREE_CODE (t))
    {
    case OMP_PARALLEL:
      parallel_nesting_level++;
      scan_omp_parallel (tp, ctx);
      parallel_nesting_level--;
      break;

    case OMP_FOR:
      scan_omp_for (tp, ctx);
      break;

    case OMP_SECTIONS:
      scan_omp_sections (tp, ctx);
      break;

    case OMP_SINGLE:
      scan_omp_single (tp, ctx);
      break;

    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_ORDERED:
    case OMP_CRITICAL:
      ctx = new_omp_context (*tp, ctx);
      scan_omp (&OMP_BODY (*tp), ctx);
      break;

    case BIND_EXPR:
      {
	tree var;

	*walk_subtrees = 1;
	for (var = BIND_EXPR_VARS (t); var ; var = TREE_CHAIN (var))
	  insert_decl_map (&ctx->cb, var, var);
      }
      break;

    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
	*tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
	*tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
	*walk_subtrees = 1;
      break;
    }

  return NULL_TREE;
}

/* Scan all the statements starting at STMT_P.  CTX contains context
   information about the OpenMP directives and clauses found during
   the scan.  */

static void
scan_omp (tree *stmt_p, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.callback = scan_omp_1;
  wi.info = ctx;
  wi.want_bind_expr = (ctx != NULL);
  wi.want_locations = true;

  saved_location = input_location;
  walk_stmts (&wi, stmt_p);
  input_location = saved_location;
}

/* Re-gimplification and code generation routines.  */

/* Build a call to GOMP_barrier.  */

static tree
build_omp_barrier (void)
{
  return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
}

/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (tree stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}

/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs showed for clarity):

	#omp parallel shared(iD.1562)		-> outer parallel
	  iD.1562 = iD.1562 + 1;

	  #omp parallel shared (iD.1562)	-> inner parallel
	    iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

	outer parallel	.omp_data_s.1.i -> iD.1562
	inner parallel	.omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

	.omp_data_o.1.i = iD.1562;
	#omp parallel shared(iD.1562)		-> outer parallel
	  .omp_data_i.1 = &.omp_data_o.1
	  .omp_data_i.1->i = .omp_data_i.1->i + 1;

	  .omp_data_o.2.i = iD.1562;		-> **
	  #omp parallel shared(iD.1562)		-> inner parallel
	    .omp_data_i.2 = &.omp_data_o.2
	    .omp_data_i.2->i = .omp_data_i.2->i - 1;

   ** This is a problem.  The symbol iD.1562 cannot be referenced
   inside the body of the outer parallel region.  But since we are
   emitting this copy operation while expanding the inner parallel
   directive, we need to access the CTX structure of the outer
   parallel directive to get the correct mapping:

	.omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  gcc_assert (ctx->is_nested);

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  gcc_assert (t || is_global_var (decl));

  return t ? t : decl;
}

/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t = NULL;
  omp_context *up;

  if (ctx->is_nested)
    for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
      t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}

/* Construct the initialization value for reduction CLAUSE.  */

tree
omp_reduction_init (tree clause, tree type)
{
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return fold_convert (type, integer_zero_node);

    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert (type, integer_one_node);

    case BIT_AND_EXPR:
      return fold_convert (type, integer_minus_one_node);

    case MAX_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max, min;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    {
	      real_inf (&max);
	      real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
	    }
	  else
	    real_maxval (&min, 1, TYPE_MODE (type));
	  return build_real (type, min);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MIN_VALUE (type);
	}

    case MIN_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    real_inf (&max);
	  else
	    real_maxval (&max, 0, TYPE_MODE (type));
	  return build_real (type, max);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MAX_VALUE (type);
	}

    default:
      gcc_unreachable ();
    }
}

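/* For example (illustrative only): with reduction(max:x) on an int, each
   thread's private copy starts at INT_MIN (TYPE_MIN_VALUE above), and with
   reduction(*:y) it starts at 1, so combining the partial results with the
   reduction operator yields the correct final value.  */
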
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
   private variables.  Initialization statements go in ILIST, while calls
   to destructors go in DLIST.  */

static void
lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
			 omp_context *ctx)
{
  tree_stmt_iterator diter;
  tree c, dtor, copyin_seq, x, ptr;
  bool copyin_by_ref = false;
  bool lastprivate_firstprivate = false;
  int pass;

  *dlist = alloc_stmt_list ();
  diter = tsi_start (*dlist);
  copyin_seq = NULL;

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.  */
  for (pass = 0; pass < 2; ++pass)
    {
      for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
	{
	  enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
	  tree var, new_var;
	  bool by_ref;

	  switch (c_kind)
	    {
	    case OMP_CLAUSE_PRIVATE:
	      if (OMP_CLAUSE_PRIVATE_DEBUG (c))
		continue;
	      break;
	    case OMP_CLAUSE_SHARED:
	      if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
		{
		  gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
		  continue;
		}
	      break;
	    case OMP_CLAUSE_FIRSTPRIVATE:
	    case OMP_CLAUSE_COPYIN:
	    case OMP_CLAUSE_REDUCTION:
	      break;
	    case OMP_CLAUSE_LASTPRIVATE:
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		{
		  lastprivate_firstprivate = true;
		  if (pass != 0)
		    continue;
		}
	      break;
	    default:
	      continue;
	    }

	  new_var = var = OMP_CLAUSE_DECL (c);
	  if (c_kind != OMP_CLAUSE_COPYIN)
	    new_var = lookup_decl (var, ctx);

	  if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
	    {
	      if (pass != 0)
		continue;
	    }
	  else if (is_variable_sized (var))
	    {
	      /* For variable sized types, we need to allocate the
		 actual storage here.  Call alloca and store the
		 result in the pointer decl that we created elsewhere.  */
	      if (pass == 0)
		continue;

	      ptr = DECL_VALUE_EXPR (new_var);
	      gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
	      ptr = TREE_OPERAND (ptr, 0);
	      gcc_assert (DECL_P (ptr));

	      x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
	      x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
	      x = fold_convert (TREE_TYPE (ptr), x);
	      x = build_gimple_modify_stmt (ptr, x);
	      gimplify_and_add (x, ilist);
	    }
	  else if (is_reference (var))
	    {
	      /* For references that are being privatized for Fortran,
		 allocate new backing storage for the new pointer
		 variable.  This allows us to avoid changing all the
		 code that expects a pointer to something that expects
		 a direct variable.  Note that this doesn't apply to
		 C++, since reference types are disallowed in data
		 sharing clauses there, except for NRV optimized
		 return values.  */
	      if (pass == 0)
		continue;

	      x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
	      if (TREE_CONSTANT (x))
		{
		  const char *name = NULL;
		  if (DECL_NAME (var))
		    name = IDENTIFIER_POINTER (DECL_NAME (new_var));

		  x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
					  name);
		  gimple_add_tmp_var (x);
		  x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
		}
	      else
		{
		  x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
		  x = fold_convert (TREE_TYPE (new_var), x);
		}

	      x = build_gimple_modify_stmt (new_var, x);
	      gimplify_and_add (x, ilist);

	      new_var = build_fold_indirect_ref (new_var);
	    }
	  else if (c_kind == OMP_CLAUSE_REDUCTION
		   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    {
	      if (pass == 0)
		continue;
	    }
	  else if (pass != 0)
	    continue;

	  switch (OMP_CLAUSE_CODE (c))
	    {
	    case OMP_CLAUSE_SHARED:
	      /* Shared global vars are just accessed directly.  */
	      if (is_global_var (new_var))
		break;
	      /* Set up the DECL_VALUE_EXPR for shared variables now.  This
		 needs to be delayed until after fixup_child_record_type so
		 that we get the correct type during the dereference.  */
	      by_ref = use_pointer_for_field (var, true);
	      x = build_receiver_ref (var, by_ref, ctx);
	      SET_DECL_VALUE_EXPR (new_var, x);
	      DECL_HAS_VALUE_EXPR_P (new_var) = 1;

	      /* ??? If VAR is not passed by reference, and the variable
		 hasn't been initialized yet, then we'll get a warning for
		 the store into the omp_data_s structure.  Ideally, we'd be
		 able to notice this and not store anything at all, but
		 we're generating code too early.  Suppress the warning.  */
	      if (!by_ref)
		TREE_NO_WARNING (var) = 1;
	      break;

	    case OMP_CLAUSE_LASTPRIVATE:
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		break;
	      /* FALLTHRU */

	    case OMP_CLAUSE_PRIVATE:
	      x = lang_hooks.decls.omp_clause_default_ctor (c, new_var);
	      if (x)
		gimplify_and_add (x, ilist);
	      /* FALLTHRU */

	    do_dtor:
	      x = lang_hooks.decls.omp_clause_dtor (c, new_var);
	      if (x)
		{
		  dtor = x;
		  gimplify_stmt (&dtor);
		  tsi_link_before (&diter, dtor, TSI_SAME_STMT);
		}
	      break;

	    case OMP_CLAUSE_FIRSTPRIVATE:
	      x = build_outer_var_ref (var, ctx);
	      x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
	      gimplify_and_add (x, ilist);
	      goto do_dtor;
	      break;

	    case OMP_CLAUSE_COPYIN:
	      by_ref = use_pointer_for_field (var, false);
	      x = build_receiver_ref (var, by_ref, ctx);
	      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
	      append_to_statement_list (x, &copyin_seq);
	      copyin_by_ref |= by_ref;
	      break;

	    case OMP_CLAUSE_REDUCTION:
	      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
		{
		  gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c), ilist);
		  OMP_CLAUSE_REDUCTION_INIT (c) = NULL;
		}
	      else
		{
		  x = omp_reduction_init (c, TREE_TYPE (new_var));
		  gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
		  x = build_gimple_modify_stmt (new_var, x);
		  gimplify_and_add (x, ilist);
		}
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}
    }

  /* The copyin sequence is not to be executed by the main thread, since
     that would result in self-copies.  Perhaps not visible to scalars,
     but it certainly is to C++ operator=.  */
  if (copyin_seq)
    {
      x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
      x = build2 (NE_EXPR, boolean_type_node, x,
		  build_int_cst (TREE_TYPE (x), 0));
      x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
      gimplify_and_add (x, ilist);
    }

  /* If any copyin variable is passed by reference, we must ensure the
     master thread doesn't modify it before it is copied over in all
     threads.  Similarly for variables in both firstprivate and
     lastprivate clauses we need to ensure the lastprivate copying
     happens after firstprivate copying in all threads.  */
  if (copyin_by_ref || lastprivate_firstprivate)
    gimplify_and_add (build_omp_barrier (), ilist);
}

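/* Illustrative sketch of the receiver-side output (field and variable
   names are made up): for "#pragma omp parallel firstprivate (n)" the
   child function body is prefixed with roughly

	n.priv = .omp_data_i->n;

   i.e. the private copy is initialized from the incoming record, while a
   copyin variable gets the same kind of assignment guarded by a check
   that omp_get_thread_num () != 0, followed by a barrier when any copyin
   variable is passed by reference.  */
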
/* Generate code to implement the LASTPRIVATE clauses.  This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.  */

static void
lower_lastprivate_clauses (tree clauses, tree predicate, tree *stmt_list,
			   omp_context *ctx)
{
  tree sub_list, x, c;

  /* Early exit if there are no lastprivate clauses.  */
  clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
  if (clauses == NULL)
    {
      /* If this was a workshare clause, see if it had been combined
	 with its parallel.  In that case, look for the clauses on the
	 parallel statement itself.  */
      if (is_parallel_ctx (ctx))
	return;

      ctx = ctx->outer;
      if (ctx == NULL || !is_parallel_ctx (ctx))
	return;

      clauses = find_omp_clause (OMP_PARALLEL_CLAUSES (ctx->stmt),
				 OMP_CLAUSE_LASTPRIVATE);
      if (clauses == NULL)
	return;
    }

  sub_list = alloc_stmt_list ();

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, new_var;

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LASTPRIVATE)
	continue;

      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);

      x = build_outer_var_ref (var, ctx);
      if (is_reference (var))
	new_var = build_fold_indirect_ref (new_var);
      x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
      append_to_statement_list (x, &sub_list);
    }

  if (predicate)
    x = build3 (COND_EXPR, void_type_node, predicate, sub_list, NULL);
  else
    x = sub_list;

  gimplify_and_add (x, stmt_list);
}

/* Generate code to implement the REDUCTION clauses.  */

static void
lower_reduction_clauses (tree clauses, tree *stmt_list, omp_context *ctx)
{
  tree sub_list = NULL, x, c;
  int count = 0;

  /* First see if there is exactly one reduction clause.  Use OMP_ATOMIC
     update in that case, otherwise use a lock.  */
  for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
      {
	if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	  {
	    /* Never use OMP_ATOMIC for array reductions.  */
	    count = -1;
	    break;
	  }
	count++;
      }

  if (count == 0)
    return;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, new_var;
      enum tree_code code;

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
	continue;

      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);
      if (is_reference (var))
	new_var = build_fold_indirect_ref (new_var);
      ref = build_outer_var_ref (var, ctx);
      code = OMP_CLAUSE_REDUCTION_CODE (c);

      /* reduction(-:var) sums up the partial results, so it acts
	 identically to reduction(+:var).  */
      if (code == MINUS_EXPR)
	code = PLUS_EXPR;

      if (count == 1)
	{
	  tree addr = build_fold_addr_expr (ref);

	  addr = save_expr (addr);
	  ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
	  x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
	  x = build2 (OMP_ATOMIC, void_type_node, addr, x);
	  gimplify_and_add (x, stmt_list);
	  return;
	}

      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);

	  if (is_reference (var))
	    ref = build_fold_addr_expr (ref);
	  SET_DECL_VALUE_EXPR (placeholder, ref);
	  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
	  gimplify_and_add (OMP_CLAUSE_REDUCTION_MERGE (c), &sub_list);
	  OMP_CLAUSE_REDUCTION_MERGE (c) = NULL;
	  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
	}
      else
	{
	  x = build2 (code, TREE_TYPE (ref), ref, new_var);
	  ref = build_outer_var_ref (var, ctx);
	  x = build_gimple_modify_stmt (ref, x);
	  append_to_statement_list (x, &sub_list);
	}
    }

  x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
  gimplify_and_add (x, stmt_list);

  gimplify_and_add (sub_list, stmt_list);

  x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
  gimplify_and_add (x, stmt_list);
}

/* Generate code to implement the COPYPRIVATE clauses.  */

static void
lower_copyprivate_clauses (tree clauses, tree *slist, tree *rlist,
			   omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, x;
      bool by_ref;

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
	continue;

      var = OMP_CLAUSE_DECL (c);
      by_ref = use_pointer_for_field (var, false);

      ref = build_sender_ref (var, ctx);
      x = (ctx->is_nested) ? lookup_decl_in_outer_ctx (var, ctx) : var;
      x = by_ref ? build_fold_addr_expr (x) : x;
      x = build_gimple_modify_stmt (ref, x);
      gimplify_and_add (x, slist);

      ref = build_receiver_ref (var, by_ref, ctx);
      if (is_reference (var))
	{
	  ref = build_fold_indirect_ref (ref);
	  var = build_fold_indirect_ref (var);
	}
      x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
      gimplify_and_add (x, rlist);
    }
}

/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
   and REDUCTION from the sender (aka parent) side.  */

static void
lower_send_clauses (tree clauses, tree *ilist, tree *olist, omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree val, ref, x, var;
      bool by_ref, do_in = false, do_out = false;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_LASTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  break;
	default:
	  continue;
	}

      var = val = OMP_CLAUSE_DECL (c);
      if (ctx->is_nested)
	var = lookup_decl_in_outer_ctx (val, ctx);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
	  && is_global_var (var))
	continue;
      if (is_variable_sized (val))
	continue;
      by_ref = use_pointer_for_field (val, false);

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  do_in = true;
	  break;

	case OMP_CLAUSE_LASTPRIVATE:
	  if (by_ref || is_reference (val))
	    {
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		continue;
	      do_in = true;
	    }
	  else
	    do_out = true;
	  break;

	case OMP_CLAUSE_REDUCTION:
	  do_in = true;
	  do_out = !(by_ref || is_reference (val));
	  break;

	default:
	  gcc_unreachable ();
	}

      if (do_in)
	{
	  ref = build_sender_ref (val, ctx);
	  x = by_ref ? build_fold_addr_expr (var) : var;
	  x = build_gimple_modify_stmt (ref, x);
	  gimplify_and_add (x, ilist);
	}

      if (do_out)
	{
	  ref = build_sender_ref (val, ctx);
	  x = build_gimple_modify_stmt (var, ref);
	  gimplify_and_add (x, olist);
	}
    }
}

/* Generate code to implement SHARED from the sender (aka parent) side.
   This is trickier, since OMP_PARALLEL_CLAUSES doesn't list things that
   got automatically shared.  */

static void
lower_send_shared_vars (tree *ilist, tree *olist, omp_context *ctx)
{
  tree var, ovar, nvar, f, x;

  if (ctx->record_type == NULL)
    return;

  for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
    {
      ovar = DECL_ABSTRACT_ORIGIN (f);
      nvar = maybe_lookup_decl (ovar, ctx);
      if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
	continue;

      var = ovar;

      /* If CTX is a nested parallel directive, find the immediately
	 enclosing parallel or workshare construct that contains a
	 mapping for OVAR.  */
      if (ctx->is_nested)
	var = lookup_decl_in_outer_ctx (ovar, ctx);

      if (use_pointer_for_field (ovar, true))
	{
	  x = build_sender_ref (ovar, ctx);
	  var = build_fold_addr_expr (var);
	  x = build_gimple_modify_stmt (x, var);
	  gimplify_and_add (x, ilist);
	}
      else
	{
	  x = build_sender_ref (ovar, ctx);
	  x = build_gimple_modify_stmt (x, var);
	  gimplify_and_add (x, ilist);

	  x = build_sender_ref (ovar, ctx);
	  x = build_gimple_modify_stmt (var, x);
	  gimplify_and_add (x, olist);
	}
    }
}

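/* Illustrative sketch of the sender-side code built above (names are
   illustrative): for a shared variable "i" mapped into the record, the
   parent emits

	.omp_data_o.i = i;	<-- before the GOMP_parallel_start call
	...
	i = .omp_data_o.i;	<-- after GOMP_parallel_end, for copy-out

   unless the field is passed by pointer, in which case only
   ".omp_data_o.i = &i;" is emitted and no copy-out is needed.  */
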
2158 /* Build the function calls to GOMP_parallel_start etc to actually
2159 generate the parallel operation. REGION is the parallel region
2160 being expanded. BB is the block where to insert the code. WS_ARGS
2161 will be set if this is a call to a combined parallel+workshare
2162 construct, it contains the list of additional arguments needed by
2163 the workshare construct. */
2166 expand_parallel_call (struct omp_region
*region
, basic_block bb
,
2167 tree entry_stmt
, tree ws_args
)
2169 tree t
, t1
, t2
, val
, cond
, c
, clauses
;
2170 block_stmt_iterator si
;
2173 clauses
= OMP_PARALLEL_CLAUSES (entry_stmt
);
2175 /* Determine what flavor of GOMP_parallel_start we will be
2177 start_ix
= BUILT_IN_GOMP_PARALLEL_START
;
2178 if (is_combined_parallel (region
))
2180 switch (region
->inner
->type
)
2183 start_ix
= BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2184 + region
->inner
->sched_kind
;
2187 start_ix
= BUILT_IN_GOMP_PARALLEL_SECTIONS_START
;
2194 /* By default, the value of NUM_THREADS is zero (selected at run time)
2195 and there is no conditional. */
2197 val
= build_int_cst (unsigned_type_node
, 0);
2199 c
= find_omp_clause (clauses
, OMP_CLAUSE_IF
);
2201 cond
= OMP_CLAUSE_IF_EXPR (c
);
2203 c
= find_omp_clause (clauses
, OMP_CLAUSE_NUM_THREADS
);
2205 val
= OMP_CLAUSE_NUM_THREADS_EXPR (c
);
2207 /* Ensure 'val' is of the correct type. */
2208 val
= fold_convert (unsigned_type_node
, val
);
2210 /* If we found the clause 'if (cond)', build either
2211 (cond != 0) or (cond ? val : 1u). */
2214 block_stmt_iterator si
;
2216 cond
= gimple_boolify (cond
);
2218 if (integer_zerop (val
))
2219 val
= fold_build2 (EQ_EXPR
, unsigned_type_node
, cond
,
2220 build_int_cst (TREE_TYPE (cond
), 0));
2223 basic_block cond_bb
, then_bb
, else_bb
;
2224 edge e
, e_then
, e_else
;
2225 tree t
, tmp_then
, tmp_else
, tmp_join
, tmp_var
;
2227 tmp_var
= create_tmp_var (TREE_TYPE (val
), NULL
);
2228 if (gimple_in_ssa_p (cfun
))
2230 tmp_then
= make_ssa_name (tmp_var
, NULL_TREE
);
2231 tmp_else
= make_ssa_name (tmp_var
, NULL_TREE
);
2232 tmp_join
= make_ssa_name (tmp_var
, NULL_TREE
);
2241 e
= split_block (bb
, NULL
);
2246 then_bb
= create_empty_bb (cond_bb
);
2247 else_bb
= create_empty_bb (then_bb
);
2248 set_immediate_dominator (CDI_DOMINATORS
, then_bb
, cond_bb
);
2249 set_immediate_dominator (CDI_DOMINATORS
, else_bb
, cond_bb
);
2251 t
= build3 (COND_EXPR
, void_type_node
,
2252 cond
, NULL_TREE
, NULL_TREE
);
2254 si
= bsi_start (cond_bb
);
2255 bsi_insert_after (&si
, t
, BSI_CONTINUE_LINKING
);
2257 si
= bsi_start (then_bb
);
2258 t
= build_gimple_modify_stmt (tmp_then
, val
);
2259 if (gimple_in_ssa_p (cfun
))
2260 SSA_NAME_DEF_STMT (tmp_then
) = t
;
2261 bsi_insert_after (&si
, t
, BSI_CONTINUE_LINKING
);
2263 si
= bsi_start (else_bb
);
2264 t
= build_gimple_modify_stmt (tmp_else
,
2265 build_int_cst (unsigned_type_node
, 1));
2266 if (gimple_in_ssa_p (cfun
))
2267 SSA_NAME_DEF_STMT (tmp_else
) = t
;
2268 bsi_insert_after (&si
, t
, BSI_CONTINUE_LINKING
);
2270 make_edge (cond_bb
, then_bb
, EDGE_TRUE_VALUE
);
2271 make_edge (cond_bb
, else_bb
, EDGE_FALSE_VALUE
);
2272 e_then
= make_edge (then_bb
, bb
, EDGE_FALLTHRU
);
2273 e_else
= make_edge (else_bb
, bb
, EDGE_FALLTHRU
);
2275 if (gimple_in_ssa_p (cfun
))
2277 tree phi
= create_phi_node (tmp_join
, bb
);
2278 SSA_NAME_DEF_STMT (tmp_join
) = phi
;
2279 add_phi_arg (phi
, tmp_then
, e_then
);
2280 add_phi_arg (phi
, tmp_else
, e_else
);
2286 si
= bsi_start (bb
);
2287 val
= force_gimple_operand_bsi (&si
, val
, true, NULL_TREE
,
2288 false, BSI_CONTINUE_LINKING
);
  si = bsi_last (bb);
  t = OMP_PARALLEL_DATA_ARG (entry_stmt);
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  t2 = build_fold_addr_expr (OMP_PARALLEL_FN (entry_stmt));

  if (ws_args)
    {
      tree args = tree_cons (NULL, t2,
			     tree_cons (NULL, t1,
					tree_cons (NULL, val, ws_args)));
      t = build_function_call_expr (built_in_decls[start_ix], args);
    }
  else
    t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);

  force_gimple_operand_bsi (&si, t, true, NULL_TREE,
			    false, BSI_CONTINUE_LINKING);

  t = OMP_PARALLEL_DATA_ARG (entry_stmt);
  if (t == NULL)
    t = null_pointer_node;
  else
    t = build_fold_addr_expr (t);
  t = build_call_expr (OMP_PARALLEL_FN (entry_stmt), 1, t);
  force_gimple_operand_bsi (&si, t, true, NULL_TREE,
			    false, BSI_CONTINUE_LINKING);

  t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
  force_gimple_operand_bsi (&si, t, true, NULL_TREE,
			    false, BSI_CONTINUE_LINKING);
}
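
/* Illustrative sketch (editorial addition, not compiled): at the source
   level, the call sequence emitted above corresponds roughly to the
   following.  The outlined body function and the data record are
   stand-ins for what the pass actually creates.  */
#if 0
struct omp_data_s { int *a; int n; };

static void omp_fn_0 (void *data);	/* outlined parallel body */

static void
launch_parallel (struct omp_data_s *data, unsigned nthreads)
{
  /* GOMP_parallel_start spawns the team, the spawning thread then runs
     the body itself, and GOMP_parallel_end joins the team.  */
  GOMP_parallel_start (omp_fn_0, data, nthreads);
  omp_fn_0 (data);
  GOMP_parallel_end ();
}
#endif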
/* If exceptions are enabled, wrap *STMT_P in a MUST_NOT_THROW catch
   handler.  This prevents programs from violating the structured
   block semantics with throws.  */

static void
maybe_catch_exception (tree *stmt_p)
{
  tree f, t;

  if (!flag_exceptions)
    return;

  if (lang_protect_cleanup_actions)
    t = lang_protect_cleanup_actions ();
  else
    t = build_call_expr (built_in_decls[BUILT_IN_TRAP], 0);
  f = build2 (EH_FILTER_EXPR, void_type_node, NULL, NULL);
  EH_FILTER_MUST_NOT_THROW (f) = 1;
  gimplify_and_add (t, &EH_FILTER_FAILURE (f));

  t = build2 (TRY_CATCH_EXPR, void_type_node, *stmt_p, NULL);
  append_to_statement_list (f, &TREE_OPERAND (t, 1));

  *stmt_p = NULL;
  append_to_statement_list (t, stmt_p);
}
/* Chain all the DECLs in LIST by their TREE_CHAIN fields.  */

static tree
list2chain (tree list)
{
  tree t;

  for (t = list; t; t = TREE_CHAIN (t))
    {
      tree var = TREE_VALUE (t);
      if (TREE_CHAIN (t))
	TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
      else
	TREE_CHAIN (var) = NULL_TREE;
    }

  return list ? TREE_VALUE (list) : NULL_TREE;
}
/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the OMP_PARALLEL that
   left a barrier at the end of the OMP_PARALLEL region can now be
   removed.  */

static void
remove_exit_barrier (struct omp_region *region)
{
  block_stmt_iterator si;
  basic_block exit_bb;
  edge_iterator ei;
  edge e;
  tree t;

  exit_bb = region->exit;

  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
  if (! exit_bb)
    return;

  /* The last insn in the block will be the parallel's OMP_RETURN.  The
     workshare's OMP_RETURN will be in a preceding block.  The kinds of
     statements that can appear in between are extremely limited -- no
     memory operations at all.  Here, we allow nothing at all, so the
     only thing we allow to precede this OMP_RETURN is a label.  */
  si = bsi_last (exit_bb);
  gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
  bsi_prev (&si);
  if (!bsi_end_p (si) && TREE_CODE (bsi_stmt (si)) != LABEL_EXPR)
    return;

  FOR_EACH_EDGE (e, ei, exit_bb->preds)
    {
      si = bsi_last (e->src);
      if (bsi_end_p (si))
	continue;
      t = bsi_stmt (si);
      if (TREE_CODE (t) == OMP_RETURN)
	OMP_RETURN_NOWAIT (t) = 1;
    }
}
static void
remove_exit_barriers (struct omp_region *region)
{
  if (region->type == OMP_PARALLEL)
    remove_exit_barrier (region);

  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
	{
	  region = region->next;
	  remove_exit_barriers (region);
	}
    }
}
/* Expand the OpenMP parallel directive starting at REGION.  */

static void
expand_omp_parallel (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t, ws_args;
  block_stmt_iterator si;
  tree entry_stmt;
  edge e;

  entry_stmt = last_stmt (region->entry);
  child_fn = OMP_PARALLEL_FN (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  entry_bb = region->entry;
  exit_bb = region->exit;

  if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL_TREE;
2460 if (child_cfun
->cfg
)
2462 /* Due to inlining, it may happen that we have already outlined
2463 the region, in which case all we need to do is make the
2464 sub-graph unreachable and emit the parallel call. */
2465 edge entry_succ_e
, exit_succ_e
;
2466 block_stmt_iterator si
;
2468 entry_succ_e
= single_succ_edge (entry_bb
);
2470 si
= bsi_last (entry_bb
);
2471 gcc_assert (TREE_CODE (bsi_stmt (si
)) == OMP_PARALLEL
);
2472 bsi_remove (&si
, true);
2477 exit_succ_e
= single_succ_edge (exit_bb
);
2478 make_edge (new_bb
, exit_succ_e
->dest
, EDGE_FALLTHRU
);
2480 remove_edge_and_dominated_blocks (entry_succ_e
);
      /* If the parallel region needs data sent from the parent
	 function, then the very first statement (except possible
	 tree profile counter updates) of the parallel body
	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
	 &.OMP_DATA_O is passed as an argument to the child function,
	 we need to replace it with the argument as seen by the child
	 function.

	 In most cases, this will end up being the identity assignment
	 .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
	 a function call that has been inlined, the original PARM_DECL
	 .OMP_DATA_I may have been converted into a different local
	 variable.  In which case, we need to keep the assignment.  */
      if (OMP_PARALLEL_DATA_ARG (entry_stmt))
	{
2499 basic_block entry_succ_bb
= single_succ (entry_bb
);
2500 block_stmt_iterator si
;
2501 tree parcopy_stmt
= NULL_TREE
, arg
, narg
;
2503 for (si
= bsi_start (entry_succ_bb
); ; bsi_next (&si
))
2507 gcc_assert (!bsi_end_p (si
));
2508 stmt
= bsi_stmt (si
);
2509 if (TREE_CODE (stmt
) != GIMPLE_MODIFY_STMT
)
2512 arg
= GIMPLE_STMT_OPERAND (stmt
, 1);
2514 if (TREE_CODE (arg
) == ADDR_EXPR
2515 && TREE_OPERAND (arg
, 0)
2516 == OMP_PARALLEL_DATA_ARG (entry_stmt
))
2518 parcopy_stmt
= stmt
;
2523 gcc_assert (parcopy_stmt
!= NULL_TREE
);
2524 arg
= DECL_ARGUMENTS (child_fn
);
2526 if (!gimple_in_ssa_p (cfun
))
2528 if (GIMPLE_STMT_OPERAND (parcopy_stmt
, 0) == arg
)
2529 bsi_remove (&si
, true);
2531 GIMPLE_STMT_OPERAND (parcopy_stmt
, 1) = arg
;
2535 /* If we are in ssa form, we must load the value from the default
2536 definition of the argument. That should not be defined now,
2537 since the argument is not used uninitialized. */
2538 gcc_assert (gimple_default_def (cfun
, arg
) == NULL
);
2539 narg
= make_ssa_name (arg
, build_empty_stmt ());
2540 set_default_def (arg
, narg
);
2541 GIMPLE_STMT_OPERAND (parcopy_stmt
, 1) = narg
;
2542 update_stmt (parcopy_stmt
);
2546 /* Declare local variables needed in CHILD_CFUN. */
2547 block
= DECL_INITIAL (child_fn
);
2548 BLOCK_VARS (block
) = list2chain (child_cfun
->unexpanded_var_list
);
2549 DECL_SAVED_TREE (child_fn
) = bb_stmt_list (single_succ (entry_bb
));
2551 /* Reset DECL_CONTEXT on function arguments. */
2552 for (t
= DECL_ARGUMENTS (child_fn
); t
; t
= TREE_CHAIN (t
))
2553 DECL_CONTEXT (t
) = child_fn
;
2555 /* Split ENTRY_BB at OMP_PARALLEL so that it can be moved to the
2557 si
= bsi_last (entry_bb
);
2559 gcc_assert (t
&& TREE_CODE (t
) == OMP_PARALLEL
);
2560 bsi_remove (&si
, true);
2561 e
= split_block (entry_bb
, t
);
2563 single_succ_edge (entry_bb
)->flags
= EDGE_FALLTHRU
;
2565 /* Convert OMP_RETURN into a RETURN_EXPR. */
2568 si
= bsi_last (exit_bb
);
2569 gcc_assert (!bsi_end_p (si
)
2570 && TREE_CODE (bsi_stmt (si
)) == OMP_RETURN
);
2571 t
= build1 (RETURN_EXPR
, void_type_node
, NULL
);
2572 bsi_insert_after (&si
, t
, BSI_SAME_STMT
);
2573 bsi_remove (&si
, true);
2576 /* Move the parallel region into CHILD_CFUN. */
2578 if (gimple_in_ssa_p (cfun
))
2580 push_cfun (child_cfun
);
2582 init_ssa_operands ();
2583 cfun
->gimple_df
->in_ssa_p
= true;
2586 new_bb
= move_sese_region_to_fn (child_cfun
, entry_bb
, exit_bb
);
2588 single_succ_edge (new_bb
)->flags
= EDGE_FALLTHRU
;
2590 /* Inform the callgraph about the new function. */
2591 DECL_STRUCT_FUNCTION (child_fn
)->curr_properties
2592 = cfun
->curr_properties
;
2593 cgraph_add_new_function (child_fn
, true);
2595 /* Fix the callgraph edges for child_cfun. Those for cfun will be
2596 fixed in a following pass. */
2597 push_cfun (child_cfun
);
2598 rebuild_cgraph_edges ();
2602 /* Emit a library call to launch the children threads. */
2603 expand_parallel_call (region
, new_bb
, entry_stmt
, ws_args
);
2604 update_ssa (TODO_update_ssa_only_virtuals
);
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with any schedule.  Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	iend = iend0;
    L1:
	BODY;
	V += STEP;
	if (V cond iend) goto L1; else goto L2;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

    If this is a combined omp parallel loop, instead of the call to
    GOMP_loop_foo_start, we call GOMP_loop_foo_next.  */
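
/* Illustrative sketch (editorial addition, not compiled): the pseudocode
   above, instantiated for schedule(dynamic) over a long index, looks
   roughly like this at the source level.  The bounds and body are
   placeholders.  */
#if 0
static void
dynamic_schedule_example (long n1, long n2, long step, long chunk)
{
  long istart0, iend0;

  if (GOMP_loop_dynamic_start (n1, n2, step, chunk, &istart0, &iend0))
    do
      {
	long v = istart0;
	long iend = iend0;
	for (; v < iend; v += step)
	  /* BODY (v) */;
      }
    while (GOMP_loop_dynamic_next (&istart0, &iend0));

  GOMP_loop_end ();	/* implicit barrier unless 'nowait' */
}
#endif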
2632 expand_omp_for_generic (struct omp_region
*region
,
2633 struct omp_for_data
*fd
,
2634 enum built_in_function start_fn
,
2635 enum built_in_function next_fn
)
2637 tree type
, istart0
, iend0
, iend
, phi
;
2638 tree t
, vmain
, vback
;
2639 basic_block entry_bb
, cont_bb
, exit_bb
, l0_bb
, l1_bb
;
2640 basic_block l2_bb
= NULL
, l3_bb
= NULL
;
2641 block_stmt_iterator si
;
2642 bool in_combined_parallel
= is_combined_parallel (region
);
2643 bool broken_loop
= region
->cont
== NULL
;
2646 gcc_assert (!broken_loop
|| !in_combined_parallel
);
2648 type
= TREE_TYPE (fd
->v
);
2650 istart0
= create_tmp_var (long_integer_type_node
, ".istart0");
2651 iend0
= create_tmp_var (long_integer_type_node
, ".iend0");
2652 TREE_ADDRESSABLE (istart0
) = 1;
2653 TREE_ADDRESSABLE (iend0
) = 1;
2654 if (gimple_in_ssa_p (cfun
))
2656 add_referenced_var (istart0
);
2657 add_referenced_var (iend0
);
2660 entry_bb
= region
->entry
;
2661 cont_bb
= region
->cont
;
2662 gcc_assert (EDGE_COUNT (entry_bb
->succs
) == 2);
2663 gcc_assert (broken_loop
2664 || BRANCH_EDGE (entry_bb
)->dest
== FALLTHRU_EDGE (cont_bb
)->dest
);
2665 l0_bb
= split_edge (FALLTHRU_EDGE (entry_bb
));
2666 l1_bb
= single_succ (l0_bb
);
2669 l2_bb
= create_empty_bb (cont_bb
);
2670 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== l1_bb
);
2671 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
2675 l3_bb
= BRANCH_EDGE (entry_bb
)->dest
;
2676 exit_bb
= region
->exit
;
2678 si
= bsi_last (entry_bb
);
2679 gcc_assert (TREE_CODE (bsi_stmt (si
)) == OMP_FOR
);
2680 if (in_combined_parallel
)
2682 /* In a combined parallel loop, emit a call to
2683 GOMP_loop_foo_next. */
2684 t
= build_call_expr (built_in_decls
[next_fn
], 2,
2685 build_fold_addr_expr (istart0
),
2686 build_fold_addr_expr (iend0
));
2690 tree t0
, t1
, t2
, t3
, t4
;
2691 /* If this is not a combined parallel loop, emit a call to
2692 GOMP_loop_foo_start in ENTRY_BB. */
2693 t4
= build_fold_addr_expr (iend0
);
2694 t3
= build_fold_addr_expr (istart0
);
2695 t2
= fold_convert (long_integer_type_node
, fd
->step
);
2696 t1
= fold_convert (long_integer_type_node
, fd
->n2
);
2697 t0
= fold_convert (long_integer_type_node
, fd
->n1
);
2700 t
= fold_convert (long_integer_type_node
, fd
->chunk_size
);
2701 t
= build_call_expr (built_in_decls
[start_fn
], 6,
2702 t0
, t1
, t2
, t
, t3
, t4
);
2705 t
= build_call_expr (built_in_decls
[start_fn
], 5,
2706 t0
, t1
, t2
, t3
, t4
);
2708 t
= force_gimple_operand_bsi (&si
, t
, true, NULL_TREE
,
2709 true, BSI_SAME_STMT
);
2710 t
= build3 (COND_EXPR
, void_type_node
, t
, NULL_TREE
, NULL_TREE
);
2711 bsi_insert_after (&si
, t
, BSI_SAME_STMT
);
2713 /* V may be used outside of the loop (e.g., to handle lastprivate clause).
2714 If this is the case, its value is undefined if the loop is not entered
2715 at all. To handle this case, set its initial value to N1. */
2716 if (gimple_in_ssa_p (cfun
))
2718 e
= find_edge (entry_bb
, l3_bb
);
2719 for (phi
= phi_nodes (l3_bb
); phi
; phi
= PHI_CHAIN (phi
))
2720 if (PHI_ARG_DEF_FROM_EDGE (phi
, e
) == fd
->v
)
2721 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi
, e
), fd
->n1
);
2725 t
= build_gimple_modify_stmt (fd
->v
, fd
->n1
);
2726 bsi_insert_before (&si
, t
, BSI_SAME_STMT
);
2729 /* Remove the OMP_FOR statement. */
2730 bsi_remove (&si
, true);
2732 /* Iteration setup for sequential loop goes in L0_BB. */
2733 si
= bsi_start (l0_bb
);
2734 t
= fold_convert (type
, istart0
);
2735 t
= force_gimple_operand_bsi (&si
, t
, false, NULL_TREE
,
2736 false, BSI_CONTINUE_LINKING
);
2737 t
= build_gimple_modify_stmt (fd
->v
, t
);
2738 bsi_insert_after (&si
, t
, BSI_CONTINUE_LINKING
);
2739 if (gimple_in_ssa_p (cfun
))
2740 SSA_NAME_DEF_STMT (fd
->v
) = t
;
2742 t
= fold_convert (type
, iend0
);
2743 iend
= force_gimple_operand_bsi (&si
, t
, true, NULL_TREE
,
2744 false, BSI_CONTINUE_LINKING
);
2748 /* Code to control the increment and predicate for the sequential
2749 loop goes in the CONT_BB. */
2750 si
= bsi_last (cont_bb
);
2752 gcc_assert (TREE_CODE (t
) == OMP_CONTINUE
);
2753 vmain
= TREE_OPERAND (t
, 1);
2754 vback
= TREE_OPERAND (t
, 0);
2756 t
= fold_build2 (PLUS_EXPR
, type
, vmain
, fd
->step
);
2757 t
= force_gimple_operand_bsi (&si
, t
, false, NULL_TREE
,
2758 true, BSI_SAME_STMT
);
2759 t
= build_gimple_modify_stmt (vback
, t
);
2760 bsi_insert_before (&si
, t
, BSI_SAME_STMT
);
2761 if (gimple_in_ssa_p (cfun
))
2762 SSA_NAME_DEF_STMT (vback
) = t
;
2764 t
= build2 (fd
->cond_code
, boolean_type_node
, vback
, iend
);
2765 t
= build3 (COND_EXPR
, void_type_node
, t
, NULL_TREE
, NULL_TREE
);
2766 bsi_insert_before (&si
, t
, BSI_SAME_STMT
);
2768 /* Remove OMP_CONTINUE. */
2769 bsi_remove (&si
, true);
2771 /* Emit code to get the next parallel iteration in L2_BB. */
2772 si
= bsi_start (l2_bb
);
2774 t
= build_call_expr (built_in_decls
[next_fn
], 2,
2775 build_fold_addr_expr (istart0
),
2776 build_fold_addr_expr (iend0
));
2777 t
= force_gimple_operand_bsi (&si
, t
, true, NULL_TREE
,
2778 false, BSI_CONTINUE_LINKING
);
2779 t
= build3 (COND_EXPR
, void_type_node
, t
, NULL_TREE
, NULL_TREE
);
2780 bsi_insert_after (&si
, t
, BSI_CONTINUE_LINKING
);
2783 /* Add the loop cleanup function. */
2784 si
= bsi_last (exit_bb
);
2785 if (OMP_RETURN_NOWAIT (bsi_stmt (si
)))
2786 t
= built_in_decls
[BUILT_IN_GOMP_LOOP_END_NOWAIT
];
2788 t
= built_in_decls
[BUILT_IN_GOMP_LOOP_END
];
2789 t
= build_call_expr (t
, 0);
2790 bsi_insert_after (&si
, t
, BSI_SAME_STMT
);
2791 bsi_remove (&si
, true);
2793 /* Connect the new blocks. */
2794 find_edge (entry_bb
, l0_bb
)->flags
= EDGE_TRUE_VALUE
;
2795 find_edge (entry_bb
, l3_bb
)->flags
= EDGE_FALSE_VALUE
;
2799 e
= find_edge (cont_bb
, l3_bb
);
2800 ne
= make_edge (l2_bb
, l3_bb
, EDGE_FALSE_VALUE
);
2802 for (phi
= phi_nodes (l3_bb
); phi
; phi
= PHI_CHAIN (phi
))
2803 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi
, ne
),
2804 PHI_ARG_DEF_FROM_EDGE (phi
, e
));
2807 find_edge (cont_bb
, l1_bb
)->flags
= EDGE_TRUE_VALUE
;
2808 make_edge (cont_bb
, l2_bb
, EDGE_FALSE_VALUE
);
2809 make_edge (l2_bb
, l0_bb
, EDGE_TRUE_VALUE
);
2811 set_immediate_dominator (CDI_DOMINATORS
, l2_bb
,
2812 recompute_dominator (CDI_DOMINATORS
, l2_bb
));
2813 set_immediate_dominator (CDI_DOMINATORS
, l3_bb
,
2814 recompute_dominator (CDI_DOMINATORS
, l3_bb
));
2815 set_immediate_dominator (CDI_DOMINATORS
, l0_bb
,
2816 recompute_dominator (CDI_DOMINATORS
, l0_bb
));
2817 set_immediate_dominator (CDI_DOMINATORS
, l1_bb
,
2818 recompute_dominator (CDI_DOMINATORS
, l1_bb
));
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and no specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	n = (adj + N2 - N1) / STEP;
	q = n / nthreads;
	q += (q * nthreads != n);
	s0 = q * threadid;
	e0 = min(s0 + q, n);
	V = s0 * STEP + N1;
	if (s0 >= e0) goto L2; else goto L0;
    L0:
	e = e0 * STEP + N1;
    L1:
	BODY;
	V += STEP;
	if (V cond e) goto L1;
    L2:
*/
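
/* Illustrative sketch (editorial addition, not compiled): the block
   partitioning above written as plain C.  Each thread derives its own
   [s0, e0) slice of the n logical iterations; no runtime call is needed
   beyond obtaining nthreads and threadid.  */
#if 0
static void
static_nochunk_example (long n1, long n2, long step,
			long nthreads, long threadid)
{
  long adj, n, q, s0, e0, i, v;

  adj = (step > 0) ? step - 1 : step + 1;
  n = (adj + n2 - n1) / step;	/* total iteration count */
  q = n / nthreads;
  q += (q * nthreads != n);	/* round up */
  s0 = q * threadid;
  e0 = s0 + q;
  if (e0 > n)
    e0 = n;

  for (i = s0; i < e0; i++)
    {
      v = i * step + n1;
      /* BODY (v) */
    }
}
#endif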
2852 expand_omp_for_static_nochunk (struct omp_region
*region
,
2853 struct omp_for_data
*fd
)
2855 tree n
, q
, s0
, e0
, e
, t
, nthreads
, threadid
;
2856 tree type
, vmain
, vback
;
2857 basic_block entry_bb
, exit_bb
, seq_start_bb
, body_bb
, cont_bb
;
2859 block_stmt_iterator si
;
2861 type
= TREE_TYPE (fd
->v
);
2863 entry_bb
= region
->entry
;
2864 cont_bb
= region
->cont
;
2865 gcc_assert (EDGE_COUNT (entry_bb
->succs
) == 2);
2866 gcc_assert (BRANCH_EDGE (entry_bb
)->dest
== FALLTHRU_EDGE (cont_bb
)->dest
);
2867 seq_start_bb
= split_edge (FALLTHRU_EDGE (entry_bb
));
2868 body_bb
= single_succ (seq_start_bb
);
2869 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== body_bb
);
2870 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
2871 fin_bb
= FALLTHRU_EDGE (cont_bb
)->dest
;
2872 exit_bb
= region
->exit
;
2874 /* Iteration space partitioning goes in ENTRY_BB. */
2875 si
= bsi_last (entry_bb
);
2876 gcc_assert (TREE_CODE (bsi_stmt (si
)) == OMP_FOR
);
2878 t
= build_call_expr (built_in_decls
[BUILT_IN_OMP_GET_NUM_THREADS
], 0);
2879 t
= fold_convert (type
, t
);
2880 nthreads
= force_gimple_operand_bsi (&si
, t
, true, NULL_TREE
,
2881 true, BSI_SAME_STMT
);
2883 t
= build_call_expr (built_in_decls
[BUILT_IN_OMP_GET_THREAD_NUM
], 0);
2884 t
= fold_convert (type
, t
);
2885 threadid
= force_gimple_operand_bsi (&si
, t
, true, NULL_TREE
,
2886 true, BSI_SAME_STMT
);
2888 fd
->n1
= force_gimple_operand_bsi (&si
,
2889 fold_convert (type
, fd
->n1
),
2891 true, BSI_SAME_STMT
);
2893 fd
->n2
= force_gimple_operand_bsi (&si
,
2894 fold_convert (type
, fd
->n2
),
2896 true, BSI_SAME_STMT
);
2898 fd
->step
= force_gimple_operand_bsi (&si
,
2899 fold_convert (type
, fd
->step
),
2901 true, BSI_SAME_STMT
);
2903 t
= build_int_cst (type
, (fd
->cond_code
== LT_EXPR
? -1 : 1));
2904 t
= fold_build2 (PLUS_EXPR
, type
, fd
->step
, t
);
2905 t
= fold_build2 (PLUS_EXPR
, type
, t
, fd
->n2
);
2906 t
= fold_build2 (MINUS_EXPR
, type
, t
, fd
->n1
);
2907 t
= fold_build2 (TRUNC_DIV_EXPR
, type
, t
, fd
->step
);
2908 t
= fold_convert (type
, t
);
2909 n
= force_gimple_operand_bsi (&si
, t
, true, NULL_TREE
, true, BSI_SAME_STMT
);
2911 t
= fold_build2 (TRUNC_DIV_EXPR
, type
, n
, nthreads
);
2912 q
= force_gimple_operand_bsi (&si
, t
, true, NULL_TREE
, true, BSI_SAME_STMT
);
2914 t
= fold_build2 (MULT_EXPR
, type
, q
, nthreads
);
2915 t
= fold_build2 (NE_EXPR
, type
, t
, n
);
2916 t
= fold_build2 (PLUS_EXPR
, type
, q
, t
);
2917 q
= force_gimple_operand_bsi (&si
, t
, true, NULL_TREE
, true, BSI_SAME_STMT
);
2919 t
= build2 (MULT_EXPR
, type
, q
, threadid
);
2920 s0
= force_gimple_operand_bsi (&si
, t
, true, NULL_TREE
, true, BSI_SAME_STMT
);
2922 t
= fold_build2 (PLUS_EXPR
, type
, s0
, q
);
2923 t
= fold_build2 (MIN_EXPR
, type
, t
, n
);
2924 e0
= force_gimple_operand_bsi (&si
, t
, true, NULL_TREE
, true, BSI_SAME_STMT
);
2926 t
= fold_convert (type
, s0
);
2927 t
= fold_build2 (MULT_EXPR
, type
, t
, fd
->step
);
2928 t
= fold_build2 (PLUS_EXPR
, type
, t
, fd
->n1
);
2929 t
= force_gimple_operand_bsi (&si
, t
, false, NULL_TREE
,
2930 true, BSI_SAME_STMT
);
2931 t
= build_gimple_modify_stmt (fd
->v
, t
);
2932 bsi_insert_before (&si
, t
, BSI_SAME_STMT
);
2933 if (gimple_in_ssa_p (cfun
))
2934 SSA_NAME_DEF_STMT (fd
->v
) = t
;
2936 t
= build2 (GE_EXPR
, boolean_type_node
, s0
, e0
);
2937 t
= build3 (COND_EXPR
, void_type_node
, t
, NULL_TREE
, NULL_TREE
);
2938 bsi_insert_before (&si
, t
, BSI_SAME_STMT
);
2940 /* Remove the OMP_FOR statement. */
2941 bsi_remove (&si
, true);
2943 /* Setup code for sequential iteration goes in SEQ_START_BB. */
2944 si
= bsi_start (seq_start_bb
);
2946 t
= fold_convert (type
, e0
);
2947 t
= fold_build2 (MULT_EXPR
, type
, t
, fd
->step
);
2948 t
= fold_build2 (PLUS_EXPR
, type
, t
, fd
->n1
);
2949 e
= force_gimple_operand_bsi (&si
, t
, true, NULL_TREE
,
2950 false, BSI_CONTINUE_LINKING
);
2952 /* The code controlling the sequential loop replaces the OMP_CONTINUE. */
2953 si
= bsi_last (cont_bb
);
2955 gcc_assert (TREE_CODE (t
) == OMP_CONTINUE
);
2956 vmain
= TREE_OPERAND (t
, 1);
2957 vback
= TREE_OPERAND (t
, 0);
2959 t
= fold_build2 (PLUS_EXPR
, type
, vmain
, fd
->step
);
2960 t
= force_gimple_operand_bsi (&si
, t
, false, NULL_TREE
,
2961 true, BSI_SAME_STMT
);
2962 t
= build_gimple_modify_stmt (vback
, t
);
2963 bsi_insert_before (&si
, t
, BSI_SAME_STMT
);
2964 if (gimple_in_ssa_p (cfun
))
2965 SSA_NAME_DEF_STMT (vback
) = t
;
2967 t
= build2 (fd
->cond_code
, boolean_type_node
, vback
, e
);
2968 t
= build3 (COND_EXPR
, void_type_node
, t
, NULL_TREE
, NULL_TREE
);
2969 bsi_insert_before (&si
, t
, BSI_SAME_STMT
);
2971 /* Remove the OMP_CONTINUE statement. */
2972 bsi_remove (&si
, true);
2974 /* Replace the OMP_RETURN with a barrier, or nothing. */
2975 si
= bsi_last (exit_bb
);
2976 if (!OMP_RETURN_NOWAIT (bsi_stmt (si
)))
2977 force_gimple_operand_bsi (&si
, build_omp_barrier (), false, NULL_TREE
,
2978 false, BSI_SAME_STMT
);
2979 bsi_remove (&si
, true);
2981 /* Connect all the blocks. */
2982 find_edge (entry_bb
, seq_start_bb
)->flags
= EDGE_FALSE_VALUE
;
2983 find_edge (entry_bb
, fin_bb
)->flags
= EDGE_TRUE_VALUE
;
2985 find_edge (cont_bb
, body_bb
)->flags
= EDGE_TRUE_VALUE
;
2986 find_edge (cont_bb
, fin_bb
)->flags
= EDGE_FALSE_VALUE
;
2988 set_immediate_dominator (CDI_DOMINATORS
, seq_start_bb
, entry_bb
);
2989 set_immediate_dominator (CDI_DOMINATORS
, body_bb
,
2990 recompute_dominator (CDI_DOMINATORS
, body_bb
));
2991 set_immediate_dominator (CDI_DOMINATORS
, fin_bb
,
2992 recompute_dominator (CDI_DOMINATORS
, fin_bb
));
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and a specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	n = (adj + N2 - N1) / STEP;
	trip = 0;
	V = threadid * CHUNK * STEP + N1;  -- this extra definition of V is
					      here so that V is defined
					      if the loop is not entered
    L0:
	s0 = (trip * nthreads + threadid) * CHUNK;
	e0 = min(s0 + CHUNK, n);
	if (s0 < n) goto L1; else goto L4;
    L1:
	V = s0 * STEP + N1;
	e = e0 * STEP + N1;
    L2:
	BODY;
	V += STEP;
	if (V cond e) goto L2; else goto L3;
    L3:
	trip += 1;
	goto L0;
    L4:
*/
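
/* Illustrative sketch (editorial addition, not compiled): the chunked
   static schedule as plain C.  Chunks are dealt out round-robin by the
   trip counter, so thread `threadid' handles chunks threadid,
   threadid + nthreads, threadid + 2*nthreads, and so on.  */
#if 0
static void
static_chunk_example (long n1, long n2, long step, long chunk,
		      long nthreads, long threadid)
{
  long adj, n, trip, s0, e0, i, v;

  adj = (step > 0) ? step - 1 : step + 1;
  n = (adj + n2 - n1) / step;

  for (trip = 0; ; trip++)
    {
      s0 = (trip * nthreads + threadid) * chunk;
      if (s0 >= n)
	break;
      e0 = s0 + chunk;
      if (e0 > n)
	e0 = n;

      for (i = s0; i < e0; i++)
	{
	  v = i * step + n1;
	  /* BODY (v) */
	}
    }
}
#endif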
3031 expand_omp_for_static_chunk (struct omp_region
*region
, struct omp_for_data
*fd
)
3033 tree n
, s0
, e0
, e
, t
, phi
, nphi
, args
;
3034 tree trip_var
, trip_init
, trip_main
, trip_back
, nthreads
, threadid
;
3035 tree type
, cont
, v_main
, v_back
, v_extra
;
3036 basic_block entry_bb
, exit_bb
, body_bb
, seq_start_bb
, iter_part_bb
;
3037 basic_block trip_update_bb
, cont_bb
, fin_bb
;
3038 block_stmt_iterator si
;
3041 type
= TREE_TYPE (fd
->v
);
3043 entry_bb
= region
->entry
;
3044 se
= split_block (entry_bb
, last_stmt (entry_bb
));
3046 iter_part_bb
= se
->dest
;
3047 cont_bb
= region
->cont
;
3048 gcc_assert (EDGE_COUNT (iter_part_bb
->succs
) == 2);
3049 gcc_assert (BRANCH_EDGE (iter_part_bb
)->dest
3050 == FALLTHRU_EDGE (cont_bb
)->dest
);
3051 seq_start_bb
= split_edge (FALLTHRU_EDGE (iter_part_bb
));
3052 body_bb
= single_succ (seq_start_bb
);
3053 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== body_bb
);
3054 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
3055 fin_bb
= FALLTHRU_EDGE (cont_bb
)->dest
;
3056 trip_update_bb
= split_edge (FALLTHRU_EDGE (cont_bb
));
3057 exit_bb
= region
->exit
;
3059 /* Trip and adjustment setup goes in ENTRY_BB. */
3060 si
= bsi_last (entry_bb
);
3061 gcc_assert (TREE_CODE (bsi_stmt (si
)) == OMP_FOR
);
3063 t
= build_call_expr (built_in_decls
[BUILT_IN_OMP_GET_NUM_THREADS
], 0);
3064 t
= fold_convert (type
, t
);
3065 nthreads
= force_gimple_operand_bsi (&si
, t
, true, NULL_TREE
,
3066 true, BSI_SAME_STMT
);
3068 t
= build_call_expr (built_in_decls
[BUILT_IN_OMP_GET_THREAD_NUM
], 0);
3069 t
= fold_convert (type
, t
);
3070 threadid
= force_gimple_operand_bsi (&si
, t
, true, NULL_TREE
,
3071 true, BSI_SAME_STMT
);
3073 fd
->n1
= force_gimple_operand_bsi (&si
, fold_convert (type
, fd
->n1
),
3075 true, BSI_SAME_STMT
);
3076 fd
->n2
= force_gimple_operand_bsi (&si
, fold_convert (type
, fd
->n2
),
3078 true, BSI_SAME_STMT
);
3079 fd
->step
= force_gimple_operand_bsi (&si
, fold_convert (type
, fd
->step
),
3081 true, BSI_SAME_STMT
);
3083 = force_gimple_operand_bsi (&si
, fold_convert (type
,
3086 true, BSI_SAME_STMT
);
3088 t
= build_int_cst (type
, (fd
->cond_code
== LT_EXPR
? -1 : 1));
3089 t
= fold_build2 (PLUS_EXPR
, type
, fd
->step
, t
);
3090 t
= fold_build2 (PLUS_EXPR
, type
, t
, fd
->n2
);
3091 t
= fold_build2 (MINUS_EXPR
, type
, t
, fd
->n1
);
3092 t
= fold_build2 (TRUNC_DIV_EXPR
, type
, t
, fd
->step
);
3093 t
= fold_convert (type
, t
);
3094 n
= force_gimple_operand_bsi (&si
, t
, true, NULL_TREE
,
3095 true, BSI_SAME_STMT
);
3097 trip_var
= create_tmp_var (type
, ".trip");
3098 if (gimple_in_ssa_p (cfun
))
3100 add_referenced_var (trip_var
);
3101 trip_init
= make_ssa_name (trip_var
, NULL_TREE
);
3102 trip_main
= make_ssa_name (trip_var
, NULL_TREE
);
3103 trip_back
= make_ssa_name (trip_var
, NULL_TREE
);
3107 trip_init
= trip_var
;
3108 trip_main
= trip_var
;
3109 trip_back
= trip_var
;
3112 t
= build_gimple_modify_stmt (trip_init
, build_int_cst (type
, 0));
3113 bsi_insert_before (&si
, t
, BSI_SAME_STMT
);
3114 if (gimple_in_ssa_p (cfun
))
3115 SSA_NAME_DEF_STMT (trip_init
) = t
;
3117 t
= fold_build2 (MULT_EXPR
, type
, threadid
, fd
->chunk_size
);
3118 t
= fold_build2 (MULT_EXPR
, type
, t
, fd
->step
);
3119 t
= fold_build2 (PLUS_EXPR
, type
, t
, fd
->n1
);
3120 v_extra
= force_gimple_operand_bsi (&si
, t
, true, NULL_TREE
,
3121 true, BSI_SAME_STMT
);
3123 /* Remove the OMP_FOR. */
3124 bsi_remove (&si
, true);
3126 /* Iteration space partitioning goes in ITER_PART_BB. */
3127 si
= bsi_last (iter_part_bb
);
3129 t
= fold_build2 (MULT_EXPR
, type
, trip_main
, nthreads
);
3130 t
= fold_build2 (PLUS_EXPR
, type
, t
, threadid
);
3131 t
= fold_build2 (MULT_EXPR
, type
, t
, fd
->chunk_size
);
3132 s0
= force_gimple_operand_bsi (&si
, t
, true, NULL_TREE
,
3133 false, BSI_CONTINUE_LINKING
);
3135 t
= fold_build2 (PLUS_EXPR
, type
, s0
, fd
->chunk_size
);
3136 t
= fold_build2 (MIN_EXPR
, type
, t
, n
);
3137 e0
= force_gimple_operand_bsi (&si
, t
, true, NULL_TREE
,
3138 false, BSI_CONTINUE_LINKING
);
3140 t
= build2 (LT_EXPR
, boolean_type_node
, s0
, n
);
3141 t
= build3 (COND_EXPR
, void_type_node
, t
, NULL_TREE
, NULL_TREE
);
3142 bsi_insert_after (&si
, t
, BSI_CONTINUE_LINKING
);
3144 /* Setup code for sequential iteration goes in SEQ_START_BB. */
3145 si
= bsi_start (seq_start_bb
);
3147 t
= fold_convert (type
, s0
);
3148 t
= fold_build2 (MULT_EXPR
, type
, t
, fd
->step
);
3149 t
= fold_build2 (PLUS_EXPR
, type
, t
, fd
->n1
);
3150 t
= force_gimple_operand_bsi (&si
, t
, false, NULL_TREE
,
3151 false, BSI_CONTINUE_LINKING
);
3152 t
= build_gimple_modify_stmt (fd
->v
, t
);
3153 bsi_insert_after (&si
, t
, BSI_CONTINUE_LINKING
);
3154 if (gimple_in_ssa_p (cfun
))
3155 SSA_NAME_DEF_STMT (fd
->v
) = t
;
3157 t
= fold_convert (type
, e0
);
3158 t
= fold_build2 (MULT_EXPR
, type
, t
, fd
->step
);
3159 t
= fold_build2 (PLUS_EXPR
, type
, t
, fd
->n1
);
3160 e
= force_gimple_operand_bsi (&si
, t
, true, NULL_TREE
,
3161 false, BSI_CONTINUE_LINKING
);
3163 /* The code controlling the sequential loop goes in CONT_BB,
3164 replacing the OMP_CONTINUE. */
3165 si
= bsi_last (cont_bb
);
3166 cont
= bsi_stmt (si
);
3167 gcc_assert (TREE_CODE (cont
) == OMP_CONTINUE
);
3168 v_main
= TREE_OPERAND (cont
, 1);
3169 v_back
= TREE_OPERAND (cont
, 0);
3171 t
= build2 (PLUS_EXPR
, type
, v_main
, fd
->step
);
3172 t
= build_gimple_modify_stmt (v_back
, t
);
3173 bsi_insert_before (&si
, t
, BSI_SAME_STMT
);
3174 if (gimple_in_ssa_p (cfun
))
3175 SSA_NAME_DEF_STMT (v_back
) = t
;
3177 t
= build2 (fd
->cond_code
, boolean_type_node
, v_back
, e
);
3178 t
= build3 (COND_EXPR
, void_type_node
, t
, NULL_TREE
, NULL_TREE
);
3179 bsi_insert_before (&si
, t
, BSI_SAME_STMT
);
3181 /* Remove OMP_CONTINUE. */
3182 bsi_remove (&si
, true);
3184 /* Trip update code goes into TRIP_UPDATE_BB. */
3185 si
= bsi_start (trip_update_bb
);
3187 t
= build_int_cst (type
, 1);
3188 t
= build2 (PLUS_EXPR
, type
, trip_main
, t
);
3189 t
= build_gimple_modify_stmt (trip_back
, t
);
3190 bsi_insert_after (&si
, t
, BSI_CONTINUE_LINKING
);
3191 if (gimple_in_ssa_p (cfun
))
3192 SSA_NAME_DEF_STMT (trip_back
) = t
;
3194 /* Replace the OMP_RETURN with a barrier, or nothing. */
3195 si
= bsi_last (exit_bb
);
3196 if (!OMP_RETURN_NOWAIT (bsi_stmt (si
)))
3197 force_gimple_operand_bsi (&si
, build_omp_barrier (), false, NULL_TREE
,
3198 false, BSI_SAME_STMT
);
3199 bsi_remove (&si
, true);
3201 /* Connect the new blocks. */
3202 find_edge (iter_part_bb
, seq_start_bb
)->flags
= EDGE_TRUE_VALUE
;
3203 find_edge (iter_part_bb
, fin_bb
)->flags
= EDGE_FALSE_VALUE
;
3205 find_edge (cont_bb
, body_bb
)->flags
= EDGE_TRUE_VALUE
;
3206 find_edge (cont_bb
, trip_update_bb
)->flags
= EDGE_FALSE_VALUE
;
3208 redirect_edge_and_branch (single_succ_edge (trip_update_bb
), iter_part_bb
);
3210 if (gimple_in_ssa_p (cfun
))
3212 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
3213 remove arguments of the phi nodes in fin_bb. We need to create
3214 appropriate phi nodes in iter_part_bb instead. */
3215 se
= single_pred_edge (fin_bb
);
3216 re
= single_succ_edge (trip_update_bb
);
3217 ene
= single_succ_edge (entry_bb
);
3219 args
= PENDING_STMT (re
);
3220 PENDING_STMT (re
) = NULL_TREE
;
3221 for (phi
= phi_nodes (fin_bb
);
3223 phi
= PHI_CHAIN (phi
), args
= TREE_CHAIN (args
))
3225 t
= PHI_RESULT (phi
);
3226 gcc_assert (t
== TREE_PURPOSE (args
));
3227 nphi
= create_phi_node (t
, iter_part_bb
);
3228 SSA_NAME_DEF_STMT (t
) = nphi
;
3230 t
= PHI_ARG_DEF_FROM_EDGE (phi
, se
);
3231 /* A special case -- fd->v is not yet computed in iter_part_bb, we
3232 need to use v_extra instead. */
3235 add_phi_arg (nphi
, t
, ene
);
3236 add_phi_arg (nphi
, TREE_VALUE (args
), re
);
3238 gcc_assert (!phi
&& !args
);
3239 while ((phi
= phi_nodes (fin_bb
)) != NULL_TREE
)
3240 remove_phi_node (phi
, NULL_TREE
, false);
3242 /* Make phi node for trip. */
3243 phi
= create_phi_node (trip_main
, iter_part_bb
);
3244 SSA_NAME_DEF_STMT (trip_main
) = phi
;
3245 add_phi_arg (phi
, trip_back
, single_succ_edge (trip_update_bb
));
3246 add_phi_arg (phi
, trip_init
, single_succ_edge (entry_bb
));
3249 set_immediate_dominator (CDI_DOMINATORS
, trip_update_bb
, cont_bb
);
3250 set_immediate_dominator (CDI_DOMINATORS
, iter_part_bb
,
3251 recompute_dominator (CDI_DOMINATORS
, iter_part_bb
));
3252 set_immediate_dominator (CDI_DOMINATORS
, fin_bb
,
3253 recompute_dominator (CDI_DOMINATORS
, fin_bb
));
3254 set_immediate_dominator (CDI_DOMINATORS
, seq_start_bb
,
3255 recompute_dominator (CDI_DOMINATORS
, seq_start_bb
));
3256 set_immediate_dominator (CDI_DOMINATORS
, body_bb
,
3257 recompute_dominator (CDI_DOMINATORS
, body_bb
));
3261 /* Expand the OpenMP loop defined by REGION. */
3264 expand_omp_for (struct omp_region
*region
)
3266 struct omp_for_data fd
;
3268 extract_omp_for_data (last_stmt (region
->entry
), &fd
);
3269 region
->sched_kind
= fd
.sched_kind
;
3271 if (fd
.sched_kind
== OMP_CLAUSE_SCHEDULE_STATIC
3273 && region
->cont
!= NULL
)
3275 if (fd
.chunk_size
== NULL
)
3276 expand_omp_for_static_nochunk (region
, &fd
);
3278 expand_omp_for_static_chunk (region
, &fd
);
3282 int fn_index
= fd
.sched_kind
+ fd
.have_ordered
* 4;
3283 int start_ix
= BUILT_IN_GOMP_LOOP_STATIC_START
+ fn_index
;
3284 int next_ix
= BUILT_IN_GOMP_LOOP_STATIC_NEXT
+ fn_index
;
3285 expand_omp_for_generic (region
, &fd
, start_ix
, next_ix
);
3288 update_ssa (TODO_update_ssa_only_virtuals
);
/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

	v = GOMP_sections_start (n);
    L0:
	switch (v)
	  {
	  case 0:
	    goto L2;
	  case 1:
	    section 1;
	    goto L1;
	  case 2:
	    ...
	  case n:
	    ...
	  default:
	    abort ();
	  }
    L1:
	v = GOMP_sections_next ();
	goto L0;
    L2:
	reduction;

    If this is a combined parallel sections, replace the call to
    GOMP_sections_start with call to GOMP_sections_next.  */
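
/* Illustrative sketch (editorial addition, not compiled): the dispatch
   loop that a two-section construct expands to, written as plain C.  */
#if 0
static void
sections_example (void)
{
  unsigned v;

  for (v = GOMP_sections_start (2); v != 0; v = GOMP_sections_next ())
    switch (v)
      {
      case 1:
	/* first #pragma omp section body */
	break;
      case 2:
	/* second #pragma omp section body */
	break;
      default:
	__builtin_trap ();
      }

  GOMP_sections_end ();	/* implicit barrier unless 'nowait' */
}
#endif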
3320 expand_omp_sections (struct omp_region
*region
)
3322 tree label_vec
, l1
, l2
, t
, u
, sections_stmt
, vin
, vmain
, vnext
, cont
;
3323 unsigned i
, casei
, len
;
3324 basic_block entry_bb
, l0_bb
, l1_bb
, l2_bb
, default_bb
;
3325 block_stmt_iterator si
;
3326 struct omp_region
*inner
;
3327 bool exit_reachable
= region
->cont
!= NULL
;
3329 gcc_assert (exit_reachable
== (region
->exit
!= NULL
));
3330 entry_bb
= region
->entry
;
3331 l0_bb
= single_succ (entry_bb
);
3332 l1_bb
= region
->cont
;
3333 l2_bb
= region
->exit
;
3336 gcc_assert (single_pred (l2_bb
) == l0_bb
);
3337 default_bb
= create_empty_bb (l1_bb
->prev_bb
);
3338 l1
= tree_block_label (l1_bb
);
3339 l2
= tree_block_label (l2_bb
);
3343 default_bb
= create_empty_bb (l0_bb
);
3345 l2
= tree_block_label (default_bb
);
3348 /* We will build a switch() with enough cases for all the
3349 OMP_SECTION regions, a '0' case to handle the end of more work
3350 and a default case to abort if something goes wrong. */
3351 len
= EDGE_COUNT (l0_bb
->succs
);
3352 label_vec
= make_tree_vec (len
+ 1);
3354 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
3355 OMP_SECTIONS statement. */
3356 si
= bsi_last (entry_bb
);
3357 sections_stmt
= bsi_stmt (si
);
3358 gcc_assert (TREE_CODE (sections_stmt
) == OMP_SECTIONS
);
3359 vin
= OMP_SECTIONS_CONTROL (sections_stmt
);
3360 if (!is_combined_parallel (region
))
3362 /* If we are not inside a combined parallel+sections region,
3363 call GOMP_sections_start. */
3364 t
= build_int_cst (unsigned_type_node
,
3365 exit_reachable
? len
- 1 : len
);
3366 u
= built_in_decls
[BUILT_IN_GOMP_SECTIONS_START
];
3367 t
= build_call_expr (u
, 1, t
);
3371 /* Otherwise, call GOMP_sections_next. */
3372 u
= built_in_decls
[BUILT_IN_GOMP_SECTIONS_NEXT
];
3373 t
= build_call_expr (u
, 0);
3375 t
= build_gimple_modify_stmt (vin
, t
);
3376 bsi_insert_after (&si
, t
, BSI_SAME_STMT
);
3377 if (gimple_in_ssa_p (cfun
))
3378 SSA_NAME_DEF_STMT (vin
) = t
;
3379 bsi_remove (&si
, true);
3381 /* The switch() statement replacing OMP_SECTIONS_SWITCH goes in L0_BB. */
3382 si
= bsi_last (l0_bb
);
3383 gcc_assert (TREE_CODE (bsi_stmt (si
)) == OMP_SECTIONS_SWITCH
);
3386 cont
= last_stmt (l1_bb
);
3387 gcc_assert (TREE_CODE (cont
) == OMP_CONTINUE
);
3388 vmain
= TREE_OPERAND (cont
, 1);
3389 vnext
= TREE_OPERAND (cont
, 0);
3397 t
= build3 (SWITCH_EXPR
, void_type_node
, vmain
, NULL
, label_vec
);
3398 bsi_insert_after (&si
, t
, BSI_SAME_STMT
);
3399 bsi_remove (&si
, true);
3404 t
= build3 (CASE_LABEL_EXPR
, void_type_node
,
3405 build_int_cst (unsigned_type_node
, 0), NULL
, l2
);
3406 TREE_VEC_ELT (label_vec
, 0) = t
;
3410 /* Convert each OMP_SECTION into a CASE_LABEL_EXPR. */
3411 for (inner
= region
->inner
, casei
= 1;
3413 inner
= inner
->next
, i
++, casei
++)
3415 basic_block s_entry_bb
, s_exit_bb
;
3417 s_entry_bb
= inner
->entry
;
3418 s_exit_bb
= inner
->exit
;
3420 t
= tree_block_label (s_entry_bb
);
3421 u
= build_int_cst (unsigned_type_node
, casei
);
3422 u
= build3 (CASE_LABEL_EXPR
, void_type_node
, u
, NULL
, t
);
3423 TREE_VEC_ELT (label_vec
, i
) = u
;
3425 si
= bsi_last (s_entry_bb
);
3426 gcc_assert (TREE_CODE (bsi_stmt (si
)) == OMP_SECTION
);
3427 gcc_assert (i
< len
|| OMP_SECTION_LAST (bsi_stmt (si
)));
3428 bsi_remove (&si
, true);
3429 single_succ_edge (s_entry_bb
)->flags
= EDGE_FALLTHRU
;
3431 if (s_exit_bb
== NULL
)
3434 si
= bsi_last (s_exit_bb
);
3435 gcc_assert (TREE_CODE (bsi_stmt (si
)) == OMP_RETURN
);
3436 bsi_remove (&si
, true);
3438 single_succ_edge (s_exit_bb
)->flags
= EDGE_FALLTHRU
;
3441 /* Error handling code goes in DEFAULT_BB. */
3442 t
= tree_block_label (default_bb
);
3443 u
= build3 (CASE_LABEL_EXPR
, void_type_node
, NULL
, NULL
, t
);
3444 TREE_VEC_ELT (label_vec
, len
) = u
;
3445 make_edge (l0_bb
, default_bb
, 0);
3447 si
= bsi_start (default_bb
);
3448 t
= build_call_expr (built_in_decls
[BUILT_IN_TRAP
], 0);
3449 bsi_insert_after (&si
, t
, BSI_CONTINUE_LINKING
);
3453 /* Code to get the next section goes in L1_BB. */
3454 si
= bsi_last (l1_bb
);
3455 gcc_assert (TREE_CODE (bsi_stmt (si
)) == OMP_CONTINUE
);
3457 t
= build_call_expr (built_in_decls
[BUILT_IN_GOMP_SECTIONS_NEXT
], 0);
3458 t
= build_gimple_modify_stmt (vnext
, t
);
3459 bsi_insert_after (&si
, t
, BSI_SAME_STMT
);
3460 if (gimple_in_ssa_p (cfun
))
3461 SSA_NAME_DEF_STMT (vnext
) = t
;
3462 bsi_remove (&si
, true);
3464 single_succ_edge (l1_bb
)->flags
= EDGE_FALLTHRU
;
3466 /* Cleanup function replaces OMP_RETURN in EXIT_BB. */
3467 si
= bsi_last (l2_bb
);
3468 if (OMP_RETURN_NOWAIT (bsi_stmt (si
)))
3469 t
= built_in_decls
[BUILT_IN_GOMP_SECTIONS_END_NOWAIT
];
3471 t
= built_in_decls
[BUILT_IN_GOMP_SECTIONS_END
];
3472 t
= build_call_expr (t
, 0);
3473 bsi_insert_after (&si
, t
, BSI_SAME_STMT
);
3474 bsi_remove (&si
, true);
3477 set_immediate_dominator (CDI_DOMINATORS
, default_bb
, l0_bb
);
/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.  */

static void
expand_omp_single (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  block_stmt_iterator si;
  bool need_barrier = false;
3491 entry_bb
= region
->entry
;
3492 exit_bb
= region
->exit
;
3494 si
= bsi_last (entry_bb
);
3495 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
3496 be removed. We need to ensure that the thread that entered the single
3497 does not exit before the data is copied out by the other threads. */
3498 if (find_omp_clause (OMP_SINGLE_CLAUSES (bsi_stmt (si
)),
3499 OMP_CLAUSE_COPYPRIVATE
))
3500 need_barrier
= true;
3501 gcc_assert (TREE_CODE (bsi_stmt (si
)) == OMP_SINGLE
);
3502 bsi_remove (&si
, true);
3503 single_succ_edge (entry_bb
)->flags
= EDGE_FALLTHRU
;
3505 si
= bsi_last (exit_bb
);
3506 if (!OMP_RETURN_NOWAIT (bsi_stmt (si
)) || need_barrier
)
3507 force_gimple_operand_bsi (&si
, build_omp_barrier (), false, NULL_TREE
,
3508 false, BSI_SAME_STMT
);
3509 bsi_remove (&si
, true);
3510 single_succ_edge (exit_bb
)->flags
= EDGE_FALLTHRU
;
/* Generic expansion for OpenMP synchronization directives: master,
   ordered and critical.  All we need to do here is remove the entry
   and exit markers for REGION.  */

static void
expand_omp_synch (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  block_stmt_iterator si;
3524 entry_bb
= region
->entry
;
3525 exit_bb
= region
->exit
;
3527 si
= bsi_last (entry_bb
);
3528 gcc_assert (TREE_CODE (bsi_stmt (si
)) == OMP_SINGLE
3529 || TREE_CODE (bsi_stmt (si
)) == OMP_MASTER
3530 || TREE_CODE (bsi_stmt (si
)) == OMP_ORDERED
3531 || TREE_CODE (bsi_stmt (si
)) == OMP_CRITICAL
);
3532 bsi_remove (&si
, true);
3533 single_succ_edge (entry_bb
)->flags
= EDGE_FALLTHRU
;
3537 si
= bsi_last (exit_bb
);
3538 gcc_assert (TREE_CODE (bsi_stmt (si
)) == OMP_RETURN
);
3539 bsi_remove (&si
, true);
3540 single_succ_edge (exit_bb
)->flags
= EDGE_FALLTHRU
;
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a __sync_fetch_and_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns false if the expression is not of the proper form.  */
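
/* Illustrative sketch (editorial addition, not compiled): when the
   statement has the form "x = x + expr" on a suitably sized integer, the
   whole atomic region collapses to a single fetch-and-op builtin.  */
#if 0
static void
atomic_add_example (int *x, int expr)
{
  /* #pragma omp atomic
	 *x += expr;
     becomes (result value discarded):  */
  __sync_fetch_and_add (x, expr);
}
#endif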
3550 expand_omp_atomic_fetch_op (basic_block load_bb
,
3551 tree addr
, tree loaded_val
,
3552 tree stored_val
, int index
)
3554 enum built_in_function base
;
3555 tree decl
, itype
, call
;
3556 enum insn_code
*optab
;
3558 basic_block store_bb
= single_succ (load_bb
);
3559 block_stmt_iterator bsi
;
3562 /* We expect to find the following sequences:
3565 OMP_ATOMIC_LOAD (tmp, mem)
3568 val = tmp OP something; (or: something OP tmp)
3571 ???FIXME: Allow a more flexible sequence.
3572 Perhaps use data flow to pick the statements.
3576 bsi
= bsi_after_labels (store_bb
);
3577 stmt
= bsi_stmt (bsi
);
3578 if (TREE_CODE (stmt
) != GIMPLE_MODIFY_STMT
)
3581 if (TREE_CODE (bsi_stmt (bsi
)) != OMP_ATOMIC_STORE
)
3584 if (!operand_equal_p (GIMPLE_STMT_OPERAND (stmt
, 0), stored_val
, 0))
3587 rhs
= GIMPLE_STMT_OPERAND (stmt
, 1);
3589 /* Check for one of the supported fetch-op operations. */
3590 switch (TREE_CODE (rhs
))
3593 case POINTER_PLUS_EXPR
:
3594 base
= BUILT_IN_FETCH_AND_ADD_N
;
3595 optab
= sync_add_optab
;
3598 base
= BUILT_IN_FETCH_AND_SUB_N
;
3599 optab
= sync_add_optab
;
3602 base
= BUILT_IN_FETCH_AND_AND_N
;
3603 optab
= sync_and_optab
;
3606 base
= BUILT_IN_FETCH_AND_OR_N
;
3607 optab
= sync_ior_optab
;
3610 base
= BUILT_IN_FETCH_AND_XOR_N
;
3611 optab
= sync_xor_optab
;
3616 /* Make sure the expression is of the proper form. */
3617 if (operand_equal_p (TREE_OPERAND (rhs
, 0), loaded_val
, 0))
3618 rhs
= TREE_OPERAND (rhs
, 1);
3619 else if (commutative_tree_code (TREE_CODE (rhs
))
3620 && operand_equal_p (TREE_OPERAND (rhs
, 1), loaded_val
, 0))
3621 rhs
= TREE_OPERAND (rhs
, 0);
3625 decl
= built_in_decls
[base
+ index
+ 1];
3626 itype
= TREE_TYPE (TREE_TYPE (decl
));
3628 if (optab
[TYPE_MODE (itype
)] == CODE_FOR_nothing
)
3631 bsi
= bsi_last (load_bb
);
3632 gcc_assert (TREE_CODE (bsi_stmt (bsi
)) == OMP_ATOMIC_LOAD
);
3633 call
= build_call_expr (decl
, 2, addr
, fold_convert (itype
, rhs
));
3634 force_gimple_operand_bsi (&bsi
, call
, true, NULL_TREE
, true, BSI_SAME_STMT
);
3635 bsi_remove (&bsi
, true);
3637 bsi
= bsi_last (store_bb
);
3638 gcc_assert (TREE_CODE (bsi_stmt (bsi
)) == OMP_ATOMIC_STORE
);
3639 bsi_remove (&bsi
, true);
3640 bsi
= bsi_last (store_bb
);
3641 bsi_remove (&bsi
, true);
3643 if (gimple_in_ssa_p (cfun
))
3644 update_ssa (TODO_update_ssa_no_phi
);
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      oldval = *addr;
      repeat:
	newval = rhs;	 // with oldval replacing *addr in rhs
	oldval = __sync_val_compare_and_swap (addr, oldval, newval);
	if (oldval != newval)
	  goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */
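
/* Illustrative sketch (editorial addition, not compiled): the
   compare-and-swap loop for an atomic update with no fetch-and-op form,
   e.g. "x = x * expr" on an int.  */
#if 0
static void
atomic_cas_example (int *addr, int expr)
{
  int oldval, newval, prev;

  oldval = *addr;
  for (;;)
    {
      newval = oldval * expr;
      prev = __sync_val_compare_and_swap (addr, oldval, newval);
      if (prev == oldval)
	break;			/* swap succeeded */
      oldval = prev;		/* lost a race; retry with new value */
    }
}
#endif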
3662 expand_omp_atomic_pipeline (basic_block load_bb
, basic_block store_bb
,
3663 tree addr
, tree loaded_val
, tree stored_val
,
3666 tree loadedi
, storedi
, initial
, new_stored
, new_storedi
, old_vali
;
3667 tree type
, itype
, cmpxchg
, iaddr
;
3668 block_stmt_iterator bsi
;
3669 basic_block loop_header
= single_succ (load_bb
);
3673 cmpxchg
= built_in_decls
[BUILT_IN_VAL_COMPARE_AND_SWAP_N
+ index
+ 1];
3674 type
= TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr
)));
3675 itype
= TREE_TYPE (TREE_TYPE (cmpxchg
));
3677 if (sync_compare_and_swap
[TYPE_MODE (itype
)] == CODE_FOR_nothing
)
3680 /* Load the initial value, replacing the OMP_ATOMIC_LOAD. */
3681 bsi
= bsi_last (load_bb
);
3682 gcc_assert (TREE_CODE (bsi_stmt (bsi
)) == OMP_ATOMIC_LOAD
);
3683 initial
= force_gimple_operand_bsi (&bsi
, build_fold_indirect_ref (addr
),
3684 true, NULL_TREE
, true, BSI_SAME_STMT
);
3685 /* Move the value to the LOADED_VAL temporary. */
3686 if (gimple_in_ssa_p (cfun
))
3688 gcc_assert (phi_nodes (loop_header
) == NULL_TREE
);
3689 phi
= create_phi_node (loaded_val
, loop_header
);
3690 SSA_NAME_DEF_STMT (loaded_val
) = phi
;
3691 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi
, single_succ_edge (load_bb
)),
3695 bsi_insert_before (&bsi
,
3696 build_gimple_modify_stmt (loaded_val
, initial
),
3698 bsi_remove (&bsi
, true);
3700 bsi
= bsi_last (store_bb
);
3701 gcc_assert (TREE_CODE (bsi_stmt (bsi
)) == OMP_ATOMIC_STORE
);
3703 /* For floating-point values, we'll need to view-convert them to integers
3704 so that we can perform the atomic compare and swap. Simplify the
3705 following code by always setting up the "i"ntegral variables. */
3706 if (INTEGRAL_TYPE_P (type
) || POINTER_TYPE_P (type
))
3708 loadedi
= loaded_val
;
3709 storedi
= stored_val
;
3714 loadedi
= force_gimple_operand_bsi (&bsi
,
3715 build1 (VIEW_CONVERT_EXPR
, itype
,
3717 NULL_TREE
, true, BSI_SAME_STMT
);
3719 force_gimple_operand_bsi (&bsi
,
3720 build1 (VIEW_CONVERT_EXPR
, itype
,
3721 stored_val
), true, NULL_TREE
, true,
3723 iaddr
= fold_convert (build_pointer_type (itype
), addr
);
3726 /* Build the compare&swap statement. */
3727 new_storedi
= build_call_expr (cmpxchg
, 3, iaddr
, loadedi
, storedi
);
3728 new_storedi
= force_gimple_operand_bsi (&bsi
,
3729 fold_convert (itype
, new_storedi
),
3731 true, BSI_SAME_STMT
);
3732 if (storedi
== stored_val
)
3733 new_stored
= new_storedi
;
3735 new_stored
= force_gimple_operand_bsi (&bsi
,
3736 build1 (VIEW_CONVERT_EXPR
, type
,
3738 NULL_TREE
, true, BSI_SAME_STMT
);
3740 if (gimple_in_ssa_p (cfun
))
3744 old_vali
= create_tmp_var (itype
, NULL
);
3745 x
= build_gimple_modify_stmt (old_vali
, loadedi
);
3746 bsi_insert_before (&bsi
, x
, BSI_SAME_STMT
);
3748 x
= build_gimple_modify_stmt (loaded_val
, new_stored
);
3749 bsi_insert_before (&bsi
, x
, BSI_SAME_STMT
);
3752 /* Note that we always perform the comparison as an integer, even for
3753 floating point. This allows the atomic operation to properly
3754 succeed even with NaNs and -0.0. */
3755 x
= build3 (COND_EXPR
, void_type_node
,
3756 build2 (NE_EXPR
, boolean_type_node
,
3757 new_storedi
, old_vali
), NULL_TREE
, NULL_TREE
);
3758 bsi_insert_before (&bsi
, x
, BSI_SAME_STMT
);
3761 e
= single_succ_edge (store_bb
);
3762 e
->flags
&= ~EDGE_FALLTHRU
;
3763 e
->flags
|= EDGE_FALSE_VALUE
;
3765 e
= make_edge (store_bb
, loop_header
, EDGE_TRUE_VALUE
);
3767 /* Copy the new value to loaded_val (we already did that before the condition
3768 if we are not in SSA). */
3769 if (gimple_in_ssa_p (cfun
))
3771 phi
= phi_nodes (loop_header
);
3772 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi
, e
), new_stored
);
3775 /* Remove OMP_ATOMIC_STORE. */
3776 bsi_remove (&bsi
, true);
3778 if (gimple_in_ssa_p (cfun
))
3779 update_ssa (TODO_update_ssa_no_phi
);
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

	GOMP_atomic_start ();
	*addr = rhs;
	GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, this appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.

   LOADED_VAL and ADDR are the operands of OMP_ATOMIC_LOAD we're expanding.
   STORED_VAL is the operand of the matching OMP_ATOMIC_STORE.

   We replace

	OMP_ATOMIC_LOAD (loaded_val, addr)

   with

	loaded_val = *addr;

   and replace

	OMP_ATOMIC_STORE (stored_val)

   with

	*addr = stored_val;  */
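
/* Illustrative sketch (editorial addition, not compiled): the mutex
   fallback at the source level.  GOMP_atomic_start/GOMP_atomic_end take a
   library-wide lock in libgomp, so this is correct only as long as every
   conflicting access also goes through #pragma omp atomic.  */
#if 0
static void
atomic_mutex_example (double *addr, double rhs)
{
  GOMP_atomic_start ();
  *addr = *addr + rhs;
  GOMP_atomic_end ();
}
#endif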
3808 expand_omp_atomic_mutex (basic_block load_bb
, basic_block store_bb
,
3809 tree addr
, tree loaded_val
, tree stored_val
)
3811 block_stmt_iterator bsi
;
3814 bsi
= bsi_last (load_bb
);
3815 gcc_assert (TREE_CODE (bsi_stmt (bsi
)) == OMP_ATOMIC_LOAD
);
3817 t
= built_in_decls
[BUILT_IN_GOMP_ATOMIC_START
];
3818 t
= build_function_call_expr (t
, 0);
3819 force_gimple_operand_bsi (&bsi
, t
, true, NULL_TREE
, true, BSI_SAME_STMT
);
3821 t
= build_gimple_modify_stmt (loaded_val
, build_fold_indirect_ref (addr
));
3822 if (gimple_in_ssa_p (cfun
))
3823 SSA_NAME_DEF_STMT (loaded_val
) = t
;
3824 bsi_insert_before (&bsi
, t
, BSI_SAME_STMT
);
3825 bsi_remove (&bsi
, true);
3827 bsi
= bsi_last (store_bb
);
3828 gcc_assert (TREE_CODE (bsi_stmt (bsi
)) == OMP_ATOMIC_STORE
);
3830 t
= build_gimple_modify_stmt (build_fold_indirect_ref (unshare_expr (addr
)),
3832 bsi_insert_before (&bsi
, t
, BSI_SAME_STMT
);
3834 t
= built_in_decls
[BUILT_IN_GOMP_ATOMIC_END
];
3835 t
= build_function_call_expr (t
, 0);
3836 force_gimple_operand_bsi (&bsi
, t
, true, NULL_TREE
, true, BSI_SAME_STMT
);
3837 bsi_remove (&bsi
, true);
3839 if (gimple_in_ssa_p (cfun
))
3840 update_ssa (TODO_update_ssa_no_phi
);
/* Expand an OMP_ATOMIC statement.  We try to expand
   using expand_omp_atomic_fetch_op.  If that fails, we try to
   call expand_omp_atomic_pipeline, and if that fails too, the
   ultimate fallback is wrapping the operation in a mutex
   (expand_omp_atomic_mutex).  REGION is the atomic region built
   by build_omp_regions_1().  */
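
/* Illustrative sketch (editorial addition, not compiled): the three-tier
   strategy in outline.  try_fetch_op, try_cas_loop, mutex_fallback and
   exact_log2_example are hypothetical stand-ins for the expanders and
   helpers this pass actually uses.  */
#if 0
static void
expand_atomic_example (void *addr, unsigned long size, unsigned long align)
{
  int index = exact_log2_example (size);	/* log2 of operand size */

  if (index >= 0 && index <= 4
      && exact_log2_example (align) >= index)
    {
      if (try_fetch_op (addr))		/* __sync_fetch_and_OP builtin */
	return;
      if (try_cas_loop (addr))		/* compare-and-swap loop */
	return;
    }
  mutex_fallback (addr);		/* GOMP_atomic_start/end */
}
#endif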
3852 expand_omp_atomic (struct omp_region
*region
)
3854 basic_block load_bb
= region
->entry
, store_bb
= region
->exit
;
3855 tree load
= last_stmt (load_bb
), store
= last_stmt (store_bb
);
3856 tree loaded_val
= TREE_OPERAND (load
, 0);
3857 tree addr
= TREE_OPERAND (load
, 1);
3858 tree stored_val
= TREE_OPERAND (store
, 0);
3859 tree type
= TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr
)));
3860 HOST_WIDE_INT index
;
3862 /* Make sure the type is one of the supported sizes. */
3863 index
= tree_low_cst (TYPE_SIZE_UNIT (type
), 1);
3864 index
= exact_log2 (index
);
3865 if (index
>= 0 && index
<= 4)
3867 unsigned int align
= TYPE_ALIGN_UNIT (type
);
3869 /* __sync builtins require strict data alignment. */
3870 if (exact_log2 (align
) >= index
)
3872 /* When possible, use specialized atomic update functions. */
3873 if ((INTEGRAL_TYPE_P (type
) || POINTER_TYPE_P (type
))
3874 && store_bb
== single_succ (load_bb
))
3876 if (expand_omp_atomic_fetch_op (load_bb
, addr
,
3877 loaded_val
, stored_val
, index
))
3881 /* If we don't have specialized __sync builtins, try and implement
3882 as a compare and swap loop. */
3883 if (expand_omp_atomic_pipeline (load_bb
, store_bb
, addr
,
3884 loaded_val
, stored_val
, index
))
3889 /* The ultimate fallback is wrapping the operation in a mutex. */
3890 expand_omp_atomic_mutex (load_bb
, store_bb
, addr
, loaded_val
, stored_val
);
/* Expand the parallel region tree rooted at REGION.  Expansion
   proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to
   be created (e.g., OMP_PARALLEL) can be expanded without having any
   internal dependencies in their body.  */

static void
expand_omp (struct omp_region *region)
{
3906 expand_omp (region
->inner
);
3908 switch (region
->type
)
3911 expand_omp_parallel (region
);
3915 expand_omp_for (region
);
3919 expand_omp_sections (region
);
3923 /* Individual omp sections are handled together with their
3924 parent OMP_SECTIONS region. */
3928 expand_omp_single (region
);
3934 expand_omp_synch (region
);
3937 case OMP_ATOMIC_LOAD
:
3938 expand_omp_atomic (region
);
3946 region
= region
->next
;
3951 /* Helper for build_omp_regions. Scan the dominator tree starting at
3952 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
3953 true, the function ends once a single tree is built (otherwise, whole
3954 forest of OMP constructs may be built). */
3957 build_omp_regions_1 (basic_block bb
, struct omp_region
*parent
,
3960 block_stmt_iterator si
;
3965 if (!bsi_end_p (si
) && OMP_DIRECTIVE_P (bsi_stmt (si
)))
3967 struct omp_region
*region
;
3968 enum tree_code code
;
3970 stmt
= bsi_stmt (si
);
3971 code
= TREE_CODE (stmt
);
3972 if (code
== OMP_RETURN
)
3974 /* STMT is the return point out of region PARENT. Mark it
3975 as the exit point and make PARENT the immediately
3976 enclosing region. */
3977 gcc_assert (parent
);
3980 parent
= parent
->outer
;
3982 /* If REGION is a parallel region, determine whether it is
3983 a combined parallel+workshare region. */
3984 if (region
->type
== OMP_PARALLEL
)
3985 determine_parallel_type (region
);
3987 else if (code
== OMP_ATOMIC_STORE
)
3989 /* OMP_ATOMIC_STORE is analoguous to OMP_RETURN, but matches with
3991 gcc_assert (parent
);
3992 gcc_assert (parent
->type
== OMP_ATOMIC_LOAD
);
3995 parent
= parent
->outer
;
3998 else if (code
== OMP_CONTINUE
)
4000 gcc_assert (parent
);
4003 else if (code
== OMP_SECTIONS_SWITCH
)
4005 /* OMP_SECTIONS_SWITCH is part of OMP_SECTIONS, and we do nothing for
4010 /* Otherwise, this directive becomes the parent for a new
4012 region
= new_omp_region (bb
, code
, parent
);
4017 if (single_tree
&& !parent
)
4020 for (son
= first_dom_son (CDI_DOMINATORS
, bb
);
4022 son
= next_dom_son (CDI_DOMINATORS
, son
))
4023 build_omp_regions_1 (son
, parent
, single_tree
);
4026 /* Builds the tree of OMP regions rooted at ROOT, storing it to
4030 build_omp_regions_root (basic_block root
)
4032 gcc_assert (root_omp_region
== NULL
);
4033 build_omp_regions_1 (root
, NULL
, true);
4034 gcc_assert (root_omp_region
!= NULL
);
4037 /* Expands omp construct (and its subconstructs) starting in HEAD. */
4040 omp_expand_local (basic_block head
)
4042 build_omp_regions_root (head
);
4043 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
4045 fprintf (dump_file
, "\nOMP region tree\n\n");
4046 dump_omp_region (dump_file
, root_omp_region
, 0);
4047 fprintf (dump_file
, "\n");
4050 remove_exit_barriers (root_omp_region
);
4051 expand_omp (root_omp_region
);
4053 free_omp_regions ();
4056 /* Scan the CFG and build a tree of OMP regions. Return the root of
4057 the OMP region tree. */
4060 build_omp_regions (void)
4062 gcc_assert (root_omp_region
== NULL
);
4063 calculate_dominance_info (CDI_DOMINATORS
);
4064 build_omp_regions_1 (ENTRY_BLOCK_PTR
, NULL
, false);
4068 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
4071 execute_expand_omp (void)
4073 build_omp_regions ();
4075 if (!root_omp_region
)
4080 fprintf (dump_file
, "\nOMP region tree\n\n");
4081 dump_omp_region (dump_file
, root_omp_region
, 0);
4082 fprintf (dump_file
, "\n");
4085 remove_exit_barriers (root_omp_region
);
4087 expand_omp (root_omp_region
);
4089 cleanup_tree_cfg ();
4091 free_omp_regions ();
/* OMP expansion in SSA form.  For testing purposes only.  */

static bool
gate_expand_omp_ssa (void)
{
  return flag_openmp_ssa && flag_openmp != 0 && errorcount == 0;
}
4104 struct tree_opt_pass pass_expand_omp_ssa
=
4106 "ompexpssa", /* name */
4107 gate_expand_omp_ssa
, /* gate */
4108 execute_expand_omp
, /* execute */
4111 0, /* static_pass_number */
4113 PROP_gimple_any
, /* properties_required */
4114 PROP_gimple_lomp
, /* properties_provided */
4115 0, /* properties_destroyed */
4116 0, /* todo_flags_start */
4117 TODO_dump_func
, /* todo_flags_finish */
/* OMP expansion -- the default pass, run before creation of SSA form.  */

static bool
gate_expand_omp (void)
{
  return ((!flag_openmp_ssa || !optimize)
	  && flag_openmp != 0 && errorcount == 0);
}
4130 struct tree_opt_pass pass_expand_omp
=
4132 "ompexp", /* name */
4133 gate_expand_omp
, /* gate */
4134 execute_expand_omp
, /* execute */
4137 0, /* static_pass_number */
4139 PROP_gimple_any
, /* properties_required */
4140 PROP_gimple_lomp
, /* properties_provided */
4141 0, /* properties_destroyed */
4142 0, /* todo_flags_start */
4143 TODO_dump_func
, /* todo_flags_finish */
4147 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
4149 /* Lower the OpenMP sections directive in *STMT_P. */
4152 lower_omp_sections (tree
*stmt_p
, omp_context
*ctx
)
4154 tree new_stmt
, stmt
, body
, bind
, block
, ilist
, olist
, new_body
, control
;
4156 tree_stmt_iterator tsi
;
4161 push_gimplify_context ();
4165 lower_rec_input_clauses (OMP_SECTIONS_CLAUSES (stmt
), &ilist
, &dlist
, ctx
);
4167 tsi
= tsi_start (OMP_SECTIONS_BODY (stmt
));
4168 for (len
= 0; !tsi_end_p (tsi
); len
++, tsi_next (&tsi
))
4171 tsi
= tsi_start (OMP_SECTIONS_BODY (stmt
));
4172 body
= alloc_stmt_list ();
4173 for (i
= 0; i
< len
; i
++, tsi_next (&tsi
))
4176 tree sec_start
, sec_end
;
4178 sec_start
= tsi_stmt (tsi
);
4179 sctx
= maybe_lookup_ctx (sec_start
);
4182 append_to_statement_list (sec_start
, &body
);
4184 lower_omp (&OMP_SECTION_BODY (sec_start
), sctx
);
4185 append_to_statement_list (OMP_SECTION_BODY (sec_start
), &body
);
4186 OMP_SECTION_BODY (sec_start
) = NULL
;
4190 tree l
= alloc_stmt_list ();
4191 lower_lastprivate_clauses (OMP_SECTIONS_CLAUSES (stmt
), NULL
,
4193 append_to_statement_list (l
, &body
);
4194 OMP_SECTION_LAST (sec_start
) = 1;
4197 sec_end
= make_node (OMP_RETURN
);
4198 append_to_statement_list (sec_end
, &body
);
4201 block
= make_node (BLOCK
);
4202 bind
= build3 (BIND_EXPR
, void_type_node
, NULL
, body
, block
);
4205 lower_reduction_clauses (OMP_SECTIONS_CLAUSES (stmt
), &olist
, ctx
);
4207 pop_gimplify_context (NULL_TREE
);
4208 record_vars_into (ctx
->block_vars
, ctx
->cb
.dst_fn
);
4210 new_stmt
= build3 (BIND_EXPR
, void_type_node
, NULL
, NULL
, NULL
);
4211 TREE_SIDE_EFFECTS (new_stmt
) = 1;
4213 new_body
= alloc_stmt_list ();
4214 append_to_statement_list (ilist
, &new_body
);
4215 append_to_statement_list (stmt
, &new_body
);
4216 append_to_statement_list (make_node (OMP_SECTIONS_SWITCH
), &new_body
);
4217 append_to_statement_list (bind
, &new_body
);
4219 control
= create_tmp_var (unsigned_type_node
, ".section");
4220 t
= build2 (OMP_CONTINUE
, void_type_node
, control
, control
);
4221 OMP_SECTIONS_CONTROL (stmt
) = control
;
4222 append_to_statement_list (t
, &new_body
);
4224 append_to_statement_list (olist
, &new_body
);
4225 append_to_statement_list (dlist
, &new_body
);
4227 maybe_catch_exception (&new_body
);
4229 t
= make_node (OMP_RETURN
);
4230 OMP_RETURN_NOWAIT (t
) = !!find_omp_clause (OMP_SECTIONS_CLAUSES (stmt
),
4232 append_to_statement_list (t
, &new_body
);
4234 BIND_EXPR_BODY (new_stmt
) = new_body
;
4235 OMP_SECTIONS_BODY (stmt
) = NULL
;
/* A subroutine of lower_omp_single.  Expand the simple form of
   an OMP_SINGLE, without a copyprivate clause:

	if (GOMP_single_start ())
	  BODY;
	[ GOMP_barrier (); ]	-> unless 'nowait' is present.

   FIXME.  It may be better to delay expanding the logic of this until
   pass_expand_omp.  The expanded logic may make the job more difficult
   to a synchronization analysis pass.  */

static void
lower_omp_single_simple (tree single_stmt, tree *pre_p)
{
  tree t;

  t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_START], 0);
  t = build3 (COND_EXPR, void_type_node, t,
	      OMP_SINGLE_BODY (single_stmt), NULL);
  gimplify_and_add (t, pre_p);
}

/* A subroutine of lower_omp_single.  Expand the simple form of
   an OMP_SINGLE, with a copyprivate clause:

        #pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

        if ((copyout_p = GOMP_single_copy_start ()) == NULL)
          {
            BODY;
            <copy the values of 'a', 'b' and 'c' into the structure>;
            GOMP_single_copy_end (&copyout);
          }
        else
          <copy the values back out of *copyout_p>;
        GOMP_barrier ();

   FIXME.  It may be better to delay expanding the logic of this until
   pass_expand_omp.  The expanded logic may make the job more difficult
   to a synchronization analysis pass.  */

static void
lower_omp_single_copy (tree single_stmt, tree *pre_p, omp_context *ctx)
{
  tree ptr_type, t, l0, l1, l2, copyin_seq;

  ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");

  ptr_type = build_pointer_type (ctx->record_type);
  ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");

  l0 = create_artificial_label ();
  l1 = create_artificial_label ();
  l2 = create_artificial_label ();

  t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
  t = fold_convert (ptr_type, t);
  t = build_gimple_modify_stmt (ctx->receiver_decl, t);
  gimplify_and_add (t, pre_p);

  t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
              build_int_cst (ptr_type, 0));
  t = build3 (COND_EXPR, void_type_node, t,
              build_and_jump (&l0), build_and_jump (&l1));
  gimplify_and_add (t, pre_p);

  t = build1 (LABEL_EXPR, void_type_node, l0);
  gimplify_and_add (t, pre_p);

  append_to_statement_list (OMP_SINGLE_BODY (single_stmt), pre_p);

  copyin_seq = NULL;
  lower_copyprivate_clauses (OMP_SINGLE_CLAUSES (single_stmt), pre_p,
                             &copyin_seq, ctx);

  t = build_fold_addr_expr (ctx->sender_decl);
  t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
  gimplify_and_add (t, pre_p);

  t = build_and_jump (&l2);
  gimplify_and_add (t, pre_p);

  t = build1 (LABEL_EXPR, void_type_node, l1);
  gimplify_and_add (t, pre_p);

  append_to_statement_list (copyin_seq, pre_p);

  t = build1 (LABEL_EXPR, void_type_node, l2);
  gimplify_and_add (t, pre_p);
}

/* Expand code for an OpenMP single directive.  */

static void
lower_omp_single (tree *stmt_p, omp_context *ctx)
{
  tree t, bind, block, single_stmt = *stmt_p, dlist;

  push_gimplify_context ();

  block = make_node (BLOCK);
  *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  TREE_SIDE_EFFECTS (bind) = 1;

  lower_rec_input_clauses (OMP_SINGLE_CLAUSES (single_stmt),
                           &BIND_EXPR_BODY (bind), &dlist, ctx);
  lower_omp (&OMP_SINGLE_BODY (single_stmt), ctx);

  append_to_statement_list (single_stmt, &BIND_EXPR_BODY (bind));

  if (ctx->record_type)
    lower_omp_single_copy (single_stmt, &BIND_EXPR_BODY (bind), ctx);
  else
    lower_omp_single_simple (single_stmt, &BIND_EXPR_BODY (bind));

  OMP_SINGLE_BODY (single_stmt) = NULL;

  append_to_statement_list (dlist, &BIND_EXPR_BODY (bind));

  maybe_catch_exception (&BIND_EXPR_BODY (bind));

  t = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SINGLE_CLAUSES (single_stmt),
                                             OMP_CLAUSE_NOWAIT);
  append_to_statement_list (t, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);

  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}

/* Expand code for an OpenMP master directive.  */

static void
lower_omp_master (tree *stmt_p, omp_context *ctx)
{
  tree bind, block, stmt = *stmt_p, lab = NULL, x;

  push_gimplify_context ();

  block = make_node (BLOCK);
  *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  TREE_SIDE_EFFECTS (bind) = 1;

  append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));

  x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
  x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
  x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  lower_omp (&OMP_MASTER_BODY (stmt), ctx);
  maybe_catch_exception (&OMP_MASTER_BODY (stmt));
  append_to_statement_list (OMP_MASTER_BODY (stmt), &BIND_EXPR_BODY (bind));
  OMP_MASTER_BODY (stmt) = NULL;

  x = build1 (LABEL_EXPR, void_type_node, lab);
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  x = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (x) = 1;
  append_to_statement_list (x, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);

  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}

/* Expand code for an OpenMP ordered directive.  */

static void
lower_omp_ordered (tree *stmt_p, omp_context *ctx)
{
  tree bind, block, stmt = *stmt_p, x;

  push_gimplify_context ();

  block = make_node (BLOCK);
  *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  TREE_SIDE_EFFECTS (bind) = 1;

  append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));

  x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  lower_omp (&OMP_ORDERED_BODY (stmt), ctx);
  maybe_catch_exception (&OMP_ORDERED_BODY (stmt));
  append_to_statement_list (OMP_ORDERED_BODY (stmt), &BIND_EXPR_BODY (bind));
  OMP_ORDERED_BODY (stmt) = NULL;

  x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  x = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (x) = 1;
  append_to_statement_list (x, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);

  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}

/* Gimplify an OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case,
   it requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.  */
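
/* For illustration (a sketch of the intended expansion, not literal output;
   the `foo' and `count' names are made up):

        #pragma omp critical (foo)
          count++;

   is lowered below into roughly

        GOMP_critical_name_start (&.gomp_critical_user_foo);
        count++;
        GOMP_critical_name_end (&.gomp_critical_user_foo);

   while the unnamed form calls GOMP_critical_start and GOMP_critical_end
   instead.  */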

static GTY((param1_is (tree), param2_is (tree)))
  splay_tree critical_name_mutexes;

static void
lower_omp_critical (tree *stmt_p, omp_context *ctx)
{
  tree bind, block, stmt = *stmt_p;
  tree t, lock, unlock, name;

  name = OMP_CRITICAL_NAME (stmt);
  if (name)
    {
      tree decl;
      splay_tree_node n;

      if (!critical_name_mutexes)
        critical_name_mutexes
          = splay_tree_new_ggc (splay_tree_compare_pointers);

      n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
      if (n == NULL)
        {
          const char *new_str;

          decl = create_tmp_var_raw (ptr_type_node, NULL);

          new_str = ACONCAT ((".gomp_critical_user_",
                              IDENTIFIER_POINTER (name), NULL));
          DECL_NAME (decl) = get_identifier (new_str);
          TREE_PUBLIC (decl) = 1;
          TREE_STATIC (decl) = 1;
          DECL_COMMON (decl) = 1;
          DECL_ARTIFICIAL (decl) = 1;
          DECL_IGNORED_P (decl) = 1;
          varpool_finalize_decl (decl);

          splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
                             (splay_tree_value) decl);
        }
      else
        decl = (tree) n->value;

      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
      lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));

      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
      unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
    }
  else
    {
      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
      lock = build_call_expr (lock, 0);

      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
      unlock = build_call_expr (unlock, 0);
    }

  push_gimplify_context ();

  block = make_node (BLOCK);
  *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  TREE_SIDE_EFFECTS (bind) = 1;

  append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));

  gimplify_and_add (lock, &BIND_EXPR_BODY (bind));

  lower_omp (&OMP_CRITICAL_BODY (stmt), ctx);
  maybe_catch_exception (&OMP_CRITICAL_BODY (stmt));
  append_to_statement_list (OMP_CRITICAL_BODY (stmt), &BIND_EXPR_BODY (bind));
  OMP_CRITICAL_BODY (stmt) = NULL;

  gimplify_and_add (unlock, &BIND_EXPR_BODY (bind));

  t = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (t) = 1;
  append_to_statement_list (t, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);
  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}

/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST, iterator initialization is appended to
   *BODY_P.  */
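
/* For example (illustrative only; `i', `n' and `x' are made-up names), for

        #pragma omp for lastprivate (x)
        for (i = 0; i < n; i++)
          ...

   the generated copy-out code is guarded by (i >= n) -- or simply (i == n)
   when the step is +-1, see below -- so that only a thread whose iterator
   reached the global end value writes its private `x' back out.  */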

static void
lower_omp_for_lastprivate (struct omp_for_data *fd, tree *body_p,
                           tree *dlist, struct omp_context *ctx)
{
  tree clauses, cond, stmts, vinit, t;
  enum tree_code cond_code;

  cond_code = fd->cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let VRP
     type optimizations deduce the value and remove a copy.  */
  if (host_integerp (fd->step, 0))
    {
      HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->step);
      if (step == 1 || step == -1)
        cond_code = EQ_EXPR;
    }

  cond = build2 (cond_code, boolean_type_node, fd->v, fd->n2);

  clauses = OMP_FOR_CLAUSES (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
  if (stmts != NULL)
    {
      append_to_statement_list (stmts, dlist);

      /* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
      vinit = fd->n2;
      if (cond_code == EQ_EXPR
          && host_integerp (fd->n2, 0)
          && ! integer_zerop (fd->n2))
        vinit = build_int_cst (TREE_TYPE (fd->v), 0);

      /* Initialize the iterator variable, so that threads that don't execute
         any iterations don't execute the lastprivate clauses by accident.  */
      t = build_gimple_modify_stmt (fd->v, vinit);
      gimplify_and_add (t, body_p);
    }
}

/* Lower code for an OpenMP loop directive.  */
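
/* A rough sketch of the layout built by this function (illustrative only):

        <pre-body>
        <ilist: private/firstprivate/reduction setup>
        <lastprivate guard initialization of V, if needed>
        OMP_FOR (V = VAL1; V cond VAL2; V = V [+-] VAL3)
          <loop body>
        OMP_CONTINUE <V, V>
        <reduction merge code>
        <dlist: lastprivate copy-out and destructor code>
        OMP_RETURN [nowait]

   The OMP_FOR statement itself is kept; its body and pre-body are moved
   into the enclosing BIND_EXPR built here, and pass_expand_omp later
   generates the actual loop and runtime calls from this shape.  */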

static void
lower_omp_for (tree *stmt_p, omp_context *ctx)
{
  tree t, stmt, ilist, dlist, new_stmt, *body_p, *rhs_p;
  struct omp_for_data fd;

  stmt = *stmt_p;

  push_gimplify_context ();

  lower_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
  lower_omp (&OMP_FOR_BODY (stmt), ctx);

  /* Move declaration of temporaries in the loop body before we make
     it go away.  */
  if (TREE_CODE (OMP_FOR_BODY (stmt)) == BIND_EXPR)
    record_vars_into (BIND_EXPR_VARS (OMP_FOR_BODY (stmt)), ctx->cb.dst_fn);

  new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (new_stmt) = 1;
  body_p = &BIND_EXPR_BODY (new_stmt);

  /* The pre-body and input clauses go before the lowered OMP_FOR.  */
  ilist = NULL;
  dlist = NULL;
  append_to_statement_list (OMP_FOR_PRE_BODY (stmt), body_p);
  lower_rec_input_clauses (OMP_FOR_CLAUSES (stmt), body_p, &dlist, ctx);

  /* Lower the header expressions.  At this point, we can assume that
     the header is of the form:

        #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)

     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
     using the .omp_data_s mapping, if needed.  */
  rhs_p = &GIMPLE_STMT_OPERAND (OMP_FOR_INIT (stmt), 1);
  if (!is_gimple_min_invariant (*rhs_p))
    *rhs_p = get_formal_tmp_var (*rhs_p, body_p);

  rhs_p = &TREE_OPERAND (OMP_FOR_COND (stmt), 1);
  if (!is_gimple_min_invariant (*rhs_p))
    *rhs_p = get_formal_tmp_var (*rhs_p, body_p);

  rhs_p = &TREE_OPERAND (GIMPLE_STMT_OPERAND (OMP_FOR_INCR (stmt), 1), 1);
  if (!is_gimple_min_invariant (*rhs_p))
    *rhs_p = get_formal_tmp_var (*rhs_p, body_p);

  /* Once lowered, extract the bounds and clauses.  */
  extract_omp_for_data (stmt, &fd);

  lower_omp_for_lastprivate (&fd, body_p, &dlist, ctx);

  append_to_statement_list (stmt, body_p);

  append_to_statement_list (OMP_FOR_BODY (stmt), body_p);

  t = build2 (OMP_CONTINUE, void_type_node, fd.v, fd.v);
  append_to_statement_list (t, body_p);

  /* After the loop, add exit clauses.  */
  lower_reduction_clauses (OMP_FOR_CLAUSES (stmt), body_p, ctx);
  append_to_statement_list (dlist, body_p);

  maybe_catch_exception (body_p);

  /* Region exit marker goes at the end of the loop body.  */
  t = make_node (OMP_RETURN);
  OMP_RETURN_NOWAIT (t) = fd.have_nowait;
  append_to_statement_list (t, body_p);

  pop_gimplify_context (NULL_TREE);
  record_vars_into (ctx->block_vars, ctx->cb.dst_fn);

  OMP_FOR_BODY (stmt) = NULL_TREE;
  OMP_FOR_PRE_BODY (stmt) = NULL_TREE;
  *stmt_p = new_stmt;
}

/* Callback for walk_stmts.  Check if *TP only contains OMP_FOR
   or OMP_SECTIONS.  */
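
/* For instance (illustrative only), in

        #pragma omp parallel
        {
          #pragma omp for
          for (i = 0; i < n; i++)
            ...
        }

   the walk below finds exactly one work-sharing construct and nothing
   else, so the parallel may be flagged as combined -- as if it had been
   written `#pragma omp parallel for' -- which can allow the expander to
   use the combined runtime entry points later.  */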

static tree
check_combined_parallel (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  int *info = wi->info;

  switch (TREE_CODE (*tp))
    {
    case OMP_FOR:
    case OMP_SECTIONS:
      *info = *info == 0 ? 1 : -1;
      break;
    default:
      *info = -1;
      break;
    }
  return NULL;
}

/* Lower the OpenMP parallel directive in *STMT_P.  CTX holds context
   information for the directive.  */
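
/* A rough sketch only: after lowering, the OMP_PARALLEL statement survives
   with a rebuilt body of approximately

        .omp_data_i = &.omp_data_o;     <- only if a data-sharing record exists
        <par_ilist: private/firstprivate/copyin setup>
        <parallel body>
        <par_olist: reduction merge code>
        OMP_RETURN

   wrapped in an outer BIND_EXPR that performs the sender-side copies
   (ilist) before the OMP_PARALLEL and the copy-back code (olist) after it.
   Here .omp_data_o names the sender temporary created below, and
   .omp_data_i stands for the matching receiver variable set up during
   scanning.  */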

static void
lower_omp_parallel (tree *stmt_p, omp_context *ctx)
{
  tree clauses, par_bind, par_body, new_body, bind;
  tree olist, ilist, par_olist, par_ilist;
  tree stmt, child_fn, t;

  stmt = *stmt_p;

  clauses = OMP_PARALLEL_CLAUSES (stmt);
  par_bind = OMP_PARALLEL_BODY (stmt);
  par_body = BIND_EXPR_BODY (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (!OMP_PARALLEL_COMBINED (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.callback = check_combined_parallel;
      wi.info = &ws_num;
      wi.val_only = true;
      walk_stmts (&wi, &par_bind);
      if (ws_num == 1)
        OMP_PARALLEL_COMBINED (stmt) = 1;
    }

  push_gimplify_context ();

  par_olist = NULL_TREE;
  par_ilist = NULL_TREE;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (&par_body, ctx);
  lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (BIND_EXPR_VARS (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_data_o");
      OMP_PARALLEL_DATA_ARG (stmt) = ctx->sender_decl;
    }

  olist = NULL_TREE;
  ilist = NULL_TREE;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside OMP_PARALLEL_BODY.  */
  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  append_to_statement_list (ilist, &BIND_EXPR_BODY (bind));

  new_body = alloc_stmt_list ();

  if (ctx->record_type)
    {
      t = build_fold_addr_expr (ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
      t = build_gimple_modify_stmt (ctx->receiver_decl, t);
      append_to_statement_list (t, &new_body);
    }

  append_to_statement_list (par_ilist, &new_body);
  append_to_statement_list (par_body, &new_body);
  append_to_statement_list (par_olist, &new_body);
  maybe_catch_exception (&new_body);
  t = make_node (OMP_RETURN);
  append_to_statement_list (t, &new_body);
  OMP_PARALLEL_BODY (stmt) = new_body;

  append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
  append_to_statement_list (olist, &BIND_EXPR_BODY (bind));

  *stmt_p = bind;

  pop_gimplify_context (NULL_TREE);
}

/* Pass *TP back through the gimplifier within the context determined by WI.
   This handles replacement of DECL_VALUE_EXPR, as well as adjusting the
   flags on ADDR_EXPR.  */
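
/* For instance (hypothetical names): if a shared variable `x' was remapped
   during scanning so that DECL_VALUE_EXPR (x) is `.omp_data_i->x', then an
   operand such as `&x' must be re-gimplified here so that the substituted
   form, e.g.

        D.1234 = &.omp_data_i->x;

   is valid GIMPLE, with any new statements linked in before the current
   iterator position.  */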

static void
lower_regimplify (tree *tp, struct walk_stmt_info *wi)
{
  enum gimplify_status gs;
  tree pre = NULL;

  if (wi->is_lhs)
    gs = gimplify_expr (tp, &pre, NULL, is_gimple_lvalue, fb_lvalue);
  else if (wi->val_only)
    gs = gimplify_expr (tp, &pre, NULL, is_gimple_val, fb_rvalue);
  else
    gs = gimplify_expr (tp, &pre, NULL, is_gimple_formal_tmp_var, fb_rvalue);
  gcc_assert (gs == GS_ALL_DONE);

  if (pre)
    tsi_link_before (&wi->tsi, pre, TSI_SAME_STMT);
}

/* Copy EXP into a temporary.  Insert the initialization statement before TSI.  */

static tree
init_tmp_var (tree exp, tree_stmt_iterator *tsi)
{
  tree t, stmt;

  t = create_tmp_var (TREE_TYPE (exp), NULL);
  DECL_GIMPLE_REG_P (t) = 1;
  stmt = build_gimple_modify_stmt (t, exp);
  SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi)));
  tsi_link_before (tsi, stmt, TSI_SAME_STMT);

  return t;
}

/* Similarly, but copy from the temporary and insert the statement
   after the iterator.  */

static tree
save_tmp_var (tree exp, tree_stmt_iterator *tsi)
{
  tree t, stmt;

  t = create_tmp_var (TREE_TYPE (exp), NULL);
  DECL_GIMPLE_REG_P (t) = 1;
  stmt = build_gimple_modify_stmt (exp, t);
  SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi)));
  tsi_link_after (tsi, stmt, TSI_SAME_STMT);

  return t;
}

/* Callback for walk_stmts.  Lower the OpenMP directive pointed by TP.  */

static tree
lower_omp_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  omp_context *ctx = wi->info;
  tree t = *tp;

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (errorcount && OMP_DIRECTIVE_P (*tp))
    {
      *tp = build_empty_stmt ();
      return NULL_TREE;
    }

  *walk_subtrees = 0;
  switch (TREE_CODE (*tp))
    {
    case OMP_PARALLEL:
      ctx = maybe_lookup_ctx (t);
      lower_omp_parallel (tp, ctx);
      break;

    case OMP_FOR:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_for (tp, ctx);
      break;

    case OMP_SECTIONS:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_sections (tp, ctx);
      break;

    case OMP_SINGLE:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_single (tp, ctx);
      break;

    case OMP_MASTER:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_master (tp, ctx);
      break;

    case OMP_ORDERED:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_ordered (tp, ctx);
      break;

    case OMP_CRITICAL:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_critical (tp, ctx);
      break;

    case VAR_DECL:
      if (ctx && DECL_HAS_VALUE_EXPR_P (t))
        {
          lower_regimplify (&t, wi);
          if (wi->val_only)
            {
              if (wi->is_lhs)
                t = save_tmp_var (t, &wi->tsi);
              else
                t = init_tmp_var (t, &wi->tsi);
            }
          *tp = t;
        }
      break;

    case ADDR_EXPR:
      if (ctx)
        lower_regimplify (tp, wi);
      break;

    case ARRAY_REF:
    case ARRAY_RANGE_REF:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case COMPONENT_REF:
    case VIEW_CONVERT_EXPR:
      if (ctx)
        lower_regimplify (tp, wi);
      break;

    case INDIRECT_REF:
      if (ctx)
        {
          wi->is_lhs = false;
          wi->val_only = true;
          lower_regimplify (&TREE_OPERAND (t, 0), wi);
        }
      break;

    default:
      if (!TYPE_P (t) && !DECL_P (t))
        *walk_subtrees = 1;
      break;
    }

  return NULL_TREE;
}

static void
lower_omp (tree *stmt_p, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.callback = lower_omp_1;
  wi.info = ctx;
  wi.val_only = true;
  wi.want_locations = true;

  walk_stmts (&wi, stmt_p);
}

/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
                                 delete_omp_context);

  scan_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
  gcc_assert (parallel_nesting_level == 0);

  if (all_contexts->root)
    lower_omp (&DECL_SAVED_TREE (current_function_decl), NULL);

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  return 0;
}

static bool
gate_lower_omp (void)
{
  return flag_openmp != 0;
}

struct tree_opt_pass pass_lower_omp =
{
  "omplower",                           /* name */
  gate_lower_omp,                       /* gate */
  execute_lower_omp,                    /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  0,                                    /* tv_id */
  PROP_gimple_any,                      /* properties_required */
  PROP_gimple_lomp,                     /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
  0                                     /* letter */
};

/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */

static splay_tree all_labels;
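
/* For example (illustrative only; the labels are made up):

          goto lab1;
        #pragma omp single
          { lab1: ; }           -> "invalid entry to OpenMP structured block"

        #pragma omp single
          { goto lab2; }
        lab2: ;                 -> "invalid exit from OpenMP structured block"

   The error strings match those emitted by diagnose_sb_0 below.  */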

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (tree *stmt_p, tree branch_ctx, tree label_ctx)
{
  bool exit_p = true;

  if ((label_ctx ? TREE_VALUE (label_ctx) : NULL) == branch_ctx)
    return false;

  /* Try to avoid confusing the user by producing an error message
     with the correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
        {
          if (TREE_VALUE (label_ctx) == branch_ctx)
            {
              exit_p = false;
              break;
            }
          label_ctx = TREE_CHAIN (label_ctx);
        }
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");

  *stmt_p = build_empty_stmt ();
  return true;
}

/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where in the tree each label is found.  */

static tree
diagnose_sb_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  tree context = (tree) wi->info;
  tree inner_context;
  tree t = *tp;

  *walk_subtrees = 0;
  switch (TREE_CODE (t))
    {
    case OMP_PARALLEL:
    case OMP_SECTIONS:
    case OMP_SINGLE:
      walk_tree (&OMP_CLAUSES (t), diagnose_sb_1, wi, NULL);
      /* FALLTHRU */
    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_ORDERED:
    case OMP_CRITICAL:
      /* The minimal context here is just a tree of statements.  */
      inner_context = tree_cons (NULL, t, context);
      wi->info = inner_context;
      walk_stmts (wi, &OMP_BODY (t));
      wi->info = context;
      break;

    case OMP_FOR:
      walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_1, wi, NULL);
      inner_context = tree_cons (NULL, t, context);
      wi->info = inner_context;
      walk_tree (&OMP_FOR_INIT (t), diagnose_sb_1, wi, NULL);
      walk_tree (&OMP_FOR_COND (t), diagnose_sb_1, wi, NULL);
      walk_tree (&OMP_FOR_INCR (t), diagnose_sb_1, wi, NULL);
      walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
      walk_stmts (wi, &OMP_FOR_BODY (t));
      wi->info = context;
      break;

    case LABEL_EXPR:
      splay_tree_insert (all_labels, (splay_tree_key) LABEL_EXPR_LABEL (t),
                         (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  tree context = (tree) wi->info;
  splay_tree_node n;
  tree t = *tp;

  *walk_subtrees = 0;
  switch (TREE_CODE (t))
    {
    case OMP_PARALLEL:
    case OMP_SECTIONS:
    case OMP_SINGLE:
      walk_tree (&OMP_CLAUSES (t), diagnose_sb_2, wi, NULL);
      /* FALLTHRU */
    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_ORDERED:
    case OMP_CRITICAL:
      wi->info = t;
      walk_stmts (wi, &OMP_BODY (t));
      wi->info = context;
      break;

    case OMP_FOR:
      walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_2, wi, NULL);
      wi->info = t;
      walk_tree (&OMP_FOR_INIT (t), diagnose_sb_2, wi, NULL);
      walk_tree (&OMP_FOR_COND (t), diagnose_sb_2, wi, NULL);
      walk_tree (&OMP_FOR_INCR (t), diagnose_sb_2, wi, NULL);
      walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
      walk_stmts (wi, &OMP_FOR_BODY (t));
      wi->info = context;
      break;

    case GOTO_EXPR:
      {
        tree lab = GOTO_DESTINATION (t);
        if (TREE_CODE (lab) != LABEL_DECL)
          break;

        n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
        diagnose_sb_0 (tp, context, n ? (tree) n->value : NULL_TREE);
      }
      break;

    case SWITCH_EXPR:
      {
        tree vec = SWITCH_LABELS (t);
        int i, len = TREE_VEC_LENGTH (vec);
        for (i = 0; i < len; ++i)
          {
            tree lab = CASE_LABEL (TREE_VEC_ELT (vec, i));
            n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
            if (diagnose_sb_0 (tp, context, (tree) n->value))
              break;
          }
      }
      break;

    case RETURN_EXPR:
      diagnose_sb_0 (tp, context, NULL_TREE);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

void
diagnose_omp_structured_block_errors (tree fndecl)
{
  tree save_current = current_function_decl;
  struct walk_stmt_info wi;

  current_function_decl = fndecl;

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  wi.callback = diagnose_sb_1;
  walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));

  memset (&wi, 0, sizeof (wi));
  wi.callback = diagnose_sb_2;
  wi.want_locations = true;
  wi.want_return_expr = true;
  walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));

  splay_tree_delete (all_labels);
  all_labels = NULL;

  current_function_decl = save_current;
}

#include "gt-omp-low.h"