1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
6 Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "tree-gimple.h"
31 #include "tree-inline.h"
32 #include "langhooks.h"
33 #include "diagnostic.h"
34 #include "tree-flow.h"
35 #include "timevar.h"
36 #include "flags.h"
37 #include "function.h"
38 #include "expr.h"
39 #include "toplev.h"
40 #include "tree-pass.h"
41 #include "ggc.h"
42 #include "except.h"
43 #include "splay-tree.h"
44 #include "optabs.h"
45 #include "cfgloop.h"
47 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
48 phases. The first phase scans the function looking for OMP statements
49 and then for variables that must be replaced to satisfy data sharing
50 clauses. The second phase expands code for the constructs, as well as
51 re-gimplifying things when variables have been replaced with complex
52 expressions.
54 Final code generation is done by pass_expand_omp. The flowgraph is
55 scanned for parallel regions which are then moved to a new
56 function, to be invoked by the thread library. */
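/* Illustration only (not part of the original sources): assuming a caller
   named foo, a minimal region such as

	#pragma omp parallel shared (a)
	  a = a + 1;

   is, roughly, outlined into a child function and expanded into libgomp
   calls along these lines (exact shape depends on the clauses):

	.omp_data_o.a = a;
	__builtin_GOMP_parallel_start (foo.omp_fn.0, &.omp_data_o, 0);
	foo.omp_fn.0 (&.omp_data_o);
	__builtin_GOMP_parallel_end ();
	a = .omp_data_o.a;

   where foo.omp_fn.0 receives the .omp_data_s record and accesses A
   through it.  */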
58 /* Context structure. Used to store information about each parallel
59 directive in the code. */
61 typedef struct omp_context
63 /* This field must be at the beginning, as we do "inheritance": Some
64 callback functions for tree-inline.c (e.g., omp_copy_decl)
65 receive a copy_body_data pointer that is up-casted to an
66 omp_context pointer. */
67 copy_body_data cb;
69 /* The tree of contexts corresponding to the encountered constructs. */
70 struct omp_context *outer;
71 tree stmt;
73 /* Map variables to fields in a structure that allows communication
74 between sending and receiving threads. */
75 splay_tree field_map;
76 tree record_type;
77 tree sender_decl;
78 tree receiver_decl;
80 /* A chain of variables to add to the top-level block surrounding the
81 construct. In the case of a parallel, this is in the child function. */
82 tree block_vars;
84 /* What to do with variables with implicitly determined sharing
85 attributes. */
86 enum omp_clause_default_kind default_kind;
 88   /* Nesting depth of this context.  Used to beautify error messages
 89      regarding invalid gotos.  The outermost ctx is depth 1, with depth 0 being
90 reserved for the main body of the function. */
91 int depth;
93 /* True if this parallel directive is nested within another. */
94 bool is_nested;
95 } omp_context;
98 /* A structure describing the main elements of a parallel loop. */
100 struct omp_for_data
102 tree v, n1, n2, step, chunk_size, for_stmt;
103 enum tree_code cond_code;
104 tree pre;
105 bool have_nowait, have_ordered;
106 enum omp_clause_schedule_kind sched_kind;
110 static splay_tree all_contexts;
111 static int parallel_nesting_level;
112 struct omp_region *root_omp_region;
114 static void scan_omp (tree *, omp_context *);
115 static void lower_omp (tree *, omp_context *);
116 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
117 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
119 /* Find an OpenMP clause of type KIND within CLAUSES. */
121 tree
122 find_omp_clause (tree clauses, enum tree_code kind)
124 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
125 if (OMP_CLAUSE_CODE (clauses) == kind)
126 return clauses;
128 return NULL_TREE;
131 /* Return true if CTX is for an omp parallel. */
133 static inline bool
134 is_parallel_ctx (omp_context *ctx)
136 return TREE_CODE (ctx->stmt) == OMP_PARALLEL;
140 /* Return true if REGION is a combined parallel+workshare region. */
142 static inline bool
143 is_combined_parallel (struct omp_region *region)
145 return region->is_combined_parallel;
149 /* Extract the header elements of parallel loop FOR_STMT and store
150 them into *FD. */
152 static void
153 extract_omp_for_data (tree for_stmt, struct omp_for_data *fd)
155 tree t, var;
157 fd->for_stmt = for_stmt;
158 fd->pre = NULL;
160 t = OMP_FOR_INIT (for_stmt);
161 gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
162 fd->v = GIMPLE_STMT_OPERAND (t, 0);
163 gcc_assert (SSA_VAR_P (fd->v));
164 gcc_assert (TREE_CODE (TREE_TYPE (fd->v)) == INTEGER_TYPE);
165 var = TREE_CODE (fd->v) == SSA_NAME ? SSA_NAME_VAR (fd->v) : fd->v;
166 fd->n1 = GIMPLE_STMT_OPERAND (t, 1);
168 t = OMP_FOR_COND (for_stmt);
169 fd->cond_code = TREE_CODE (t);
170 gcc_assert (TREE_OPERAND (t, 0) == var);
171 fd->n2 = TREE_OPERAND (t, 1);
172 switch (fd->cond_code)
174 case LT_EXPR:
175 case GT_EXPR:
176 break;
177 case LE_EXPR:
178 fd->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
179 build_int_cst (TREE_TYPE (fd->n2), 1));
180 fd->cond_code = LT_EXPR;
181 break;
182 case GE_EXPR:
183 fd->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
184 build_int_cst (TREE_TYPE (fd->n2), 1));
185 fd->cond_code = GT_EXPR;
186 break;
187 default:
188 gcc_unreachable ();
191 t = OMP_FOR_INCR (fd->for_stmt);
192 gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
193 gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == var);
194 t = GIMPLE_STMT_OPERAND (t, 1);
195 gcc_assert (TREE_OPERAND (t, 0) == var);
196 switch (TREE_CODE (t))
198 case PLUS_EXPR:
199 fd->step = TREE_OPERAND (t, 1);
200 break;
201 case MINUS_EXPR:
202 fd->step = TREE_OPERAND (t, 1);
203 fd->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (fd->step), fd->step);
204 break;
205 default:
206 gcc_unreachable ();
209 fd->have_nowait = fd->have_ordered = false;
210 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
211 fd->chunk_size = NULL_TREE;
213 for (t = OMP_FOR_CLAUSES (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
214 switch (OMP_CLAUSE_CODE (t))
216 case OMP_CLAUSE_NOWAIT:
217 fd->have_nowait = true;
218 break;
219 case OMP_CLAUSE_ORDERED:
220 fd->have_ordered = true;
221 break;
222 case OMP_CLAUSE_SCHEDULE:
223 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
224 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
225 break;
226 default:
227 break;
230 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
231 gcc_assert (fd->chunk_size == NULL);
232 else if (fd->chunk_size == NULL)
234 /* We only need to compute a default chunk size for ordered
235 static loops and dynamic loops. */
236 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC || fd->have_ordered)
237 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
238 ? integer_zero_node : integer_one_node;
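/* Illustration (not from the original sources): given

	#pragma omp for
	for (i = 0; i <= n; i++)

   the extraction above normalizes the loop so that FD->cond_code is
   LT_EXPR and FD->n2 is n + 1; likewise i >= n becomes GT_EXPR with
   n - 1, and a MINUS_EXPR increment is folded into a negated step.  */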
243 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
244 is the immediate dominator of PAR_ENTRY_BB, return true if there
245 are no data dependencies that would prevent expanding the parallel
246 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
248 When expanding a combined parallel+workshare region, the call to
249 the child function may need additional arguments in the case of
250 OMP_FOR regions. In some cases, these arguments are computed out
251 of variables passed in from the parent to the child via 'struct
252 .omp_data_s'. For instance:
254 #pragma omp parallel for schedule (guided, i * 4)
255 for (j ...)
257 Is lowered into:
259 # BLOCK 2 (PAR_ENTRY_BB)
260 .omp_data_o.i = i;
261 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
263 # BLOCK 3 (WS_ENTRY_BB)
264 .omp_data_i = &.omp_data_o;
265 D.1667 = .omp_data_i->i;
266 D.1598 = D.1667 * 4;
267 #pragma omp for schedule (guided, D.1598)
269 When we outline the parallel region, the call to the child function
270 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
271 that value is computed *after* the call site. So, in principle we
272 cannot do the transformation.
274 To see whether the code in WS_ENTRY_BB blocks the combined
275 parallel+workshare call, we collect all the variables used in the
 276    OMP_FOR header and check whether they appear on the LHS of any
277 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
278 call.
280 FIXME. If we had the SSA form built at this point, we could merely
281 hoist the code in block 3 into block 2 and be done with it. But at
282 this point we don't have dataflow information and though we could
283 hack something up here, it is really not worth the aggravation. */
285 static bool
286 workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
288 struct omp_for_data fd;
289 tree par_stmt, ws_stmt;
291 par_stmt = last_stmt (par_entry_bb);
292 ws_stmt = last_stmt (ws_entry_bb);
294 if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
295 return true;
297 gcc_assert (TREE_CODE (ws_stmt) == OMP_FOR);
299 extract_omp_for_data (ws_stmt, &fd);
301 /* FIXME. We give up too easily here. If any of these arguments
302 are not constants, they will likely involve variables that have
303 been mapped into fields of .omp_data_s for sharing with the child
304 function. With appropriate data flow, it would be possible to
305 see through this. */
306 if (!is_gimple_min_invariant (fd.n1)
307 || !is_gimple_min_invariant (fd.n2)
308 || !is_gimple_min_invariant (fd.step)
309 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
310 return false;
312 return true;
316 /* Collect additional arguments needed to emit a combined
317 parallel+workshare call. WS_STMT is the workshare directive being
318 expanded. */
320 static tree
321 get_ws_args_for (tree ws_stmt)
323 tree t;
325 if (TREE_CODE (ws_stmt) == OMP_FOR)
327 struct omp_for_data fd;
328 tree ws_args;
330 extract_omp_for_data (ws_stmt, &fd);
332 ws_args = NULL_TREE;
333 if (fd.chunk_size)
335 t = fold_convert (long_integer_type_node, fd.chunk_size);
336 ws_args = tree_cons (NULL, t, ws_args);
339 t = fold_convert (long_integer_type_node, fd.step);
340 ws_args = tree_cons (NULL, t, ws_args);
342 t = fold_convert (long_integer_type_node, fd.n2);
343 ws_args = tree_cons (NULL, t, ws_args);
345 t = fold_convert (long_integer_type_node, fd.n1);
346 ws_args = tree_cons (NULL, t, ws_args);
348 return ws_args;
350 else if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
352 /* Number of sections is equal to the number of edges from the
353 OMP_SECTIONS_SWITCH statement, except for the one to the exit
354 of the sections region. */
355 basic_block bb = single_succ (bb_for_stmt (ws_stmt));
356 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
357 t = tree_cons (NULL, t, NULL);
358 return t;
361 gcc_unreachable ();
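/* Note (an observation, not from the original sources): since tree_cons
   prepends, the WS_ARGS list built above for an OMP_FOR comes out in the
   order (n1, n2, step[, chunk_size]), which matches the trailing
   (start, end, incr, chunk) arguments that the combined
   GOMP_parallel_loop_*_start entry points take after the child function
   and data arguments (see expand_parallel_call below).  */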
365 /* Discover whether REGION is a combined parallel+workshare region. */
367 static void
368 determine_parallel_type (struct omp_region *region)
370 basic_block par_entry_bb, par_exit_bb;
371 basic_block ws_entry_bb, ws_exit_bb;
373 if (region == NULL || region->inner == NULL
374 || region->exit == NULL || region->inner->exit == NULL
375 || region->inner->cont == NULL)
376 return;
378 /* We only support parallel+for and parallel+sections. */
379 if (region->type != OMP_PARALLEL
380 || (region->inner->type != OMP_FOR
381 && region->inner->type != OMP_SECTIONS))
382 return;
384 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
385 WS_EXIT_BB -> PAR_EXIT_BB. */
386 par_entry_bb = region->entry;
387 par_exit_bb = region->exit;
388 ws_entry_bb = region->inner->entry;
389 ws_exit_bb = region->inner->exit;
391 if (single_succ (par_entry_bb) == ws_entry_bb
392 && single_succ (ws_exit_bb) == par_exit_bb
393 && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
394 && (OMP_PARALLEL_COMBINED (last_stmt (par_entry_bb))
395 || (last_and_only_stmt (ws_entry_bb)
396 && last_and_only_stmt (par_exit_bb))))
398 tree ws_stmt = last_stmt (ws_entry_bb);
400 if (region->inner->type == OMP_FOR)
402 /* If this is a combined parallel loop, we need to determine
403 whether or not to use the combined library calls. There
404 are two cases where we do not apply the transformation:
405 static loops and any kind of ordered loop. In the first
406 case, we already open code the loop so there is no need
407 to do anything else. In the latter case, the combined
408 parallel loop call would still need extra synchronization
409 to implement ordered semantics, so there would not be any
410 gain in using the combined call. */
411 tree clauses = OMP_FOR_CLAUSES (ws_stmt);
412 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
413 if (c == NULL
414 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
415 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
417 region->is_combined_parallel = false;
418 region->inner->is_combined_parallel = false;
419 return;
423 region->is_combined_parallel = true;
424 region->inner->is_combined_parallel = true;
425 region->ws_args = get_ws_args_for (ws_stmt);
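/* Illustration (not from the original sources): a region such as

	#pragma omp parallel
	#pragma omp for schedule (dynamic)
	for (i = 0; i < n; i++)
	  ...

   is marked is_combined_parallel here, so that expand_parallel_call can
   later emit a single GOMP_parallel_loop_dynamic_start call instead of
   GOMP_parallel_start followed by separate loop setup.  A static or
   ordered schedule leaves the two regions uncombined, as explained
   above.  */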
430 /* Return true if EXPR is variable sized. */
432 static inline bool
433 is_variable_sized (const_tree expr)
435 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
438 /* Return true if DECL is a reference type. */
440 static inline bool
441 is_reference (tree decl)
443 return lang_hooks.decls.omp_privatize_by_reference (decl);
 446 /* Lookup variables in the decl or field splay trees.  The "maybe" form
 447    allows the variable to not have been entered; otherwise we assert
 448    that the variable must have been entered.  */
450 static inline tree
451 lookup_decl (tree var, omp_context *ctx)
453 tree *n;
454 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
455 return *n;
458 static inline tree
459 maybe_lookup_decl (tree var, omp_context *ctx)
461 tree *n;
462 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
463 return n ? *n : NULL_TREE;
466 static inline tree
467 lookup_field (tree var, omp_context *ctx)
469 splay_tree_node n;
470 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
471 return (tree) n->value;
474 static inline tree
475 maybe_lookup_field (tree var, omp_context *ctx)
477 splay_tree_node n;
478 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
479 return n ? (tree) n->value : NULL_TREE;
482 /* Return true if DECL should be copied by pointer. SHARED_P is true
483 if DECL is to be shared. */
485 static bool
486 use_pointer_for_field (const_tree decl, bool shared_p)
488 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
489 return true;
491 /* We can only use copy-in/copy-out semantics for shared variables
492 when we know the value is not accessible from an outer scope. */
493 if (shared_p)
495 /* ??? Trivially accessible from anywhere. But why would we even
496 be passing an address in this case? Should we simply assert
497 this to be false, or should we have a cleanup pass that removes
498 these from the list of mappings? */
499 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
500 return true;
502 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
503 without analyzing the expression whether or not its location
504 is accessible to anyone else. In the case of nested parallel
505 regions it certainly may be. */
506 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
507 return true;
509 /* Do not use copy-in/copy-out for variables that have their
510 address taken. */
511 if (TREE_ADDRESSABLE (decl))
512 return true;
515 return false;
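/* For example (illustration only): an aggregate such as "int a[10]" is
   always communicated through a pointer field; a shared local scalar
   that is static/external, addressable, or has a value-expr is likewise
   passed by pointer; a plain firstprivate scalar is simply copied into
   the record by value.  */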
518 /* Create a new VAR_DECL and copy information from VAR to it. */
520 tree
521 copy_var_decl (tree var, tree name, tree type)
523 tree copy = build_decl (VAR_DECL, name, type);
525 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
526 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
527 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
528 DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
529 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
530 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
531 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
532 TREE_USED (copy) = 1;
533 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
535 return copy;
538 /* Construct a new automatic decl similar to VAR. */
540 static tree
541 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
543 tree copy = copy_var_decl (var, name, type);
545 DECL_CONTEXT (copy) = current_function_decl;
546 TREE_CHAIN (copy) = ctx->block_vars;
547 ctx->block_vars = copy;
549 return copy;
552 static tree
553 omp_copy_decl_1 (tree var, omp_context *ctx)
555 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
558 /* Build tree nodes to access the field for VAR on the receiver side. */
560 static tree
561 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
563 tree x, field = lookup_field (var, ctx);
565 /* If the receiver record type was remapped in the child function,
566 remap the field into the new record type. */
567 x = maybe_lookup_field (field, ctx);
568 if (x != NULL)
569 field = x;
571 x = build_fold_indirect_ref (ctx->receiver_decl);
572 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
573 if (by_ref)
574 x = build_fold_indirect_ref (x);
576 return x;
579 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
580 of a parallel, this is a component reference; for workshare constructs
581 this is some variable. */
583 static tree
584 build_outer_var_ref (tree var, omp_context *ctx)
586 tree x;
588 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
589 x = var;
590 else if (is_variable_sized (var))
592 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
593 x = build_outer_var_ref (x, ctx);
594 x = build_fold_indirect_ref (x);
596 else if (is_parallel_ctx (ctx))
598 bool by_ref = use_pointer_for_field (var, false);
599 x = build_receiver_ref (var, by_ref, ctx);
601 else if (ctx->outer)
602 x = lookup_decl (var, ctx->outer);
603 else if (is_reference (var))
 604     /* This can happen with orphaned constructs.  If var is a reference, it
 605        may be shared and as such valid.  */
606 x = var;
607 else
608 gcc_unreachable ();
610 if (is_reference (var))
611 x = build_fold_indirect_ref (x);
613 return x;
616 /* Build tree nodes to access the field for VAR on the sender side. */
618 static tree
619 build_sender_ref (tree var, omp_context *ctx)
621 tree field = lookup_field (var, ctx);
622 return build3 (COMPONENT_REF, TREE_TYPE (field),
623 ctx->sender_decl, field, NULL);
626 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
628 static void
629 install_var_field (tree var, bool by_ref, omp_context *ctx)
631 tree field, type;
633 gcc_assert (!splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
635 type = TREE_TYPE (var);
636 if (by_ref)
637 type = build_pointer_type (type);
639 field = build_decl (FIELD_DECL, DECL_NAME (var), type);
641 /* Remember what variable this field was created for. This does have a
642 side effect of making dwarf2out ignore this member, so for helpful
643 debugging we clear it later in delete_omp_context. */
644 DECL_ABSTRACT_ORIGIN (field) = var;
646 insert_field_into_struct (ctx->record_type, field);
648 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
649 (splay_tree_value) field);
652 static tree
653 install_var_local (tree var, omp_context *ctx)
655 tree new_var = omp_copy_decl_1 (var, ctx);
656 insert_decl_map (&ctx->cb, var, new_var);
657 return new_var;
660 /* Adjust the replacement for DECL in CTX for the new context. This means
661 copying the DECL_VALUE_EXPR, and fixing up the type. */
663 static void
664 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
666 tree new_decl, size;
668 new_decl = lookup_decl (decl, ctx);
670 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
672 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
673 && DECL_HAS_VALUE_EXPR_P (decl))
675 tree ve = DECL_VALUE_EXPR (decl);
676 walk_tree (&ve, copy_body_r, &ctx->cb, NULL);
677 SET_DECL_VALUE_EXPR (new_decl, ve);
678 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
681 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
683 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
684 if (size == error_mark_node)
685 size = TYPE_SIZE (TREE_TYPE (new_decl));
686 DECL_SIZE (new_decl) = size;
688 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
689 if (size == error_mark_node)
690 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
691 DECL_SIZE_UNIT (new_decl) = size;
695 /* The callback for remap_decl. Search all containing contexts for a
696 mapping of the variable; this avoids having to duplicate the splay
697 tree ahead of time. We know a mapping doesn't already exist in the
698 given context. Create new mappings to implement default semantics. */
700 static tree
701 omp_copy_decl (tree var, copy_body_data *cb)
703 omp_context *ctx = (omp_context *) cb;
704 tree new_var;
706 if (TREE_CODE (var) == LABEL_DECL)
708 new_var = create_artificial_label ();
709 DECL_CONTEXT (new_var) = current_function_decl;
710 insert_decl_map (&ctx->cb, var, new_var);
711 return new_var;
714 while (!is_parallel_ctx (ctx))
716 ctx = ctx->outer;
717 if (ctx == NULL)
718 return var;
719 new_var = maybe_lookup_decl (var, ctx);
720 if (new_var)
721 return new_var;
724 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
725 return var;
727 return error_mark_node;
731 /* Return the parallel region associated with STMT. */
733 /* Debugging dumps for parallel regions. */
734 void dump_omp_region (FILE *, struct omp_region *, int);
735 void debug_omp_region (struct omp_region *);
736 void debug_all_omp_regions (void);
738 /* Dump the parallel region tree rooted at REGION. */
740 void
741 dump_omp_region (FILE *file, struct omp_region *region, int indent)
743 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
744 tree_code_name[region->type]);
746 if (region->inner)
747 dump_omp_region (file, region->inner, indent + 4);
749 if (region->cont)
751 fprintf (file, "%*sbb %d: OMP_CONTINUE\n", indent, "",
752 region->cont->index);
755 if (region->exit)
756 fprintf (file, "%*sbb %d: OMP_RETURN\n", indent, "",
757 region->exit->index);
758 else
759 fprintf (file, "%*s[no exit marker]\n", indent, "");
761 if (region->next)
762 dump_omp_region (file, region->next, indent);
765 void
766 debug_omp_region (struct omp_region *region)
768 dump_omp_region (stderr, region, 0);
771 void
772 debug_all_omp_regions (void)
774 dump_omp_region (stderr, root_omp_region, 0);
778 /* Create a new parallel region starting at STMT inside region PARENT. */
780 struct omp_region *
781 new_omp_region (basic_block bb, enum tree_code type, struct omp_region *parent)
783 struct omp_region *region = xcalloc (1, sizeof (*region));
785 region->outer = parent;
786 region->entry = bb;
787 region->type = type;
789 if (parent)
791 /* This is a nested region. Add it to the list of inner
792 regions in PARENT. */
793 region->next = parent->inner;
794 parent->inner = region;
796 else
798 /* This is a toplevel region. Add it to the list of toplevel
799 regions in ROOT_OMP_REGION. */
800 region->next = root_omp_region;
801 root_omp_region = region;
804 return region;
807 /* Release the memory associated with the region tree rooted at REGION. */
809 static void
810 free_omp_region_1 (struct omp_region *region)
812 struct omp_region *i, *n;
814 for (i = region->inner; i ; i = n)
816 n = i->next;
817 free_omp_region_1 (i);
820 free (region);
823 /* Release the memory for the entire omp region tree. */
825 void
826 free_omp_regions (void)
828 struct omp_region *r, *n;
829 for (r = root_omp_region; r ; r = n)
831 n = r->next;
832 free_omp_region_1 (r);
834 root_omp_region = NULL;
838 /* Create a new context, with OUTER_CTX being the surrounding context. */
840 static omp_context *
841 new_omp_context (tree stmt, omp_context *outer_ctx)
843 omp_context *ctx = XCNEW (omp_context);
845 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
846 (splay_tree_value) ctx);
847 ctx->stmt = stmt;
849 if (outer_ctx)
851 ctx->outer = outer_ctx;
852 ctx->cb = outer_ctx->cb;
853 ctx->cb.block = NULL;
854 ctx->depth = outer_ctx->depth + 1;
856 else
858 ctx->cb.src_fn = current_function_decl;
859 ctx->cb.dst_fn = current_function_decl;
860 ctx->cb.src_node = cgraph_node (current_function_decl);
861 ctx->cb.dst_node = ctx->cb.src_node;
862 ctx->cb.src_cfun = cfun;
863 ctx->cb.copy_decl = omp_copy_decl;
864 ctx->cb.eh_region = -1;
865 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
866 ctx->depth = 1;
869 ctx->cb.decl_map = pointer_map_create ();
871 return ctx;
 874 /* Destroy an omp_context data structure.  Called through the splay tree
875 value delete callback. */
877 static void
878 delete_omp_context (splay_tree_value value)
880 omp_context *ctx = (omp_context *) value;
882 pointer_map_destroy (ctx->cb.decl_map);
884 if (ctx->field_map)
885 splay_tree_delete (ctx->field_map);
887 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
888 it produces corrupt debug information. */
889 if (ctx->record_type)
891 tree t;
892 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
893 DECL_ABSTRACT_ORIGIN (t) = NULL;
896 XDELETE (ctx);
899 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
900 context. */
902 static void
903 fixup_child_record_type (omp_context *ctx)
905 tree f, type = ctx->record_type;
907 /* ??? It isn't sufficient to just call remap_type here, because
908 variably_modified_type_p doesn't work the way we expect for
909 record types. Testing each field for whether it needs remapping
910 and creating a new record by hand works, however. */
911 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
912 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
913 break;
914 if (f)
916 tree name, new_fields = NULL;
918 type = lang_hooks.types.make_type (RECORD_TYPE);
919 name = DECL_NAME (TYPE_NAME (ctx->record_type));
920 name = build_decl (TYPE_DECL, name, type);
921 TYPE_NAME (type) = name;
923 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
925 tree new_f = copy_node (f);
926 DECL_CONTEXT (new_f) = type;
927 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
928 TREE_CHAIN (new_f) = new_fields;
929 new_fields = new_f;
931 /* Arrange to be able to look up the receiver field
932 given the sender field. */
933 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
934 (splay_tree_value) new_f);
936 TYPE_FIELDS (type) = nreverse (new_fields);
937 layout_type (type);
940 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
943 /* Instantiate decls as necessary in CTX to satisfy the data sharing
944 specified by CLAUSES. */
946 static void
947 scan_sharing_clauses (tree clauses, omp_context *ctx)
949 tree c, decl;
950 bool scan_array_reductions = false;
952 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
954 bool by_ref;
956 switch (OMP_CLAUSE_CODE (c))
958 case OMP_CLAUSE_PRIVATE:
959 decl = OMP_CLAUSE_DECL (c);
960 if (!is_variable_sized (decl))
961 install_var_local (decl, ctx);
962 break;
964 case OMP_CLAUSE_SHARED:
965 gcc_assert (is_parallel_ctx (ctx));
966 decl = OMP_CLAUSE_DECL (c);
967 gcc_assert (!is_variable_sized (decl));
968 by_ref = use_pointer_for_field (decl, true);
969 /* Global variables don't need to be copied,
970 the receiver side will use them directly. */
971 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
972 break;
973 if (! TREE_READONLY (decl)
974 || TREE_ADDRESSABLE (decl)
975 || by_ref
976 || is_reference (decl))
978 install_var_field (decl, by_ref, ctx);
979 install_var_local (decl, ctx);
980 break;
982 /* We don't need to copy const scalar vars back. */
983 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
984 goto do_private;
986 case OMP_CLAUSE_LASTPRIVATE:
987 /* Let the corresponding firstprivate clause create
988 the variable. */
989 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
990 break;
991 /* FALLTHRU */
993 case OMP_CLAUSE_FIRSTPRIVATE:
994 case OMP_CLAUSE_REDUCTION:
995 decl = OMP_CLAUSE_DECL (c);
996 do_private:
997 if (is_variable_sized (decl))
998 break;
999 else if (is_parallel_ctx (ctx)
1000 && ! is_global_var (maybe_lookup_decl_in_outer_ctx (decl,
1001 ctx)))
1003 by_ref = use_pointer_for_field (decl, false);
1004 install_var_field (decl, by_ref, ctx);
1006 install_var_local (decl, ctx);
1007 break;
1009 case OMP_CLAUSE_COPYPRIVATE:
1010 if (ctx->outer)
1011 scan_omp (&OMP_CLAUSE_DECL (c), ctx->outer);
1012 /* FALLTHRU */
1014 case OMP_CLAUSE_COPYIN:
1015 decl = OMP_CLAUSE_DECL (c);
1016 by_ref = use_pointer_for_field (decl, false);
1017 install_var_field (decl, by_ref, ctx);
1018 break;
1020 case OMP_CLAUSE_DEFAULT:
1021 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1022 break;
1024 case OMP_CLAUSE_IF:
1025 case OMP_CLAUSE_NUM_THREADS:
1026 case OMP_CLAUSE_SCHEDULE:
1027 if (ctx->outer)
1028 scan_omp (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1029 break;
1031 case OMP_CLAUSE_NOWAIT:
1032 case OMP_CLAUSE_ORDERED:
1033 break;
1035 default:
1036 gcc_unreachable ();
1040 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1042 switch (OMP_CLAUSE_CODE (c))
1044 case OMP_CLAUSE_LASTPRIVATE:
1045 /* Let the corresponding firstprivate clause create
1046 the variable. */
1047 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1048 break;
1049 /* FALLTHRU */
1051 case OMP_CLAUSE_PRIVATE:
1052 case OMP_CLAUSE_FIRSTPRIVATE:
1053 case OMP_CLAUSE_REDUCTION:
1054 decl = OMP_CLAUSE_DECL (c);
1055 if (is_variable_sized (decl))
1056 install_var_local (decl, ctx);
1057 fixup_remapped_decl (decl, ctx,
1058 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1059 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1060 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1061 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1062 scan_array_reductions = true;
1063 break;
1065 case OMP_CLAUSE_SHARED:
1066 decl = OMP_CLAUSE_DECL (c);
1067 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1068 fixup_remapped_decl (decl, ctx, false);
1069 break;
1071 case OMP_CLAUSE_COPYPRIVATE:
1072 case OMP_CLAUSE_COPYIN:
1073 case OMP_CLAUSE_DEFAULT:
1074 case OMP_CLAUSE_IF:
1075 case OMP_CLAUSE_NUM_THREADS:
1076 case OMP_CLAUSE_SCHEDULE:
1077 case OMP_CLAUSE_NOWAIT:
1078 case OMP_CLAUSE_ORDERED:
1079 break;
1081 default:
1082 gcc_unreachable ();
1086 if (scan_array_reductions)
1087 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1088 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1089 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1091 scan_omp (&OMP_CLAUSE_REDUCTION_INIT (c), ctx);
1092 scan_omp (&OMP_CLAUSE_REDUCTION_MERGE (c), ctx);
 1096 /* Create a new name for the omp child function.  Returns an identifier.  */
1098 static GTY(()) unsigned int tmp_ompfn_id_num;
1100 static tree
1101 create_omp_child_function_name (void)
1103 tree name = DECL_ASSEMBLER_NAME (current_function_decl);
1104 size_t len = IDENTIFIER_LENGTH (name);
1105 char *tmp_name, *prefix;
1107 prefix = alloca (len + sizeof ("_omp_fn"));
1108 memcpy (prefix, IDENTIFIER_POINTER (name), len);
1109 strcpy (prefix + len, "_omp_fn");
1110 #ifndef NO_DOT_IN_LABEL
1111 prefix[len] = '.';
1112 #elif !defined NO_DOLLAR_IN_LABEL
1113 prefix[len] = '$';
1114 #endif
1115 ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
1116 return get_identifier (tmp_name);
1119 /* Build a decl for the omp child function. It'll not contain a body
1120 yet, just the bare decl. */
1122 static void
1123 create_omp_child_function (omp_context *ctx)
1125 tree decl, type, name, t;
1127 name = create_omp_child_function_name ();
1128 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1130 decl = build_decl (FUNCTION_DECL, name, type);
1131 decl = lang_hooks.decls.pushdecl (decl);
1133 ctx->cb.dst_fn = decl;
1135 TREE_STATIC (decl) = 1;
1136 TREE_USED (decl) = 1;
1137 DECL_ARTIFICIAL (decl) = 1;
1138 DECL_IGNORED_P (decl) = 0;
1139 TREE_PUBLIC (decl) = 0;
1140 DECL_UNINLINABLE (decl) = 1;
1141 DECL_EXTERNAL (decl) = 0;
1142 DECL_CONTEXT (decl) = NULL_TREE;
1143 DECL_INITIAL (decl) = make_node (BLOCK);
1145 t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
1146 DECL_ARTIFICIAL (t) = 1;
1147 DECL_IGNORED_P (t) = 1;
1148 DECL_RESULT (decl) = t;
1150 t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1151 DECL_ARTIFICIAL (t) = 1;
1152 DECL_ARG_TYPE (t) = ptr_type_node;
1153 DECL_CONTEXT (t) = current_function_decl;
1154 TREE_USED (t) = 1;
1155 DECL_ARGUMENTS (decl) = t;
1156 ctx->receiver_decl = t;
1158 /* Allocate memory for the function structure. The call to
1159 allocate_struct_function clobbers CFUN, so we need to restore
1160 it afterward. */
1161 push_struct_function (decl);
1162 DECL_SOURCE_LOCATION (decl) = EXPR_LOCATION (ctx->stmt);
1163 cfun->function_end_locus = EXPR_LOCATION (ctx->stmt);
1164 pop_cfun ();
1168 /* Scan an OpenMP parallel directive. */
1170 static void
1171 scan_omp_parallel (tree *stmt_p, omp_context *outer_ctx)
1173 omp_context *ctx;
1174 tree name;
1176 /* Ignore parallel directives with empty bodies, unless there
1177 are copyin clauses. */
1178 if (optimize > 0
1179 && empty_body_p (OMP_PARALLEL_BODY (*stmt_p))
1180 && find_omp_clause (OMP_CLAUSES (*stmt_p), OMP_CLAUSE_COPYIN) == NULL)
1182 *stmt_p = build_empty_stmt ();
1183 return;
1186 ctx = new_omp_context (*stmt_p, outer_ctx);
1187 if (parallel_nesting_level > 1)
1188 ctx->is_nested = true;
1189 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1190 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1191 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1192 name = create_tmp_var_name (".omp_data_s");
1193 name = build_decl (TYPE_DECL, name, ctx->record_type);
1194 TYPE_NAME (ctx->record_type) = name;
1195 create_omp_child_function (ctx);
1196 OMP_PARALLEL_FN (*stmt_p) = ctx->cb.dst_fn;
1198 scan_sharing_clauses (OMP_PARALLEL_CLAUSES (*stmt_p), ctx);
1199 scan_omp (&OMP_PARALLEL_BODY (*stmt_p), ctx);
1201 if (TYPE_FIELDS (ctx->record_type) == NULL)
1202 ctx->record_type = ctx->receiver_decl = NULL;
1203 else
1205 layout_type (ctx->record_type);
1206 fixup_child_record_type (ctx);
1211 /* Scan an OpenMP loop directive. */
1213 static void
1214 scan_omp_for (tree *stmt_p, omp_context *outer_ctx)
1216 omp_context *ctx;
1217 tree stmt;
1219 stmt = *stmt_p;
1220 ctx = new_omp_context (stmt, outer_ctx);
1222 scan_sharing_clauses (OMP_FOR_CLAUSES (stmt), ctx);
1224 scan_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
1225 scan_omp (&OMP_FOR_INIT (stmt), ctx);
1226 scan_omp (&OMP_FOR_COND (stmt), ctx);
1227 scan_omp (&OMP_FOR_INCR (stmt), ctx);
1228 scan_omp (&OMP_FOR_BODY (stmt), ctx);
1231 /* Scan an OpenMP sections directive. */
1233 static void
1234 scan_omp_sections (tree *stmt_p, omp_context *outer_ctx)
1236 tree stmt;
1237 omp_context *ctx;
1239 stmt = *stmt_p;
1240 ctx = new_omp_context (stmt, outer_ctx);
1241 scan_sharing_clauses (OMP_SECTIONS_CLAUSES (stmt), ctx);
1242 scan_omp (&OMP_SECTIONS_BODY (stmt), ctx);
1245 /* Scan an OpenMP single directive. */
1247 static void
1248 scan_omp_single (tree *stmt_p, omp_context *outer_ctx)
1250 tree stmt = *stmt_p;
1251 omp_context *ctx;
1252 tree name;
1254 ctx = new_omp_context (stmt, outer_ctx);
1255 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1256 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1257 name = create_tmp_var_name (".omp_copy_s");
1258 name = build_decl (TYPE_DECL, name, ctx->record_type);
1259 TYPE_NAME (ctx->record_type) = name;
1261 scan_sharing_clauses (OMP_SINGLE_CLAUSES (stmt), ctx);
1262 scan_omp (&OMP_SINGLE_BODY (stmt), ctx);
1264 if (TYPE_FIELDS (ctx->record_type) == NULL)
1265 ctx->record_type = NULL;
1266 else
1267 layout_type (ctx->record_type);
1271 /* Check OpenMP nesting restrictions. */
1272 static void
1273 check_omp_nesting_restrictions (tree t, omp_context *ctx)
1275 switch (TREE_CODE (t))
1277 case OMP_FOR:
1278 case OMP_SECTIONS:
1279 case OMP_SINGLE:
1280 for (; ctx != NULL; ctx = ctx->outer)
1281 switch (TREE_CODE (ctx->stmt))
1283 case OMP_FOR:
1284 case OMP_SECTIONS:
1285 case OMP_SINGLE:
1286 case OMP_ORDERED:
1287 case OMP_MASTER:
1288 warning (0, "work-sharing region may not be closely nested inside "
1289 "of work-sharing, critical, ordered or master region");
1290 return;
1291 case OMP_PARALLEL:
1292 return;
1293 default:
1294 break;
1296 break;
1297 case OMP_MASTER:
1298 for (; ctx != NULL; ctx = ctx->outer)
1299 switch (TREE_CODE (ctx->stmt))
1301 case OMP_FOR:
1302 case OMP_SECTIONS:
1303 case OMP_SINGLE:
1304 warning (0, "master region may not be closely nested inside "
1305 "of work-sharing region");
1306 return;
1307 case OMP_PARALLEL:
1308 return;
1309 default:
1310 break;
1312 break;
1313 case OMP_ORDERED:
1314 for (; ctx != NULL; ctx = ctx->outer)
1315 switch (TREE_CODE (ctx->stmt))
1317 case OMP_CRITICAL:
1318 warning (0, "ordered region may not be closely nested inside "
1319 "of critical region");
1320 return;
1321 case OMP_FOR:
1322 if (find_omp_clause (OMP_CLAUSES (ctx->stmt),
1323 OMP_CLAUSE_ORDERED) == NULL)
1324 warning (0, "ordered region must be closely nested inside "
1325 "a loop region with an ordered clause");
1326 return;
1327 case OMP_PARALLEL:
1328 return;
1329 default:
1330 break;
1332 break;
1333 case OMP_CRITICAL:
1334 for (; ctx != NULL; ctx = ctx->outer)
1335 if (TREE_CODE (ctx->stmt) == OMP_CRITICAL
1336 && OMP_CRITICAL_NAME (t) == OMP_CRITICAL_NAME (ctx->stmt))
1338 warning (0, "critical region may not be nested inside a critical "
1339 "region with the same name");
1340 return;
1342 break;
1343 default:
1344 break;
1349 /* Callback for walk_stmts used to scan for OpenMP directives at TP. */
1351 static tree
1352 scan_omp_1 (tree *tp, int *walk_subtrees, void *data)
1354 struct walk_stmt_info *wi = data;
1355 omp_context *ctx = wi->info;
1356 tree t = *tp;
1358 if (EXPR_HAS_LOCATION (t))
1359 input_location = EXPR_LOCATION (t);
1361 /* Check the OpenMP nesting restrictions. */
1362 if (OMP_DIRECTIVE_P (t) && ctx != NULL)
1363 check_omp_nesting_restrictions (t, ctx);
1365 *walk_subtrees = 0;
1366 switch (TREE_CODE (t))
1368 case OMP_PARALLEL:
1369 parallel_nesting_level++;
1370 scan_omp_parallel (tp, ctx);
1371 parallel_nesting_level--;
1372 break;
1374 case OMP_FOR:
1375 scan_omp_for (tp, ctx);
1376 break;
1378 case OMP_SECTIONS:
1379 scan_omp_sections (tp, ctx);
1380 break;
1382 case OMP_SINGLE:
1383 scan_omp_single (tp, ctx);
1384 break;
1386 case OMP_SECTION:
1387 case OMP_MASTER:
1388 case OMP_ORDERED:
1389 case OMP_CRITICAL:
1390 ctx = new_omp_context (*tp, ctx);
1391 scan_omp (&OMP_BODY (*tp), ctx);
1392 break;
1394 case BIND_EXPR:
1396 tree var;
1397 *walk_subtrees = 1;
1399 for (var = BIND_EXPR_VARS (t); var ; var = TREE_CHAIN (var))
1400 insert_decl_map (&ctx->cb, var, var);
1402 break;
1404 case VAR_DECL:
1405 case PARM_DECL:
1406 case LABEL_DECL:
1407 case RESULT_DECL:
1408 if (ctx)
1409 *tp = remap_decl (t, &ctx->cb);
1410 break;
1412 default:
1413 if (ctx && TYPE_P (t))
1414 *tp = remap_type (t, &ctx->cb);
1415 else if (!DECL_P (t))
1416 *walk_subtrees = 1;
1417 break;
1420 return NULL_TREE;
1424 /* Scan all the statements starting at STMT_P. CTX contains context
1425 information about the OpenMP directives and clauses found during
1426 the scan. */
1428 static void
1429 scan_omp (tree *stmt_p, omp_context *ctx)
1431 location_t saved_location;
1432 struct walk_stmt_info wi;
1434 memset (&wi, 0, sizeof (wi));
1435 wi.callback = scan_omp_1;
1436 wi.info = ctx;
1437 wi.want_bind_expr = (ctx != NULL);
1438 wi.want_locations = true;
1440 saved_location = input_location;
1441 walk_stmts (&wi, stmt_p);
1442 input_location = saved_location;
1445 /* Re-gimplification and code generation routines. */
1447 /* Build a call to GOMP_barrier. */
1449 static tree
1450 build_omp_barrier (void)
1452 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
1455 /* If a context was created for STMT when it was scanned, return it. */
1457 static omp_context *
1458 maybe_lookup_ctx (tree stmt)
1460 splay_tree_node n;
1461 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
1462 return n ? (omp_context *) n->value : NULL;
1466 /* Find the mapping for DECL in CTX or the immediately enclosing
1467 context that has a mapping for DECL.
1469 If CTX is a nested parallel directive, we may have to use the decl
1470 mappings created in CTX's parent context. Suppose that we have the
 1471    following parallel nesting (variable UIDs shown for clarity):
1473 iD.1562 = 0;
1474 #omp parallel shared(iD.1562) -> outer parallel
1475 iD.1562 = iD.1562 + 1;
1477 #omp parallel shared (iD.1562) -> inner parallel
1478 iD.1562 = iD.1562 - 1;
1480 Each parallel structure will create a distinct .omp_data_s structure
1481 for copying iD.1562 in/out of the directive:
1483 outer parallel .omp_data_s.1.i -> iD.1562
1484 inner parallel .omp_data_s.2.i -> iD.1562
1486 A shared variable mapping will produce a copy-out operation before
1487 the parallel directive and a copy-in operation after it. So, in
1488 this case we would have:
1490 iD.1562 = 0;
1491 .omp_data_o.1.i = iD.1562;
1492 #omp parallel shared(iD.1562) -> outer parallel
1493 .omp_data_i.1 = &.omp_data_o.1
1494 .omp_data_i.1->i = .omp_data_i.1->i + 1;
1496 .omp_data_o.2.i = iD.1562; -> **
1497 #omp parallel shared(iD.1562) -> inner parallel
1498 .omp_data_i.2 = &.omp_data_o.2
1499 .omp_data_i.2->i = .omp_data_i.2->i - 1;
1502 ** This is a problem. The symbol iD.1562 cannot be referenced
1503 inside the body of the outer parallel region. But since we are
1504 emitting this copy operation while expanding the inner parallel
1505 directive, we need to access the CTX structure of the outer
1506 parallel directive to get the correct mapping:
1508 .omp_data_o.2.i = .omp_data_i.1->i
1510 Since there may be other workshare or parallel directives enclosing
1511 the parallel directive, it may be necessary to walk up the context
1512 parent chain. This is not a problem in general because nested
1513 parallelism happens only rarely. */
1515 static tree
1516 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
1518 tree t;
1519 omp_context *up;
1521 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
1522 t = maybe_lookup_decl (decl, up);
1524 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
1526 return t ? t : decl;
1530 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
1531 in outer contexts. */
1533 static tree
1534 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
1536 tree t = NULL;
1537 omp_context *up;
1539 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
1540 t = maybe_lookup_decl (decl, up);
1542 return t ? t : decl;
1546 /* Construct the initialization value for reduction CLAUSE. */
1548 tree
1549 omp_reduction_init (tree clause, tree type)
1551 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
1553 case PLUS_EXPR:
1554 case MINUS_EXPR:
1555 case BIT_IOR_EXPR:
1556 case BIT_XOR_EXPR:
1557 case TRUTH_OR_EXPR:
1558 case TRUTH_ORIF_EXPR:
1559 case TRUTH_XOR_EXPR:
1560 case NE_EXPR:
1561 return fold_convert (type, integer_zero_node);
1563 case MULT_EXPR:
1564 case TRUTH_AND_EXPR:
1565 case TRUTH_ANDIF_EXPR:
1566 case EQ_EXPR:
1567 return fold_convert (type, integer_one_node);
1569 case BIT_AND_EXPR:
1570 return fold_convert (type, integer_minus_one_node);
1572 case MAX_EXPR:
1573 if (SCALAR_FLOAT_TYPE_P (type))
1575 REAL_VALUE_TYPE max, min;
1576 if (HONOR_INFINITIES (TYPE_MODE (type)))
1578 real_inf (&max);
1579 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
1581 else
1582 real_maxval (&min, 1, TYPE_MODE (type));
1583 return build_real (type, min);
1585 else
1587 gcc_assert (INTEGRAL_TYPE_P (type));
1588 return TYPE_MIN_VALUE (type);
1591 case MIN_EXPR:
1592 if (SCALAR_FLOAT_TYPE_P (type))
1594 REAL_VALUE_TYPE max;
1595 if (HONOR_INFINITIES (TYPE_MODE (type)))
1596 real_inf (&max);
1597 else
1598 real_maxval (&max, 0, TYPE_MODE (type));
1599 return build_real (type, max);
1601 else
1603 gcc_assert (INTEGRAL_TYPE_P (type));
1604 return TYPE_MAX_VALUE (type);
1607 default:
1608 gcc_unreachable ();
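/* Illustration of the identity values chosen above (not from the
   original sources):

	reduction (+:x)   ->  x = 0	reduction (*:x)   ->  x = 1
	reduction (|:x)   ->  x = 0	reduction (&:x)   ->  x = ~0
	reduction (^:x)   ->  x = 0	reduction (&&:x)  ->  x = 1
	reduction (||:x)  ->  x = 0
	reduction (max:x) ->  type minimum (or -inf when honoring infinities)
	reduction (min:x) ->  type maximum (or +inf when honoring infinities)  */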
1612 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
1613 from the receiver (aka child) side and initializers for REFERENCE_TYPE
1614 private variables. Initialization statements go in ILIST, while calls
1615 to destructors go in DLIST. */
1617 static void
1618 lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
1619 omp_context *ctx)
1621 tree_stmt_iterator diter;
1622 tree c, dtor, copyin_seq, x, ptr;
1623 bool copyin_by_ref = false;
1624 bool lastprivate_firstprivate = false;
1625 int pass;
1627 *dlist = alloc_stmt_list ();
1628 diter = tsi_start (*dlist);
1629 copyin_seq = NULL;
1631 /* Do all the fixed sized types in the first pass, and the variable sized
1632 types in the second pass. This makes sure that the scalar arguments to
1633 the variable sized types are processed before we use them in the
1634 variable sized operations. */
1635 for (pass = 0; pass < 2; ++pass)
1637 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
1639 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
1640 tree var, new_var;
1641 bool by_ref;
1643 switch (c_kind)
1645 case OMP_CLAUSE_PRIVATE:
1646 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
1647 continue;
1648 break;
1649 case OMP_CLAUSE_SHARED:
1650 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
1652 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
1653 continue;
1655 case OMP_CLAUSE_FIRSTPRIVATE:
1656 case OMP_CLAUSE_COPYIN:
1657 case OMP_CLAUSE_REDUCTION:
1658 break;
1659 case OMP_CLAUSE_LASTPRIVATE:
1660 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1662 lastprivate_firstprivate = true;
1663 if (pass != 0)
1664 continue;
1666 break;
1667 default:
1668 continue;
1671 new_var = var = OMP_CLAUSE_DECL (c);
1672 if (c_kind != OMP_CLAUSE_COPYIN)
1673 new_var = lookup_decl (var, ctx);
1675 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
1677 if (pass != 0)
1678 continue;
1680 else if (is_variable_sized (var))
1682 /* For variable sized types, we need to allocate the
1683 actual storage here. Call alloca and store the
1684 result in the pointer decl that we created elsewhere. */
1685 if (pass == 0)
1686 continue;
1688 ptr = DECL_VALUE_EXPR (new_var);
1689 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
1690 ptr = TREE_OPERAND (ptr, 0);
1691 gcc_assert (DECL_P (ptr));
1693 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
1694 x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
1695 x = fold_convert (TREE_TYPE (ptr), x);
1696 x = build_gimple_modify_stmt (ptr, x);
1697 gimplify_and_add (x, ilist);
1699 else if (is_reference (var))
1701 /* For references that are being privatized for Fortran,
1702 allocate new backing storage for the new pointer
1703 variable. This allows us to avoid changing all the
1704 code that expects a pointer to something that expects
1705 a direct variable. Note that this doesn't apply to
1706 C++, since reference types are disallowed in data
1707 sharing clauses there, except for NRV optimized
1708 return values. */
1709 if (pass == 0)
1710 continue;
1712 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
1713 if (TREE_CONSTANT (x))
1715 const char *name = NULL;
1716 if (DECL_NAME (var))
1717 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
1719 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
1720 name);
1721 gimple_add_tmp_var (x);
1722 x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
1724 else
1726 x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
1727 x = fold_convert (TREE_TYPE (new_var), x);
1730 x = build_gimple_modify_stmt (new_var, x);
1731 gimplify_and_add (x, ilist);
1733 new_var = build_fold_indirect_ref (new_var);
1735 else if (c_kind == OMP_CLAUSE_REDUCTION
1736 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1738 if (pass == 0)
1739 continue;
1741 else if (pass != 0)
1742 continue;
1744 switch (OMP_CLAUSE_CODE (c))
1746 case OMP_CLAUSE_SHARED:
1747 /* Shared global vars are just accessed directly. */
1748 if (is_global_var (new_var))
1749 break;
1750 /* Set up the DECL_VALUE_EXPR for shared variables now. This
1751 needs to be delayed until after fixup_child_record_type so
1752 that we get the correct type during the dereference. */
1753 by_ref = use_pointer_for_field (var, true);
1754 x = build_receiver_ref (var, by_ref, ctx);
1755 SET_DECL_VALUE_EXPR (new_var, x);
1756 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
1758 /* ??? If VAR is not passed by reference, and the variable
1759 hasn't been initialized yet, then we'll get a warning for
1760 the store into the omp_data_s structure. Ideally, we'd be
1761 able to notice this and not store anything at all, but
1762 we're generating code too early. Suppress the warning. */
1763 if (!by_ref)
1764 TREE_NO_WARNING (var) = 1;
1765 break;
1767 case OMP_CLAUSE_LASTPRIVATE:
1768 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1769 break;
1770 /* FALLTHRU */
1772 case OMP_CLAUSE_PRIVATE:
1773 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var);
1774 if (x)
1775 gimplify_and_add (x, ilist);
1776 /* FALLTHRU */
1778 do_dtor:
1779 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
1780 if (x)
1782 dtor = x;
1783 gimplify_stmt (&dtor);
1784 tsi_link_before (&diter, dtor, TSI_SAME_STMT);
1786 break;
1788 case OMP_CLAUSE_FIRSTPRIVATE:
1789 x = build_outer_var_ref (var, ctx);
1790 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
1791 gimplify_and_add (x, ilist);
1792 goto do_dtor;
1793 break;
1795 case OMP_CLAUSE_COPYIN:
1796 by_ref = use_pointer_for_field (var, false);
1797 x = build_receiver_ref (var, by_ref, ctx);
1798 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
1799 append_to_statement_list (x, &copyin_seq);
1800 copyin_by_ref |= by_ref;
1801 break;
1803 case OMP_CLAUSE_REDUCTION:
1804 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1806 gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c), ilist);
1807 OMP_CLAUSE_REDUCTION_INIT (c) = NULL;
1809 else
1811 x = omp_reduction_init (c, TREE_TYPE (new_var));
1812 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
1813 x = build_gimple_modify_stmt (new_var, x);
1814 gimplify_and_add (x, ilist);
1816 break;
1818 default:
1819 gcc_unreachable ();
1824 /* The copyin sequence is not to be executed by the main thread, since
1825 that would result in self-copies. Perhaps not visible to scalars,
1826 but it certainly is to C++ operator=. */
1827 if (copyin_seq)
1829 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
1830 x = build2 (NE_EXPR, boolean_type_node, x,
1831 build_int_cst (TREE_TYPE (x), 0));
1832 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
1833 gimplify_and_add (x, ilist);
1836 /* If any copyin variable is passed by reference, we must ensure the
1837 master thread doesn't modify it before it is copied over in all
1838 threads. Similarly for variables in both firstprivate and
1839 lastprivate clauses we need to ensure the lastprivate copying
1840 happens after firstprivate copying in all threads. */
1841 if (copyin_by_ref || lastprivate_firstprivate)
1842 gimplify_and_add (build_omp_barrier (), ilist);
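/* Illustration (not from the original sources, names are made up): for

	#pragma omp parallel firstprivate (x) copyin (t)

   the receiver-side code produced here looks roughly like

	x.1 = .omp_data_i->x;			<- via the copy constructor hook
	if (__builtin_omp_get_thread_num () != 0)
	  t = .omp_data_i->t;			<- copyin, skipped in the master

   followed by a barrier when any copyin variable is passed by
   reference.  */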
1846 /* Generate code to implement the LASTPRIVATE clauses. This is used for
1847 both parallel and workshare constructs. PREDICATE may be NULL if it's
1848 always true. */
1850 static void
1851 lower_lastprivate_clauses (tree clauses, tree predicate, tree *stmt_list,
1852 omp_context *ctx)
1854 tree sub_list, x, c;
1856 /* Early exit if there are no lastprivate clauses. */
1857 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
1858 if (clauses == NULL)
1860 /* If this was a workshare clause, see if it had been combined
1861 with its parallel. In that case, look for the clauses on the
1862 parallel statement itself. */
1863 if (is_parallel_ctx (ctx))
1864 return;
1866 ctx = ctx->outer;
1867 if (ctx == NULL || !is_parallel_ctx (ctx))
1868 return;
1870 clauses = find_omp_clause (OMP_PARALLEL_CLAUSES (ctx->stmt),
1871 OMP_CLAUSE_LASTPRIVATE);
1872 if (clauses == NULL)
1873 return;
1876 sub_list = alloc_stmt_list ();
1878 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
1880 tree var, new_var;
1882 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LASTPRIVATE)
1883 continue;
1885 var = OMP_CLAUSE_DECL (c);
1886 new_var = lookup_decl (var, ctx);
1888 x = build_outer_var_ref (var, ctx);
1889 if (is_reference (var))
1890 new_var = build_fold_indirect_ref (new_var);
1891 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
1892 append_to_statement_list (x, &sub_list);
1895 if (predicate)
1896 x = build3 (COND_EXPR, void_type_node, predicate, sub_list, NULL);
1897 else
1898 x = sub_list;
1900 gimplify_and_add (x, stmt_list);
1904 /* Generate code to implement the REDUCTION clauses. */
1906 static void
1907 lower_reduction_clauses (tree clauses, tree *stmt_list, omp_context *ctx)
1909 tree sub_list = NULL, x, c;
1910 int count = 0;
1912 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
1913 update in that case, otherwise use a lock. */
1914 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
1915 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
1917 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1919 /* Never use OMP_ATOMIC for array reductions. */
1920 count = -1;
1921 break;
1923 count++;
1926 if (count == 0)
1927 return;
1929 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
1931 tree var, ref, new_var;
1932 enum tree_code code;
1934 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
1935 continue;
1937 var = OMP_CLAUSE_DECL (c);
1938 new_var = lookup_decl (var, ctx);
1939 if (is_reference (var))
1940 new_var = build_fold_indirect_ref (new_var);
1941 ref = build_outer_var_ref (var, ctx);
1942 code = OMP_CLAUSE_REDUCTION_CODE (c);
1944 /* reduction(-:var) sums up the partial results, so it acts
1945 identically to reduction(+:var). */
1946 if (code == MINUS_EXPR)
1947 code = PLUS_EXPR;
1949 if (count == 1)
1951 tree addr = build_fold_addr_expr (ref);
1953 addr = save_expr (addr);
1954 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
1955 x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
1956 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
1957 gimplify_and_add (x, stmt_list);
1958 return;
1961 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1963 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
1965 if (is_reference (var))
1966 ref = build_fold_addr_expr (ref);
1967 SET_DECL_VALUE_EXPR (placeholder, ref);
1968 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
1969 gimplify_and_add (OMP_CLAUSE_REDUCTION_MERGE (c), &sub_list);
1970 OMP_CLAUSE_REDUCTION_MERGE (c) = NULL;
1971 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
1973 else
1975 x = build2 (code, TREE_TYPE (ref), ref, new_var);
1976 ref = build_outer_var_ref (var, ctx);
1977 x = build_gimple_modify_stmt (ref, x);
1978 append_to_statement_list (x, &sub_list);
1982 x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
1983 gimplify_and_add (x, stmt_list);
1985 gimplify_and_add (sub_list, stmt_list);
1987 x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
1988 gimplify_and_add (x, stmt_list);
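/* Illustration (not from the original sources, names are made up): a
   single scalar clause such as reduction (+:s) is merged with one atomic
   update, roughly

	#pragma omp atomic
	  s_shared = s_shared + s_private;

   whereas two or more reduction clauses, or an array/placeholder
   reduction, have their merges wrapped in GOMP_atomic_start () and
   GOMP_atomic_end ().  */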
1992 /* Generate code to implement the COPYPRIVATE clauses. */
1994 static void
1995 lower_copyprivate_clauses (tree clauses, tree *slist, tree *rlist,
1996 omp_context *ctx)
1998 tree c;
2000 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2002 tree var, ref, x;
2003 bool by_ref;
2005 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2006 continue;
2008 var = OMP_CLAUSE_DECL (c);
2009 by_ref = use_pointer_for_field (var, false);
2011 ref = build_sender_ref (var, ctx);
2012 x = lookup_decl_in_outer_ctx (var, ctx);
2013 x = by_ref ? build_fold_addr_expr (x) : x;
2014 x = build_gimple_modify_stmt (ref, x);
2015 gimplify_and_add (x, slist);
2017 ref = build_receiver_ref (var, by_ref, ctx);
2018 if (is_reference (var))
2020 ref = build_fold_indirect_ref (ref);
2021 var = build_fold_indirect_ref (var);
2023 x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
2024 gimplify_and_add (x, rlist);
2029 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2030 and REDUCTION from the sender (aka parent) side. */
2032 static void
2033 lower_send_clauses (tree clauses, tree *ilist, tree *olist, omp_context *ctx)
2035 tree c;
2037 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2039 tree val, ref, x, var;
2040 bool by_ref, do_in = false, do_out = false;
2042 switch (OMP_CLAUSE_CODE (c))
2044 case OMP_CLAUSE_FIRSTPRIVATE:
2045 case OMP_CLAUSE_COPYIN:
2046 case OMP_CLAUSE_LASTPRIVATE:
2047 case OMP_CLAUSE_REDUCTION:
2048 break;
2049 default:
2050 continue;
2053 val = OMP_CLAUSE_DECL (c);
2054 var = lookup_decl_in_outer_ctx (val, ctx);
2056 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2057 && is_global_var (var))
2058 continue;
2059 if (is_variable_sized (val))
2060 continue;
2061 by_ref = use_pointer_for_field (val, false);
2063 switch (OMP_CLAUSE_CODE (c))
2065 case OMP_CLAUSE_FIRSTPRIVATE:
2066 case OMP_CLAUSE_COPYIN:
2067 do_in = true;
2068 break;
2070 case OMP_CLAUSE_LASTPRIVATE:
2071 if (by_ref || is_reference (val))
2073 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2074 continue;
2075 do_in = true;
2077 else
2078 do_out = true;
2079 break;
2081 case OMP_CLAUSE_REDUCTION:
2082 do_in = true;
2083 do_out = !(by_ref || is_reference (val));
2084 break;
2086 default:
2087 gcc_unreachable ();
2090 if (do_in)
2092 ref = build_sender_ref (val, ctx);
2093 x = by_ref ? build_fold_addr_expr (var) : var;
2094 x = build_gimple_modify_stmt (ref, x);
2095 gimplify_and_add (x, ilist);
2098 if (do_out)
2100 ref = build_sender_ref (val, ctx);
2101 x = build_gimple_modify_stmt (var, ref);
2102 gimplify_and_add (x, olist);
2107 /* Generate code to implement SHARED from the sender (aka parent) side.
2108 This is trickier, since OMP_PARALLEL_CLAUSES doesn't list things that
2109 got automatically shared. */
2111 static void
2112 lower_send_shared_vars (tree *ilist, tree *olist, omp_context *ctx)
2114 tree var, ovar, nvar, f, x;
2116 if (ctx->record_type == NULL)
2117 return;
2119 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
2121 ovar = DECL_ABSTRACT_ORIGIN (f);
2122 nvar = maybe_lookup_decl (ovar, ctx);
2123 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2124 continue;
2126 /* If CTX is a nested parallel directive, find the immediately
2127 enclosing parallel or workshare construct that contains a
2128 mapping for OVAR. */
2129 var = lookup_decl_in_outer_ctx (ovar, ctx);
2131 if (use_pointer_for_field (ovar, true))
2133 x = build_sender_ref (ovar, ctx);
2134 var = build_fold_addr_expr (var);
2135 x = build_gimple_modify_stmt (x, var);
2136 gimplify_and_add (x, ilist);
2138 else
2140 x = build_sender_ref (ovar, ctx);
2141 x = build_gimple_modify_stmt (x, var);
2142 gimplify_and_add (x, ilist);
2144 x = build_sender_ref (ovar, ctx);
2145 x = build_gimple_modify_stmt (var, x);
2146 gimplify_and_add (x, olist);
2151 /* Build the function calls to GOMP_parallel_start etc to actually
2152 generate the parallel operation. REGION is the parallel region
2153 being expanded. BB is the block where the code is to be inserted. WS_ARGS
2154 will be set if this is a call to a combined parallel+workshare
2155 construct; it contains the list of additional arguments needed by
2156 the workshare construct. */
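/* The sequence emitted below is essentially

     GOMP_parallel_start (child_fn, &.OMP_DATA_O, num_threads);
     child_fn (&.OMP_DATA_O);
     GOMP_parallel_end ();

   where NUM_THREADS is zero when the runtime should pick, a null data
   pointer is passed when the region has no data record, and one of the
   GOMP_parallel_loop_*_start or GOMP_parallel_sections_start entry
   points is used instead for combined parallel+workshare regions.  */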
2158 static void
2159 expand_parallel_call (struct omp_region *region, basic_block bb,
2160 tree entry_stmt, tree ws_args)
2162 tree t, t1, t2, val, cond, c, clauses;
2163 block_stmt_iterator si;
2164 int start_ix;
2166 clauses = OMP_PARALLEL_CLAUSES (entry_stmt);
2168 /* Determine what flavor of GOMP_parallel_start we will be
2169 emitting. */
2170 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2171 if (is_combined_parallel (region))
2173 switch (region->inner->type)
2175 case OMP_FOR:
2176 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2177 + region->inner->sched_kind;
2178 break;
2179 case OMP_SECTIONS:
2180 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2181 break;
2182 default:
2183 gcc_unreachable ();
2187 /* By default, the value of NUM_THREADS is zero (selected at run time)
2188 and there is no conditional. */
2189 cond = NULL_TREE;
2190 val = build_int_cst (unsigned_type_node, 0);
2192 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2193 if (c)
2194 cond = OMP_CLAUSE_IF_EXPR (c);
2196 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2197 if (c)
2198 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2200 /* Ensure 'val' is of the correct type. */
2201 val = fold_convert (unsigned_type_node, val);
2203 /* If we found the clause 'if (cond)', build either
2204 (cond != 0) or (cond ? val : 1u). */
2205 if (cond)
2207 block_stmt_iterator si;
2209 cond = gimple_boolify (cond);
2211 if (integer_zerop (val))
2212 val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
2213 build_int_cst (TREE_TYPE (cond), 0));
2214 else
2216 basic_block cond_bb, then_bb, else_bb;
2217 edge e, e_then, e_else;
2218 tree t, tmp_then, tmp_else, tmp_join, tmp_var;
2220 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2221 if (gimple_in_ssa_p (cfun))
2223 tmp_then = make_ssa_name (tmp_var, NULL_TREE);
2224 tmp_else = make_ssa_name (tmp_var, NULL_TREE);
2225 tmp_join = make_ssa_name (tmp_var, NULL_TREE);
2227 else
2229 tmp_then = tmp_var;
2230 tmp_else = tmp_var;
2231 tmp_join = tmp_var;
2234 e = split_block (bb, NULL);
2235 cond_bb = e->src;
2236 bb = e->dest;
2237 remove_edge (e);
2239 then_bb = create_empty_bb (cond_bb);
2240 else_bb = create_empty_bb (then_bb);
2241 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2242 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
2244 t = build3 (COND_EXPR, void_type_node,
2245 cond, NULL_TREE, NULL_TREE);
2247 si = bsi_start (cond_bb);
2248 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2250 si = bsi_start (then_bb);
2251 t = build_gimple_modify_stmt (tmp_then, val);
2252 if (gimple_in_ssa_p (cfun))
2253 SSA_NAME_DEF_STMT (tmp_then) = t;
2254 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2256 si = bsi_start (else_bb);
2257 t = build_gimple_modify_stmt (tmp_else,
2258 build_int_cst (unsigned_type_node, 1));
2259 if (gimple_in_ssa_p (cfun))
2260 SSA_NAME_DEF_STMT (tmp_else) = t;
2261 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2263 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
2264 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
2265 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
2266 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
2268 if (gimple_in_ssa_p (cfun))
2270 tree phi = create_phi_node (tmp_join, bb);
2271 SSA_NAME_DEF_STMT (tmp_join) = phi;
2272 add_phi_arg (phi, tmp_then, e_then);
2273 add_phi_arg (phi, tmp_else, e_else);
2276 val = tmp_join;
2279 si = bsi_start (bb);
2280 val = force_gimple_operand_bsi (&si, val, true, NULL_TREE,
2281 false, BSI_CONTINUE_LINKING);
2284 si = bsi_last (bb);
2285 t = OMP_PARALLEL_DATA_ARG (entry_stmt);
2286 if (t == NULL)
2287 t1 = null_pointer_node;
2288 else
2289 t1 = build_fold_addr_expr (t);
2290 t2 = build_fold_addr_expr (OMP_PARALLEL_FN (entry_stmt));
2292 if (ws_args)
2294 tree args = tree_cons (NULL, t2,
2295 tree_cons (NULL, t1,
2296 tree_cons (NULL, val, ws_args)));
2297 t = build_function_call_expr (built_in_decls[start_ix], args);
2299 else
2300 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
2302 force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2303 false, BSI_CONTINUE_LINKING);
2305 t = OMP_PARALLEL_DATA_ARG (entry_stmt);
2306 if (t == NULL)
2307 t = null_pointer_node;
2308 else
2309 t = build_fold_addr_expr (t);
2310 t = build_call_expr (OMP_PARALLEL_FN (entry_stmt), 1, t);
2311 force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2312 false, BSI_CONTINUE_LINKING);
2314 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
2315 force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2316 false, BSI_CONTINUE_LINKING);
2320 /* If exceptions are enabled, wrap *STMT_P in a MUST_NOT_THROW catch
2321 handler. This prevents programs from violating the structured
2322 block semantics with throws. */
2324 static void
2325 maybe_catch_exception (tree *stmt_p)
2327 tree f, t;
2329 if (!flag_exceptions)
2330 return;
2332 if (lang_protect_cleanup_actions)
2333 t = lang_protect_cleanup_actions ();
2334 else
2335 t = build_call_expr (built_in_decls[BUILT_IN_TRAP], 0);
2336 f = build2 (EH_FILTER_EXPR, void_type_node, NULL, NULL);
2337 EH_FILTER_MUST_NOT_THROW (f) = 1;
2338 gimplify_and_add (t, &EH_FILTER_FAILURE (f));
2340 t = build2 (TRY_CATCH_EXPR, void_type_node, *stmt_p, NULL);
2341 append_to_statement_list (f, &TREE_OPERAND (t, 1));
2343 *stmt_p = NULL;
2344 append_to_statement_list (t, stmt_p);
2347 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
2349 static tree
2350 list2chain (tree list)
2352 tree t;
2354 for (t = list; t; t = TREE_CHAIN (t))
2356 tree var = TREE_VALUE (t);
2357 if (TREE_CHAIN (t))
2358 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
2359 else
2360 TREE_CHAIN (var) = NULL_TREE;
2363 return list ? TREE_VALUE (list) : NULL_TREE;
2367 /* Remove barriers in REGION->EXIT's block. Note that this is only
2368 valid for OMP_PARALLEL regions. Since the end of a parallel region
2369 is an implicit barrier, any workshare inside the OMP_PARALLEL that
2370 left a barrier at the end of the OMP_PARALLEL region can now be
2371 removed. */
2373 static void
2374 remove_exit_barrier (struct omp_region *region)
2376 block_stmt_iterator si;
2377 basic_block exit_bb;
2378 edge_iterator ei;
2379 edge e;
2380 tree t;
2382 exit_bb = region->exit;
2384 /* If the parallel region doesn't return, we don't have REGION->EXIT
2385 block at all. */
2386 if (! exit_bb)
2387 return;
2389 /* The last insn in the block will be the parallel's OMP_RETURN. The
2390 workshare's OMP_RETURN will be in a preceding block. The kinds of
2391 statements that can appear in between are extremely limited -- no
2392 memory operations at all. Here, we allow nothing at all, so the
2393 only thing we allow to precede this OMP_RETURN is a label. */
2394 si = bsi_last (exit_bb);
2395 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
2396 bsi_prev (&si);
2397 if (!bsi_end_p (si) && TREE_CODE (bsi_stmt (si)) != LABEL_EXPR)
2398 return;
2400 FOR_EACH_EDGE (e, ei, exit_bb->preds)
2402 si = bsi_last (e->src);
2403 if (bsi_end_p (si))
2404 continue;
2405 t = bsi_stmt (si);
2406 if (TREE_CODE (t) == OMP_RETURN)
2407 OMP_RETURN_NOWAIT (t) = 1;
2411 static void
2412 remove_exit_barriers (struct omp_region *region)
2414 if (region->type == OMP_PARALLEL)
2415 remove_exit_barrier (region);
2417 if (region->inner)
2419 region = region->inner;
2420 remove_exit_barriers (region);
2421 while (region->next)
2423 region = region->next;
2424 remove_exit_barriers (region);
2429 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
2430 calls. These can't be declared as const functions, but
2431 within one parallel body they are constant, so they can be
2432 transformed there into __builtin_omp_get_{thread_num,num_threads} (),
2433 which are declared const. */
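/* For example, in

     #pragma omp parallel
     for (i = 0; i < omp_get_num_threads (); i++)
       body (i);

   the loop bound becomes a call to the const builtin, which later passes
   may then treat as loop invariant.  */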
2435 static void
2436 optimize_omp_library_calls (void)
2438 basic_block bb;
2439 block_stmt_iterator bsi;
2440 tree thr_num_id
2441 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
2442 tree num_thr_id
2443 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
2445 FOR_EACH_BB (bb)
2446 for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
2448 tree stmt = bsi_stmt (bsi);
2449 tree call = get_call_expr_in (stmt);
2450 tree decl;
2452 if (call
2453 && (decl = get_callee_fndecl (call))
2454 && DECL_EXTERNAL (decl)
2455 && TREE_PUBLIC (decl)
2456 && DECL_INITIAL (decl) == NULL)
2458 tree built_in;
2460 if (DECL_NAME (decl) == thr_num_id)
2461 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
2462 else if (DECL_NAME (decl) == num_thr_id)
2463 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
2464 else
2465 continue;
2467 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
2468 || call_expr_nargs (call) != 0)
2469 continue;
2471 if (flag_exceptions && !TREE_NOTHROW (decl))
2472 continue;
2474 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
2475 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
2476 != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
2477 continue;
2479 CALL_EXPR_FN (call) = build_fold_addr_expr (built_in);
2484 /* Expand the OpenMP parallel directive starting at REGION. */
2486 static void
2487 expand_omp_parallel (struct omp_region *region)
2489 basic_block entry_bb, exit_bb, new_bb;
2490 struct function *child_cfun;
2491 tree child_fn, block, t, ws_args;
2492 block_stmt_iterator si;
2493 tree entry_stmt;
2494 edge e;
2496 entry_stmt = last_stmt (region->entry);
2497 child_fn = OMP_PARALLEL_FN (entry_stmt);
2498 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
2500 entry_bb = region->entry;
2501 exit_bb = region->exit;
2503 if (is_combined_parallel (region))
2504 ws_args = region->ws_args;
2505 else
2506 ws_args = NULL_TREE;
2508 if (child_cfun->cfg)
2510 /* Due to inlining, it may happen that we have already outlined
2511 the region, in which case all we need to do is make the
2512 sub-graph unreachable and emit the parallel call. */
2513 edge entry_succ_e, exit_succ_e;
2514 block_stmt_iterator si;
2516 entry_succ_e = single_succ_edge (entry_bb);
2518 si = bsi_last (entry_bb);
2519 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_PARALLEL);
2520 bsi_remove (&si, true);
2522 new_bb = entry_bb;
2523 if (exit_bb)
2525 exit_succ_e = single_succ_edge (exit_bb);
2526 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
2528 remove_edge_and_dominated_blocks (entry_succ_e);
2530 else
2532 /* If the parallel region needs data sent from the parent
2533 function, then the very first statement (except possible
2534 tree profile counter updates) of the parallel body
2535 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
2536 &.OMP_DATA_O is passed as an argument to the child function,
2537 we need to replace it with the argument as seen by the child
2538 function.
2540 In most cases, this will end up being the identity assignment
2541 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
2542 a function call that has been inlined, the original PARM_DECL
2543 .OMP_DATA_I may have been converted into a different local
2544 variable, in which case we need to keep the assignment. */
2545 if (OMP_PARALLEL_DATA_ARG (entry_stmt))
2547 basic_block entry_succ_bb = single_succ (entry_bb);
2548 block_stmt_iterator si;
2549 tree parcopy_stmt = NULL_TREE, arg, narg;
2551 for (si = bsi_start (entry_succ_bb); ; bsi_next (&si))
2553 tree stmt, arg;
2555 gcc_assert (!bsi_end_p (si));
2556 stmt = bsi_stmt (si);
2557 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
2558 continue;
2560 arg = GIMPLE_STMT_OPERAND (stmt, 1);
2561 STRIP_NOPS (arg);
2562 if (TREE_CODE (arg) == ADDR_EXPR
2563 && TREE_OPERAND (arg, 0)
2564 == OMP_PARALLEL_DATA_ARG (entry_stmt))
2566 parcopy_stmt = stmt;
2567 break;
2571 gcc_assert (parcopy_stmt != NULL_TREE);
2572 arg = DECL_ARGUMENTS (child_fn);
2574 if (!gimple_in_ssa_p (cfun))
2576 if (GIMPLE_STMT_OPERAND (parcopy_stmt, 0) == arg)
2577 bsi_remove (&si, true);
2578 else
2579 GIMPLE_STMT_OPERAND (parcopy_stmt, 1) = arg;
2581 else
2583 /* If we are in ssa form, we must load the value from the default
2584 definition of the argument. That should not be defined now,
2585 since the argument is not used uninitialized. */
2586 gcc_assert (gimple_default_def (cfun, arg) == NULL);
2587 narg = make_ssa_name (arg, build_empty_stmt ());
2588 set_default_def (arg, narg);
2589 GIMPLE_STMT_OPERAND (parcopy_stmt, 1) = narg;
2590 update_stmt (parcopy_stmt);
2594 /* Declare local variables needed in CHILD_CFUN. */
2595 block = DECL_INITIAL (child_fn);
2596 BLOCK_VARS (block) = list2chain (child_cfun->unexpanded_var_list);
2597 DECL_SAVED_TREE (child_fn) = bb_stmt_list (single_succ (entry_bb));
2599 /* Reset DECL_CONTEXT on function arguments. */
2600 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
2601 DECL_CONTEXT (t) = child_fn;
2603 /* Split ENTRY_BB at OMP_PARALLEL so that it can be moved to the
2604 child function. */
2605 si = bsi_last (entry_bb);
2606 t = bsi_stmt (si);
2607 gcc_assert (t && TREE_CODE (t) == OMP_PARALLEL);
2608 bsi_remove (&si, true);
2609 e = split_block (entry_bb, t);
2610 entry_bb = e->dest;
2611 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
2613 /* Convert OMP_RETURN into a RETURN_EXPR. */
2614 if (exit_bb)
2616 si = bsi_last (exit_bb);
2617 gcc_assert (!bsi_end_p (si)
2618 && TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
2619 t = build1 (RETURN_EXPR, void_type_node, NULL);
2620 bsi_insert_after (&si, t, BSI_SAME_STMT);
2621 bsi_remove (&si, true);
2624 /* Move the parallel region into CHILD_CFUN. */
2626 if (gimple_in_ssa_p (cfun))
2628 push_cfun (child_cfun);
2629 init_tree_ssa ();
2630 init_ssa_operands ();
2631 cfun->gimple_df->in_ssa_p = true;
2632 pop_cfun ();
2634 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb);
2635 if (exit_bb)
2636 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
2638 /* Inform the callgraph about the new function. */
2639 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
2640 = cfun->curr_properties;
2641 cgraph_add_new_function (child_fn, true);
2643 /* Fix the callgraph edges for child_cfun. Those for cfun will be
2644 fixed in a following pass. */
2645 push_cfun (child_cfun);
2646 if (optimize)
2647 optimize_omp_library_calls ();
2648 rebuild_cgraph_edges ();
2649 pop_cfun ();
2652 /* Emit a library call to launch the children threads. */
2653 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
2654 update_ssa (TODO_update_ssa_only_virtuals);
2658 /* A subroutine of expand_omp_for. Generate code for a parallel
2659 loop with any schedule. Given parameters:
2661 for (V = N1; V cond N2; V += STEP) BODY;
2663 where COND is "<" or ">", we generate pseudocode
2665 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
2666 if (more) goto L0; else goto L3;
2667 L0:
2668 V = istart0;
2669 iend = iend0;
2670 L1:
2671 BODY;
2672 V += STEP;
2673 if (V cond iend) goto L1; else goto L2;
2674 L2:
2675 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
2676 L3:
2678 If this is a combined omp parallel loop, instead of the call to
2679 GOMP_loop_foo_start, we call GOMP_loop_foo_next. */
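/* In the combined case, the GOMP_parallel_loop_*_start call emitted by
   expand_parallel_call has already set up the iteration space, so only
   the next function is needed here.  */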
2681 static void
2682 expand_omp_for_generic (struct omp_region *region,
2683 struct omp_for_data *fd,
2684 enum built_in_function start_fn,
2685 enum built_in_function next_fn)
2687 tree type, istart0, iend0, iend, phi;
2688 tree t, vmain, vback;
2689 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb;
2690 basic_block l2_bb = NULL, l3_bb = NULL;
2691 block_stmt_iterator si;
2692 bool in_combined_parallel = is_combined_parallel (region);
2693 bool broken_loop = region->cont == NULL;
2694 edge e, ne;
2696 gcc_assert (!broken_loop || !in_combined_parallel);
2698 type = TREE_TYPE (fd->v);
2700 istart0 = create_tmp_var (long_integer_type_node, ".istart0");
2701 iend0 = create_tmp_var (long_integer_type_node, ".iend0");
2702 TREE_ADDRESSABLE (istart0) = 1;
2703 TREE_ADDRESSABLE (iend0) = 1;
2704 if (gimple_in_ssa_p (cfun))
2706 add_referenced_var (istart0);
2707 add_referenced_var (iend0);
2710 entry_bb = region->entry;
2711 cont_bb = region->cont;
2712 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
2713 gcc_assert (broken_loop
2714 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
2715 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
2716 l1_bb = single_succ (l0_bb);
2717 if (!broken_loop)
2719 l2_bb = create_empty_bb (cont_bb);
2720 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
2721 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
2723 else
2724 l2_bb = NULL;
2725 l3_bb = BRANCH_EDGE (entry_bb)->dest;
2726 exit_bb = region->exit;
2728 si = bsi_last (entry_bb);
2729 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
2730 if (in_combined_parallel)
2732 /* In a combined parallel loop, emit a call to
2733 GOMP_loop_foo_next. */
2734 t = build_call_expr (built_in_decls[next_fn], 2,
2735 build_fold_addr_expr (istart0),
2736 build_fold_addr_expr (iend0));
2738 else
2740 tree t0, t1, t2, t3, t4;
2741 /* If this is not a combined parallel loop, emit a call to
2742 GOMP_loop_foo_start in ENTRY_BB. */
2743 t4 = build_fold_addr_expr (iend0);
2744 t3 = build_fold_addr_expr (istart0);
2745 t2 = fold_convert (long_integer_type_node, fd->step);
2746 t1 = fold_convert (long_integer_type_node, fd->n2);
2747 t0 = fold_convert (long_integer_type_node, fd->n1);
2748 if (fd->chunk_size)
2750 t = fold_convert (long_integer_type_node, fd->chunk_size);
2751 t = build_call_expr (built_in_decls[start_fn], 6,
2752 t0, t1, t2, t, t3, t4);
2754 else
2755 t = build_call_expr (built_in_decls[start_fn], 5,
2756 t0, t1, t2, t3, t4);
2758 t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2759 true, BSI_SAME_STMT);
2760 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
2761 bsi_insert_after (&si, t, BSI_SAME_STMT);
2763 /* V may be used outside of the loop (e.g., to handle lastprivate clause).
2764 If this is the case, its value is undefined if the loop is not entered
2765 at all. To handle this case, set its initial value to N1. */
2766 if (gimple_in_ssa_p (cfun))
2768 e = find_edge (entry_bb, l3_bb);
2769 for (phi = phi_nodes (l3_bb); phi; phi = PHI_CHAIN (phi))
2770 if (PHI_ARG_DEF_FROM_EDGE (phi, e) == fd->v)
2771 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), fd->n1);
2773 else
2775 t = build_gimple_modify_stmt (fd->v, fd->n1);
2776 bsi_insert_before (&si, t, BSI_SAME_STMT);
2779 /* Remove the OMP_FOR statement. */
2780 bsi_remove (&si, true);
2782 /* Iteration setup for sequential loop goes in L0_BB. */
2783 si = bsi_start (l0_bb);
2784 t = fold_convert (type, istart0);
2785 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
2786 false, BSI_CONTINUE_LINKING);
2787 t = build_gimple_modify_stmt (fd->v, t);
2788 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2789 if (gimple_in_ssa_p (cfun))
2790 SSA_NAME_DEF_STMT (fd->v) = t;
2792 t = fold_convert (type, iend0);
2793 iend = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2794 false, BSI_CONTINUE_LINKING);
2796 if (!broken_loop)
2798 /* Code to control the increment and predicate for the sequential
2799 loop goes in the CONT_BB. */
2800 si = bsi_last (cont_bb);
2801 t = bsi_stmt (si);
2802 gcc_assert (TREE_CODE (t) == OMP_CONTINUE);
2803 vmain = TREE_OPERAND (t, 1);
2804 vback = TREE_OPERAND (t, 0);
2806 t = fold_build2 (PLUS_EXPR, type, vmain, fd->step);
2807 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
2808 true, BSI_SAME_STMT);
2809 t = build_gimple_modify_stmt (vback, t);
2810 bsi_insert_before (&si, t, BSI_SAME_STMT);
2811 if (gimple_in_ssa_p (cfun))
2812 SSA_NAME_DEF_STMT (vback) = t;
2814 t = build2 (fd->cond_code, boolean_type_node, vback, iend);
2815 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
2816 bsi_insert_before (&si, t, BSI_SAME_STMT);
2818 /* Remove OMP_CONTINUE. */
2819 bsi_remove (&si, true);
2821 /* Emit code to get the next parallel iteration in L2_BB. */
2822 si = bsi_start (l2_bb);
2824 t = build_call_expr (built_in_decls[next_fn], 2,
2825 build_fold_addr_expr (istart0),
2826 build_fold_addr_expr (iend0));
2827 t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2828 false, BSI_CONTINUE_LINKING);
2829 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
2830 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2833 /* Add the loop cleanup function. */
2834 si = bsi_last (exit_bb);
2835 if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
2836 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
2837 else
2838 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
2839 t = build_call_expr (t, 0);
2840 bsi_insert_after (&si, t, BSI_SAME_STMT);
2841 bsi_remove (&si, true);
2843 /* Connect the new blocks. */
2844 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
2845 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
2847 if (!broken_loop)
2849 e = find_edge (cont_bb, l3_bb);
2850 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
2852 for (phi = phi_nodes (l3_bb); phi; phi = PHI_CHAIN (phi))
2853 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
2854 PHI_ARG_DEF_FROM_EDGE (phi, e));
2855 remove_edge (e);
2857 find_edge (cont_bb, l1_bb)->flags = EDGE_TRUE_VALUE;
2858 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
2859 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
2861 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
2862 recompute_dominator (CDI_DOMINATORS, l2_bb));
2863 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
2864 recompute_dominator (CDI_DOMINATORS, l3_bb));
2865 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
2866 recompute_dominator (CDI_DOMINATORS, l0_bb));
2867 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
2868 recompute_dominator (CDI_DOMINATORS, l1_bb));
2873 /* A subroutine of expand_omp_for. Generate code for a parallel
2874 loop with static schedule and no specified chunk size. Given
2875 parameters:
2877 for (V = N1; V cond N2; V += STEP) BODY;
2879 where COND is "<" or ">", we generate pseudocode
2881 if (cond is <)
2882 adj = STEP - 1;
2883 else
2884 adj = STEP + 1;
2885 n = (adj + N2 - N1) / STEP;
2886 q = n / nthreads;
2887 q += (q * nthreads != n);
2888 s0 = q * threadid;
2889 e0 = min(s0 + q, n);
2890 V = s0 * STEP + N1;
2891 if (s0 >= e0) goto L2; else goto L0;
2892 L0:
2893 e = e0 * STEP + N1;
2894 L1:
2895 BODY;
2896 V += STEP;
2897 if (V cond e) goto L1;
2898 L2:
2899 */
2901 static void
2902 expand_omp_for_static_nochunk (struct omp_region *region,
2903 struct omp_for_data *fd)
2905 tree n, q, s0, e0, e, t, nthreads, threadid;
2906 tree type, vmain, vback;
2907 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
2908 basic_block fin_bb;
2909 block_stmt_iterator si;
2911 type = TREE_TYPE (fd->v);
2913 entry_bb = region->entry;
2914 cont_bb = region->cont;
2915 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
2916 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
2917 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
2918 body_bb = single_succ (seq_start_bb);
2919 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
2920 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
2921 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
2922 exit_bb = region->exit;
2924 /* Iteration space partitioning goes in ENTRY_BB. */
2925 si = bsi_last (entry_bb);
2926 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
2928 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
2929 t = fold_convert (type, t);
2930 nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2931 true, BSI_SAME_STMT);
2933 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2934 t = fold_convert (type, t);
2935 threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2936 true, BSI_SAME_STMT);
2938 fd->n1 = force_gimple_operand_bsi (&si,
2939 fold_convert (type, fd->n1),
2940 true, NULL_TREE,
2941 true, BSI_SAME_STMT);
2943 fd->n2 = force_gimple_operand_bsi (&si,
2944 fold_convert (type, fd->n2),
2945 true, NULL_TREE,
2946 true, BSI_SAME_STMT);
2948 fd->step = force_gimple_operand_bsi (&si,
2949 fold_convert (type, fd->step),
2950 true, NULL_TREE,
2951 true, BSI_SAME_STMT);
2953 t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
2954 t = fold_build2 (PLUS_EXPR, type, fd->step, t);
2955 t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
2956 t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
2957 t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
2958 t = fold_convert (type, t);
2959 n = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
2961 t = fold_build2 (TRUNC_DIV_EXPR, type, n, nthreads);
2962 q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
2964 t = fold_build2 (MULT_EXPR, type, q, nthreads);
2965 t = fold_build2 (NE_EXPR, type, t, n);
2966 t = fold_build2 (PLUS_EXPR, type, q, t);
2967 q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
2969 t = build2 (MULT_EXPR, type, q, threadid);
2970 s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
2972 t = fold_build2 (PLUS_EXPR, type, s0, q);
2973 t = fold_build2 (MIN_EXPR, type, t, n);
2974 e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
2976 t = fold_convert (type, s0);
2977 t = fold_build2 (MULT_EXPR, type, t, fd->step);
2978 t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
2979 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
2980 true, BSI_SAME_STMT);
2981 t = build_gimple_modify_stmt (fd->v, t);
2982 bsi_insert_before (&si, t, BSI_SAME_STMT);
2983 if (gimple_in_ssa_p (cfun))
2984 SSA_NAME_DEF_STMT (fd->v) = t;
2986 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
2987 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
2988 bsi_insert_before (&si, t, BSI_SAME_STMT);
2990 /* Remove the OMP_FOR statement. */
2991 bsi_remove (&si, true);
2993 /* Setup code for sequential iteration goes in SEQ_START_BB. */
2994 si = bsi_start (seq_start_bb);
2996 t = fold_convert (type, e0);
2997 t = fold_build2 (MULT_EXPR, type, t, fd->step);
2998 t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
2999 e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3000 false, BSI_CONTINUE_LINKING);
3002 /* The code controlling the sequential loop replaces the OMP_CONTINUE. */
3003 si = bsi_last (cont_bb);
3004 t = bsi_stmt (si);
3005 gcc_assert (TREE_CODE (t) == OMP_CONTINUE);
3006 vmain = TREE_OPERAND (t, 1);
3007 vback = TREE_OPERAND (t, 0);
3009 t = fold_build2 (PLUS_EXPR, type, vmain, fd->step);
3010 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
3011 true, BSI_SAME_STMT);
3012 t = build_gimple_modify_stmt (vback, t);
3013 bsi_insert_before (&si, t, BSI_SAME_STMT);
3014 if (gimple_in_ssa_p (cfun))
3015 SSA_NAME_DEF_STMT (vback) = t;
3017 t = build2 (fd->cond_code, boolean_type_node, vback, e);
3018 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
3019 bsi_insert_before (&si, t, BSI_SAME_STMT);
3021 /* Remove the OMP_CONTINUE statement. */
3022 bsi_remove (&si, true);
3024 /* Replace the OMP_RETURN with a barrier, or nothing. */
3025 si = bsi_last (exit_bb);
3026 if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
3027 force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
3028 false, BSI_SAME_STMT);
3029 bsi_remove (&si, true);
3031 /* Connect all the blocks. */
3032 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
3033 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
3035 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
3036 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
3038 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
3039 set_immediate_dominator (CDI_DOMINATORS, body_bb,
3040 recompute_dominator (CDI_DOMINATORS, body_bb));
3041 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
3042 recompute_dominator (CDI_DOMINATORS, fin_bb));
3046 /* A subroutine of expand_omp_for. Generate code for a parallel
3047 loop with static schedule and a specified chunk size. Given
3048 parameters:
3050 for (V = N1; V cond N2; V += STEP) BODY;
3052 where COND is "<" or ">", we generate pseudocode
3054 if (cond is <)
3055 adj = STEP - 1;
3056 else
3057 adj = STEP + 1;
3058 n = (adj + N2 - N1) / STEP;
3059 trip = 0;
3060 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
3061 here so that V is defined
3062 if the loop is not entered
3063 L0:
3064 s0 = (trip * nthreads + threadid) * CHUNK;
3065 e0 = min(s0 + CHUNK, n);
3066 if (s0 < n) goto L1; else goto L4;
3067 L1:
3068 V = s0 * STEP + N1;
3069 e = e0 * STEP + N1;
3070 L2:
3071 BODY;
3072 V += STEP;
3073 if (V cond e) goto L2; else goto L3;
3074 L3:
3075 trip += 1;
3076 goto L0;
3077 L4:
3078 */
3080 static void
3081 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
3083 tree n, s0, e0, e, t, phi, nphi, args;
3084 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
3085 tree type, cont, v_main, v_back, v_extra;
3086 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
3087 basic_block trip_update_bb, cont_bb, fin_bb;
3088 block_stmt_iterator si;
3089 edge se, re, ene;
3091 type = TREE_TYPE (fd->v);
3093 entry_bb = region->entry;
3094 se = split_block (entry_bb, last_stmt (entry_bb));
3095 entry_bb = se->src;
3096 iter_part_bb = se->dest;
3097 cont_bb = region->cont;
3098 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
3099 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
3100 == FALLTHRU_EDGE (cont_bb)->dest);
3101 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
3102 body_bb = single_succ (seq_start_bb);
3103 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
3104 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3105 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
3106 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
3107 exit_bb = region->exit;
3109 /* Trip and adjustment setup goes in ENTRY_BB. */
3110 si = bsi_last (entry_bb);
3111 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
3113 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
3114 t = fold_convert (type, t);
3115 nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3116 true, BSI_SAME_STMT);
3118 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
3119 t = fold_convert (type, t);
3120 threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3121 true, BSI_SAME_STMT);
3123 fd->n1 = force_gimple_operand_bsi (&si, fold_convert (type, fd->n1),
3124 true, NULL_TREE,
3125 true, BSI_SAME_STMT);
3126 fd->n2 = force_gimple_operand_bsi (&si, fold_convert (type, fd->n2),
3127 true, NULL_TREE,
3128 true, BSI_SAME_STMT);
3129 fd->step = force_gimple_operand_bsi (&si, fold_convert (type, fd->step),
3130 true, NULL_TREE,
3131 true, BSI_SAME_STMT);
3132 fd->chunk_size
3133 = force_gimple_operand_bsi (&si, fold_convert (type,
3134 fd->chunk_size),
3135 true, NULL_TREE,
3136 true, BSI_SAME_STMT);
3138 t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
3139 t = fold_build2 (PLUS_EXPR, type, fd->step, t);
3140 t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
3141 t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
3142 t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
3143 t = fold_convert (type, t);
3144 n = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3145 true, BSI_SAME_STMT);
3147 trip_var = create_tmp_var (type, ".trip");
3148 if (gimple_in_ssa_p (cfun))
3150 add_referenced_var (trip_var);
3151 trip_init = make_ssa_name (trip_var, NULL_TREE);
3152 trip_main = make_ssa_name (trip_var, NULL_TREE);
3153 trip_back = make_ssa_name (trip_var, NULL_TREE);
3155 else
3157 trip_init = trip_var;
3158 trip_main = trip_var;
3159 trip_back = trip_var;
3162 t = build_gimple_modify_stmt (trip_init, build_int_cst (type, 0));
3163 bsi_insert_before (&si, t, BSI_SAME_STMT);
3164 if (gimple_in_ssa_p (cfun))
3165 SSA_NAME_DEF_STMT (trip_init) = t;
3167 t = fold_build2 (MULT_EXPR, type, threadid, fd->chunk_size);
3168 t = fold_build2 (MULT_EXPR, type, t, fd->step);
3169 t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
3170 v_extra = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3171 true, BSI_SAME_STMT);
3173 /* Remove the OMP_FOR. */
3174 bsi_remove (&si, true);
3176 /* Iteration space partitioning goes in ITER_PART_BB. */
3177 si = bsi_last (iter_part_bb);
3179 t = fold_build2 (MULT_EXPR, type, trip_main, nthreads);
3180 t = fold_build2 (PLUS_EXPR, type, t, threadid);
3181 t = fold_build2 (MULT_EXPR, type, t, fd->chunk_size);
3182 s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3183 false, BSI_CONTINUE_LINKING);
3185 t = fold_build2 (PLUS_EXPR, type, s0, fd->chunk_size);
3186 t = fold_build2 (MIN_EXPR, type, t, n);
3187 e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3188 false, BSI_CONTINUE_LINKING);
3190 t = build2 (LT_EXPR, boolean_type_node, s0, n);
3191 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
3192 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3194 /* Setup code for sequential iteration goes in SEQ_START_BB. */
3195 si = bsi_start (seq_start_bb);
3197 t = fold_convert (type, s0);
3198 t = fold_build2 (MULT_EXPR, type, t, fd->step);
3199 t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
3200 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
3201 false, BSI_CONTINUE_LINKING);
3202 t = build_gimple_modify_stmt (fd->v, t);
3203 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3204 if (gimple_in_ssa_p (cfun))
3205 SSA_NAME_DEF_STMT (fd->v) = t;
3207 t = fold_convert (type, e0);
3208 t = fold_build2 (MULT_EXPR, type, t, fd->step);
3209 t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
3210 e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3211 false, BSI_CONTINUE_LINKING);
3213 /* The code controlling the sequential loop goes in CONT_BB,
3214 replacing the OMP_CONTINUE. */
3215 si = bsi_last (cont_bb);
3216 cont = bsi_stmt (si);
3217 gcc_assert (TREE_CODE (cont) == OMP_CONTINUE);
3218 v_main = TREE_OPERAND (cont, 1);
3219 v_back = TREE_OPERAND (cont, 0);
3221 t = build2 (PLUS_EXPR, type, v_main, fd->step);
3222 t = build_gimple_modify_stmt (v_back, t);
3223 bsi_insert_before (&si, t, BSI_SAME_STMT);
3224 if (gimple_in_ssa_p (cfun))
3225 SSA_NAME_DEF_STMT (v_back) = t;
3227 t = build2 (fd->cond_code, boolean_type_node, v_back, e);
3228 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
3229 bsi_insert_before (&si, t, BSI_SAME_STMT);
3231 /* Remove OMP_CONTINUE. */
3232 bsi_remove (&si, true);
3234 /* Trip update code goes into TRIP_UPDATE_BB. */
3235 si = bsi_start (trip_update_bb);
3237 t = build_int_cst (type, 1);
3238 t = build2 (PLUS_EXPR, type, trip_main, t);
3239 t = build_gimple_modify_stmt (trip_back, t);
3240 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3241 if (gimple_in_ssa_p (cfun))
3242 SSA_NAME_DEF_STMT (trip_back) = t;
3244 /* Replace the OMP_RETURN with a barrier, or nothing. */
3245 si = bsi_last (exit_bb);
3246 if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
3247 force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
3248 false, BSI_SAME_STMT);
3249 bsi_remove (&si, true);
3251 /* Connect the new blocks. */
3252 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
3253 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
3255 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
3256 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
3258 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
3260 if (gimple_in_ssa_p (cfun))
3262 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
3263 remove arguments of the phi nodes in fin_bb. We need to create
3264 appropriate phi nodes in iter_part_bb instead. */
3265 se = single_pred_edge (fin_bb);
3266 re = single_succ_edge (trip_update_bb);
3267 ene = single_succ_edge (entry_bb);
3269 args = PENDING_STMT (re);
3270 PENDING_STMT (re) = NULL_TREE;
3271 for (phi = phi_nodes (fin_bb);
3272 phi && args;
3273 phi = PHI_CHAIN (phi), args = TREE_CHAIN (args))
3275 t = PHI_RESULT (phi);
3276 gcc_assert (t == TREE_PURPOSE (args));
3277 nphi = create_phi_node (t, iter_part_bb);
3278 SSA_NAME_DEF_STMT (t) = nphi;
3280 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
3281 /* A special case -- fd->v is not yet computed in iter_part_bb, so we
3282 need to use v_extra instead. */
3283 if (t == fd->v)
3284 t = v_extra;
3285 add_phi_arg (nphi, t, ene);
3286 add_phi_arg (nphi, TREE_VALUE (args), re);
3288 gcc_assert (!phi && !args);
3289 while ((phi = phi_nodes (fin_bb)) != NULL_TREE)
3290 remove_phi_node (phi, NULL_TREE, false);
3292 /* Make phi node for trip. */
3293 phi = create_phi_node (trip_main, iter_part_bb);
3294 SSA_NAME_DEF_STMT (trip_main) = phi;
3295 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
3296 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
3299 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
3300 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
3301 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
3302 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
3303 recompute_dominator (CDI_DOMINATORS, fin_bb));
3304 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
3305 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
3306 set_immediate_dominator (CDI_DOMINATORS, body_bb,
3307 recompute_dominator (CDI_DOMINATORS, body_bb));
3311 /* Expand the OpenMP loop defined by REGION. */
3313 static void
3314 expand_omp_for (struct omp_region *region)
3316 struct omp_for_data fd;
3318 extract_omp_for_data (last_stmt (region->entry), &fd);
3319 region->sched_kind = fd.sched_kind;
3321 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
3322 && !fd.have_ordered
3323 && region->cont != NULL)
3325 if (fd.chunk_size == NULL)
3326 expand_omp_for_static_nochunk (region, &fd);
3327 else
3328 expand_omp_for_static_chunk (region, &fd);
3330 else
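/* The GOMP_loop_*_start and GOMP_loop_*_next builtins are laid out
   consecutively for the static, dynamic, guided and runtime schedule
   kinds, with the ordered variants following them; the index arithmetic
   below relies on that layout.  */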
3332 int fn_index = fd.sched_kind + fd.have_ordered * 4;
3333 int start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
3334 int next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
3335 expand_omp_for_generic (region, &fd, start_ix, next_ix);
3338 update_ssa (TODO_update_ssa_only_virtuals);
3342 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
3344 v = GOMP_sections_start (n);
3345 L0:
3346 switch (v)
3347 {
3348 case 0:
3349 goto L2;
3350 case 1:
3351 section 1;
3352 goto L1;
3353 case 2:
3354 ...
3355 case n:
3356 ...
3357 default:
3358 abort ();
3359 }
3360 L1:
3361 v = GOMP_sections_next ();
3362 goto L0;
3363 L2:
3364 reduction;
3366 If this is a combined parallel sections, replace the call to
3367 GOMP_sections_start with call to GOMP_sections_next. */
3369 static void
3370 expand_omp_sections (struct omp_region *region)
3372 tree label_vec, l1, l2, t, u, sections_stmt, vin, vmain, vnext, cont;
3373 unsigned i, casei, len;
3374 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
3375 block_stmt_iterator si;
3376 struct omp_region *inner;
3377 bool exit_reachable = region->cont != NULL;
3379 gcc_assert (exit_reachable == (region->exit != NULL));
3380 entry_bb = region->entry;
3381 l0_bb = single_succ (entry_bb);
3382 l1_bb = region->cont;
3383 l2_bb = region->exit;
3384 if (exit_reachable)
3386 gcc_assert (single_pred (l2_bb) == l0_bb);
3387 default_bb = create_empty_bb (l1_bb->prev_bb);
3388 l1 = tree_block_label (l1_bb);
3389 l2 = tree_block_label (l2_bb);
3391 else
3393 default_bb = create_empty_bb (l0_bb);
3394 l1 = NULL_TREE;
3395 l2 = tree_block_label (default_bb);
3398 /* We will build a switch() with enough cases for all the
3399 OMP_SECTION regions, a '0' case to handle the end of more work
3400 and a default case to abort if something goes wrong. */
3401 len = EDGE_COUNT (l0_bb->succs);
3402 label_vec = make_tree_vec (len + 1);
3404 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
3405 OMP_SECTIONS statement. */
3406 si = bsi_last (entry_bb);
3407 sections_stmt = bsi_stmt (si);
3408 gcc_assert (TREE_CODE (sections_stmt) == OMP_SECTIONS);
3409 vin = OMP_SECTIONS_CONTROL (sections_stmt);
3410 if (!is_combined_parallel (region))
3412 /* If we are not inside a combined parallel+sections region,
3413 call GOMP_sections_start. */
3414 t = build_int_cst (unsigned_type_node,
3415 exit_reachable ? len - 1 : len);
3416 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
3417 t = build_call_expr (u, 1, t);
3419 else
3421 /* Otherwise, call GOMP_sections_next. */
3422 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
3423 t = build_call_expr (u, 0);
3425 t = build_gimple_modify_stmt (vin, t);
3426 bsi_insert_after (&si, t, BSI_SAME_STMT);
3427 if (gimple_in_ssa_p (cfun))
3428 SSA_NAME_DEF_STMT (vin) = t;
3429 bsi_remove (&si, true);
3431 /* The switch() statement replacing OMP_SECTIONS_SWITCH goes in L0_BB. */
3432 si = bsi_last (l0_bb);
3433 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTIONS_SWITCH);
3434 if (exit_reachable)
3436 cont = last_stmt (l1_bb);
3437 gcc_assert (TREE_CODE (cont) == OMP_CONTINUE);
3438 vmain = TREE_OPERAND (cont, 1);
3439 vnext = TREE_OPERAND (cont, 0);
3441 else
3443 vmain = vin;
3444 vnext = NULL_TREE;
3447 t = build3 (SWITCH_EXPR, void_type_node, vmain, NULL, label_vec);
3448 bsi_insert_after (&si, t, BSI_SAME_STMT);
3449 bsi_remove (&si, true);
3451 i = 0;
3452 if (exit_reachable)
3454 t = build3 (CASE_LABEL_EXPR, void_type_node,
3455 build_int_cst (unsigned_type_node, 0), NULL, l2);
3456 TREE_VEC_ELT (label_vec, 0) = t;
3457 i++;
3460 /* Convert each OMP_SECTION into a CASE_LABEL_EXPR. */
3461 for (inner = region->inner, casei = 1;
3462 inner;
3463 inner = inner->next, i++, casei++)
3465 basic_block s_entry_bb, s_exit_bb;
3467 s_entry_bb = inner->entry;
3468 s_exit_bb = inner->exit;
3470 t = tree_block_label (s_entry_bb);
3471 u = build_int_cst (unsigned_type_node, casei);
3472 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
3473 TREE_VEC_ELT (label_vec, i) = u;
3475 si = bsi_last (s_entry_bb);
3476 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTION);
3477 gcc_assert (i < len || OMP_SECTION_LAST (bsi_stmt (si)));
3478 bsi_remove (&si, true);
3479 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
3481 if (s_exit_bb == NULL)
3482 continue;
3484 si = bsi_last (s_exit_bb);
3485 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
3486 bsi_remove (&si, true);
3488 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
3491 /* Error handling code goes in DEFAULT_BB. */
3492 t = tree_block_label (default_bb);
3493 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
3494 TREE_VEC_ELT (label_vec, len) = u;
3495 make_edge (l0_bb, default_bb, 0);
3497 si = bsi_start (default_bb);
3498 t = build_call_expr (built_in_decls[BUILT_IN_TRAP], 0);
3499 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3501 if (exit_reachable)
3503 /* Code to get the next section goes in L1_BB. */
3504 si = bsi_last (l1_bb);
3505 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
3507 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
3508 t = build_gimple_modify_stmt (vnext, t);
3509 bsi_insert_after (&si, t, BSI_SAME_STMT);
3510 if (gimple_in_ssa_p (cfun))
3511 SSA_NAME_DEF_STMT (vnext) = t;
3512 bsi_remove (&si, true);
3514 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
3516 /* Cleanup function replaces OMP_RETURN in EXIT_BB. */
3517 si = bsi_last (l2_bb);
3518 if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
3519 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
3520 else
3521 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
3522 t = build_call_expr (t, 0);
3523 bsi_insert_after (&si, t, BSI_SAME_STMT);
3524 bsi_remove (&si, true);
3527 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
3531 /* Expand code for an OpenMP single directive. We've already expanded
3532 much of the code; here we simply place the GOMP_barrier call. */
3534 static void
3535 expand_omp_single (struct omp_region *region)
3537 basic_block entry_bb, exit_bb;
3538 block_stmt_iterator si;
3539 bool need_barrier = false;
3541 entry_bb = region->entry;
3542 exit_bb = region->exit;
3544 si = bsi_last (entry_bb);
3545 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
3546 be removed. We need to ensure that the thread that entered the single
3547 does not exit before the data is copied out by the other threads. */
3548 if (find_omp_clause (OMP_SINGLE_CLAUSES (bsi_stmt (si)),
3549 OMP_CLAUSE_COPYPRIVATE))
3550 need_barrier = true;
3551 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE);
3552 bsi_remove (&si, true);
3553 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3555 si = bsi_last (exit_bb);
3556 if (!OMP_RETURN_NOWAIT (bsi_stmt (si)) || need_barrier)
3557 force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
3558 false, BSI_SAME_STMT);
3559 bsi_remove (&si, true);
3560 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
3564 /* Generic expansion for OpenMP synchronization directives: master,
3565 ordered and critical. All we need to do here is remove the entry
3566 and exit markers for REGION. */
3568 static void
3569 expand_omp_synch (struct omp_region *region)
3571 basic_block entry_bb, exit_bb;
3572 block_stmt_iterator si;
3574 entry_bb = region->entry;
3575 exit_bb = region->exit;
3577 si = bsi_last (entry_bb);
3578 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE
3579 || TREE_CODE (bsi_stmt (si)) == OMP_MASTER
3580 || TREE_CODE (bsi_stmt (si)) == OMP_ORDERED
3581 || TREE_CODE (bsi_stmt (si)) == OMP_CRITICAL);
3582 bsi_remove (&si, true);
3583 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3585 if (exit_bb)
3587 si = bsi_last (exit_bb);
3588 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
3589 bsi_remove (&si, true);
3590 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
3594 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
3595 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
3596 size of the data type, and thus usable to find the index of the builtin
3597 decl. Returns false if the expression is not of the proper form. */
3599 static bool
3600 expand_omp_atomic_fetch_op (basic_block load_bb,
3601 tree addr, tree loaded_val,
3602 tree stored_val, int index)
3604 enum built_in_function base;
3605 tree decl, itype, call;
3606 enum insn_code *optab;
3607 tree rhs;
3608 basic_block store_bb = single_succ (load_bb);
3609 block_stmt_iterator bsi;
3610 tree stmt;
3612 /* We expect to find the following sequences:
3614 load_bb:
3615 OMP_ATOMIC_LOAD (tmp, mem)
3617 store_bb:
3618 val = tmp OP something; (or: something OP tmp)
3619 OMP_ATOMIC_STORE (val)
3621 ???FIXME: Allow a more flexible sequence.
3622 Perhaps use data flow to pick the statements.
3624 */
3626 bsi = bsi_after_labels (store_bb);
3627 stmt = bsi_stmt (bsi);
3628 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
3629 return false;
3630 bsi_next (&bsi);
3631 if (TREE_CODE (bsi_stmt (bsi)) != OMP_ATOMIC_STORE)
3632 return false;
3634 if (!operand_equal_p (GIMPLE_STMT_OPERAND (stmt, 0), stored_val, 0))
3635 return false;
3637 rhs = GIMPLE_STMT_OPERAND (stmt, 1);
3639 /* Check for one of the supported fetch-op operations. */
3640 switch (TREE_CODE (rhs))
3642 case PLUS_EXPR:
3643 case POINTER_PLUS_EXPR:
3644 base = BUILT_IN_FETCH_AND_ADD_N;
3645 optab = sync_add_optab;
3646 break;
3647 case MINUS_EXPR:
3648 base = BUILT_IN_FETCH_AND_SUB_N;
3649 optab = sync_add_optab;
3650 break;
3651 case BIT_AND_EXPR:
3652 base = BUILT_IN_FETCH_AND_AND_N;
3653 optab = sync_and_optab;
3654 break;
3655 case BIT_IOR_EXPR:
3656 base = BUILT_IN_FETCH_AND_OR_N;
3657 optab = sync_ior_optab;
3658 break;
3659 case BIT_XOR_EXPR:
3660 base = BUILT_IN_FETCH_AND_XOR_N;
3661 optab = sync_xor_optab;
3662 break;
3663 default:
3664 return false;
3666 /* Make sure the expression is of the proper form. */
3667 if (operand_equal_p (TREE_OPERAND (rhs, 0), loaded_val, 0))
3668 rhs = TREE_OPERAND (rhs, 1);
3669 else if (commutative_tree_code (TREE_CODE (rhs))
3670 && operand_equal_p (TREE_OPERAND (rhs, 1), loaded_val, 0))
3671 rhs = TREE_OPERAND (rhs, 0);
3672 else
3673 return false;
3675 decl = built_in_decls[base + index + 1];
3676 itype = TREE_TYPE (TREE_TYPE (decl));
3678 if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
3679 return false;
3681 bsi = bsi_last (load_bb);
3682 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
3683 call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
3684 force_gimple_operand_bsi (&bsi, call, true, NULL_TREE, true, BSI_SAME_STMT);
3685 bsi_remove (&bsi, true);
3687 bsi = bsi_last (store_bb);
3688 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
3689 bsi_remove (&bsi, true);
3690 bsi = bsi_last (store_bb);
3691 bsi_remove (&bsi, true);
3693 if (gimple_in_ssa_p (cfun))
3694 update_ssa (TODO_update_ssa_no_phi);
3696 return true;
3699 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
3701 oldval = *addr;
3702 repeat:
3703 newval = rhs; // with oldval replacing *addr in rhs
3704 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
3705 if (oldval != newval)
3706 goto repeat;
3708 INDEX is log2 of the size of the data type, and thus usable to find the
3709 index of the builtin decl. */
3711 static bool
3712 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
3713 tree addr, tree loaded_val, tree stored_val,
3714 int index)
3716 tree loadedi, storedi, initial, new_stored, new_storedi, old_vali;
3717 tree type, itype, cmpxchg, iaddr;
3718 block_stmt_iterator bsi;
3719 basic_block loop_header = single_succ (load_bb);
3720 tree phi, x;
3721 edge e;
3723 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
3724 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
3725 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
3727 if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
3728 return false;
3730 /* Load the initial value, replacing the OMP_ATOMIC_LOAD. */
3731 bsi = bsi_last (load_bb);
3732 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
3733 initial = force_gimple_operand_bsi (&bsi, build_fold_indirect_ref (addr),
3734 true, NULL_TREE, true, BSI_SAME_STMT);
3735 /* Move the value to the LOADED_VAL temporary. */
3736 if (gimple_in_ssa_p (cfun))
3738 gcc_assert (phi_nodes (loop_header) == NULL_TREE);
3739 phi = create_phi_node (loaded_val, loop_header);
3740 SSA_NAME_DEF_STMT (loaded_val) = phi;
3741 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
3742 initial);
3744 else
3745 bsi_insert_before (&bsi,
3746 build_gimple_modify_stmt (loaded_val, initial),
3747 BSI_SAME_STMT);
3748 bsi_remove (&bsi, true);
3750 bsi = bsi_last (store_bb);
3751 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
3753 /* For floating-point values, we'll need to view-convert them to integers
3754 so that we can perform the atomic compare and swap. Simplify the
3755 following code by always setting up the "i"ntegral variables. */
3756 if (INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
3758 loadedi = loaded_val;
3759 storedi = stored_val;
3760 iaddr = addr;
3762 else
3764 loadedi = force_gimple_operand_bsi (&bsi,
3765 build1 (VIEW_CONVERT_EXPR, itype,
3766 loaded_val), true,
3767 NULL_TREE, true, BSI_SAME_STMT);
3768 storedi =
3769 force_gimple_operand_bsi (&bsi,
3770 build1 (VIEW_CONVERT_EXPR, itype,
3771 stored_val), true, NULL_TREE, true,
3772 BSI_SAME_STMT);
3773 iaddr = fold_convert (build_pointer_type (itype), addr);
3776 /* Build the compare&swap statement. */
3777 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
3778 new_storedi = force_gimple_operand_bsi (&bsi,
3779 fold_convert (itype, new_storedi),
3780 true, NULL_TREE,
3781 true, BSI_SAME_STMT);
3782 if (storedi == stored_val)
3783 new_stored = new_storedi;
3784 else
3785 new_stored = force_gimple_operand_bsi (&bsi,
3786 build1 (VIEW_CONVERT_EXPR, type,
3787 new_storedi), true,
3788 NULL_TREE, true, BSI_SAME_STMT);
3790 if (gimple_in_ssa_p (cfun))
3791 old_vali = loadedi;
3792 else
3794 old_vali = create_tmp_var (itype, NULL);
3795 x = build_gimple_modify_stmt (old_vali, loadedi);
3796 bsi_insert_before (&bsi, x, BSI_SAME_STMT);
3798 x = build_gimple_modify_stmt (loaded_val, new_stored);
3799 bsi_insert_before (&bsi, x, BSI_SAME_STMT);
3802 /* Note that we always perform the comparison as an integer, even for
3803 floating point. This allows the atomic operation to properly
3804 succeed even with NaNs and -0.0. */
3805 x = build3 (COND_EXPR, void_type_node,
3806 build2 (NE_EXPR, boolean_type_node,
3807 new_storedi, old_vali), NULL_TREE, NULL_TREE);
3808 bsi_insert_before (&bsi, x, BSI_SAME_STMT);
3810 /* Update cfg. */
3811 e = single_succ_edge (store_bb);
3812 e->flags &= ~EDGE_FALLTHRU;
3813 e->flags |= EDGE_FALSE_VALUE;
3815 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
3817 /* Copy the new value to loaded_val (we already did that before the condition
3818 if we are not in SSA). */
3819 if (gimple_in_ssa_p (cfun))
3821 phi = phi_nodes (loop_header);
3822 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_stored);
3825 /* Remove OMP_ATOMIC_STORE. */
3826 bsi_remove (&bsi, true);
3828 if (gimple_in_ssa_p (cfun))
3829 update_ssa (TODO_update_ssa_no_phi);
3831 return true;
3834 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
3836 GOMP_atomic_start ();
3837 *addr = rhs;
3838 GOMP_atomic_end ();
3840 The result is not globally atomic, but works so long as all parallel
3841 references are within #pragma omp atomic directives. According to
3842 responses received from omp@openmp.org, this appears to be within spec,
3843 which makes sense, since that's how several other compilers handle
3844 this situation as well.
3845 LOADED_VAL and ADDR are the operands of OMP_ATOMIC_LOAD we're expanding.
3846 STORED_VAL is the operand of the matching OMP_ATOMIC_STORE.
3848 We replace
3849 OMP_ATOMIC_LOAD (loaded_val, addr) with
3850 loaded_val = *addr;
3852 and replace
3853 OMP_ATOMIC_STORE (stored_val) with
3854 *addr = stored_val;
3855 */
3857 static bool
3858 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
3859 tree addr, tree loaded_val, tree stored_val)
3861 block_stmt_iterator bsi;
3862 tree t;
3864 bsi = bsi_last (load_bb);
3865 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
3867 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
3868 t = build_function_call_expr (t, 0);
3869 force_gimple_operand_bsi (&bsi, t, true, NULL_TREE, true, BSI_SAME_STMT);
3871 t = build_gimple_modify_stmt (loaded_val, build_fold_indirect_ref (addr));
3872 if (gimple_in_ssa_p (cfun))
3873 SSA_NAME_DEF_STMT (loaded_val) = t;
3874 bsi_insert_before (&bsi, t, BSI_SAME_STMT);
3875 bsi_remove (&bsi, true);
3877 bsi = bsi_last (store_bb);
3878 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
3880 t = build_gimple_modify_stmt (build_fold_indirect_ref (unshare_expr (addr)),
3881 stored_val);
3882 bsi_insert_before (&bsi, t, BSI_SAME_STMT);
3884 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
3885 t = build_function_call_expr (t, 0);
3886 force_gimple_operand_bsi (&bsi, t, true, NULL_TREE, true, BSI_SAME_STMT);
3887 bsi_remove (&bsi, true);
3889 if (gimple_in_ssa_p (cfun))
3890 update_ssa (TODO_update_ssa_no_phi);
3891 return true;
3894 /* Expand an OMP_ATOMIC statement. We first try to expand it
3895 using expand_omp_atomic_fetch_op. If that fails, we try
3896 expand_omp_atomic_pipeline, and if that fails too, the
3897 ultimate fallback is wrapping the operation in a mutex
3898 (expand_omp_atomic_mutex). REGION is the atomic region built
3899 by build_omp_regions_1(). */
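/* For illustration only -- a rough sketch, not the exact GIMPLE produced.
   Given

       #pragma omp atomic
       x += 1;

   the three strategies tried below correspond approximately to

       __sync_fetch_and_add (&x, 1);                         <-- fetch_op

       do                                                    <-- pipeline
         old = x;
       while (!__sync_bool_compare_and_swap (&x, old, old + 1));

       GOMP_atomic_start ();  x += 1;  GOMP_atomic_end ();   <-- mutex

   The __sync_* names above are only illustrative of the underlying
   builtins; the actual calls are taken from built_in_decls[].  */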
3901 static void
3902 expand_omp_atomic (struct omp_region *region)
3904 basic_block load_bb = region->entry, store_bb = region->exit;
3905 tree load = last_stmt (load_bb), store = last_stmt (store_bb);
3906 tree loaded_val = TREE_OPERAND (load, 0);
3907 tree addr = TREE_OPERAND (load, 1);
3908 tree stored_val = TREE_OPERAND (store, 0);
3909 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
3910 HOST_WIDE_INT index;
3912 /* Make sure the type is one of the supported sizes. */
3913 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
3914 index = exact_log2 (index);
3915 if (index >= 0 && index <= 4)
3917 unsigned int align = TYPE_ALIGN_UNIT (type);
3919 /* __sync builtins require strict data alignment. */
3920 if (exact_log2 (align) >= index)
3922 /* When possible, use specialized atomic update functions. */
3923 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
3924 && store_bb == single_succ (load_bb))
3926 if (expand_omp_atomic_fetch_op (load_bb, addr,
3927 loaded_val, stored_val, index))
3928 return;
3931 /* If we don't have specialized __sync builtins, try to implement
3932 this as a compare-and-swap loop. */
3933 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
3934 loaded_val, stored_val, index))
3935 return;
3939 /* The ultimate fallback is wrapping the operation in a mutex. */
3940 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
3944 /* Expand the parallel region tree rooted at REGION. Expansion
3945 proceeds in depth-first order. Innermost regions are expanded
3946 first. This way, parallel regions that require a new function to
3947 be created (e.g., OMP_PARALLEL) can be expanded without having any
3948 internal dependencies in their body. */
3950 static void
3951 expand_omp (struct omp_region *region)
3953 while (region)
3955 if (region->inner)
3956 expand_omp (region->inner);
3958 switch (region->type)
3960 case OMP_PARALLEL:
3961 expand_omp_parallel (region);
3962 break;
3964 case OMP_FOR:
3965 expand_omp_for (region);
3966 break;
3968 case OMP_SECTIONS:
3969 expand_omp_sections (region);
3970 break;
3972 case OMP_SECTION:
3973 /* Individual omp sections are handled together with their
3974 parent OMP_SECTIONS region. */
3975 break;
3977 case OMP_SINGLE:
3978 expand_omp_single (region);
3979 break;
3981 case OMP_MASTER:
3982 case OMP_ORDERED:
3983 case OMP_CRITICAL:
3984 expand_omp_synch (region);
3985 break;
3987 case OMP_ATOMIC_LOAD:
3988 expand_omp_atomic (region);
3989 break;
3992 default:
3993 gcc_unreachable ();
3996 region = region->next;
4001 /* Helper for build_omp_regions. Scan the dominator tree starting at
4002 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
4003 true, the function ends once a single tree is built (otherwise, a
4004 whole forest of OMP constructs may be built). */
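/* For illustration (schematic): given a function body such as

       #pragma omp parallel
         {
           #pragma omp for
             ...
         }

   this builds a region tree of the form

       OMP_PARALLEL region
         OMP_FOR region

   where each region's entry block holds the directive and its exit
   block holds the matching OMP_RETURN.  */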
4006 static void
4007 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
4008 bool single_tree)
4010 block_stmt_iterator si;
4011 tree stmt;
4012 basic_block son;
4014 si = bsi_last (bb);
4015 if (!bsi_end_p (si) && OMP_DIRECTIVE_P (bsi_stmt (si)))
4017 struct omp_region *region;
4018 enum tree_code code;
4020 stmt = bsi_stmt (si);
4021 code = TREE_CODE (stmt);
4022 if (code == OMP_RETURN)
4024 /* STMT is the return point out of region PARENT. Mark it
4025 as the exit point and make PARENT the immediately
4026 enclosing region. */
4027 gcc_assert (parent);
4028 region = parent;
4029 region->exit = bb;
4030 parent = parent->outer;
4032 /* If REGION is a parallel region, determine whether it is
4033 a combined parallel+workshare region. */
4034 if (region->type == OMP_PARALLEL)
4035 determine_parallel_type (region);
4037 else if (code == OMP_ATOMIC_STORE)
4039 /* OMP_ATOMIC_STORE is analogous to OMP_RETURN, but matches with
4040 OMP_ATOMIC_LOAD. */
4041 gcc_assert (parent);
4042 gcc_assert (parent->type == OMP_ATOMIC_LOAD);
4043 region = parent;
4044 region->exit = bb;
4045 parent = parent->outer;
4048 else if (code == OMP_CONTINUE)
4050 gcc_assert (parent);
4051 parent->cont = bb;
4053 else if (code == OMP_SECTIONS_SWITCH)
4055 /* OMP_SECTIONS_SWITCH is part of OMP_SECTIONS, and we do nothing for
4056 it. */ ;
4058 else
4060 /* Otherwise, this directive becomes the parent for a new
4061 region. */
4062 region = new_omp_region (bb, code, parent);
4063 parent = region;
4067 if (single_tree && !parent)
4068 return;
4070 for (son = first_dom_son (CDI_DOMINATORS, bb);
4071 son;
4072 son = next_dom_son (CDI_DOMINATORS, son))
4073 build_omp_regions_1 (son, parent, single_tree);
4076 /* Builds the tree of OMP regions rooted at ROOT, storing it in
4077 root_omp_region. */
4079 static void
4080 build_omp_regions_root (basic_block root)
4082 gcc_assert (root_omp_region == NULL);
4083 build_omp_regions_1 (root, NULL, true);
4084 gcc_assert (root_omp_region != NULL);
4087 /* Expands the OMP construct (and its subconstructs) starting in HEAD. */
4089 void
4090 omp_expand_local (basic_block head)
4092 build_omp_regions_root (head);
4093 if (dump_file && (dump_flags & TDF_DETAILS))
4095 fprintf (dump_file, "\nOMP region tree\n\n");
4096 dump_omp_region (dump_file, root_omp_region, 0);
4097 fprintf (dump_file, "\n");
4100 remove_exit_barriers (root_omp_region);
4101 expand_omp (root_omp_region);
4103 free_omp_regions ();
4106 /* Scan the CFG and build a tree of OMP regions, storing the root of
4107 the OMP region tree in root_omp_region. */
4109 static void
4110 build_omp_regions (void)
4112 gcc_assert (root_omp_region == NULL);
4113 calculate_dominance_info (CDI_DOMINATORS);
4114 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
4118 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
4120 static unsigned int
4121 execute_expand_omp (void)
4123 build_omp_regions ();
4125 if (!root_omp_region)
4126 return 0;
4128 if (dump_file)
4130 fprintf (dump_file, "\nOMP region tree\n\n");
4131 dump_omp_region (dump_file, root_omp_region, 0);
4132 fprintf (dump_file, "\n");
4135 remove_exit_barriers (root_omp_region);
4137 expand_omp (root_omp_region);
4139 cleanup_tree_cfg ();
4141 free_omp_regions ();
4143 return 0;
4146 /* OMP expansion in SSA form. For testing purposes only. */
4148 static bool
4149 gate_expand_omp_ssa (void)
4151 return flag_openmp_ssa && flag_openmp != 0 && errorcount == 0;
4154 struct tree_opt_pass pass_expand_omp_ssa =
4156 "ompexpssa", /* name */
4157 gate_expand_omp_ssa, /* gate */
4158 execute_expand_omp, /* execute */
4159 NULL, /* sub */
4160 NULL, /* next */
4161 0, /* static_pass_number */
4162 0, /* tv_id */
4163 PROP_gimple_any, /* properties_required */
4164 PROP_gimple_lomp, /* properties_provided */
4165 0, /* properties_destroyed */
4166 0, /* todo_flags_start */
4167 TODO_dump_func, /* todo_flags_finish */
4168 0 /* letter */
4171 /* OMP expansion -- the default pass, run before creation of SSA form. */
4173 static bool
4174 gate_expand_omp (void)
4176 return ((!flag_openmp_ssa || !optimize)
4177 && flag_openmp != 0 && errorcount == 0);
4180 struct tree_opt_pass pass_expand_omp =
4182 "ompexp", /* name */
4183 gate_expand_omp, /* gate */
4184 execute_expand_omp, /* execute */
4185 NULL, /* sub */
4186 NULL, /* next */
4187 0, /* static_pass_number */
4188 0, /* tv_id */
4189 PROP_gimple_any, /* properties_required */
4190 PROP_gimple_lomp, /* properties_provided */
4191 0, /* properties_destroyed */
4192 0, /* todo_flags_start */
4193 TODO_dump_func, /* todo_flags_finish */
4194 0 /* letter */
4197 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
4199 /* Lower the OpenMP sections directive in *STMT_P. */
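/* For illustration (schematic, not the exact GIMPLE): the lowered form
   produced below is roughly

       <input clause setup (ilist)>
       OMP_SECTIONS (clauses)
       OMP_SECTIONS_SWITCH
         OMP_SECTION  <body 1>  OMP_RETURN
         ...
         OMP_SECTION  <last body>  <lastprivate code>  OMP_RETURN
       OMP_CONTINUE (.section, .section)
       <reduction code (olist)>  <destructor code (dlist)>
       OMP_RETURN [nowait]  */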
4201 static void
4202 lower_omp_sections (tree *stmt_p, omp_context *ctx)
4204 tree new_stmt, stmt, body, bind, block, ilist, olist, new_body, control;
4205 tree t, dlist;
4206 tree_stmt_iterator tsi;
4207 unsigned i, len;
4209 stmt = *stmt_p;
4211 push_gimplify_context ();
4213 dlist = NULL;
4214 ilist = NULL;
4215 lower_rec_input_clauses (OMP_SECTIONS_CLAUSES (stmt), &ilist, &dlist, ctx);
4217 tsi = tsi_start (OMP_SECTIONS_BODY (stmt));
4218 for (len = 0; !tsi_end_p (tsi); len++, tsi_next (&tsi))
4219 continue;
4221 tsi = tsi_start (OMP_SECTIONS_BODY (stmt));
4222 body = alloc_stmt_list ();
4223 for (i = 0; i < len; i++, tsi_next (&tsi))
4225 omp_context *sctx;
4226 tree sec_start, sec_end;
4228 sec_start = tsi_stmt (tsi);
4229 sctx = maybe_lookup_ctx (sec_start);
4230 gcc_assert (sctx);
4232 append_to_statement_list (sec_start, &body);
4234 lower_omp (&OMP_SECTION_BODY (sec_start), sctx);
4235 append_to_statement_list (OMP_SECTION_BODY (sec_start), &body);
4236 OMP_SECTION_BODY (sec_start) = NULL;
4238 if (i == len - 1)
4240 tree l = alloc_stmt_list ();
4241 lower_lastprivate_clauses (OMP_SECTIONS_CLAUSES (stmt), NULL,
4242 &l, ctx);
4243 append_to_statement_list (l, &body);
4244 OMP_SECTION_LAST (sec_start) = 1;
4247 sec_end = make_node (OMP_RETURN);
4248 append_to_statement_list (sec_end, &body);
4251 block = make_node (BLOCK);
4252 bind = build3 (BIND_EXPR, void_type_node, NULL, body, block);
4254 olist = NULL_TREE;
4255 lower_reduction_clauses (OMP_SECTIONS_CLAUSES (stmt), &olist, ctx);
4257 pop_gimplify_context (NULL_TREE);
4258 record_vars_into (ctx->block_vars, ctx->cb.dst_fn);
4260 new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
4261 TREE_SIDE_EFFECTS (new_stmt) = 1;
4263 new_body = alloc_stmt_list ();
4264 append_to_statement_list (ilist, &new_body);
4265 append_to_statement_list (stmt, &new_body);
4266 append_to_statement_list (make_node (OMP_SECTIONS_SWITCH), &new_body);
4267 append_to_statement_list (bind, &new_body);
4269 control = create_tmp_var (unsigned_type_node, ".section");
4270 t = build2 (OMP_CONTINUE, void_type_node, control, control);
4271 OMP_SECTIONS_CONTROL (stmt) = control;
4272 append_to_statement_list (t, &new_body);
4274 append_to_statement_list (olist, &new_body);
4275 append_to_statement_list (dlist, &new_body);
4277 maybe_catch_exception (&new_body);
4279 t = make_node (OMP_RETURN);
4280 OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SECTIONS_CLAUSES (stmt),
4281 OMP_CLAUSE_NOWAIT);
4282 append_to_statement_list (t, &new_body);
4284 BIND_EXPR_BODY (new_stmt) = new_body;
4285 OMP_SECTIONS_BODY (stmt) = NULL;
4287 *stmt_p = new_stmt;
4291 /* A subroutine of lower_omp_single. Expand the simple form of
4292 an OMP_SINGLE, without a copyprivate clause:
4294 if (GOMP_single_start ())
4295 BODY;
4296 [ GOMP_barrier (); ] -> unless 'nowait' is present.
4298 FIXME. It may be better to delay expanding the logic of this until
4299 pass_expand_omp. The expanded logic may make the job of a
4300 synchronization analysis pass more difficult. */
4302 static void
4303 lower_omp_single_simple (tree single_stmt, tree *pre_p)
4305 tree t;
4307 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_START], 0);
4308 t = build3 (COND_EXPR, void_type_node, t,
4309 OMP_SINGLE_BODY (single_stmt), NULL);
4310 gimplify_and_add (t, pre_p);
4314 /* A subroutine of lower_omp_single. Expand the copyprivate form of
4315 an OMP_SINGLE, with a copyprivate clause:
4317 #pragma omp single copyprivate (a, b, c)
4319 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
4322 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
4324 BODY;
4325 copyout.a = a;
4326 copyout.b = b;
4327 copyout.c = c;
4328 GOMP_single_copy_end (&copyout);
4330 else
4332 a = copyout_p->a;
4333 b = copyout_p->b;
4334 c = copyout_p->c;
4336 GOMP_barrier ();
4339 FIXME. It may be better to delay expanding the logic of this until
4340 pass_expand_omp. The expanded logic may make the job of a
4341 synchronization analysis pass more difficult. */
4343 static void
4344 lower_omp_single_copy (tree single_stmt, tree *pre_p, omp_context *ctx)
4346 tree ptr_type, t, l0, l1, l2, copyin_seq;
4348 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
4350 ptr_type = build_pointer_type (ctx->record_type);
4351 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
4353 l0 = create_artificial_label ();
4354 l1 = create_artificial_label ();
4355 l2 = create_artificial_label ();
4357 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
4358 t = fold_convert (ptr_type, t);
4359 t = build_gimple_modify_stmt (ctx->receiver_decl, t);
4360 gimplify_and_add (t, pre_p);
4362 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
4363 build_int_cst (ptr_type, 0));
4364 t = build3 (COND_EXPR, void_type_node, t,
4365 build_and_jump (&l0), build_and_jump (&l1));
4366 gimplify_and_add (t, pre_p);
4368 t = build1 (LABEL_EXPR, void_type_node, l0);
4369 gimplify_and_add (t, pre_p);
4371 append_to_statement_list (OMP_SINGLE_BODY (single_stmt), pre_p);
4373 copyin_seq = NULL;
4374 lower_copyprivate_clauses (OMP_SINGLE_CLAUSES (single_stmt), pre_p,
4375 &copyin_seq, ctx);
4377 t = build_fold_addr_expr (ctx->sender_decl);
4378 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
4379 gimplify_and_add (t, pre_p);
4381 t = build_and_jump (&l2);
4382 gimplify_and_add (t, pre_p);
4384 t = build1 (LABEL_EXPR, void_type_node, l1);
4385 gimplify_and_add (t, pre_p);
4387 append_to_statement_list (copyin_seq, pre_p);
4389 t = build1 (LABEL_EXPR, void_type_node, l2);
4390 gimplify_and_add (t, pre_p);
4394 /* Expand code for an OpenMP single directive. */
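/* For illustration (schematic): the lowered form built below is roughly

       <input clause setup>
       OMP_SINGLE (clauses)
       <simple or copyprivate expansion of the body, see above>
       <destructor code (dlist)>
       OMP_RETURN [nowait]

   all wrapped in a BIND_EXPR so that generated temporaries become
   block-local variables.  */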
4396 static void
4397 lower_omp_single (tree *stmt_p, omp_context *ctx)
4399 tree t, bind, block, single_stmt = *stmt_p, dlist;
4401 push_gimplify_context ();
4403 block = make_node (BLOCK);
4404 *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
4405 TREE_SIDE_EFFECTS (bind) = 1;
4407 lower_rec_input_clauses (OMP_SINGLE_CLAUSES (single_stmt),
4408 &BIND_EXPR_BODY (bind), &dlist, ctx);
4409 lower_omp (&OMP_SINGLE_BODY (single_stmt), ctx);
4411 append_to_statement_list (single_stmt, &BIND_EXPR_BODY (bind));
4413 if (ctx->record_type)
4414 lower_omp_single_copy (single_stmt, &BIND_EXPR_BODY (bind), ctx);
4415 else
4416 lower_omp_single_simple (single_stmt, &BIND_EXPR_BODY (bind));
4418 OMP_SINGLE_BODY (single_stmt) = NULL;
4420 append_to_statement_list (dlist, &BIND_EXPR_BODY (bind));
4422 maybe_catch_exception (&BIND_EXPR_BODY (bind));
4424 t = make_node (OMP_RETURN);
4425 OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SINGLE_CLAUSES (single_stmt),
4426 OMP_CLAUSE_NOWAIT);
4427 append_to_statement_list (t, &BIND_EXPR_BODY (bind));
4429 pop_gimplify_context (bind);
4431 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
4432 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
4436 /* Expand code for an OpenMP master directive. */
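/* For illustration (schematic): the master body is guarded roughly as

       if (omp_get_thread_num () != 0) goto lab;
       <body>;
     lab:
       OMP_RETURN (nowait)

   so only the master thread runs the body and no barrier is implied.  */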
4438 static void
4439 lower_omp_master (tree *stmt_p, omp_context *ctx)
4441 tree bind, block, stmt = *stmt_p, lab = NULL, x;
4443 push_gimplify_context ();
4445 block = make_node (BLOCK);
4446 *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
4447 TREE_SIDE_EFFECTS (bind) = 1;
4449 append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
4451 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4452 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
4453 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
4454 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
4456 lower_omp (&OMP_MASTER_BODY (stmt), ctx);
4457 maybe_catch_exception (&OMP_MASTER_BODY (stmt));
4458 append_to_statement_list (OMP_MASTER_BODY (stmt), &BIND_EXPR_BODY (bind));
4459 OMP_MASTER_BODY (stmt) = NULL;
4461 x = build1 (LABEL_EXPR, void_type_node, lab);
4462 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
4464 x = make_node (OMP_RETURN);
4465 OMP_RETURN_NOWAIT (x) = 1;
4466 append_to_statement_list (x, &BIND_EXPR_BODY (bind));
4468 pop_gimplify_context (bind);
4470 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
4471 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
4475 /* Expand code for an OpenMP ordered directive. */
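/* For illustration (schematic): the body is simply bracketed by the
   libgomp ordering primitives:

       GOMP_ordered_start ();
       <body>;
       GOMP_ordered_end ();
       OMP_RETURN (nowait)  */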
4477 static void
4478 lower_omp_ordered (tree *stmt_p, omp_context *ctx)
4480 tree bind, block, stmt = *stmt_p, x;
4482 push_gimplify_context ();
4484 block = make_node (BLOCK);
4485 *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
4486 TREE_SIDE_EFFECTS (bind) = 1;
4488 append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
4490 x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
4491 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
4493 lower_omp (&OMP_ORDERED_BODY (stmt), ctx);
4494 maybe_catch_exception (&OMP_ORDERED_BODY (stmt));
4495 append_to_statement_list (OMP_ORDERED_BODY (stmt), &BIND_EXPR_BODY (bind));
4496 OMP_ORDERED_BODY (stmt) = NULL;
4498 x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
4499 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
4501 x = make_node (OMP_RETURN);
4502 OMP_RETURN_NOWAIT (x) = 1;
4503 append_to_statement_list (x, &BIND_EXPR_BODY (bind));
4505 pop_gimplify_context (bind);
4507 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
4508 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
4512 /* Gimplify an OMP_CRITICAL statement. This is a relatively simple
4513 substitution of a couple of function calls. But the NAMED case
4514 requires that the languages coordinate on a symbol name. It is therefore
4515 best put here in common code. */
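/* For illustration (schematic): for

       #pragma omp critical (foo)
         body;

   the lowered form is roughly

       GOMP_critical_name_start (&.gomp_critical_user_foo);
       body;
       GOMP_critical_name_end (&.gomp_critical_user_foo);

   where .gomp_critical_user_foo is a public, common pointer variable,
   so every translation unit using the same name shares one lock.  The
   unnamed form uses GOMP_critical_start/GOMP_critical_end instead.  */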
4517 static GTY((param1_is (tree), param2_is (tree)))
4518 splay_tree critical_name_mutexes;
4520 static void
4521 lower_omp_critical (tree *stmt_p, omp_context *ctx)
4523 tree bind, block, stmt = *stmt_p;
4524 tree t, lock, unlock, name;
4526 name = OMP_CRITICAL_NAME (stmt);
4527 if (name)
4529 tree decl;
4530 splay_tree_node n;
4532 if (!critical_name_mutexes)
4533 critical_name_mutexes
4534 = splay_tree_new_ggc (splay_tree_compare_pointers);
4536 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
4537 if (n == NULL)
4539 char *new_str;
4541 decl = create_tmp_var_raw (ptr_type_node, NULL);
4543 new_str = ACONCAT ((".gomp_critical_user_",
4544 IDENTIFIER_POINTER (name), NULL));
4545 DECL_NAME (decl) = get_identifier (new_str);
4546 TREE_PUBLIC (decl) = 1;
4547 TREE_STATIC (decl) = 1;
4548 DECL_COMMON (decl) = 1;
4549 DECL_ARTIFICIAL (decl) = 1;
4550 DECL_IGNORED_P (decl) = 1;
4551 varpool_finalize_decl (decl);
4553 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
4554 (splay_tree_value) decl);
4556 else
4557 decl = (tree) n->value;
4559 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
4560 lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));
4562 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
4563 unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
4565 else
4567 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
4568 lock = build_call_expr (lock, 0);
4570 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
4571 unlock = build_call_expr (unlock, 0);
4574 push_gimplify_context ();
4576 block = make_node (BLOCK);
4577 *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
4578 TREE_SIDE_EFFECTS (bind) = 1;
4580 append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
4582 gimplify_and_add (lock, &BIND_EXPR_BODY (bind));
4584 lower_omp (&OMP_CRITICAL_BODY (stmt), ctx);
4585 maybe_catch_exception (&OMP_CRITICAL_BODY (stmt));
4586 append_to_statement_list (OMP_CRITICAL_BODY (stmt), &BIND_EXPR_BODY (bind));
4587 OMP_CRITICAL_BODY (stmt) = NULL;
4589 gimplify_and_add (unlock, &BIND_EXPR_BODY (bind));
4591 t = make_node (OMP_RETURN);
4592 OMP_RETURN_NOWAIT (t) = 1;
4593 append_to_statement_list (t, &BIND_EXPR_BODY (bind));
4595 pop_gimplify_context (bind);
4596 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
4597 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
4601 /* A subroutine of lower_omp_for. Generate code to emit the predicate
4602 for a lastprivate clause. Given a loop control predicate of (V
4603 cond N2), we gate the clause on (!(V cond N2)). The lowered form
4604 is appended to *DLIST, iterator initialization is appended to
4605 *BODY_P. */
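/* For illustration (schematic): for

       #pragma omp for lastprivate (x)
       for (i = 0; i < N; i++) ...

   this emits, roughly,

       i = <initial value>;        <-- into *BODY_P, so threads that get
                                       no iterations skip the copy-out
       ...
       if (i >= N)                 <-- i == N when the step is +/-1
         x = <private copy of x>;  <-- appended to *DLIST  */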
4607 static void
4608 lower_omp_for_lastprivate (struct omp_for_data *fd, tree *body_p,
4609 tree *dlist, struct omp_context *ctx)
4611 tree clauses, cond, stmts, vinit, t;
4612 enum tree_code cond_code;
4614 cond_code = fd->cond_code;
4615 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
4617 /* When possible, use a strict equality expression. This can let
4618 VRP-style optimizations deduce the value and remove a copy. */
4619 if (host_integerp (fd->step, 0))
4621 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->step);
4622 if (step == 1 || step == -1)
4623 cond_code = EQ_EXPR;
4626 cond = build2 (cond_code, boolean_type_node, fd->v, fd->n2);
4628 clauses = OMP_FOR_CLAUSES (fd->for_stmt);
4629 stmts = NULL;
4630 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
4631 if (stmts != NULL)
4633 append_to_statement_list (stmts, dlist);
4635 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
4636 vinit = fd->n1;
4637 if (cond_code == EQ_EXPR
4638 && host_integerp (fd->n2, 0)
4639 && ! integer_zerop (fd->n2))
4640 vinit = build_int_cst (TREE_TYPE (fd->v), 0);
4642 /* Initialize the iterator variable, so that threads that don't execute
4643 any iterations don't execute the lastprivate clauses by accident. */
4644 t = build_gimple_modify_stmt (fd->v, vinit);
4645 gimplify_and_add (t, body_p);
4650 /* Lower code for an OpenMP loop directive. */
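/* For illustration (schematic): the directive is rewritten into a
   BIND_EXPR whose body is roughly

       <pre-body and input clause setup>
       <temporaries for the lowered VAL1/VAL2/VAL3 header expressions>
       <lastprivate guard initialization>
       OMP_FOR (clauses, lowered header)
       <loop body>
       OMP_CONTINUE (V, V)
       <reduction code>  <destructor code (dlist)>
       OMP_RETURN [nowait]  */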
4652 static void
4653 lower_omp_for (tree *stmt_p, omp_context *ctx)
4655 tree t, stmt, ilist, dlist, new_stmt, *body_p, *rhs_p;
4656 struct omp_for_data fd;
4658 stmt = *stmt_p;
4660 push_gimplify_context ();
4662 lower_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
4663 lower_omp (&OMP_FOR_BODY (stmt), ctx);
4665 /* Move the declarations of temporaries out of the loop body before we
4666 make it go away. */
4667 if (TREE_CODE (OMP_FOR_BODY (stmt)) == BIND_EXPR)
4668 record_vars_into (BIND_EXPR_VARS (OMP_FOR_BODY (stmt)), ctx->cb.dst_fn);
4670 new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
4671 TREE_SIDE_EFFECTS (new_stmt) = 1;
4672 body_p = &BIND_EXPR_BODY (new_stmt);
4674 /* The pre-body and input clauses go before the lowered OMP_FOR. */
4675 ilist = NULL;
4676 dlist = NULL;
4677 append_to_statement_list (OMP_FOR_PRE_BODY (stmt), body_p);
4678 lower_rec_input_clauses (OMP_FOR_CLAUSES (stmt), body_p, &dlist, ctx);
4680 /* Lower the header expressions. At this point, we can assume that
4681 the header is of the form:
4683 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
4685 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
4686 using the .omp_data_s mapping, if needed. */
4687 rhs_p = &GIMPLE_STMT_OPERAND (OMP_FOR_INIT (stmt), 1);
4688 if (!is_gimple_min_invariant (*rhs_p))
4689 *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
4691 rhs_p = &TREE_OPERAND (OMP_FOR_COND (stmt), 1);
4692 if (!is_gimple_min_invariant (*rhs_p))
4693 *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
4695 rhs_p = &TREE_OPERAND (GIMPLE_STMT_OPERAND (OMP_FOR_INCR (stmt), 1), 1);
4696 if (!is_gimple_min_invariant (*rhs_p))
4697 *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
4699 /* Once lowered, extract the bounds and clauses. */
4700 extract_omp_for_data (stmt, &fd);
4702 lower_omp_for_lastprivate (&fd, body_p, &dlist, ctx);
4704 append_to_statement_list (stmt, body_p);
4706 append_to_statement_list (OMP_FOR_BODY (stmt), body_p);
4708 t = build2 (OMP_CONTINUE, void_type_node, fd.v, fd.v);
4709 append_to_statement_list (t, body_p);
4711 /* After the loop, add exit clauses. */
4712 lower_reduction_clauses (OMP_FOR_CLAUSES (stmt), body_p, ctx);
4713 append_to_statement_list (dlist, body_p);
4715 maybe_catch_exception (body_p);
4717 /* Region exit marker goes at the end of the loop body. */
4718 t = make_node (OMP_RETURN);
4719 OMP_RETURN_NOWAIT (t) = fd.have_nowait;
4720 append_to_statement_list (t, body_p);
4722 pop_gimplify_context (NULL_TREE);
4723 record_vars_into (ctx->block_vars, ctx->cb.dst_fn);
4725 OMP_FOR_BODY (stmt) = NULL_TREE;
4726 OMP_FOR_PRE_BODY (stmt) = NULL_TREE;
4727 *stmt_p = new_stmt;
4730 /* Callback for walk_stmts. Check if *TP only contains OMP_FOR
4731 or OMP_SECTIONS. */
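/* For illustration: a parallel whose body consists of exactly one
   worksharing construct and nothing else, e.g.

       #pragma omp parallel
         #pragma omp for
           ...

   is marked OMP_PARALLEL_COMBINED below, so that expansion can later
   use libgomp's combined parallel+workshare entry points.  */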
4733 static tree
4734 check_combined_parallel (tree *tp, int *walk_subtrees, void *data)
4736 struct walk_stmt_info *wi = data;
4737 int *info = wi->info;
4739 *walk_subtrees = 0;
4740 switch (TREE_CODE (*tp))
4742 case OMP_FOR:
4743 case OMP_SECTIONS:
4744 *info = *info == 0 ? 1 : -1;
4745 break;
4746 default:
4747 *info = -1;
4748 break;
4750 return NULL;
4753 /* Lower the OpenMP parallel directive in *STMT_P. CTX holds context
4754 information for the directive. */
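/* For illustration (schematic): the lowered form built below is roughly

       <send clause setup>                   <-- fills .omp_data_o
       OMP_PARALLEL (clauses, data arg = .omp_data_o)
         .omp_data_i = (cast) &.omp_data_o;  <-- receiver in the child fn
         <input clause setup>
         <body>
         <reduction code>
         OMP_RETURN
       <copy-back and destructor code>

   The sender/receiver record is created only when ctx->record_type is
   non-null, i.e. when some variables must be passed explicitly.  */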
4756 static void
4757 lower_omp_parallel (tree *stmt_p, omp_context *ctx)
4759 tree clauses, par_bind, par_body, new_body, bind;
4760 tree olist, ilist, par_olist, par_ilist;
4761 tree stmt, child_fn, t;
4763 stmt = *stmt_p;
4765 clauses = OMP_PARALLEL_CLAUSES (stmt);
4766 par_bind = OMP_PARALLEL_BODY (stmt);
4767 par_body = BIND_EXPR_BODY (par_bind);
4768 child_fn = ctx->cb.dst_fn;
4769 if (!OMP_PARALLEL_COMBINED (stmt))
4771 struct walk_stmt_info wi;
4772 int ws_num = 0;
4774 memset (&wi, 0, sizeof (wi));
4775 wi.callback = check_combined_parallel;
4776 wi.info = &ws_num;
4777 wi.val_only = true;
4778 walk_stmts (&wi, &par_bind);
4779 if (ws_num == 1)
4780 OMP_PARALLEL_COMBINED (stmt) = 1;
4783 push_gimplify_context ();
4785 par_olist = NULL_TREE;
4786 par_ilist = NULL_TREE;
4787 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
4788 lower_omp (&par_body, ctx);
4789 lower_reduction_clauses (clauses, &par_olist, ctx);
4791 /* Declare all the variables created by mapping and the variables
4792 declared in the scope of the parallel body. */
4793 record_vars_into (ctx->block_vars, child_fn);
4794 record_vars_into (BIND_EXPR_VARS (par_bind), child_fn);
4796 if (ctx->record_type)
4798 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_data_o");
4799 OMP_PARALLEL_DATA_ARG (stmt) = ctx->sender_decl;
4802 olist = NULL_TREE;
4803 ilist = NULL_TREE;
4804 lower_send_clauses (clauses, &ilist, &olist, ctx);
4805 lower_send_shared_vars (&ilist, &olist, ctx);
4807 /* Once all the expansions are done, sequence all the different
4808 fragments inside OMP_PARALLEL_BODY. */
4809 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
4810 append_to_statement_list (ilist, &BIND_EXPR_BODY (bind));
4812 new_body = alloc_stmt_list ();
4814 if (ctx->record_type)
4816 t = build_fold_addr_expr (ctx->sender_decl);
4817 /* fixup_child_record_type might have changed receiver_decl's type. */
4818 t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
4819 t = build_gimple_modify_stmt (ctx->receiver_decl, t);
4820 append_to_statement_list (t, &new_body);
4823 append_to_statement_list (par_ilist, &new_body);
4824 append_to_statement_list (par_body, &new_body);
4825 append_to_statement_list (par_olist, &new_body);
4826 maybe_catch_exception (&new_body);
4827 t = make_node (OMP_RETURN);
4828 append_to_statement_list (t, &new_body);
4829 OMP_PARALLEL_BODY (stmt) = new_body;
4831 append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
4832 append_to_statement_list (olist, &BIND_EXPR_BODY (bind));
4834 *stmt_p = bind;
4836 pop_gimplify_context (NULL_TREE);
4840 /* Pass *TP back through the gimplifier within the context determined by WI.
4841 This handles replacement of DECL_VALUE_EXPR, as well as adjusting the
4842 flags on ADDR_EXPR. */
4844 static void
4845 lower_regimplify (tree *tp, struct walk_stmt_info *wi)
4847 enum gimplify_status gs;
4848 tree pre = NULL;
4850 if (wi->is_lhs)
4851 gs = gimplify_expr (tp, &pre, NULL, is_gimple_lvalue, fb_lvalue);
4852 else if (wi->val_only)
4853 gs = gimplify_expr (tp, &pre, NULL, is_gimple_val, fb_rvalue);
4854 else
4855 gs = gimplify_expr (tp, &pre, NULL, is_gimple_formal_tmp_var, fb_rvalue);
4856 gcc_assert (gs == GS_ALL_DONE);
4858 if (pre)
4859 tsi_link_before (&wi->tsi, pre, TSI_SAME_STMT);
4862 /* Copy EXP into a temporary. Insert the initialization statement before TSI. */
4864 static tree
4865 init_tmp_var (tree exp, tree_stmt_iterator *tsi)
4867 tree t, stmt;
4869 t = create_tmp_var (TREE_TYPE (exp), NULL);
4870 DECL_GIMPLE_REG_P (t) = 1;
4871 stmt = build_gimple_modify_stmt (t, exp);
4872 SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi)));
4873 tsi_link_before (tsi, stmt, TSI_SAME_STMT);
4875 return t;
4878 /* Similarly, but copy from the temporary and insert the statement
4879 after the iterator. */
4881 static tree
4882 save_tmp_var (tree exp, tree_stmt_iterator *tsi)
4884 tree t, stmt;
4886 t = create_tmp_var (TREE_TYPE (exp), NULL);
4887 DECL_GIMPLE_REG_P (t) = 1;
4888 stmt = build_gimple_modify_stmt (exp, t);
4889 SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi)));
4890 tsi_link_after (tsi, stmt, TSI_SAME_STMT);
4892 return t;
4895 /* Callback for walk_stmts. Lower the OpenMP directive pointed to by TP. */
4897 static tree
4898 lower_omp_1 (tree *tp, int *walk_subtrees, void *data)
4900 struct walk_stmt_info *wi = data;
4901 omp_context *ctx = wi->info;
4902 tree t = *tp;
4904 /* If we have issued syntax errors, avoid doing any heavy lifting.
4905 Just replace the OpenMP directives with a NOP to avoid
4906 confusing RTL expansion. */
4907 if (errorcount && OMP_DIRECTIVE_P (*tp))
4909 *tp = build_empty_stmt ();
4910 return NULL_TREE;
4913 *walk_subtrees = 0;
4914 switch (TREE_CODE (*tp))
4916 case OMP_PARALLEL:
4917 ctx = maybe_lookup_ctx (t);
4918 lower_omp_parallel (tp, ctx);
4919 break;
4921 case OMP_FOR:
4922 ctx = maybe_lookup_ctx (t);
4923 gcc_assert (ctx);
4924 lower_omp_for (tp, ctx);
4925 break;
4927 case OMP_SECTIONS:
4928 ctx = maybe_lookup_ctx (t);
4929 gcc_assert (ctx);
4930 lower_omp_sections (tp, ctx);
4931 break;
4933 case OMP_SINGLE:
4934 ctx = maybe_lookup_ctx (t);
4935 gcc_assert (ctx);
4936 lower_omp_single (tp, ctx);
4937 break;
4939 case OMP_MASTER:
4940 ctx = maybe_lookup_ctx (t);
4941 gcc_assert (ctx);
4942 lower_omp_master (tp, ctx);
4943 break;
4945 case OMP_ORDERED:
4946 ctx = maybe_lookup_ctx (t);
4947 gcc_assert (ctx);
4948 lower_omp_ordered (tp, ctx);
4949 break;
4951 case OMP_CRITICAL:
4952 ctx = maybe_lookup_ctx (t);
4953 gcc_assert (ctx);
4954 lower_omp_critical (tp, ctx);
4955 break;
4957 case VAR_DECL:
4958 if (ctx && DECL_HAS_VALUE_EXPR_P (t))
4960 lower_regimplify (&t, wi);
4961 if (wi->val_only)
4963 if (wi->is_lhs)
4964 t = save_tmp_var (t, &wi->tsi);
4965 else
4966 t = init_tmp_var (t, &wi->tsi);
4968 *tp = t;
4970 break;
4972 case ADDR_EXPR:
4973 if (ctx)
4974 lower_regimplify (tp, wi);
4975 break;
4977 case ARRAY_REF:
4978 case ARRAY_RANGE_REF:
4979 case REALPART_EXPR:
4980 case IMAGPART_EXPR:
4981 case COMPONENT_REF:
4982 case VIEW_CONVERT_EXPR:
4983 if (ctx)
4984 lower_regimplify (tp, wi);
4985 break;
4987 case INDIRECT_REF:
4988 if (ctx)
4990 wi->is_lhs = false;
4991 wi->val_only = true;
4992 lower_regimplify (&TREE_OPERAND (t, 0), wi);
4994 break;
4996 default:
4997 if (!TYPE_P (t) && !DECL_P (t))
4998 *walk_subtrees = 1;
4999 break;
5002 return NULL_TREE;
5005 static void
5006 lower_omp (tree *stmt_p, omp_context *ctx)
5008 struct walk_stmt_info wi;
5010 memset (&wi, 0, sizeof (wi));
5011 wi.callback = lower_omp_1;
5012 wi.info = ctx;
5013 wi.val_only = true;
5014 wi.want_locations = true;
5016 walk_stmts (&wi, stmt_p);
5019 /* Main entry point. */
5021 static unsigned int
5022 execute_lower_omp (void)
5024 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
5025 delete_omp_context);
5027 scan_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
5028 gcc_assert (parallel_nesting_level == 0);
5030 if (all_contexts->root)
5031 lower_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
5033 if (all_contexts)
5035 splay_tree_delete (all_contexts);
5036 all_contexts = NULL;
5038 return 0;
5041 static bool
5042 gate_lower_omp (void)
5044 return flag_openmp != 0;
5047 struct tree_opt_pass pass_lower_omp =
5049 "omplower", /* name */
5050 gate_lower_omp, /* gate */
5051 execute_lower_omp, /* execute */
5052 NULL, /* sub */
5053 NULL, /* next */
5054 0, /* static_pass_number */
5055 0, /* tv_id */
5056 PROP_gimple_any, /* properties_required */
5057 PROP_gimple_lomp, /* properties_provided */
5058 0, /* properties_destroyed */
5059 0, /* todo_flags_start */
5060 TODO_dump_func, /* todo_flags_finish */
5061 0 /* letter */
5064 /* The following is a utility to diagnose OpenMP structured block violations.
5065 It is not part of the "omplower" pass, as that's invoked too late. It
5066 should be invoked by the respective front ends after gimplification. */
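/* For illustration: given

       #pragma omp parallel
         {
           goto bad;      <-- branches out of the structured block
         }
       bad:;

   pass 2 below reports "invalid exit from OpenMP structured block";
   a branch in the opposite direction is reported as an invalid entry.  */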
5068 static splay_tree all_labels;
5070 /* Check for mismatched contexts and generate an error if needed. Return
5071 true if an error is detected. */
5073 static bool
5074 diagnose_sb_0 (tree *stmt_p, tree branch_ctx, tree label_ctx)
5076 bool exit_p = true;
5078 if ((label_ctx ? TREE_VALUE (label_ctx) : NULL) == branch_ctx)
5079 return false;
5081 /* Try to avoid confusing the user by producing an error message
5082 with the correct "exit" or "enter" verbiage. We prefer "exit"
5083 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
5084 if (branch_ctx == NULL)
5085 exit_p = false;
5086 else
5088 while (label_ctx)
5090 if (TREE_VALUE (label_ctx) == branch_ctx)
5092 exit_p = false;
5093 break;
5095 label_ctx = TREE_CHAIN (label_ctx);
5099 if (exit_p)
5100 error ("invalid exit from OpenMP structured block");
5101 else
5102 error ("invalid entry to OpenMP structured block");
5104 *stmt_p = build_empty_stmt ();
5105 return true;
5108 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
5109 where in the tree each label is found. */
5111 static tree
5112 diagnose_sb_1 (tree *tp, int *walk_subtrees, void *data)
5114 struct walk_stmt_info *wi = data;
5115 tree context = (tree) wi->info;
5116 tree inner_context;
5117 tree t = *tp;
5119 *walk_subtrees = 0;
5120 switch (TREE_CODE (t))
5122 case OMP_PARALLEL:
5123 case OMP_SECTIONS:
5124 case OMP_SINGLE:
5125 walk_tree (&OMP_CLAUSES (t), diagnose_sb_1, wi, NULL);
5126 /* FALLTHRU */
5127 case OMP_SECTION:
5128 case OMP_MASTER:
5129 case OMP_ORDERED:
5130 case OMP_CRITICAL:
5131 /* The minimal context here is just a tree of statements. */
5132 inner_context = tree_cons (NULL, t, context);
5133 wi->info = inner_context;
5134 walk_stmts (wi, &OMP_BODY (t));
5135 wi->info = context;
5136 break;
5138 case OMP_FOR:
5139 walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_1, wi, NULL);
5140 inner_context = tree_cons (NULL, t, context);
5141 wi->info = inner_context;
5142 walk_tree (&OMP_FOR_INIT (t), diagnose_sb_1, wi, NULL);
5143 walk_tree (&OMP_FOR_COND (t), diagnose_sb_1, wi, NULL);
5144 walk_tree (&OMP_FOR_INCR (t), diagnose_sb_1, wi, NULL);
5145 walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
5146 walk_stmts (wi, &OMP_FOR_BODY (t));
5147 wi->info = context;
5148 break;
5150 case LABEL_EXPR:
5151 splay_tree_insert (all_labels, (splay_tree_key) LABEL_EXPR_LABEL (t),
5152 (splay_tree_value) context);
5153 break;
5155 default:
5156 break;
5159 return NULL_TREE;
5162 /* Pass 2: Check each branch and see if its context differs from that of
5163 the destination label's context. */
5165 static tree
5166 diagnose_sb_2 (tree *tp, int *walk_subtrees, void *data)
5168 struct walk_stmt_info *wi = data;
5169 tree context = (tree) wi->info;
5170 splay_tree_node n;
5171 tree t = *tp;
5173 *walk_subtrees = 0;
5174 switch (TREE_CODE (t))
5176 case OMP_PARALLEL:
5177 case OMP_SECTIONS:
5178 case OMP_SINGLE:
5179 walk_tree (&OMP_CLAUSES (t), diagnose_sb_2, wi, NULL);
5180 /* FALLTHRU */
5181 case OMP_SECTION:
5182 case OMP_MASTER:
5183 case OMP_ORDERED:
5184 case OMP_CRITICAL:
5185 wi->info = t;
5186 walk_stmts (wi, &OMP_BODY (t));
5187 wi->info = context;
5188 break;
5190 case OMP_FOR:
5191 walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_2, wi, NULL);
5192 wi->info = t;
5193 walk_tree (&OMP_FOR_INIT (t), diagnose_sb_2, wi, NULL);
5194 walk_tree (&OMP_FOR_COND (t), diagnose_sb_2, wi, NULL);
5195 walk_tree (&OMP_FOR_INCR (t), diagnose_sb_2, wi, NULL);
5196 walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
5197 walk_stmts (wi, &OMP_FOR_BODY (t));
5198 wi->info = context;
5199 break;
5201 case GOTO_EXPR:
5203 tree lab = GOTO_DESTINATION (t);
5204 if (TREE_CODE (lab) != LABEL_DECL)
5205 break;
5207 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
5208 diagnose_sb_0 (tp, context, n ? (tree) n->value : NULL_TREE);
5210 break;
5212 case SWITCH_EXPR:
5214 tree vec = SWITCH_LABELS (t);
5215 int i, len = TREE_VEC_LENGTH (vec);
5216 for (i = 0; i < len; ++i)
5218 tree lab = CASE_LABEL (TREE_VEC_ELT (vec, i));
5219 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
5220 if (diagnose_sb_0 (tp, context, (tree) n->value))
5221 break;
5224 break;
5226 case RETURN_EXPR:
5227 diagnose_sb_0 (tp, context, NULL_TREE);
5228 break;
5230 default:
5231 break;
5234 return NULL_TREE;
5237 void
5238 diagnose_omp_structured_block_errors (tree fndecl)
5240 tree save_current = current_function_decl;
5241 struct walk_stmt_info wi;
5243 current_function_decl = fndecl;
5245 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
5247 memset (&wi, 0, sizeof (wi));
5248 wi.callback = diagnose_sb_1;
5249 walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));
5251 memset (&wi, 0, sizeof (wi));
5252 wi.callback = diagnose_sb_2;
5253 wi.want_locations = true;
5254 wi.want_return_expr = true;
5255 walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));
5257 splay_tree_delete (all_labels);
5258 all_labels = NULL;
5260 current_function_decl = save_current;
5263 #include "gt-omp-low.h"