PR c++/33916
[official-gcc.git] / gcc / omp-low.c
1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
6 Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "tree-gimple.h"
31 #include "tree-inline.h"
32 #include "langhooks.h"
33 #include "diagnostic.h"
34 #include "tree-flow.h"
35 #include "timevar.h"
36 #include "flags.h"
37 #include "function.h"
38 #include "expr.h"
39 #include "toplev.h"
40 #include "tree-pass.h"
41 #include "ggc.h"
42 #include "except.h"
43 #include "splay-tree.h"
44 #include "optabs.h"
45 #include "cfgloop.h"
47 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
48 phases. The first phase scans the function looking for OMP statements
49 and then for variables that must be replaced to satisfy data sharing
50 clauses. The second phase expands code for the constructs, as well as
51 re-gimplifying things when variables have been replaced with complex
52 expressions.
54 Final code generation is done by pass_expand_omp. The flowgraph is
55 scanned for parallel regions which are then moved to a new
56 function, to be invoked by the thread library. */
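/* As an illustrative sketch (schematic only; the variable names, the
   temporaries and the child function name below are invented for the
   example), a directive such as

       #pragma omp parallel shared (a)
         a++;

   in a function foo is outlined and later expanded into roughly

       .omp_data_o.a = a;
       GOMP_parallel_start (foo.omp_fn.0, &.omp_data_o, 0);
       foo.omp_fn.0 (&.omp_data_o);
       GOMP_parallel_end ();
       a = .omp_data_o.a;

   where foo.omp_fn.0 receives the address of .omp_data_o through its
   .omp_data_i argument and accesses the shared data through it, and the
   0 passed for num_threads means "choose at run time".  */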
58 /* Context structure. Used to store information about each parallel
59 directive in the code. */
61 typedef struct omp_context
63 /* This field must be at the beginning, as we do "inheritance": Some
64 callback functions for tree-inline.c (e.g., omp_copy_decl)
65 receive a copy_body_data pointer that is up-casted to an
66 omp_context pointer. */
67 copy_body_data cb;
69 /* The tree of contexts corresponding to the encountered constructs. */
70 struct omp_context *outer;
71 tree stmt;
73 /* Map variables to fields in a structure that allows communication
74 between sending and receiving threads. */
75 splay_tree field_map;
76 tree record_type;
77 tree sender_decl;
78 tree receiver_decl;
80 /* A chain of variables to add to the top-level block surrounding the
81 construct. In the case of a parallel, this is in the child function. */
82 tree block_vars;
84 /* What to do with variables with implicitly determined sharing
85 attributes. */
86 enum omp_clause_default_kind default_kind;
88 /* Nesting depth of this context. Used to improve error messages regarding
89 invalid gotos. The outermost ctx is depth 1, with depth 0 being
90 reserved for the main body of the function. */
91 int depth;
93 /* True if this parallel directive is nested within another. */
94 bool is_nested;
95 } omp_context;
98 /* A structure describing the main elements of a parallel loop. */
100 struct omp_for_data
102 tree v, n1, n2, step, chunk_size, for_stmt;
103 enum tree_code cond_code;
104 tree pre;
105 bool have_nowait, have_ordered;
106 enum omp_clause_schedule_kind sched_kind;
110 static splay_tree all_contexts;
111 static int parallel_nesting_level;
112 struct omp_region *root_omp_region;
114 static void scan_omp (tree *, omp_context *);
115 static void lower_omp (tree *, omp_context *);
116 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
117 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
119 /* Find an OpenMP clause of type KIND within CLAUSES. */
121 tree
122 find_omp_clause (tree clauses, enum tree_code kind)
124 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
125 if (OMP_CLAUSE_CODE (clauses) == kind)
126 return clauses;
128 return NULL_TREE;
131 /* Return true if CTX is for an omp parallel. */
133 static inline bool
134 is_parallel_ctx (omp_context *ctx)
136 return TREE_CODE (ctx->stmt) == OMP_PARALLEL;
140 /* Return true if REGION is a combined parallel+workshare region. */
142 static inline bool
143 is_combined_parallel (struct omp_region *region)
145 return region->is_combined_parallel;
149 /* Extract the header elements of parallel loop FOR_STMT and store
150 them into *FD. */
152 static void
153 extract_omp_for_data (tree for_stmt, struct omp_for_data *fd)
155 tree t, var;
157 fd->for_stmt = for_stmt;
158 fd->pre = NULL;
160 t = OMP_FOR_INIT (for_stmt);
161 gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
162 fd->v = GIMPLE_STMT_OPERAND (t, 0);
163 gcc_assert (SSA_VAR_P (fd->v));
164 gcc_assert (TREE_CODE (TREE_TYPE (fd->v)) == INTEGER_TYPE);
165 var = TREE_CODE (fd->v) == SSA_NAME ? SSA_NAME_VAR (fd->v) : fd->v;
166 fd->n1 = GIMPLE_STMT_OPERAND (t, 1);
168 t = OMP_FOR_COND (for_stmt);
169 fd->cond_code = TREE_CODE (t);
170 gcc_assert (TREE_OPERAND (t, 0) == var);
171 fd->n2 = TREE_OPERAND (t, 1);
172 switch (fd->cond_code)
174 case LT_EXPR:
175 case GT_EXPR:
176 break;
177 case LE_EXPR:
178 fd->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
179 build_int_cst (TREE_TYPE (fd->n2), 1));
180 fd->cond_code = LT_EXPR;
181 break;
182 case GE_EXPR:
183 fd->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
184 build_int_cst (TREE_TYPE (fd->n2), 1));
185 fd->cond_code = GT_EXPR;
186 break;
187 default:
188 gcc_unreachable ();
191 t = OMP_FOR_INCR (fd->for_stmt);
192 gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
193 gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == var);
194 t = GIMPLE_STMT_OPERAND (t, 1);
195 gcc_assert (TREE_OPERAND (t, 0) == var);
196 switch (TREE_CODE (t))
198 case PLUS_EXPR:
199 fd->step = TREE_OPERAND (t, 1);
200 break;
201 case MINUS_EXPR:
202 fd->step = TREE_OPERAND (t, 1);
203 fd->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (fd->step), fd->step);
204 break;
205 default:
206 gcc_unreachable ();
209 fd->have_nowait = fd->have_ordered = false;
210 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
211 fd->chunk_size = NULL_TREE;
213 for (t = OMP_FOR_CLAUSES (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
214 switch (OMP_CLAUSE_CODE (t))
216 case OMP_CLAUSE_NOWAIT:
217 fd->have_nowait = true;
218 break;
219 case OMP_CLAUSE_ORDERED:
220 fd->have_ordered = true;
221 break;
222 case OMP_CLAUSE_SCHEDULE:
223 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
224 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
225 break;
226 default:
227 break;
230 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
231 gcc_assert (fd->chunk_size == NULL);
232 else if (fd->chunk_size == NULL)
234 /* We only need to compute a default chunk size for ordered
235 static loops and dynamic loops. */
236 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC || fd->have_ordered)
237 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
238 ? integer_zero_node : integer_one_node;
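/* For illustration (a made-up loop, not taken from real input): given

       #pragma omp for
       for (i = 0; i <= n; i++)
         ...

   this records fd->v = i, fd->n1 = 0 and fd->step = 1, and because the
   condition uses <= it rewrites fd->n2 to n + 1 with fd->cond_code set
   to LT_EXPR, so the rest of the pass only has to handle < and >.  */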
243 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
244 is the immediate dominator of PAR_ENTRY_BB, return true if there
245 are no data dependencies that would prevent expanding the parallel
246 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
248 When expanding a combined parallel+workshare region, the call to
249 the child function may need additional arguments in the case of
250 OMP_FOR regions. In some cases, these arguments are computed out
251 of variables passed in from the parent to the child via 'struct
252 .omp_data_s'. For instance:
254 #pragma omp parallel for schedule (guided, i * 4)
255 for (j ...)
257 Is lowered into:
259 # BLOCK 2 (PAR_ENTRY_BB)
260 .omp_data_o.i = i;
261 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
263 # BLOCK 3 (WS_ENTRY_BB)
264 .omp_data_i = &.omp_data_o;
265 D.1667 = .omp_data_i->i;
266 D.1598 = D.1667 * 4;
267 #pragma omp for schedule (guided, D.1598)
269 When we outline the parallel region, the call to the child function
270 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
271 that value is computed *after* the call site. So, in principle we
272 cannot do the transformation.
274 To see whether the code in WS_ENTRY_BB blocks the combined
275 parallel+workshare call, we collect all the variables used in the
276 OMP_FOR header and check whether they appear on the LHS of any
277 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
278 call.
280 FIXME. If we had the SSA form built at this point, we could merely
281 hoist the code in block 3 into block 2 and be done with it. But at
282 this point we don't have dataflow information and though we could
283 hack something up here, it is really not worth the aggravation. */
285 static bool
286 workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
288 struct omp_for_data fd;
289 tree par_stmt, ws_stmt;
291 par_stmt = last_stmt (par_entry_bb);
292 ws_stmt = last_stmt (ws_entry_bb);
294 if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
295 return true;
297 gcc_assert (TREE_CODE (ws_stmt) == OMP_FOR);
299 extract_omp_for_data (ws_stmt, &fd);
301 /* FIXME. We give up too easily here. If any of these arguments
302 are not constants, they will likely involve variables that have
303 been mapped into fields of .omp_data_s for sharing with the child
304 function. With appropriate data flow, it would be possible to
305 see through this. */
306 if (!is_gimple_min_invariant (fd.n1)
307 || !is_gimple_min_invariant (fd.n2)
308 || !is_gimple_min_invariant (fd.step)
309 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
310 return false;
312 return true;
316 /* Collect additional arguments needed to emit a combined
317 parallel+workshare call. WS_STMT is the workshare directive being
318 expanded. */
320 static tree
321 get_ws_args_for (tree ws_stmt)
323 tree t;
325 if (TREE_CODE (ws_stmt) == OMP_FOR)
327 struct omp_for_data fd;
328 tree ws_args;
330 extract_omp_for_data (ws_stmt, &fd);
332 ws_args = NULL_TREE;
333 if (fd.chunk_size)
335 t = fold_convert (long_integer_type_node, fd.chunk_size);
336 ws_args = tree_cons (NULL, t, ws_args);
339 t = fold_convert (long_integer_type_node, fd.step);
340 ws_args = tree_cons (NULL, t, ws_args);
342 t = fold_convert (long_integer_type_node, fd.n2);
343 ws_args = tree_cons (NULL, t, ws_args);
345 t = fold_convert (long_integer_type_node, fd.n1);
346 ws_args = tree_cons (NULL, t, ws_args);
348 return ws_args;
350 else if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
352 /* Number of sections is equal to the number of edges from the
353 OMP_SECTIONS_SWITCH statement, except for the one to the exit
354 of the sections region. */
355 basic_block bb = single_succ (bb_for_stmt (ws_stmt));
356 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
357 t = tree_cons (NULL, t, NULL);
358 return t;
361 gcc_unreachable ();
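/* Sketch of the list built above for a combined parallel loop (the
   numbers are hypothetical): for

       #pragma omp parallel for schedule (dynamic, 4)
       for (i = 1; i < n; i++)
         ...

   the returned TREE_LIST holds, front to back, n1, n2, step and the
   chunk size (here 1, n, 1, 4), each converted to long.  These become
   the trailing arguments of the GOMP_parallel_loop_*_start call emitted
   for the combined region.  */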
365 /* Discover whether REGION is a combined parallel+workshare region. */
367 static void
368 determine_parallel_type (struct omp_region *region)
370 basic_block par_entry_bb, par_exit_bb;
371 basic_block ws_entry_bb, ws_exit_bb;
373 if (region == NULL || region->inner == NULL
374 || region->exit == NULL || region->inner->exit == NULL
375 || region->inner->cont == NULL)
376 return;
378 /* We only support parallel+for and parallel+sections. */
379 if (region->type != OMP_PARALLEL
380 || (region->inner->type != OMP_FOR
381 && region->inner->type != OMP_SECTIONS))
382 return;
384 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
385 WS_EXIT_BB -> PAR_EXIT_BB. */
386 par_entry_bb = region->entry;
387 par_exit_bb = region->exit;
388 ws_entry_bb = region->inner->entry;
389 ws_exit_bb = region->inner->exit;
391 if (single_succ (par_entry_bb) == ws_entry_bb
392 && single_succ (ws_exit_bb) == par_exit_bb
393 && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
394 && (OMP_PARALLEL_COMBINED (last_stmt (par_entry_bb))
395 || (last_and_only_stmt (ws_entry_bb)
396 && last_and_only_stmt (par_exit_bb))))
398 tree ws_stmt = last_stmt (ws_entry_bb);
400 if (region->inner->type == OMP_FOR)
402 /* If this is a combined parallel loop, we need to determine
403 whether or not to use the combined library calls. There
404 are two cases where we do not apply the transformation:
405 static loops and any kind of ordered loop. In the first
406 case, we already open code the loop so there is no need
407 to do anything else. In the latter case, the combined
408 parallel loop call would still need extra synchronization
409 to implement ordered semantics, so there would not be any
410 gain in using the combined call. */
411 tree clauses = OMP_FOR_CLAUSES (ws_stmt);
412 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
413 if (c == NULL
414 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
415 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
417 region->is_combined_parallel = false;
418 region->inner->is_combined_parallel = false;
419 return;
423 region->is_combined_parallel = true;
424 region->inner->is_combined_parallel = true;
425 region->ws_args = get_ws_args_for (ws_stmt);
430 /* Return true if EXPR is variable sized. */
432 static inline bool
433 is_variable_sized (const_tree expr)
435 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
438 /* Return true if DECL is a reference type. */
440 static inline bool
441 is_reference (tree decl)
443 return lang_hooks.decls.omp_privatize_by_reference (decl);
446 /* Look up variables in the decl or field splay trees. The "maybe" forms
447 allow the variable not to have been entered; otherwise we assert that
448 the variable must have been entered. */
450 static inline tree
451 lookup_decl (tree var, omp_context *ctx)
453 tree *n;
454 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
455 return *n;
458 static inline tree
459 maybe_lookup_decl (tree var, omp_context *ctx)
461 tree *n;
462 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
463 return n ? *n : NULL_TREE;
466 static inline tree
467 lookup_field (tree var, omp_context *ctx)
469 splay_tree_node n;
470 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
471 return (tree) n->value;
474 static inline tree
475 maybe_lookup_field (tree var, omp_context *ctx)
477 splay_tree_node n;
478 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
479 return n ? (tree) n->value : NULL_TREE;
482 /* Return true if DECL should be copied by pointer. SHARED_P is true
483 if DECL is to be shared. */
485 static bool
486 use_pointer_for_field (const_tree decl, bool shared_p)
488 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
489 return true;
491 /* We can only use copy-in/copy-out semantics for shared variables
492 when we know the value is not accessible from an outer scope. */
493 if (shared_p)
495 /* ??? Trivially accessible from anywhere. But why would we even
496 be passing an address in this case? Should we simply assert
497 this to be false, or should we have a cleanup pass that removes
498 these from the list of mappings? */
499 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
500 return true;
502 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
503 without analyzing the expression whether or not its location
504 is accessible to anyone else. In the case of nested parallel
505 regions it certainly may be. */
506 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
507 return true;
509 /* Do not use copy-in/copy-out for variables that have their
510 address taken. */
511 if (TREE_ADDRESSABLE (decl))
512 return true;
515 return false;
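/* Illustrative examples (hypothetical declarations): a shared "int x"
   that is neither static, external nor addressable gets copy-in/copy-out
   semantics and is stored in the field by value, whereas "int a[100]"
   (an aggregate) or an "int y" whose address has been taken is passed
   by pointer, so the child operates on the original object.  */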
518 /* Create a new VAR_DECL and copy information from VAR to it. */
520 tree
521 copy_var_decl (tree var, tree name, tree type)
523 tree copy = build_decl (VAR_DECL, name, type);
525 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
526 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
527 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
528 DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
529 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
530 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
531 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
532 DECL_SOURCE_LOCATION (copy) = DECL_SOURCE_LOCATION (var);
533 TREE_USED (copy) = 1;
534 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
536 return copy;
539 /* Construct a new automatic decl similar to VAR. */
541 static tree
542 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
544 tree copy = copy_var_decl (var, name, type);
546 DECL_CONTEXT (copy) = current_function_decl;
547 TREE_CHAIN (copy) = ctx->block_vars;
548 ctx->block_vars = copy;
550 return copy;
553 static tree
554 omp_copy_decl_1 (tree var, omp_context *ctx)
556 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
559 /* Build tree nodes to access the field for VAR on the receiver side. */
561 static tree
562 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
564 tree x, field = lookup_field (var, ctx);
566 /* If the receiver record type was remapped in the child function,
567 remap the field into the new record type. */
568 x = maybe_lookup_field (field, ctx);
569 if (x != NULL)
570 field = x;
572 x = build_fold_indirect_ref (ctx->receiver_decl);
573 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
574 if (by_ref)
575 x = build_fold_indirect_ref (x);
577 return x;
580 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
581 of a parallel, this is a component reference; for workshare constructs
582 this is some variable. */
584 static tree
585 build_outer_var_ref (tree var, omp_context *ctx)
587 tree x;
589 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
590 x = var;
591 else if (is_variable_sized (var))
593 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
594 x = build_outer_var_ref (x, ctx);
595 x = build_fold_indirect_ref (x);
597 else if (is_parallel_ctx (ctx))
599 bool by_ref = use_pointer_for_field (var, false);
600 x = build_receiver_ref (var, by_ref, ctx);
602 else if (ctx->outer)
603 x = lookup_decl (var, ctx->outer);
604 else if (is_reference (var))
605 /* This can happen with orphaned constructs. If var is reference, it is
606 possible it is shared and as such valid. */
607 x = var;
608 else
609 gcc_unreachable ();
611 if (is_reference (var))
612 x = build_fold_indirect_ref (x);
614 return x;
617 /* Build tree nodes to access the field for VAR on the sender side. */
619 static tree
620 build_sender_ref (tree var, omp_context *ctx)
622 tree field = lookup_field (var, ctx);
623 return build3 (COMPONENT_REF, TREE_TYPE (field),
624 ctx->sender_decl, field, NULL);
627 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
629 static void
630 install_var_field (tree var, bool by_ref, omp_context *ctx)
632 tree field, type;
634 gcc_assert (!splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
636 type = TREE_TYPE (var);
637 if (by_ref)
638 type = build_pointer_type (type);
640 field = build_decl (FIELD_DECL, DECL_NAME (var), type);
642 /* Remember what variable this field was created for. This does have a
643 side effect of making dwarf2out ignore this member, so for helpful
644 debugging we clear it later in delete_omp_context. */
645 DECL_ABSTRACT_ORIGIN (field) = var;
647 insert_field_into_struct (ctx->record_type, field);
649 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
650 (splay_tree_value) field);
653 static tree
654 install_var_local (tree var, omp_context *ctx)
656 tree new_var = omp_copy_decl_1 (var, ctx);
657 insert_decl_map (&ctx->cb, var, new_var);
658 return new_var;
661 /* Adjust the replacement for DECL in CTX for the new context. This means
662 copying the DECL_VALUE_EXPR, and fixing up the type. */
664 static void
665 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
667 tree new_decl, size;
669 new_decl = lookup_decl (decl, ctx);
671 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
673 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
674 && DECL_HAS_VALUE_EXPR_P (decl))
676 tree ve = DECL_VALUE_EXPR (decl);
677 walk_tree (&ve, copy_body_r, &ctx->cb, NULL);
678 SET_DECL_VALUE_EXPR (new_decl, ve);
679 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
682 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
684 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
685 if (size == error_mark_node)
686 size = TYPE_SIZE (TREE_TYPE (new_decl));
687 DECL_SIZE (new_decl) = size;
689 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
690 if (size == error_mark_node)
691 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
692 DECL_SIZE_UNIT (new_decl) = size;
696 /* The callback for remap_decl. Search all containing contexts for a
697 mapping of the variable; this avoids having to duplicate the splay
698 tree ahead of time. We know a mapping doesn't already exist in the
699 given context. Create new mappings to implement default semantics. */
701 static tree
702 omp_copy_decl (tree var, copy_body_data *cb)
704 omp_context *ctx = (omp_context *) cb;
705 tree new_var;
707 if (TREE_CODE (var) == LABEL_DECL)
709 new_var = create_artificial_label ();
710 DECL_CONTEXT (new_var) = current_function_decl;
711 insert_decl_map (&ctx->cb, var, new_var);
712 return new_var;
715 while (!is_parallel_ctx (ctx))
717 ctx = ctx->outer;
718 if (ctx == NULL)
719 return var;
720 new_var = maybe_lookup_decl (var, ctx);
721 if (new_var)
722 return new_var;
725 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
726 return var;
728 return error_mark_node;
732 /* Return the parallel region associated with STMT. */
734 /* Debugging dumps for parallel regions. */
735 void dump_omp_region (FILE *, struct omp_region *, int);
736 void debug_omp_region (struct omp_region *);
737 void debug_all_omp_regions (void);
739 /* Dump the parallel region tree rooted at REGION. */
741 void
742 dump_omp_region (FILE *file, struct omp_region *region, int indent)
744 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
745 tree_code_name[region->type]);
747 if (region->inner)
748 dump_omp_region (file, region->inner, indent + 4);
750 if (region->cont)
752 fprintf (file, "%*sbb %d: OMP_CONTINUE\n", indent, "",
753 region->cont->index);
756 if (region->exit)
757 fprintf (file, "%*sbb %d: OMP_RETURN\n", indent, "",
758 region->exit->index);
759 else
760 fprintf (file, "%*s[no exit marker]\n", indent, "");
762 if (region->next)
763 dump_omp_region (file, region->next, indent);
766 void
767 debug_omp_region (struct omp_region *region)
769 dump_omp_region (stderr, region, 0);
772 void
773 debug_all_omp_regions (void)
775 dump_omp_region (stderr, root_omp_region, 0);
779 /* Create a new parallel region starting at STMT inside region PARENT. */
781 struct omp_region *
782 new_omp_region (basic_block bb, enum tree_code type, struct omp_region *parent)
784 struct omp_region *region = xcalloc (1, sizeof (*region));
786 region->outer = parent;
787 region->entry = bb;
788 region->type = type;
790 if (parent)
792 /* This is a nested region. Add it to the list of inner
793 regions in PARENT. */
794 region->next = parent->inner;
795 parent->inner = region;
797 else
799 /* This is a toplevel region. Add it to the list of toplevel
800 regions in ROOT_OMP_REGION. */
801 region->next = root_omp_region;
802 root_omp_region = region;
805 return region;
808 /* Release the memory associated with the region tree rooted at REGION. */
810 static void
811 free_omp_region_1 (struct omp_region *region)
813 struct omp_region *i, *n;
815 for (i = region->inner; i ; i = n)
817 n = i->next;
818 free_omp_region_1 (i);
821 free (region);
824 /* Release the memory for the entire omp region tree. */
826 void
827 free_omp_regions (void)
829 struct omp_region *r, *n;
830 for (r = root_omp_region; r ; r = n)
832 n = r->next;
833 free_omp_region_1 (r);
835 root_omp_region = NULL;
839 /* Create a new context, with OUTER_CTX being the surrounding context. */
841 static omp_context *
842 new_omp_context (tree stmt, omp_context *outer_ctx)
844 omp_context *ctx = XCNEW (omp_context);
846 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
847 (splay_tree_value) ctx);
848 ctx->stmt = stmt;
850 if (outer_ctx)
852 ctx->outer = outer_ctx;
853 ctx->cb = outer_ctx->cb;
854 ctx->cb.block = NULL;
855 ctx->depth = outer_ctx->depth + 1;
857 else
859 ctx->cb.src_fn = current_function_decl;
860 ctx->cb.dst_fn = current_function_decl;
861 ctx->cb.src_node = cgraph_node (current_function_decl);
862 ctx->cb.dst_node = ctx->cb.src_node;
863 ctx->cb.src_cfun = cfun;
864 ctx->cb.copy_decl = omp_copy_decl;
865 ctx->cb.eh_region = -1;
866 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
867 ctx->depth = 1;
870 ctx->cb.decl_map = pointer_map_create ();
872 return ctx;
875 /* Destroy an omp_context data structure. Called through the splay tree
876 value delete callback. */
878 static void
879 delete_omp_context (splay_tree_value value)
881 omp_context *ctx = (omp_context *) value;
883 pointer_map_destroy (ctx->cb.decl_map);
885 if (ctx->field_map)
886 splay_tree_delete (ctx->field_map);
888 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
889 it produces corrupt debug information. */
890 if (ctx->record_type)
892 tree t;
893 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
894 DECL_ABSTRACT_ORIGIN (t) = NULL;
897 XDELETE (ctx);
900 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
901 context. */
903 static void
904 fixup_child_record_type (omp_context *ctx)
906 tree f, type = ctx->record_type;
908 /* ??? It isn't sufficient to just call remap_type here, because
909 variably_modified_type_p doesn't work the way we expect for
910 record types. Testing each field for whether it needs remapping
911 and creating a new record by hand works, however. */
912 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
913 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
914 break;
915 if (f)
917 tree name, new_fields = NULL;
919 type = lang_hooks.types.make_type (RECORD_TYPE);
920 name = DECL_NAME (TYPE_NAME (ctx->record_type));
921 name = build_decl (TYPE_DECL, name, type);
922 TYPE_NAME (type) = name;
924 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
926 tree new_f = copy_node (f);
927 DECL_CONTEXT (new_f) = type;
928 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
929 TREE_CHAIN (new_f) = new_fields;
930 new_fields = new_f;
932 /* Arrange to be able to look up the receiver field
933 given the sender field. */
934 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
935 (splay_tree_value) new_f);
937 TYPE_FIELDS (type) = nreverse (new_fields);
938 layout_type (type);
941 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
944 /* Instantiate decls as necessary in CTX to satisfy the data sharing
945 specified by CLAUSES. */
947 static void
948 scan_sharing_clauses (tree clauses, omp_context *ctx)
950 tree c, decl;
951 bool scan_array_reductions = false;
953 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
955 bool by_ref;
957 switch (OMP_CLAUSE_CODE (c))
959 case OMP_CLAUSE_PRIVATE:
960 decl = OMP_CLAUSE_DECL (c);
961 if (!is_variable_sized (decl))
962 install_var_local (decl, ctx);
963 break;
965 case OMP_CLAUSE_SHARED:
966 gcc_assert (is_parallel_ctx (ctx));
967 decl = OMP_CLAUSE_DECL (c);
968 gcc_assert (!is_variable_sized (decl));
969 by_ref = use_pointer_for_field (decl, true);
970 /* Global variables don't need to be copied,
971 the receiver side will use them directly. */
972 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
973 break;
974 if (! TREE_READONLY (decl)
975 || TREE_ADDRESSABLE (decl)
976 || by_ref
977 || is_reference (decl))
979 install_var_field (decl, by_ref, ctx);
980 install_var_local (decl, ctx);
981 break;
983 /* We don't need to copy const scalar vars back. */
984 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
985 goto do_private;
987 case OMP_CLAUSE_LASTPRIVATE:
988 /* Let the corresponding firstprivate clause create
989 the variable. */
990 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
991 break;
992 /* FALLTHRU */
994 case OMP_CLAUSE_FIRSTPRIVATE:
995 case OMP_CLAUSE_REDUCTION:
996 decl = OMP_CLAUSE_DECL (c);
997 do_private:
998 if (is_variable_sized (decl))
999 break;
1000 else if (is_parallel_ctx (ctx)
1001 && ! is_global_var (maybe_lookup_decl_in_outer_ctx (decl,
1002 ctx)))
1004 by_ref = use_pointer_for_field (decl, false);
1005 install_var_field (decl, by_ref, ctx);
1007 install_var_local (decl, ctx);
1008 break;
1010 case OMP_CLAUSE_COPYPRIVATE:
1011 if (ctx->outer)
1012 scan_omp (&OMP_CLAUSE_DECL (c), ctx->outer);
1013 /* FALLTHRU */
1015 case OMP_CLAUSE_COPYIN:
1016 decl = OMP_CLAUSE_DECL (c);
1017 by_ref = use_pointer_for_field (decl, false);
1018 install_var_field (decl, by_ref, ctx);
1019 break;
1021 case OMP_CLAUSE_DEFAULT:
1022 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1023 break;
1025 case OMP_CLAUSE_IF:
1026 case OMP_CLAUSE_NUM_THREADS:
1027 case OMP_CLAUSE_SCHEDULE:
1028 if (ctx->outer)
1029 scan_omp (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1030 break;
1032 case OMP_CLAUSE_NOWAIT:
1033 case OMP_CLAUSE_ORDERED:
1034 break;
1036 default:
1037 gcc_unreachable ();
1041 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1043 switch (OMP_CLAUSE_CODE (c))
1045 case OMP_CLAUSE_LASTPRIVATE:
1046 /* Let the corresponding firstprivate clause create
1047 the variable. */
1048 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1049 break;
1050 /* FALLTHRU */
1052 case OMP_CLAUSE_PRIVATE:
1053 case OMP_CLAUSE_FIRSTPRIVATE:
1054 case OMP_CLAUSE_REDUCTION:
1055 decl = OMP_CLAUSE_DECL (c);
1056 if (is_variable_sized (decl))
1057 install_var_local (decl, ctx);
1058 fixup_remapped_decl (decl, ctx,
1059 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1060 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1061 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1062 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1063 scan_array_reductions = true;
1064 break;
1066 case OMP_CLAUSE_SHARED:
1067 decl = OMP_CLAUSE_DECL (c);
1068 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1069 fixup_remapped_decl (decl, ctx, false);
1070 break;
1072 case OMP_CLAUSE_COPYPRIVATE:
1073 case OMP_CLAUSE_COPYIN:
1074 case OMP_CLAUSE_DEFAULT:
1075 case OMP_CLAUSE_IF:
1076 case OMP_CLAUSE_NUM_THREADS:
1077 case OMP_CLAUSE_SCHEDULE:
1078 case OMP_CLAUSE_NOWAIT:
1079 case OMP_CLAUSE_ORDERED:
1080 break;
1082 default:
1083 gcc_unreachable ();
1087 if (scan_array_reductions)
1088 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1089 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1090 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1092 scan_omp (&OMP_CLAUSE_REDUCTION_INIT (c), ctx);
1093 scan_omp (&OMP_CLAUSE_REDUCTION_MERGE (c), ctx);
1097 /* Create a new name for the omp child function. Returns an identifier. */
1099 static GTY(()) unsigned int tmp_ompfn_id_num;
1101 static tree
1102 create_omp_child_function_name (void)
1104 tree name = DECL_ASSEMBLER_NAME (current_function_decl);
1105 size_t len = IDENTIFIER_LENGTH (name);
1106 char *tmp_name, *prefix;
1108 prefix = alloca (len + sizeof ("_omp_fn"));
1109 memcpy (prefix, IDENTIFIER_POINTER (name), len);
1110 strcpy (prefix + len, "_omp_fn");
1111 #ifndef NO_DOT_IN_LABEL
1112 prefix[len] = '.';
1113 #elif !defined NO_DOLLAR_IN_LABEL
1114 prefix[len] = '$';
1115 #endif
1116 ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
1117 return get_identifier (tmp_name);
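/* For example, the first child function outlined from a function bar
   ends up with a name like the "bar.omp_fn.0" used in the comments
   above (assuming the target allows '.' in labels; otherwise the
   separator falls back to '$' or '_'), with ASM_FORMAT_PRIVATE_NAME
   supplying the trailing sequence number.  */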
1120 /* Build a decl for the omp child function. It will not contain a body
1121 yet, just the bare decl. */
1123 static void
1124 create_omp_child_function (omp_context *ctx)
1126 tree decl, type, name, t;
1128 name = create_omp_child_function_name ();
1129 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1131 decl = build_decl (FUNCTION_DECL, name, type);
1132 decl = lang_hooks.decls.pushdecl (decl);
1134 ctx->cb.dst_fn = decl;
1136 TREE_STATIC (decl) = 1;
1137 TREE_USED (decl) = 1;
1138 DECL_ARTIFICIAL (decl) = 1;
1139 DECL_IGNORED_P (decl) = 0;
1140 TREE_PUBLIC (decl) = 0;
1141 DECL_UNINLINABLE (decl) = 1;
1142 DECL_EXTERNAL (decl) = 0;
1143 DECL_CONTEXT (decl) = NULL_TREE;
1144 DECL_INITIAL (decl) = make_node (BLOCK);
1146 t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
1147 DECL_ARTIFICIAL (t) = 1;
1148 DECL_IGNORED_P (t) = 1;
1149 DECL_RESULT (decl) = t;
1151 t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1152 DECL_ARTIFICIAL (t) = 1;
1153 DECL_ARG_TYPE (t) = ptr_type_node;
1154 DECL_CONTEXT (t) = current_function_decl;
1155 TREE_USED (t) = 1;
1156 DECL_ARGUMENTS (decl) = t;
1157 ctx->receiver_decl = t;
1159 /* Allocate memory for the function structure. The call to
1160 allocate_struct_function clobbers CFUN, so we need to restore
1161 it afterward. */
1162 push_struct_function (decl);
1163 DECL_SOURCE_LOCATION (decl) = EXPR_LOCATION (ctx->stmt);
1164 cfun->function_end_locus = EXPR_LOCATION (ctx->stmt);
1165 pop_cfun ();
1169 /* Scan an OpenMP parallel directive. */
1171 static void
1172 scan_omp_parallel (tree *stmt_p, omp_context *outer_ctx)
1174 omp_context *ctx;
1175 tree name;
1177 /* Ignore parallel directives with empty bodies, unless there
1178 are copyin clauses. */
1179 if (optimize > 0
1180 && empty_body_p (OMP_PARALLEL_BODY (*stmt_p))
1181 && find_omp_clause (OMP_CLAUSES (*stmt_p), OMP_CLAUSE_COPYIN) == NULL)
1183 *stmt_p = build_empty_stmt ();
1184 return;
1187 ctx = new_omp_context (*stmt_p, outer_ctx);
1188 if (parallel_nesting_level > 1)
1189 ctx->is_nested = true;
1190 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1191 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1192 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1193 name = create_tmp_var_name (".omp_data_s");
1194 name = build_decl (TYPE_DECL, name, ctx->record_type);
1195 TYPE_NAME (ctx->record_type) = name;
1196 create_omp_child_function (ctx);
1197 OMP_PARALLEL_FN (*stmt_p) = ctx->cb.dst_fn;
1199 scan_sharing_clauses (OMP_PARALLEL_CLAUSES (*stmt_p), ctx);
1200 scan_omp (&OMP_PARALLEL_BODY (*stmt_p), ctx);
1202 if (TYPE_FIELDS (ctx->record_type) == NULL)
1203 ctx->record_type = ctx->receiver_decl = NULL;
1204 else
1206 layout_type (ctx->record_type);
1207 fixup_child_record_type (ctx);
1212 /* Scan an OpenMP loop directive. */
1214 static void
1215 scan_omp_for (tree *stmt_p, omp_context *outer_ctx)
1217 omp_context *ctx;
1218 tree stmt;
1220 stmt = *stmt_p;
1221 ctx = new_omp_context (stmt, outer_ctx);
1223 scan_sharing_clauses (OMP_FOR_CLAUSES (stmt), ctx);
1225 scan_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
1226 scan_omp (&OMP_FOR_INIT (stmt), ctx);
1227 scan_omp (&OMP_FOR_COND (stmt), ctx);
1228 scan_omp (&OMP_FOR_INCR (stmt), ctx);
1229 scan_omp (&OMP_FOR_BODY (stmt), ctx);
1232 /* Scan an OpenMP sections directive. */
1234 static void
1235 scan_omp_sections (tree *stmt_p, omp_context *outer_ctx)
1237 tree stmt;
1238 omp_context *ctx;
1240 stmt = *stmt_p;
1241 ctx = new_omp_context (stmt, outer_ctx);
1242 scan_sharing_clauses (OMP_SECTIONS_CLAUSES (stmt), ctx);
1243 scan_omp (&OMP_SECTIONS_BODY (stmt), ctx);
1246 /* Scan an OpenMP single directive. */
1248 static void
1249 scan_omp_single (tree *stmt_p, omp_context *outer_ctx)
1251 tree stmt = *stmt_p;
1252 omp_context *ctx;
1253 tree name;
1255 ctx = new_omp_context (stmt, outer_ctx);
1256 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1257 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1258 name = create_tmp_var_name (".omp_copy_s");
1259 name = build_decl (TYPE_DECL, name, ctx->record_type);
1260 TYPE_NAME (ctx->record_type) = name;
1262 scan_sharing_clauses (OMP_SINGLE_CLAUSES (stmt), ctx);
1263 scan_omp (&OMP_SINGLE_BODY (stmt), ctx);
1265 if (TYPE_FIELDS (ctx->record_type) == NULL)
1266 ctx->record_type = NULL;
1267 else
1268 layout_type (ctx->record_type);
1272 /* Check OpenMP nesting restrictions. */
1273 static void
1274 check_omp_nesting_restrictions (tree t, omp_context *ctx)
1276 switch (TREE_CODE (t))
1278 case OMP_FOR:
1279 case OMP_SECTIONS:
1280 case OMP_SINGLE:
1281 for (; ctx != NULL; ctx = ctx->outer)
1282 switch (TREE_CODE (ctx->stmt))
1284 case OMP_FOR:
1285 case OMP_SECTIONS:
1286 case OMP_SINGLE:
1287 case OMP_ORDERED:
1288 case OMP_MASTER:
1289 warning (0, "work-sharing region may not be closely nested inside "
1290 "of work-sharing, critical, ordered or master region");
1291 return;
1292 case OMP_PARALLEL:
1293 return;
1294 default:
1295 break;
1297 break;
1298 case OMP_MASTER:
1299 for (; ctx != NULL; ctx = ctx->outer)
1300 switch (TREE_CODE (ctx->stmt))
1302 case OMP_FOR:
1303 case OMP_SECTIONS:
1304 case OMP_SINGLE:
1305 warning (0, "master region may not be closely nested inside "
1306 "of work-sharing region");
1307 return;
1308 case OMP_PARALLEL:
1309 return;
1310 default:
1311 break;
1313 break;
1314 case OMP_ORDERED:
1315 for (; ctx != NULL; ctx = ctx->outer)
1316 switch (TREE_CODE (ctx->stmt))
1318 case OMP_CRITICAL:
1319 warning (0, "ordered region may not be closely nested inside "
1320 "of critical region");
1321 return;
1322 case OMP_FOR:
1323 if (find_omp_clause (OMP_CLAUSES (ctx->stmt),
1324 OMP_CLAUSE_ORDERED) == NULL)
1325 warning (0, "ordered region must be closely nested inside "
1326 "a loop region with an ordered clause");
1327 return;
1328 case OMP_PARALLEL:
1329 return;
1330 default:
1331 break;
1333 break;
1334 case OMP_CRITICAL:
1335 for (; ctx != NULL; ctx = ctx->outer)
1336 if (TREE_CODE (ctx->stmt) == OMP_CRITICAL
1337 && OMP_CRITICAL_NAME (t) == OMP_CRITICAL_NAME (ctx->stmt))
1339 warning (0, "critical region may not be nested inside a critical "
1340 "region with the same name");
1341 return;
1343 break;
1344 default:
1345 break;
1350 /* Callback for walk_stmts used to scan for OpenMP directives at TP. */
1352 static tree
1353 scan_omp_1 (tree *tp, int *walk_subtrees, void *data)
1355 struct walk_stmt_info *wi = data;
1356 omp_context *ctx = wi->info;
1357 tree t = *tp;
1359 if (EXPR_HAS_LOCATION (t))
1360 input_location = EXPR_LOCATION (t);
1362 /* Check the OpenMP nesting restrictions. */
1363 if (OMP_DIRECTIVE_P (t) && ctx != NULL)
1364 check_omp_nesting_restrictions (t, ctx);
1366 *walk_subtrees = 0;
1367 switch (TREE_CODE (t))
1369 case OMP_PARALLEL:
1370 parallel_nesting_level++;
1371 scan_omp_parallel (tp, ctx);
1372 parallel_nesting_level--;
1373 break;
1375 case OMP_FOR:
1376 scan_omp_for (tp, ctx);
1377 break;
1379 case OMP_SECTIONS:
1380 scan_omp_sections (tp, ctx);
1381 break;
1383 case OMP_SINGLE:
1384 scan_omp_single (tp, ctx);
1385 break;
1387 case OMP_SECTION:
1388 case OMP_MASTER:
1389 case OMP_ORDERED:
1390 case OMP_CRITICAL:
1391 ctx = new_omp_context (*tp, ctx);
1392 scan_omp (&OMP_BODY (*tp), ctx);
1393 break;
1395 case BIND_EXPR:
1397 tree var;
1398 *walk_subtrees = 1;
1400 for (var = BIND_EXPR_VARS (t); var ; var = TREE_CHAIN (var))
1401 insert_decl_map (&ctx->cb, var, var);
1403 break;
1405 case VAR_DECL:
1406 case PARM_DECL:
1407 case LABEL_DECL:
1408 case RESULT_DECL:
1409 if (ctx)
1410 *tp = remap_decl (t, &ctx->cb);
1411 break;
1413 default:
1414 if (ctx && TYPE_P (t))
1415 *tp = remap_type (t, &ctx->cb);
1416 else if (!DECL_P (t))
1417 *walk_subtrees = 1;
1418 break;
1421 return NULL_TREE;
1425 /* Scan all the statements starting at STMT_P. CTX contains context
1426 information about the OpenMP directives and clauses found during
1427 the scan. */
1429 static void
1430 scan_omp (tree *stmt_p, omp_context *ctx)
1432 location_t saved_location;
1433 struct walk_stmt_info wi;
1435 memset (&wi, 0, sizeof (wi));
1436 wi.callback = scan_omp_1;
1437 wi.info = ctx;
1438 wi.want_bind_expr = (ctx != NULL);
1439 wi.want_locations = true;
1441 saved_location = input_location;
1442 walk_stmts (&wi, stmt_p);
1443 input_location = saved_location;
1446 /* Re-gimplification and code generation routines. */
1448 /* Build a call to GOMP_barrier. */
1450 static tree
1451 build_omp_barrier (void)
1453 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
1456 /* If a context was created for STMT when it was scanned, return it. */
1458 static omp_context *
1459 maybe_lookup_ctx (tree stmt)
1461 splay_tree_node n;
1462 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
1463 return n ? (omp_context *) n->value : NULL;
1467 /* Find the mapping for DECL in CTX or the immediately enclosing
1468 context that has a mapping for DECL.
1470 If CTX is a nested parallel directive, we may have to use the decl
1471 mappings created in CTX's parent context. Suppose that we have the
1472 following parallel nesting (variable UIDs shown for clarity):
1474 iD.1562 = 0;
1475 #omp parallel shared(iD.1562) -> outer parallel
1476 iD.1562 = iD.1562 + 1;
1478 #omp parallel shared (iD.1562) -> inner parallel
1479 iD.1562 = iD.1562 - 1;
1481 Each parallel structure will create a distinct .omp_data_s structure
1482 for copying iD.1562 in/out of the directive:
1484 outer parallel .omp_data_s.1.i -> iD.1562
1485 inner parallel .omp_data_s.2.i -> iD.1562
1487 A shared variable mapping will produce a copy-out operation before
1488 the parallel directive and a copy-in operation after it. So, in
1489 this case we would have:
1491 iD.1562 = 0;
1492 .omp_data_o.1.i = iD.1562;
1493 #omp parallel shared(iD.1562) -> outer parallel
1494 .omp_data_i.1 = &.omp_data_o.1
1495 .omp_data_i.1->i = .omp_data_i.1->i + 1;
1497 .omp_data_o.2.i = iD.1562; -> **
1498 #omp parallel shared(iD.1562) -> inner parallel
1499 .omp_data_i.2 = &.omp_data_o.2
1500 .omp_data_i.2->i = .omp_data_i.2->i - 1;
1503 ** This is a problem. The symbol iD.1562 cannot be referenced
1504 inside the body of the outer parallel region. But since we are
1505 emitting this copy operation while expanding the inner parallel
1506 directive, we need to access the CTX structure of the outer
1507 parallel directive to get the correct mapping:
1509 .omp_data_o.2.i = .omp_data_i.1->i
1511 Since there may be other workshare or parallel directives enclosing
1512 the parallel directive, it may be necessary to walk up the context
1513 parent chain. This is not a problem in general because nested
1514 parallelism happens only rarely. */
1516 static tree
1517 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
1519 tree t;
1520 omp_context *up;
1522 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
1523 t = maybe_lookup_decl (decl, up);
1525 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
1527 return t ? t : decl;
1531 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
1532 in outer contexts. */
1534 static tree
1535 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
1537 tree t = NULL;
1538 omp_context *up;
1540 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
1541 t = maybe_lookup_decl (decl, up);
1543 return t ? t : decl;
1547 /* Construct the initialization value for reduction CLAUSE. */
1549 tree
1550 omp_reduction_init (tree clause, tree type)
1552 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
1554 case PLUS_EXPR:
1555 case MINUS_EXPR:
1556 case BIT_IOR_EXPR:
1557 case BIT_XOR_EXPR:
1558 case TRUTH_OR_EXPR:
1559 case TRUTH_ORIF_EXPR:
1560 case TRUTH_XOR_EXPR:
1561 case NE_EXPR:
1562 return fold_convert (type, integer_zero_node);
1564 case MULT_EXPR:
1565 case TRUTH_AND_EXPR:
1566 case TRUTH_ANDIF_EXPR:
1567 case EQ_EXPR:
1568 return fold_convert (type, integer_one_node);
1570 case BIT_AND_EXPR:
1571 return fold_convert (type, integer_minus_one_node);
1573 case MAX_EXPR:
1574 if (SCALAR_FLOAT_TYPE_P (type))
1576 REAL_VALUE_TYPE max, min;
1577 if (HONOR_INFINITIES (TYPE_MODE (type)))
1579 real_inf (&max);
1580 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
1582 else
1583 real_maxval (&min, 1, TYPE_MODE (type));
1584 return build_real (type, min);
1586 else
1588 gcc_assert (INTEGRAL_TYPE_P (type));
1589 return TYPE_MIN_VALUE (type);
1592 case MIN_EXPR:
1593 if (SCALAR_FLOAT_TYPE_P (type))
1595 REAL_VALUE_TYPE max;
1596 if (HONOR_INFINITIES (TYPE_MODE (type)))
1597 real_inf (&max);
1598 else
1599 real_maxval (&max, 0, TYPE_MODE (type));
1600 return build_real (type, max);
1602 else
1604 gcc_assert (INTEGRAL_TYPE_P (type));
1605 return TYPE_MAX_VALUE (type);
1608 default:
1609 gcc_unreachable ();
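/* Concretely (clause spellings shown only for illustration):
   reduction (+:x), (-:x), (|:x), (^:x) and (||:x) start each private
   copy at 0; (*:x) and (&&:x) start at 1; (&:x) starts at ~0; and
   (max:x) / (min:x) start at the minimum / maximum value representable
   in x's type, or -inf / +inf when the floating-point type honors
   infinities.  */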
1613 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
1614 from the receiver (aka child) side and initializers for REFERENCE_TYPE
1615 private variables. Initialization statements go in ILIST, while calls
1616 to destructors go in DLIST. */
1618 static void
1619 lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
1620 omp_context *ctx)
1622 tree_stmt_iterator diter;
1623 tree c, dtor, copyin_seq, x, ptr;
1624 bool copyin_by_ref = false;
1625 bool lastprivate_firstprivate = false;
1626 int pass;
1628 *dlist = alloc_stmt_list ();
1629 diter = tsi_start (*dlist);
1630 copyin_seq = NULL;
1632 /* Do all the fixed sized types in the first pass, and the variable sized
1633 types in the second pass. This makes sure that the scalar arguments to
1634 the variable sized types are processed before we use them in the
1635 variable sized operations. */
1636 for (pass = 0; pass < 2; ++pass)
1638 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
1640 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
1641 tree var, new_var;
1642 bool by_ref;
1644 switch (c_kind)
1646 case OMP_CLAUSE_PRIVATE:
1647 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
1648 continue;
1649 break;
1650 case OMP_CLAUSE_SHARED:
1651 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
1653 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
1654 continue;
1656 case OMP_CLAUSE_FIRSTPRIVATE:
1657 case OMP_CLAUSE_COPYIN:
1658 case OMP_CLAUSE_REDUCTION:
1659 break;
1660 case OMP_CLAUSE_LASTPRIVATE:
1661 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1663 lastprivate_firstprivate = true;
1664 if (pass != 0)
1665 continue;
1667 break;
1668 default:
1669 continue;
1672 new_var = var = OMP_CLAUSE_DECL (c);
1673 if (c_kind != OMP_CLAUSE_COPYIN)
1674 new_var = lookup_decl (var, ctx);
1676 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
1678 if (pass != 0)
1679 continue;
1681 else if (is_variable_sized (var))
1683 /* For variable sized types, we need to allocate the
1684 actual storage here. Call alloca and store the
1685 result in the pointer decl that we created elsewhere. */
1686 if (pass == 0)
1687 continue;
1689 ptr = DECL_VALUE_EXPR (new_var);
1690 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
1691 ptr = TREE_OPERAND (ptr, 0);
1692 gcc_assert (DECL_P (ptr));
1694 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
1695 x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
1696 x = fold_convert (TREE_TYPE (ptr), x);
1697 x = build_gimple_modify_stmt (ptr, x);
1698 gimplify_and_add (x, ilist);
1700 else if (is_reference (var))
1702 /* For references that are being privatized for Fortran,
1703 allocate new backing storage for the new pointer
1704 variable. This allows us to avoid changing all the
1705 code that expects a pointer to something that expects
1706 a direct variable. Note that this doesn't apply to
1707 C++, since reference types are disallowed in data
1708 sharing clauses there, except for NRV optimized
1709 return values. */
1710 if (pass == 0)
1711 continue;
1713 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
1714 if (TREE_CONSTANT (x))
1716 const char *name = NULL;
1717 if (DECL_NAME (var))
1718 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
1720 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
1721 name);
1722 gimple_add_tmp_var (x);
1723 x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
1725 else
1727 x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
1728 x = fold_convert (TREE_TYPE (new_var), x);
1731 x = build_gimple_modify_stmt (new_var, x);
1732 gimplify_and_add (x, ilist);
1734 new_var = build_fold_indirect_ref (new_var);
1736 else if (c_kind == OMP_CLAUSE_REDUCTION
1737 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1739 if (pass == 0)
1740 continue;
1742 else if (pass != 0)
1743 continue;
1745 switch (OMP_CLAUSE_CODE (c))
1747 case OMP_CLAUSE_SHARED:
1748 /* Shared global vars are just accessed directly. */
1749 if (is_global_var (new_var))
1750 break;
1751 /* Set up the DECL_VALUE_EXPR for shared variables now. This
1752 needs to be delayed until after fixup_child_record_type so
1753 that we get the correct type during the dereference. */
1754 by_ref = use_pointer_for_field (var, true);
1755 x = build_receiver_ref (var, by_ref, ctx);
1756 SET_DECL_VALUE_EXPR (new_var, x);
1757 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
1759 /* ??? If VAR is not passed by reference, and the variable
1760 hasn't been initialized yet, then we'll get a warning for
1761 the store into the omp_data_s structure. Ideally, we'd be
1762 able to notice this and not store anything at all, but
1763 we're generating code too early. Suppress the warning. */
1764 if (!by_ref)
1765 TREE_NO_WARNING (var) = 1;
1766 break;
1768 case OMP_CLAUSE_LASTPRIVATE:
1769 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1770 break;
1771 /* FALLTHRU */
1773 case OMP_CLAUSE_PRIVATE:
1774 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var);
1775 if (x)
1776 gimplify_and_add (x, ilist);
1777 /* FALLTHRU */
1779 do_dtor:
1780 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
1781 if (x)
1783 dtor = x;
1784 gimplify_stmt (&dtor);
1785 tsi_link_before (&diter, dtor, TSI_SAME_STMT);
1787 break;
1789 case OMP_CLAUSE_FIRSTPRIVATE:
1790 x = build_outer_var_ref (var, ctx);
1791 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
1792 gimplify_and_add (x, ilist);
1793 goto do_dtor;
1794 break;
1796 case OMP_CLAUSE_COPYIN:
1797 by_ref = use_pointer_for_field (var, false);
1798 x = build_receiver_ref (var, by_ref, ctx);
1799 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
1800 append_to_statement_list (x, &copyin_seq);
1801 copyin_by_ref |= by_ref;
1802 break;
1804 case OMP_CLAUSE_REDUCTION:
1805 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1807 gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c), ilist);
1808 OMP_CLAUSE_REDUCTION_INIT (c) = NULL;
1810 else
1812 x = omp_reduction_init (c, TREE_TYPE (new_var));
1813 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
1814 x = build_gimple_modify_stmt (new_var, x);
1815 gimplify_and_add (x, ilist);
1817 break;
1819 default:
1820 gcc_unreachable ();
1825 /* The copyin sequence is not to be executed by the main thread, since
1826 that would result in self-copies. A self-copy may have no visible
1827 effect for scalars, but it certainly does for C++ operator=. */
1828 if (copyin_seq)
1830 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
1831 x = build2 (NE_EXPR, boolean_type_node, x,
1832 build_int_cst (TREE_TYPE (x), 0));
1833 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
1834 gimplify_and_add (x, ilist);
1837 /* If any copyin variable is passed by reference, we must ensure the
1838 master thread doesn't modify it before it is copied over in all
1839 threads. Similarly for variables in both firstprivate and
1840 lastprivate clauses we need to ensure the lastprivate copying
1841 happens after firstprivate copying in all threads. */
1842 if (copyin_by_ref || lastprivate_firstprivate)
1843 gimplify_and_add (build_omp_barrier (), ilist);
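/* A rough sketch of the receiver-side code this produces (names are
   invented for the example): for "firstprivate (x)" inside a parallel,
   the child function starts with

       x.1 = .omp_data_i->x;

   and for "copyin (t)" on a threadprivate t the copy is guarded so the
   master thread does not copy onto itself:

       if (omp_get_thread_num () != 0)
         t = .omp_data_i->t;

   followed, when required, by the GOMP_barrier call added just above.  */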
1847 /* Generate code to implement the LASTPRIVATE clauses. This is used for
1848 both parallel and workshare constructs. PREDICATE may be NULL if it's
1849 always true. */
1851 static void
1852 lower_lastprivate_clauses (tree clauses, tree predicate, tree *stmt_list,
1853 omp_context *ctx)
1855 tree sub_list, x, c;
1857 /* Early exit if there are no lastprivate clauses. */
1858 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
1859 if (clauses == NULL)
1861 /* If this was a workshare construct, see if it had been combined
1862 with its parallel. In that case, look for the clauses on the
1863 parallel statement itself. */
1864 if (is_parallel_ctx (ctx))
1865 return;
1867 ctx = ctx->outer;
1868 if (ctx == NULL || !is_parallel_ctx (ctx))
1869 return;
1871 clauses = find_omp_clause (OMP_PARALLEL_CLAUSES (ctx->stmt),
1872 OMP_CLAUSE_LASTPRIVATE);
1873 if (clauses == NULL)
1874 return;
1877 sub_list = alloc_stmt_list ();
1879 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
1881 tree var, new_var;
1883 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LASTPRIVATE)
1884 continue;
1886 var = OMP_CLAUSE_DECL (c);
1887 new_var = lookup_decl (var, ctx);
1889 x = build_outer_var_ref (var, ctx);
1890 if (is_reference (var))
1891 new_var = build_fold_indirect_ref (new_var);
1892 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
1893 append_to_statement_list (x, &sub_list);
1896 if (predicate)
1897 x = build3 (COND_EXPR, void_type_node, predicate, sub_list, NULL);
1898 else
1899 x = sub_list;
1901 gimplify_and_add (x, stmt_list);
1905 /* Generate code to implement the REDUCTION clauses. */
1907 static void
1908 lower_reduction_clauses (tree clauses, tree *stmt_list, omp_context *ctx)
1910 tree sub_list = NULL, x, c;
1911 int count = 0;
1913 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
1914 update in that case, otherwise use a lock. */
1915 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
1916 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
1918 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1920 /* Never use OMP_ATOMIC for array reductions. */
1921 count = -1;
1922 break;
1924 count++;
1927 if (count == 0)
1928 return;
1930 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
1932 tree var, ref, new_var;
1933 enum tree_code code;
1935 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
1936 continue;
1938 var = OMP_CLAUSE_DECL (c);
1939 new_var = lookup_decl (var, ctx);
1940 if (is_reference (var))
1941 new_var = build_fold_indirect_ref (new_var);
1942 ref = build_outer_var_ref (var, ctx);
1943 code = OMP_CLAUSE_REDUCTION_CODE (c);
1945 /* reduction(-:var) sums up the partial results, so it acts
1946 identically to reduction(+:var). */
1947 if (code == MINUS_EXPR)
1948 code = PLUS_EXPR;
1950 if (count == 1)
1952 tree addr = build_fold_addr_expr (ref);
1954 addr = save_expr (addr);
1955 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
1956 x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
1957 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
1958 gimplify_and_add (x, stmt_list);
1959 return;
1962 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1964 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
1966 if (is_reference (var))
1967 ref = build_fold_addr_expr (ref);
1968 SET_DECL_VALUE_EXPR (placeholder, ref);
1969 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
1970 gimplify_and_add (OMP_CLAUSE_REDUCTION_MERGE (c), &sub_list);
1971 OMP_CLAUSE_REDUCTION_MERGE (c) = NULL;
1972 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
1974 else
1976 x = build2 (code, TREE_TYPE (ref), ref, new_var);
1977 ref = build_outer_var_ref (var, ctx);
1978 x = build_gimple_modify_stmt (ref, x);
1979 append_to_statement_list (x, &sub_list);
1983 x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
1984 gimplify_and_add (x, stmt_list);
1986 gimplify_and_add (sub_list, stmt_list);
1988 x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
1989 gimplify_and_add (x, stmt_list);
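/* Schematically (variable names invented): a single "reduction (+:s)"
   clause is committed back with an atomic update, roughly

       #pragma omp atomic
       s_outer += s_private;

   whereas two or more reduction clauses, or an array reduction with a
   placeholder, are instead bracketed by GOMP_atomic_start () and
   GOMP_atomic_end () with ordinary assignments in between.  */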
1993 /* Generate code to implement the COPYPRIVATE clauses. */
1995 static void
1996 lower_copyprivate_clauses (tree clauses, tree *slist, tree *rlist,
1997 omp_context *ctx)
1999 tree c;
2001 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2003 tree var, ref, x;
2004 bool by_ref;
2006 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2007 continue;
2009 var = OMP_CLAUSE_DECL (c);
2010 by_ref = use_pointer_for_field (var, false);
2012 ref = build_sender_ref (var, ctx);
2013 x = lookup_decl_in_outer_ctx (var, ctx);
2014 x = by_ref ? build_fold_addr_expr (x) : x;
2015 x = build_gimple_modify_stmt (ref, x);
2016 gimplify_and_add (x, slist);
2018 ref = build_receiver_ref (var, by_ref, ctx);
2019 if (is_reference (var))
2021 ref = build_fold_indirect_ref (ref);
2022 var = build_fold_indirect_ref (var);
2024 x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
2025 gimplify_and_add (x, rlist);
2030 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2031 and REDUCTION from the sender (aka parent) side. */
2033 static void
2034 lower_send_clauses (tree clauses, tree *ilist, tree *olist, omp_context *ctx)
2036 tree c;
2038 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2040 tree val, ref, x, var;
2041 bool by_ref, do_in = false, do_out = false;
2043 switch (OMP_CLAUSE_CODE (c))
2045 case OMP_CLAUSE_FIRSTPRIVATE:
2046 case OMP_CLAUSE_COPYIN:
2047 case OMP_CLAUSE_LASTPRIVATE:
2048 case OMP_CLAUSE_REDUCTION:
2049 break;
2050 default:
2051 continue;
2054 val = OMP_CLAUSE_DECL (c);
2055 var = lookup_decl_in_outer_ctx (val, ctx);
2057 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2058 && is_global_var (var))
2059 continue;
2060 if (is_variable_sized (val))
2061 continue;
2062 by_ref = use_pointer_for_field (val, false);
2064 switch (OMP_CLAUSE_CODE (c))
2066 case OMP_CLAUSE_FIRSTPRIVATE:
2067 case OMP_CLAUSE_COPYIN:
2068 do_in = true;
2069 break;
2071 case OMP_CLAUSE_LASTPRIVATE:
2072 if (by_ref || is_reference (val))
2074 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2075 continue;
2076 do_in = true;
2078 else
2079 do_out = true;
2080 break;
2082 case OMP_CLAUSE_REDUCTION:
2083 do_in = true;
2084 do_out = !(by_ref || is_reference (val));
2085 break;
2087 default:
2088 gcc_unreachable ();
2091 if (do_in)
2093 ref = build_sender_ref (val, ctx);
2094 x = by_ref ? build_fold_addr_expr (var) : var;
2095 x = build_gimple_modify_stmt (ref, x);
2096 gimplify_and_add (x, ilist);
2099 if (do_out)
2101 ref = build_sender_ref (val, ctx);
2102 x = build_gimple_modify_stmt (var, ref);
2103 gimplify_and_add (x, olist);
2108 /* Generate code to implement SHARED from the sender (aka parent) side.
2109 This is trickier, since OMP_PARALLEL_CLAUSES doesn't list things that
2110 got automatically shared. */
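/* Illustrative sketch (decl names are made up): for a shared variable
   X that had to be mapped, the sender side either passes its address,

	.omp_data_o.x = &x;

   or, when X can be copied by value, copies it in before the region
   and back out afterwards:

	.omp_data_o.x = x;	...	x = .omp_data_o.x;  */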
2112 static void
2113 lower_send_shared_vars (tree *ilist, tree *olist, omp_context *ctx)
2115 tree var, ovar, nvar, f, x;
2117 if (ctx->record_type == NULL)
2118 return;
2120 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
2122 ovar = DECL_ABSTRACT_ORIGIN (f);
2123 nvar = maybe_lookup_decl (ovar, ctx);
2124 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2125 continue;
2127 /* If CTX is a nested parallel directive, find the immediately
2128 enclosing parallel or workshare construct that contains a
2129 mapping for OVAR. */
2130 var = lookup_decl_in_outer_ctx (ovar, ctx);
2132 if (use_pointer_for_field (ovar, true))
2134 x = build_sender_ref (ovar, ctx);
2135 var = build_fold_addr_expr (var);
2136 x = build_gimple_modify_stmt (x, var);
2137 gimplify_and_add (x, ilist);
2139 else
2141 x = build_sender_ref (ovar, ctx);
2142 x = build_gimple_modify_stmt (x, var);
2143 gimplify_and_add (x, ilist);
2145 x = build_sender_ref (ovar, ctx);
2146 x = build_gimple_modify_stmt (var, x);
2147 gimplify_and_add (x, olist);
2152 /* Build the function calls to GOMP_parallel_start etc. to actually
2153 generate the parallel operation. REGION is the parallel region
2154 being expanded. BB is the block where the code is to be inserted.
2155 WS_ARGS will be set if this is a call to a combined parallel+workshare
2156 construct; it contains the list of additional arguments needed by
2157 the workshare construct. */
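/* Illustrative sketch of the emitted sequence (function and decl names
   are made up; the child function and data record are created
   elsewhere): for a plain parallel region this amounts to

	GOMP_parallel_start (__omp_fn.0, &.omp_data_o, nthreads);
	__omp_fn.0 (&.omp_data_o);
	GOMP_parallel_end ();

   For a combined parallel+workshare region, GOMP_parallel_start is
   replaced by the matching GOMP_parallel_loop_*_start or
   GOMP_parallel_sections_start variant, with WS_ARGS appended.  */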
2159 static void
2160 expand_parallel_call (struct omp_region *region, basic_block bb,
2161 tree entry_stmt, tree ws_args)
2163 tree t, t1, t2, val, cond, c, clauses;
2164 block_stmt_iterator si;
2165 int start_ix;
2167 clauses = OMP_PARALLEL_CLAUSES (entry_stmt);
2169 /* Determine what flavor of GOMP_parallel_start we will be
2170 emitting. */
2171 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2172 if (is_combined_parallel (region))
2174 switch (region->inner->type)
2176 case OMP_FOR:
2177 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2178 + region->inner->sched_kind;
2179 break;
2180 case OMP_SECTIONS:
2181 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2182 break;
2183 default:
2184 gcc_unreachable ();
2188 /* By default, the value of NUM_THREADS is zero (selected at run time)
2189 and there is no conditional. */
2190 cond = NULL_TREE;
2191 val = build_int_cst (unsigned_type_node, 0);
2193 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2194 if (c)
2195 cond = OMP_CLAUSE_IF_EXPR (c);
2197 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2198 if (c)
2199 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2201 /* Ensure 'val' is of the correct type. */
2202 val = fold_convert (unsigned_type_node, val);
2204 /* If we found the clause 'if (cond)', build either
2205 (cond != 0) or (cond ? val : 1u). */
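/* For example (illustrative only), with

	#pragma omp parallel if (n > 100) num_threads (4)

   COND is "n > 100" and VAL is 4, and the code below computes roughly
   "n > 100 ? 4 : 1" as the value passed to the runtime, so the region
   runs with a single thread when the condition is false.  */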
2206 if (cond)
2208 block_stmt_iterator si;
2210 cond = gimple_boolify (cond);
2212 if (integer_zerop (val))
2213 val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
2214 build_int_cst (TREE_TYPE (cond), 0));
2215 else
2217 basic_block cond_bb, then_bb, else_bb;
2218 edge e, e_then, e_else;
2219 tree t, tmp_then, tmp_else, tmp_join, tmp_var;
2221 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2222 if (gimple_in_ssa_p (cfun))
2224 tmp_then = make_ssa_name (tmp_var, NULL_TREE);
2225 tmp_else = make_ssa_name (tmp_var, NULL_TREE);
2226 tmp_join = make_ssa_name (tmp_var, NULL_TREE);
2228 else
2230 tmp_then = tmp_var;
2231 tmp_else = tmp_var;
2232 tmp_join = tmp_var;
2235 e = split_block (bb, NULL);
2236 cond_bb = e->src;
2237 bb = e->dest;
2238 remove_edge (e);
2240 then_bb = create_empty_bb (cond_bb);
2241 else_bb = create_empty_bb (then_bb);
2242 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2243 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
2245 t = build3 (COND_EXPR, void_type_node,
2246 cond, NULL_TREE, NULL_TREE);
2248 si = bsi_start (cond_bb);
2249 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2251 si = bsi_start (then_bb);
2252 t = build_gimple_modify_stmt (tmp_then, val);
2253 if (gimple_in_ssa_p (cfun))
2254 SSA_NAME_DEF_STMT (tmp_then) = t;
2255 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2257 si = bsi_start (else_bb);
2258 t = build_gimple_modify_stmt (tmp_else,
2259 build_int_cst (unsigned_type_node, 1));
2260 if (gimple_in_ssa_p (cfun))
2261 SSA_NAME_DEF_STMT (tmp_else) = t;
2262 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2264 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
2265 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
2266 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
2267 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
2269 if (gimple_in_ssa_p (cfun))
2271 tree phi = create_phi_node (tmp_join, bb);
2272 SSA_NAME_DEF_STMT (tmp_join) = phi;
2273 add_phi_arg (phi, tmp_then, e_then);
2274 add_phi_arg (phi, tmp_else, e_else);
2277 val = tmp_join;
2280 si = bsi_start (bb);
2281 val = force_gimple_operand_bsi (&si, val, true, NULL_TREE,
2282 false, BSI_CONTINUE_LINKING);
2285 si = bsi_last (bb);
2286 t = OMP_PARALLEL_DATA_ARG (entry_stmt);
2287 if (t == NULL)
2288 t1 = null_pointer_node;
2289 else
2290 t1 = build_fold_addr_expr (t);
2291 t2 = build_fold_addr_expr (OMP_PARALLEL_FN (entry_stmt));
2293 if (ws_args)
2295 tree args = tree_cons (NULL, t2,
2296 tree_cons (NULL, t1,
2297 tree_cons (NULL, val, ws_args)));
2298 t = build_function_call_expr (built_in_decls[start_ix], args);
2300 else
2301 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
2303 force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2304 false, BSI_CONTINUE_LINKING);
2306 t = OMP_PARALLEL_DATA_ARG (entry_stmt);
2307 if (t == NULL)
2308 t = null_pointer_node;
2309 else
2310 t = build_fold_addr_expr (t);
2311 t = build_call_expr (OMP_PARALLEL_FN (entry_stmt), 1, t);
2312 force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2313 false, BSI_CONTINUE_LINKING);
2315 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
2316 force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2317 false, BSI_CONTINUE_LINKING);
2321 /* If exceptions are enabled, wrap *STMT_P in a MUST_NOT_THROW catch
2322 handler. This prevents programs from violating the structured
2323 block semantics with throws. */
2325 static void
2326 maybe_catch_exception (tree *stmt_p)
2328 tree f, t;
2330 if (!flag_exceptions)
2331 return;
2333 if (lang_protect_cleanup_actions)
2334 t = lang_protect_cleanup_actions ();
2335 else
2336 t = build_call_expr (built_in_decls[BUILT_IN_TRAP], 0);
2337 f = build2 (EH_FILTER_EXPR, void_type_node, NULL, NULL);
2338 EH_FILTER_MUST_NOT_THROW (f) = 1;
2339 gimplify_and_add (t, &EH_FILTER_FAILURE (f));
2341 t = build2 (TRY_CATCH_EXPR, void_type_node, *stmt_p, NULL);
2342 append_to_statement_list (f, &TREE_OPERAND (t, 1));
2344 *stmt_p = NULL;
2345 append_to_statement_list (t, stmt_p);
2348 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
2350 static tree
2351 list2chain (tree list)
2353 tree t;
2355 for (t = list; t; t = TREE_CHAIN (t))
2357 tree var = TREE_VALUE (t);
2358 if (TREE_CHAIN (t))
2359 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
2360 else
2361 TREE_CHAIN (var) = NULL_TREE;
2364 return list ? TREE_VALUE (list) : NULL_TREE;
2368 /* Remove barriers in REGION->EXIT's block. Note that this is only
2369 valid for OMP_PARALLEL regions. Since the end of a parallel region
2370 is an implicit barrier, any barrier that a workshare inside the
2371 OMP_PARALLEL left at the end of the OMP_PARALLEL region can now be
2372 removed. */
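/* For example (illustrative), in

	#pragma omp parallel
	  {
	    #pragma omp for
	    for (i = 0; i < n; i++) ...;
	  }

   the implicit barrier at the end of the for-workshare is immediately
   followed by the parallel region's own closing barrier, so the inner
   one is redundant and its OMP_RETURN can be marked nowait.  */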
2374 static void
2375 remove_exit_barrier (struct omp_region *region)
2377 block_stmt_iterator si;
2378 basic_block exit_bb;
2379 edge_iterator ei;
2380 edge e;
2381 tree t;
2383 exit_bb = region->exit;
2385 /* If the parallel region doesn't return, we don't have REGION->EXIT
2386 block at all. */
2387 if (! exit_bb)
2388 return;
2390 /* The last insn in the block will be the parallel's OMP_RETURN. The
2391 workshare's OMP_RETURN will be in a preceding block. The kinds of
2392 statements that can appear in between are extremely limited -- no
2393 memory operations at all. Here, we allow nothing at all, so the
2394 only thing we allow to precede this OMP_RETURN is a label. */
2395 si = bsi_last (exit_bb);
2396 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
2397 bsi_prev (&si);
2398 if (!bsi_end_p (si) && TREE_CODE (bsi_stmt (si)) != LABEL_EXPR)
2399 return;
2401 FOR_EACH_EDGE (e, ei, exit_bb->preds)
2403 si = bsi_last (e->src);
2404 if (bsi_end_p (si))
2405 continue;
2406 t = bsi_stmt (si);
2407 if (TREE_CODE (t) == OMP_RETURN)
2408 OMP_RETURN_NOWAIT (t) = 1;
2412 static void
2413 remove_exit_barriers (struct omp_region *region)
2415 if (region->type == OMP_PARALLEL)
2416 remove_exit_barrier (region);
2418 if (region->inner)
2420 region = region->inner;
2421 remove_exit_barriers (region);
2422 while (region->next)
2424 region = region->next;
2425 remove_exit_barriers (region);
2430 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
2431 calls. These can't be declared as const functions, but
2432 within one parallel body they are constant, so they can be
2433 transformed there into __builtin_omp_get_{thread_num,num_threads} (),
2434 which are declared const. */
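/* Illustrative effect: inside an outlined parallel body, a call such as

	tid = omp_get_thread_num ();

   is redirected to the const builtin __builtin_omp_get_thread_num (),
   so later passes may CSE repeated calls within that body.  */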
2436 static void
2437 optimize_omp_library_calls (void)
2439 basic_block bb;
2440 block_stmt_iterator bsi;
2441 tree thr_num_id
2442 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
2443 tree num_thr_id
2444 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
2446 FOR_EACH_BB (bb)
2447 for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
2449 tree stmt = bsi_stmt (bsi);
2450 tree call = get_call_expr_in (stmt);
2451 tree decl;
2453 if (call
2454 && (decl = get_callee_fndecl (call))
2455 && DECL_EXTERNAL (decl)
2456 && TREE_PUBLIC (decl)
2457 && DECL_INITIAL (decl) == NULL)
2459 tree built_in;
2461 if (DECL_NAME (decl) == thr_num_id)
2462 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
2463 else if (DECL_NAME (decl) == num_thr_id)
2464 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
2465 else
2466 continue;
2468 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
2469 || call_expr_nargs (call) != 0)
2470 continue;
2472 if (flag_exceptions && !TREE_NOTHROW (decl))
2473 continue;
2475 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
2476 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
2477 != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
2478 continue;
2480 CALL_EXPR_FN (call) = build_fold_addr_expr (built_in);
2485 /* Expand the OpenMP parallel directive starting at REGION. */
2487 static void
2488 expand_omp_parallel (struct omp_region *region)
2490 basic_block entry_bb, exit_bb, new_bb;
2491 struct function *child_cfun;
2492 tree child_fn, block, t, ws_args;
2493 block_stmt_iterator si;
2494 tree entry_stmt;
2495 edge e;
2497 entry_stmt = last_stmt (region->entry);
2498 child_fn = OMP_PARALLEL_FN (entry_stmt);
2499 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
2500 /* If this function has already been instrumented, make sure
2501 the child function isn't instrumented again. */
2502 child_cfun->after_tree_profile = cfun->after_tree_profile;
2504 entry_bb = region->entry;
2505 exit_bb = region->exit;
2507 if (is_combined_parallel (region))
2508 ws_args = region->ws_args;
2509 else
2510 ws_args = NULL_TREE;
2512 if (child_cfun->cfg)
2514 /* Due to inlining, it may happen that we have already outlined
2515 the region, in which case all we need to do is make the
2516 sub-graph unreachable and emit the parallel call. */
2517 edge entry_succ_e, exit_succ_e;
2518 block_stmt_iterator si;
2520 entry_succ_e = single_succ_edge (entry_bb);
2522 si = bsi_last (entry_bb);
2523 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_PARALLEL);
2524 bsi_remove (&si, true);
2526 new_bb = entry_bb;
2527 if (exit_bb)
2529 exit_succ_e = single_succ_edge (exit_bb);
2530 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
2532 remove_edge_and_dominated_blocks (entry_succ_e);
2534 else
2536 /* If the parallel region needs data sent from the parent
2537 function, then the very first statement (except possible
2538 tree profile counter updates) of the parallel body
2539 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
2540 &.OMP_DATA_O is passed as an argument to the child function,
2541 we need to replace it with the argument as seen by the child
2542 function.
2544 In most cases, this will end up being the identity assignment
2545 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
2546 a function call that has been inlined, the original PARM_DECL
2547 .OMP_DATA_I may have been converted into a different local
2548 variable, in which case we need to keep the assignment. */
2549 if (OMP_PARALLEL_DATA_ARG (entry_stmt))
2551 basic_block entry_succ_bb = single_succ (entry_bb);
2552 block_stmt_iterator si;
2553 tree parcopy_stmt = NULL_TREE, arg, narg;
2555 for (si = bsi_start (entry_succ_bb); ; bsi_next (&si))
2557 tree stmt, arg;
2559 gcc_assert (!bsi_end_p (si));
2560 stmt = bsi_stmt (si);
2561 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
2562 continue;
2564 arg = GIMPLE_STMT_OPERAND (stmt, 1);
2565 STRIP_NOPS (arg);
2566 if (TREE_CODE (arg) == ADDR_EXPR
2567 && TREE_OPERAND (arg, 0)
2568 == OMP_PARALLEL_DATA_ARG (entry_stmt))
2570 parcopy_stmt = stmt;
2571 break;
2575 gcc_assert (parcopy_stmt != NULL_TREE);
2576 arg = DECL_ARGUMENTS (child_fn);
2578 if (!gimple_in_ssa_p (cfun))
2580 if (GIMPLE_STMT_OPERAND (parcopy_stmt, 0) == arg)
2581 bsi_remove (&si, true);
2582 else
2583 GIMPLE_STMT_OPERAND (parcopy_stmt, 1) = arg;
2585 else
2587 /* If we are in ssa form, we must load the value from the default
2588 definition of the argument. The default definition should not exist
2589 yet, since the argument is never used uninitialized. */
2590 gcc_assert (gimple_default_def (cfun, arg) == NULL);
2591 narg = make_ssa_name (arg, build_empty_stmt ());
2592 set_default_def (arg, narg);
2593 GIMPLE_STMT_OPERAND (parcopy_stmt, 1) = narg;
2594 update_stmt (parcopy_stmt);
2598 /* Declare local variables needed in CHILD_CFUN. */
2599 block = DECL_INITIAL (child_fn);
2600 BLOCK_VARS (block) = list2chain (child_cfun->unexpanded_var_list);
2601 DECL_SAVED_TREE (child_fn) = bb_stmt_list (single_succ (entry_bb));
2603 /* Reset DECL_CONTEXT on function arguments. */
2604 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
2605 DECL_CONTEXT (t) = child_fn;
2607 /* Split ENTRY_BB at OMP_PARALLEL so that it can be moved to the
2608 child function. */
2609 si = bsi_last (entry_bb);
2610 t = bsi_stmt (si);
2611 gcc_assert (t && TREE_CODE (t) == OMP_PARALLEL);
2612 bsi_remove (&si, true);
2613 e = split_block (entry_bb, t);
2614 entry_bb = e->dest;
2615 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
2617 /* Convert OMP_RETURN into a RETURN_EXPR. */
2618 if (exit_bb)
2620 si = bsi_last (exit_bb);
2621 gcc_assert (!bsi_end_p (si)
2622 && TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
2623 t = build1 (RETURN_EXPR, void_type_node, NULL);
2624 bsi_insert_after (&si, t, BSI_SAME_STMT);
2625 bsi_remove (&si, true);
2628 /* Move the parallel region into CHILD_CFUN. */
2630 if (gimple_in_ssa_p (cfun))
2632 push_cfun (child_cfun);
2633 init_tree_ssa ();
2634 init_ssa_operands ();
2635 cfun->gimple_df->in_ssa_p = true;
2636 pop_cfun ();
2638 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb);
2639 if (exit_bb)
2640 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
2642 /* Inform the callgraph about the new function. */
2643 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
2644 = cfun->curr_properties;
2645 cgraph_add_new_function (child_fn, true);
2647 /* Fix the callgraph edges for child_cfun. Those for cfun will be
2648 fixed in a following pass. */
2649 push_cfun (child_cfun);
2650 if (optimize)
2651 optimize_omp_library_calls ();
2652 rebuild_cgraph_edges ();
2654 /* Some EH regions might become dead, see PR34608. If
2655 pass_cleanup_cfg isn't the first pass to happen with the
2656 new child, these dead EH edges might cause problems.
2657 Clean them up now. */
2658 if (flag_exceptions)
2660 basic_block bb;
2661 tree save_current = current_function_decl;
2662 bool changed = false;
2664 current_function_decl = child_fn;
2665 FOR_EACH_BB (bb)
2666 changed |= tree_purge_dead_eh_edges (bb);
2667 if (changed)
2668 cleanup_tree_cfg ();
2669 current_function_decl = save_current;
2671 pop_cfun ();
2674 /* Emit a library call to launch the children threads. */
2675 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
2676 update_ssa (TODO_update_ssa_only_virtuals);
2680 /* A subroutine of expand_omp_for. Generate code for a parallel
2681 loop with any schedule. Given parameters:
2683 for (V = N1; V cond N2; V += STEP) BODY;
2685 where COND is "<" or ">", we generate pseudocode
2687 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
2688 if (more) goto L0; else goto L3;
2689 L0:
2690 V = istart0;
2691 iend = iend0;
2692 L1:
2693 BODY;
2694 V += STEP;
2695 if (V cond iend) goto L1; else goto L2;
2696 L2:
2697 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
2698 L3:
2700 If this is a combined omp parallel loop, instead of the call to
2701 GOMP_loop_foo_start, we call GOMP_loop_foo_next. */
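/* For instance (illustrative), a loop with schedule(dynamic, 8) is
   expanded with start_fn/next_fn chosen in expand_omp_for so that the
   calls above become roughly

	more = GOMP_loop_dynamic_start (N1, N2, STEP, 8, &istart0, &iend0);
	...
	if (GOMP_loop_dynamic_next (&istart0, &iend0)) goto L0; else goto L3;

   and the OMP_RETURN at the end turns into GOMP_loop_end or
   GOMP_loop_end_nowait depending on the nowait clause.  */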
2703 static void
2704 expand_omp_for_generic (struct omp_region *region,
2705 struct omp_for_data *fd,
2706 enum built_in_function start_fn,
2707 enum built_in_function next_fn)
2709 tree type, istart0, iend0, iend, phi;
2710 tree t, vmain, vback;
2711 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb;
2712 basic_block l2_bb = NULL, l3_bb = NULL;
2713 block_stmt_iterator si;
2714 bool in_combined_parallel = is_combined_parallel (region);
2715 bool broken_loop = region->cont == NULL;
2716 edge e, ne;
2718 gcc_assert (!broken_loop || !in_combined_parallel);
2720 type = TREE_TYPE (fd->v);
2722 istart0 = create_tmp_var (long_integer_type_node, ".istart0");
2723 iend0 = create_tmp_var (long_integer_type_node, ".iend0");
2724 TREE_ADDRESSABLE (istart0) = 1;
2725 TREE_ADDRESSABLE (iend0) = 1;
2726 if (gimple_in_ssa_p (cfun))
2728 add_referenced_var (istart0);
2729 add_referenced_var (iend0);
2732 entry_bb = region->entry;
2733 cont_bb = region->cont;
2734 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
2735 gcc_assert (broken_loop
2736 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
2737 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
2738 l1_bb = single_succ (l0_bb);
2739 if (!broken_loop)
2741 l2_bb = create_empty_bb (cont_bb);
2742 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
2743 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
2745 else
2746 l2_bb = NULL;
2747 l3_bb = BRANCH_EDGE (entry_bb)->dest;
2748 exit_bb = region->exit;
2750 si = bsi_last (entry_bb);
2751 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
2752 if (in_combined_parallel)
2754 /* In a combined parallel loop, emit a call to
2755 GOMP_loop_foo_next. */
2756 t = build_call_expr (built_in_decls[next_fn], 2,
2757 build_fold_addr_expr (istart0),
2758 build_fold_addr_expr (iend0));
2760 else
2762 tree t0, t1, t2, t3, t4;
2763 /* If this is not a combined parallel loop, emit a call to
2764 GOMP_loop_foo_start in ENTRY_BB. */
2765 t4 = build_fold_addr_expr (iend0);
2766 t3 = build_fold_addr_expr (istart0);
2767 t2 = fold_convert (long_integer_type_node, fd->step);
2768 t1 = fold_convert (long_integer_type_node, fd->n2);
2769 t0 = fold_convert (long_integer_type_node, fd->n1);
2770 if (fd->chunk_size)
2772 t = fold_convert (long_integer_type_node, fd->chunk_size);
2773 t = build_call_expr (built_in_decls[start_fn], 6,
2774 t0, t1, t2, t, t3, t4);
2776 else
2777 t = build_call_expr (built_in_decls[start_fn], 5,
2778 t0, t1, t2, t3, t4);
2780 t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2781 true, BSI_SAME_STMT);
2782 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
2783 bsi_insert_after (&si, t, BSI_SAME_STMT);
2785 /* V may be used outside of the loop (e.g., to handle the lastprivate
2786 clause). If this is the case, its value is undefined if the loop is
2787 not entered at all. To handle this case, set its initial value to N1. */
2788 if (gimple_in_ssa_p (cfun))
2790 e = find_edge (entry_bb, l3_bb);
2791 for (phi = phi_nodes (l3_bb); phi; phi = PHI_CHAIN (phi))
2792 if (PHI_ARG_DEF_FROM_EDGE (phi, e) == fd->v)
2793 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), fd->n1);
2795 else
2797 t = build_gimple_modify_stmt (fd->v, fd->n1);
2798 bsi_insert_before (&si, t, BSI_SAME_STMT);
2801 /* Remove the OMP_FOR statement. */
2802 bsi_remove (&si, true);
2804 /* Iteration setup for sequential loop goes in L0_BB. */
2805 si = bsi_start (l0_bb);
2806 t = fold_convert (type, istart0);
2807 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
2808 false, BSI_CONTINUE_LINKING);
2809 t = build_gimple_modify_stmt (fd->v, t);
2810 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2811 if (gimple_in_ssa_p (cfun))
2812 SSA_NAME_DEF_STMT (fd->v) = t;
2814 t = fold_convert (type, iend0);
2815 iend = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2816 false, BSI_CONTINUE_LINKING);
2818 if (!broken_loop)
2820 /* Code to control the increment and predicate for the sequential
2821 loop goes in the CONT_BB. */
2822 si = bsi_last (cont_bb);
2823 t = bsi_stmt (si);
2824 gcc_assert (TREE_CODE (t) == OMP_CONTINUE);
2825 vmain = TREE_OPERAND (t, 1);
2826 vback = TREE_OPERAND (t, 0);
2828 t = fold_build2 (PLUS_EXPR, type, vmain, fd->step);
2829 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
2830 true, BSI_SAME_STMT);
2831 t = build_gimple_modify_stmt (vback, t);
2832 bsi_insert_before (&si, t, BSI_SAME_STMT);
2833 if (gimple_in_ssa_p (cfun))
2834 SSA_NAME_DEF_STMT (vback) = t;
2836 t = build2 (fd->cond_code, boolean_type_node, vback, iend);
2837 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
2838 bsi_insert_before (&si, t, BSI_SAME_STMT);
2840 /* Remove OMP_CONTINUE. */
2841 bsi_remove (&si, true);
2843 /* Emit code to get the next parallel iteration in L2_BB. */
2844 si = bsi_start (l2_bb);
2846 t = build_call_expr (built_in_decls[next_fn], 2,
2847 build_fold_addr_expr (istart0),
2848 build_fold_addr_expr (iend0));
2849 t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2850 false, BSI_CONTINUE_LINKING);
2851 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
2852 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2855 /* Add the loop cleanup function. */
2856 si = bsi_last (exit_bb);
2857 if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
2858 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
2859 else
2860 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
2861 t = build_call_expr (t, 0);
2862 bsi_insert_after (&si, t, BSI_SAME_STMT);
2863 bsi_remove (&si, true);
2865 /* Connect the new blocks. */
2866 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
2867 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
2869 if (!broken_loop)
2871 e = find_edge (cont_bb, l3_bb);
2872 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
2874 for (phi = phi_nodes (l3_bb); phi; phi = PHI_CHAIN (phi))
2875 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
2876 PHI_ARG_DEF_FROM_EDGE (phi, e));
2877 remove_edge (e);
2879 find_edge (cont_bb, l1_bb)->flags = EDGE_TRUE_VALUE;
2880 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
2881 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
2883 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
2884 recompute_dominator (CDI_DOMINATORS, l2_bb));
2885 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
2886 recompute_dominator (CDI_DOMINATORS, l3_bb));
2887 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
2888 recompute_dominator (CDI_DOMINATORS, l0_bb));
2889 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
2890 recompute_dominator (CDI_DOMINATORS, l1_bb));
2895 /* A subroutine of expand_omp_for. Generate code for a parallel
2896 loop with static schedule and no specified chunk size. Given
2897 parameters:
2899 for (V = N1; V cond N2; V += STEP) BODY;
2901 where COND is "<" or ">", we generate pseudocode
2903 if (cond is <)
2904 adj = STEP - 1;
2905 else
2906 adj = STEP + 1;
2907 n = (adj + N2 - N1) / STEP;
2908 q = n / nthreads;
2909 q += (q * nthreads != n);
2910 s0 = q * threadid;
2911 e0 = min(s0 + q, n);
2912 V = s0 * STEP + N1;
2913 if (s0 >= e0) goto L2; else goto L0;
2914 L0:
2915 e = e0 * STEP + N1;
2916 L1:
2917 BODY;
2918 V += STEP;
2919 if (V cond e) goto L1;
2920 L2:
2921 */
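/* A worked example (illustrative): with N1=0, N2=10, STEP=1, cond "<"
   and nthreads=4, we get n=10 and q=3 (10/4 rounded up), so threads
   0..3 execute the iteration ranges [0,3), [3,6), [6,9) and [9,10)
   respectively; a thread whose s0 >= e0 skips the body entirely.  */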
2923 static void
2924 expand_omp_for_static_nochunk (struct omp_region *region,
2925 struct omp_for_data *fd)
2927 tree n, q, s0, e0, e, t, nthreads, threadid;
2928 tree type, vmain, vback;
2929 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
2930 basic_block fin_bb;
2931 block_stmt_iterator si;
2933 type = TREE_TYPE (fd->v);
2935 entry_bb = region->entry;
2936 cont_bb = region->cont;
2937 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
2938 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
2939 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
2940 body_bb = single_succ (seq_start_bb);
2941 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
2942 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
2943 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
2944 exit_bb = region->exit;
2946 /* Iteration space partitioning goes in ENTRY_BB. */
2947 si = bsi_last (entry_bb);
2948 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
2950 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
2951 t = fold_convert (type, t);
2952 nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2953 true, BSI_SAME_STMT);
2955 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2956 t = fold_convert (type, t);
2957 threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2958 true, BSI_SAME_STMT);
2960 fd->n1 = force_gimple_operand_bsi (&si,
2961 fold_convert (type, fd->n1),
2962 true, NULL_TREE,
2963 true, BSI_SAME_STMT);
2965 fd->n2 = force_gimple_operand_bsi (&si,
2966 fold_convert (type, fd->n2),
2967 true, NULL_TREE,
2968 true, BSI_SAME_STMT);
2970 fd->step = force_gimple_operand_bsi (&si,
2971 fold_convert (type, fd->step),
2972 true, NULL_TREE,
2973 true, BSI_SAME_STMT);
2975 t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
2976 t = fold_build2 (PLUS_EXPR, type, fd->step, t);
2977 t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
2978 t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
2979 t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
2980 t = fold_convert (type, t);
2981 n = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
2983 t = fold_build2 (TRUNC_DIV_EXPR, type, n, nthreads);
2984 q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
2986 t = fold_build2 (MULT_EXPR, type, q, nthreads);
2987 t = fold_build2 (NE_EXPR, type, t, n);
2988 t = fold_build2 (PLUS_EXPR, type, q, t);
2989 q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
2991 t = build2 (MULT_EXPR, type, q, threadid);
2992 s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
2994 t = fold_build2 (PLUS_EXPR, type, s0, q);
2995 t = fold_build2 (MIN_EXPR, type, t, n);
2996 e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
2998 t = fold_convert (type, s0);
2999 t = fold_build2 (MULT_EXPR, type, t, fd->step);
3000 t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
3001 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
3002 true, BSI_SAME_STMT);
3003 t = build_gimple_modify_stmt (fd->v, t);
3004 bsi_insert_before (&si, t, BSI_SAME_STMT);
3005 if (gimple_in_ssa_p (cfun))
3006 SSA_NAME_DEF_STMT (fd->v) = t;
3008 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
3009 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
3010 bsi_insert_before (&si, t, BSI_SAME_STMT);
3012 /* Remove the OMP_FOR statement. */
3013 bsi_remove (&si, true);
3015 /* Setup code for sequential iteration goes in SEQ_START_BB. */
3016 si = bsi_start (seq_start_bb);
3018 t = fold_convert (type, e0);
3019 t = fold_build2 (MULT_EXPR, type, t, fd->step);
3020 t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
3021 e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3022 false, BSI_CONTINUE_LINKING);
3024 /* The code controlling the sequential loop replaces the OMP_CONTINUE. */
3025 si = bsi_last (cont_bb);
3026 t = bsi_stmt (si);
3027 gcc_assert (TREE_CODE (t) == OMP_CONTINUE);
3028 vmain = TREE_OPERAND (t, 1);
3029 vback = TREE_OPERAND (t, 0);
3031 t = fold_build2 (PLUS_EXPR, type, vmain, fd->step);
3032 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
3033 true, BSI_SAME_STMT);
3034 t = build_gimple_modify_stmt (vback, t);
3035 bsi_insert_before (&si, t, BSI_SAME_STMT);
3036 if (gimple_in_ssa_p (cfun))
3037 SSA_NAME_DEF_STMT (vback) = t;
3039 t = build2 (fd->cond_code, boolean_type_node, vback, e);
3040 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
3041 bsi_insert_before (&si, t, BSI_SAME_STMT);
3043 /* Remove the OMP_CONTINUE statement. */
3044 bsi_remove (&si, true);
3046 /* Replace the OMP_RETURN with a barrier, or nothing. */
3047 si = bsi_last (exit_bb);
3048 if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
3049 force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
3050 false, BSI_SAME_STMT);
3051 bsi_remove (&si, true);
3053 /* Connect all the blocks. */
3054 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
3055 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
3057 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
3058 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
3060 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
3061 set_immediate_dominator (CDI_DOMINATORS, body_bb,
3062 recompute_dominator (CDI_DOMINATORS, body_bb));
3063 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
3064 recompute_dominator (CDI_DOMINATORS, fin_bb));
3068 /* A subroutine of expand_omp_for. Generate code for a parallel
3069 loop with static schedule and a specified chunk size. Given
3070 parameters:
3072 for (V = N1; V cond N2; V += STEP) BODY;
3074 where COND is "<" or ">", we generate pseudocode
3076 if (cond is <)
3077 adj = STEP - 1;
3078 else
3079 adj = STEP + 1;
3080 n = (adj + N2 - N1) / STEP;
3081 trip = 0;
3082 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
3083 here so that V is defined
3084 if the loop is not entered
3085 L0:
3086 s0 = (trip * nthreads + threadid) * CHUNK;
3087 e0 = min(s0 + CHUNK, n);
3088 if (s0 < n) goto L1; else goto L4;
3089 L1:
3090 V = s0 * STEP + N1;
3091 e = e0 * STEP + N1;
3092 L2:
3093 BODY;
3094 V += STEP;
3095 if (V cond e) goto L2; else goto L3;
3096 L3:
3097 trip += 1;
3098 goto L0;
3099 L4:
3100 */
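/* A worked example (illustrative): with N1=0, STEP=1, CHUNK=2 and
   nthreads=2, trip 0 gives thread 0 the iterations [0,2) and thread 1
   the iterations [2,4); trip 1 gives them [4,6) and [6,8), and so on
   until s0 >= n, when the thread leaves via L4.  */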
3102 static void
3103 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
3105 tree n, s0, e0, e, t, phi, nphi, args;
3106 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
3107 tree type, cont, v_main, v_back, v_extra;
3108 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
3109 basic_block trip_update_bb, cont_bb, fin_bb;
3110 block_stmt_iterator si;
3111 edge se, re, ene;
3113 type = TREE_TYPE (fd->v);
3115 entry_bb = region->entry;
3116 se = split_block (entry_bb, last_stmt (entry_bb));
3117 entry_bb = se->src;
3118 iter_part_bb = se->dest;
3119 cont_bb = region->cont;
3120 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
3121 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
3122 == FALLTHRU_EDGE (cont_bb)->dest);
3123 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
3124 body_bb = single_succ (seq_start_bb);
3125 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
3126 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3127 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
3128 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
3129 exit_bb = region->exit;
3131 /* Trip and adjustment setup goes in ENTRY_BB. */
3132 si = bsi_last (entry_bb);
3133 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
3135 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
3136 t = fold_convert (type, t);
3137 nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3138 true, BSI_SAME_STMT);
3140 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
3141 t = fold_convert (type, t);
3142 threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3143 true, BSI_SAME_STMT);
3145 fd->n1 = force_gimple_operand_bsi (&si, fold_convert (type, fd->n1),
3146 true, NULL_TREE,
3147 true, BSI_SAME_STMT);
3148 fd->n2 = force_gimple_operand_bsi (&si, fold_convert (type, fd->n2),
3149 true, NULL_TREE,
3150 true, BSI_SAME_STMT);
3151 fd->step = force_gimple_operand_bsi (&si, fold_convert (type, fd->step),
3152 true, NULL_TREE,
3153 true, BSI_SAME_STMT);
3154 fd->chunk_size
3155 = force_gimple_operand_bsi (&si, fold_convert (type,
3156 fd->chunk_size),
3157 true, NULL_TREE,
3158 true, BSI_SAME_STMT);
3160 t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
3161 t = fold_build2 (PLUS_EXPR, type, fd->step, t);
3162 t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
3163 t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
3164 t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
3165 t = fold_convert (type, t);
3166 n = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3167 true, BSI_SAME_STMT);
3169 trip_var = create_tmp_var (type, ".trip");
3170 if (gimple_in_ssa_p (cfun))
3172 add_referenced_var (trip_var);
3173 trip_init = make_ssa_name (trip_var, NULL_TREE);
3174 trip_main = make_ssa_name (trip_var, NULL_TREE);
3175 trip_back = make_ssa_name (trip_var, NULL_TREE);
3177 else
3179 trip_init = trip_var;
3180 trip_main = trip_var;
3181 trip_back = trip_var;
3184 t = build_gimple_modify_stmt (trip_init, build_int_cst (type, 0));
3185 bsi_insert_before (&si, t, BSI_SAME_STMT);
3186 if (gimple_in_ssa_p (cfun))
3187 SSA_NAME_DEF_STMT (trip_init) = t;
3189 t = fold_build2 (MULT_EXPR, type, threadid, fd->chunk_size);
3190 t = fold_build2 (MULT_EXPR, type, t, fd->step);
3191 t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
3192 v_extra = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3193 true, BSI_SAME_STMT);
3195 /* Remove the OMP_FOR. */
3196 bsi_remove (&si, true);
3198 /* Iteration space partitioning goes in ITER_PART_BB. */
3199 si = bsi_last (iter_part_bb);
3201 t = fold_build2 (MULT_EXPR, type, trip_main, nthreads);
3202 t = fold_build2 (PLUS_EXPR, type, t, threadid);
3203 t = fold_build2 (MULT_EXPR, type, t, fd->chunk_size);
3204 s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3205 false, BSI_CONTINUE_LINKING);
3207 t = fold_build2 (PLUS_EXPR, type, s0, fd->chunk_size);
3208 t = fold_build2 (MIN_EXPR, type, t, n);
3209 e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3210 false, BSI_CONTINUE_LINKING);
3212 t = build2 (LT_EXPR, boolean_type_node, s0, n);
3213 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
3214 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3216 /* Setup code for sequential iteration goes in SEQ_START_BB. */
3217 si = bsi_start (seq_start_bb);
3219 t = fold_convert (type, s0);
3220 t = fold_build2 (MULT_EXPR, type, t, fd->step);
3221 t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
3222 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
3223 false, BSI_CONTINUE_LINKING);
3224 t = build_gimple_modify_stmt (fd->v, t);
3225 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3226 if (gimple_in_ssa_p (cfun))
3227 SSA_NAME_DEF_STMT (fd->v) = t;
3229 t = fold_convert (type, e0);
3230 t = fold_build2 (MULT_EXPR, type, t, fd->step);
3231 t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
3232 e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3233 false, BSI_CONTINUE_LINKING);
3235 /* The code controlling the sequential loop goes in CONT_BB,
3236 replacing the OMP_CONTINUE. */
3237 si = bsi_last (cont_bb);
3238 cont = bsi_stmt (si);
3239 gcc_assert (TREE_CODE (cont) == OMP_CONTINUE);
3240 v_main = TREE_OPERAND (cont, 1);
3241 v_back = TREE_OPERAND (cont, 0);
3243 t = build2 (PLUS_EXPR, type, v_main, fd->step);
3244 t = build_gimple_modify_stmt (v_back, t);
3245 bsi_insert_before (&si, t, BSI_SAME_STMT);
3246 if (gimple_in_ssa_p (cfun))
3247 SSA_NAME_DEF_STMT (v_back) = t;
3249 t = build2 (fd->cond_code, boolean_type_node, v_back, e);
3250 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
3251 bsi_insert_before (&si, t, BSI_SAME_STMT);
3253 /* Remove OMP_CONTINUE. */
3254 bsi_remove (&si, true);
3256 /* Trip update code goes into TRIP_UPDATE_BB. */
3257 si = bsi_start (trip_update_bb);
3259 t = build_int_cst (type, 1);
3260 t = build2 (PLUS_EXPR, type, trip_main, t);
3261 t = build_gimple_modify_stmt (trip_back, t);
3262 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3263 if (gimple_in_ssa_p (cfun))
3264 SSA_NAME_DEF_STMT (trip_back) = t;
3266 /* Replace the OMP_RETURN with a barrier, or nothing. */
3267 si = bsi_last (exit_bb);
3268 if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
3269 force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
3270 false, BSI_SAME_STMT);
3271 bsi_remove (&si, true);
3273 /* Connect the new blocks. */
3274 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
3275 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
3277 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
3278 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
3280 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
3282 if (gimple_in_ssa_p (cfun))
3284 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
3285 remove arguments of the phi nodes in fin_bb. We need to create
3286 appropriate phi nodes in iter_part_bb instead. */
3287 se = single_pred_edge (fin_bb);
3288 re = single_succ_edge (trip_update_bb);
3289 ene = single_succ_edge (entry_bb);
3291 args = PENDING_STMT (re);
3292 PENDING_STMT (re) = NULL_TREE;
3293 for (phi = phi_nodes (fin_bb);
3294 phi && args;
3295 phi = PHI_CHAIN (phi), args = TREE_CHAIN (args))
3297 t = PHI_RESULT (phi);
3298 gcc_assert (t == TREE_PURPOSE (args));
3299 nphi = create_phi_node (t, iter_part_bb);
3300 SSA_NAME_DEF_STMT (t) = nphi;
3302 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
3303 /* A special case -- fd->v is not yet computed in iter_part_bb, so we
3304 need to use v_extra instead. */
3305 if (t == fd->v)
3306 t = v_extra;
3307 add_phi_arg (nphi, t, ene);
3308 add_phi_arg (nphi, TREE_VALUE (args), re);
3310 gcc_assert (!phi && !args);
3311 while ((phi = phi_nodes (fin_bb)) != NULL_TREE)
3312 remove_phi_node (phi, NULL_TREE, false);
3314 /* Make phi node for trip. */
3315 phi = create_phi_node (trip_main, iter_part_bb);
3316 SSA_NAME_DEF_STMT (trip_main) = phi;
3317 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
3318 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
3321 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
3322 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
3323 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
3324 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
3325 recompute_dominator (CDI_DOMINATORS, fin_bb));
3326 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
3327 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
3328 set_immediate_dominator (CDI_DOMINATORS, body_bb,
3329 recompute_dominator (CDI_DOMINATORS, body_bb));
3333 /* Expand the OpenMP loop defined by REGION. */
3335 static void
3336 expand_omp_for (struct omp_region *region)
3338 struct omp_for_data fd;
3340 extract_omp_for_data (last_stmt (region->entry), &fd);
3341 region->sched_kind = fd.sched_kind;
3343 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
3344 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
3345 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
3346 if (region->cont)
3348 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
3349 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
3350 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
3353 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
3354 && !fd.have_ordered
3355 && region->cont != NULL)
3357 if (fd.chunk_size == NULL)
3358 expand_omp_for_static_nochunk (region, &fd);
3359 else
3360 expand_omp_for_static_chunk (region, &fd);
3362 else
3364 int fn_index = fd.sched_kind + fd.have_ordered * 4;
3365 int start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
3366 int next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
3367 expand_omp_for_generic (region, &fd, start_ix, next_ix);
3370 update_ssa (TODO_update_ssa_only_virtuals);
3374 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
3376 v = GOMP_sections_start (n);
3377 L0:
3378 switch (v)
3379 {
3380 case 0:
3381 goto L2;
3382 case 1:
3383 section 1;
3384 goto L1;
3385 case 2:
3386 ...
3387 case n:
3388 ...
3389 default:
3390 abort ();
3391 }
3392 L1:
3393 v = GOMP_sections_next ();
3394 goto L0;
3395 L2:
3396 reduction;
3398 If this is a combined parallel sections, replace the call to
3399 GOMP_sections_start with a call to GOMP_sections_next. */
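/* For example (illustrative), a directive with two sections

	#pragma omp sections
	  {
	    #pragma omp section
	      foo ();
	    #pragma omp section
	      bar ();
	  }

   calls GOMP_sections_start (2); each thread then repeatedly switches
   on the returned value, running case 1 (foo) or case 2 (bar), and
   asks GOMP_sections_next () for more work until it returns 0.  */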
3401 static void
3402 expand_omp_sections (struct omp_region *region)
3404 tree label_vec, l1, l2, t, u, sections_stmt, vin, vmain, vnext, cont;
3405 unsigned i, casei, len;
3406 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
3407 block_stmt_iterator si;
3408 struct omp_region *inner;
3409 bool exit_reachable = region->cont != NULL;
3411 gcc_assert (exit_reachable == (region->exit != NULL));
3412 entry_bb = region->entry;
3413 l0_bb = single_succ (entry_bb);
3414 l1_bb = region->cont;
3415 l2_bb = region->exit;
3416 if (exit_reachable)
3418 gcc_assert (single_pred (l2_bb) == l0_bb);
3419 default_bb = create_empty_bb (l1_bb->prev_bb);
3420 l1 = tree_block_label (l1_bb);
3421 l2 = tree_block_label (l2_bb);
3423 else
3425 default_bb = create_empty_bb (l0_bb);
3426 l1 = NULL_TREE;
3427 l2 = tree_block_label (default_bb);
3430 /* We will build a switch() with enough cases for all the
3431 OMP_SECTION regions, a '0' case to handle the end of more work
3432 and a default case to abort if something goes wrong. */
3433 len = EDGE_COUNT (l0_bb->succs);
3434 label_vec = make_tree_vec (len + 1);
3436 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
3437 OMP_SECTIONS statement. */
3438 si = bsi_last (entry_bb);
3439 sections_stmt = bsi_stmt (si);
3440 gcc_assert (TREE_CODE (sections_stmt) == OMP_SECTIONS);
3441 vin = OMP_SECTIONS_CONTROL (sections_stmt);
3442 if (!is_combined_parallel (region))
3444 /* If we are not inside a combined parallel+sections region,
3445 call GOMP_sections_start. */
3446 t = build_int_cst (unsigned_type_node,
3447 exit_reachable ? len - 1 : len);
3448 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
3449 t = build_call_expr (u, 1, t);
3451 else
3453 /* Otherwise, call GOMP_sections_next. */
3454 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
3455 t = build_call_expr (u, 0);
3457 t = build_gimple_modify_stmt (vin, t);
3458 bsi_insert_after (&si, t, BSI_SAME_STMT);
3459 if (gimple_in_ssa_p (cfun))
3460 SSA_NAME_DEF_STMT (vin) = t;
3461 bsi_remove (&si, true);
3463 /* The switch() statement replacing OMP_SECTIONS_SWITCH goes in L0_BB. */
3464 si = bsi_last (l0_bb);
3465 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTIONS_SWITCH);
3466 if (exit_reachable)
3468 cont = last_stmt (l1_bb);
3469 gcc_assert (TREE_CODE (cont) == OMP_CONTINUE);
3470 vmain = TREE_OPERAND (cont, 1);
3471 vnext = TREE_OPERAND (cont, 0);
3473 else
3475 vmain = vin;
3476 vnext = NULL_TREE;
3479 t = build3 (SWITCH_EXPR, void_type_node, vmain, NULL, label_vec);
3480 bsi_insert_after (&si, t, BSI_SAME_STMT);
3481 bsi_remove (&si, true);
3483 i = 0;
3484 if (exit_reachable)
3486 t = build3 (CASE_LABEL_EXPR, void_type_node,
3487 build_int_cst (unsigned_type_node, 0), NULL, l2);
3488 TREE_VEC_ELT (label_vec, 0) = t;
3489 i++;
3492 /* Convert each OMP_SECTION into a CASE_LABEL_EXPR. */
3493 for (inner = region->inner, casei = 1;
3494 inner;
3495 inner = inner->next, i++, casei++)
3497 basic_block s_entry_bb, s_exit_bb;
3499 s_entry_bb = inner->entry;
3500 s_exit_bb = inner->exit;
3502 t = tree_block_label (s_entry_bb);
3503 u = build_int_cst (unsigned_type_node, casei);
3504 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
3505 TREE_VEC_ELT (label_vec, i) = u;
3507 si = bsi_last (s_entry_bb);
3508 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTION);
3509 gcc_assert (i < len || OMP_SECTION_LAST (bsi_stmt (si)));
3510 bsi_remove (&si, true);
3511 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
3513 if (s_exit_bb == NULL)
3514 continue;
3516 si = bsi_last (s_exit_bb);
3517 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
3518 bsi_remove (&si, true);
3520 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
3523 /* Error handling code goes in DEFAULT_BB. */
3524 t = tree_block_label (default_bb);
3525 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
3526 TREE_VEC_ELT (label_vec, len) = u;
3527 make_edge (l0_bb, default_bb, 0);
3529 si = bsi_start (default_bb);
3530 t = build_call_expr (built_in_decls[BUILT_IN_TRAP], 0);
3531 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3533 if (exit_reachable)
3535 /* Code to get the next section goes in L1_BB. */
3536 si = bsi_last (l1_bb);
3537 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
3539 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
3540 t = build_gimple_modify_stmt (vnext, t);
3541 bsi_insert_after (&si, t, BSI_SAME_STMT);
3542 if (gimple_in_ssa_p (cfun))
3543 SSA_NAME_DEF_STMT (vnext) = t;
3544 bsi_remove (&si, true);
3546 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
3548 /* Cleanup function replaces OMP_RETURN in EXIT_BB. */
3549 si = bsi_last (l2_bb);
3550 if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
3551 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
3552 else
3553 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
3554 t = build_call_expr (t, 0);
3555 bsi_insert_after (&si, t, BSI_SAME_STMT);
3556 bsi_remove (&si, true);
3559 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
3563 /* Expand code for an OpenMP single directive. We've already expanded
3564 much of the code; here we simply place the GOMP_barrier call. */
3566 static void
3567 expand_omp_single (struct omp_region *region)
3569 basic_block entry_bb, exit_bb;
3570 block_stmt_iterator si;
3571 bool need_barrier = false;
3573 entry_bb = region->entry;
3574 exit_bb = region->exit;
3576 si = bsi_last (entry_bb);
3577 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
3578 be removed. We need to ensure that the thread that entered the single
3579 does not exit before the data is copied out by the other threads. */
3580 if (find_omp_clause (OMP_SINGLE_CLAUSES (bsi_stmt (si)),
3581 OMP_CLAUSE_COPYPRIVATE))
3582 need_barrier = true;
3583 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE);
3584 bsi_remove (&si, true);
3585 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3587 si = bsi_last (exit_bb);
3588 if (!OMP_RETURN_NOWAIT (bsi_stmt (si)) || need_barrier)
3589 force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
3590 false, BSI_SAME_STMT);
3591 bsi_remove (&si, true);
3592 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
3596 /* Generic expansion for OpenMP synchronization directives: master,
3597 ordered and critical. All we need to do here is remove the entry
3598 and exit markers for REGION. */
3600 static void
3601 expand_omp_synch (struct omp_region *region)
3603 basic_block entry_bb, exit_bb;
3604 block_stmt_iterator si;
3606 entry_bb = region->entry;
3607 exit_bb = region->exit;
3609 si = bsi_last (entry_bb);
3610 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE
3611 || TREE_CODE (bsi_stmt (si)) == OMP_MASTER
3612 || TREE_CODE (bsi_stmt (si)) == OMP_ORDERED
3613 || TREE_CODE (bsi_stmt (si)) == OMP_CRITICAL);
3614 bsi_remove (&si, true);
3615 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3617 if (exit_bb)
3619 si = bsi_last (exit_bb);
3620 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
3621 bsi_remove (&si, true);
3622 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
3626 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
3627 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
3628 size of the data type, and thus usable to find the index of the builtin
3629 decl. Returns false if the expression is not of the proper form. */
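/* For example (illustrative), assuming a 4-byte int x,

	#pragma omp atomic
	x = x + 1;

   matches the pattern below and is expanded to a single call along the
   lines of __sync_fetch_and_add_4 (&x, 1), with the result discarded.  */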
3631 static bool
3632 expand_omp_atomic_fetch_op (basic_block load_bb,
3633 tree addr, tree loaded_val,
3634 tree stored_val, int index)
3636 enum built_in_function base;
3637 tree decl, itype, call;
3638 enum insn_code *optab;
3639 tree rhs;
3640 basic_block store_bb = single_succ (load_bb);
3641 block_stmt_iterator bsi;
3642 tree stmt;
3644 /* We expect to find the following sequences:
3646 load_bb:
3647 OMP_ATOMIC_LOAD (tmp, mem)
3649 store_bb:
3650 val = tmp OP something; (or: something OP tmp)
3651 OMP_STORE (val)
3653 ???FIXME: Allow a more flexible sequence.
3654 Perhaps use data flow to pick the statements.
3656 */
3658 bsi = bsi_after_labels (store_bb);
3659 stmt = bsi_stmt (bsi);
3660 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
3661 return false;
3662 bsi_next (&bsi);
3663 if (TREE_CODE (bsi_stmt (bsi)) != OMP_ATOMIC_STORE)
3664 return false;
3666 if (!operand_equal_p (GIMPLE_STMT_OPERAND (stmt, 0), stored_val, 0))
3667 return false;
3669 rhs = GIMPLE_STMT_OPERAND (stmt, 1);
3671 /* Check for one of the supported fetch-op operations. */
3672 switch (TREE_CODE (rhs))
3674 case PLUS_EXPR:
3675 case POINTER_PLUS_EXPR:
3676 base = BUILT_IN_FETCH_AND_ADD_N;
3677 optab = sync_add_optab;
3678 break;
3679 case MINUS_EXPR:
3680 base = BUILT_IN_FETCH_AND_SUB_N;
3681 optab = sync_add_optab;
3682 break;
3683 case BIT_AND_EXPR:
3684 base = BUILT_IN_FETCH_AND_AND_N;
3685 optab = sync_and_optab;
3686 break;
3687 case BIT_IOR_EXPR:
3688 base = BUILT_IN_FETCH_AND_OR_N;
3689 optab = sync_ior_optab;
3690 break;
3691 case BIT_XOR_EXPR:
3692 base = BUILT_IN_FETCH_AND_XOR_N;
3693 optab = sync_xor_optab;
3694 break;
3695 default:
3696 return false;
3698 /* Make sure the expression is of the proper form. */
3699 if (operand_equal_p (TREE_OPERAND (rhs, 0), loaded_val, 0))
3700 rhs = TREE_OPERAND (rhs, 1);
3701 else if (commutative_tree_code (TREE_CODE (rhs))
3702 && operand_equal_p (TREE_OPERAND (rhs, 1), loaded_val, 0))
3703 rhs = TREE_OPERAND (rhs, 0);
3704 else
3705 return false;
3707 decl = built_in_decls[base + index + 1];
3708 itype = TREE_TYPE (TREE_TYPE (decl));
3710 if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
3711 return false;
3713 bsi = bsi_last (load_bb);
3714 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
3715 call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
3716 force_gimple_operand_bsi (&bsi, call, true, NULL_TREE, true, BSI_SAME_STMT);
3717 bsi_remove (&bsi, true);
3719 bsi = bsi_last (store_bb);
3720 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
3721 bsi_remove (&bsi, true);
3722 bsi = bsi_last (store_bb);
3723 bsi_remove (&bsi, true);
3725 if (gimple_in_ssa_p (cfun))
3726 update_ssa (TODO_update_ssa_no_phi);
3728 return true;
3731 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
3733 oldval = *addr;
3734 repeat:
3735 newval = rhs; // with oldval replacing *addr in rhs
3736 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
3737 if (oldval != newval)
3738 goto repeat;
3740 INDEX is log2 of the size of the data type, and thus usable to find the
3741 index of the builtin decl. */
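/* For example (illustrative), a 4-byte float update such as

	#pragma omp atomic
	x *= 2.0f;

   has no fetch-and-op builtin, so the value is view-converted to a
   4-byte integer and the loop above is emitted around
   __sync_val_compare_and_swap_4, retrying until the swap observes an
   unchanged *addr.  */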
3743 static bool
3744 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
3745 tree addr, tree loaded_val, tree stored_val,
3746 int index)
3748 tree loadedi, storedi, initial, new_stored, new_storedi, old_vali;
3749 tree type, itype, cmpxchg, iaddr;
3750 block_stmt_iterator bsi;
3751 basic_block loop_header = single_succ (load_bb);
3752 tree phi, x;
3753 edge e;
3755 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
3756 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
3757 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
3759 if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
3760 return false;
3762 /* Load the initial value, replacing the OMP_ATOMIC_LOAD. */
3763 bsi = bsi_last (load_bb);
3764 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
3765 initial = force_gimple_operand_bsi (&bsi, build_fold_indirect_ref (addr),
3766 true, NULL_TREE, true, BSI_SAME_STMT);
3767 /* Move the value to the LOADED_VAL temporary. */
3768 if (gimple_in_ssa_p (cfun))
3770 gcc_assert (phi_nodes (loop_header) == NULL_TREE);
3771 phi = create_phi_node (loaded_val, loop_header);
3772 SSA_NAME_DEF_STMT (loaded_val) = phi;
3773 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
3774 initial);
3776 else
3777 bsi_insert_before (&bsi,
3778 build_gimple_modify_stmt (loaded_val, initial),
3779 BSI_SAME_STMT);
3780 bsi_remove (&bsi, true);
3782 bsi = bsi_last (store_bb);
3783 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
3785 /* For floating-point values, we'll need to view-convert them to integers
3786 so that we can perform the atomic compare and swap. Simplify the
3787 following code by always setting up the "i"ntegral variables. */
3788 if (INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
3790 loadedi = loaded_val;
3791 storedi = stored_val;
3792 iaddr = addr;
3794 else
3796 loadedi = force_gimple_operand_bsi (&bsi,
3797 build1 (VIEW_CONVERT_EXPR, itype,
3798 loaded_val), true,
3799 NULL_TREE, true, BSI_SAME_STMT);
3800 storedi =
3801 force_gimple_operand_bsi (&bsi,
3802 build1 (VIEW_CONVERT_EXPR, itype,
3803 stored_val), true, NULL_TREE, true,
3804 BSI_SAME_STMT);
3805 iaddr = fold_convert (build_pointer_type (itype), addr);
3808 /* Build the compare&swap statement. */
3809 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
3810 new_storedi = force_gimple_operand_bsi (&bsi,
3811 fold_convert (itype, new_storedi),
3812 true, NULL_TREE,
3813 true, BSI_SAME_STMT);
3814 if (storedi == stored_val)
3815 new_stored = new_storedi;
3816 else
3817 new_stored = force_gimple_operand_bsi (&bsi,
3818 build1 (VIEW_CONVERT_EXPR, type,
3819 new_storedi), true,
3820 NULL_TREE, true, BSI_SAME_STMT);
3822 if (gimple_in_ssa_p (cfun))
3823 old_vali = loadedi;
3824 else
3826 old_vali = create_tmp_var (itype, NULL);
3827 x = build_gimple_modify_stmt (old_vali, loadedi);
3828 bsi_insert_before (&bsi, x, BSI_SAME_STMT);
3830 x = build_gimple_modify_stmt (loaded_val, new_stored);
3831 bsi_insert_before (&bsi, x, BSI_SAME_STMT);
3834 /* Note that we always perform the comparison as an integer, even for
3835 floating point. This allows the atomic operation to properly
3836 succeed even with NaNs and -0.0. */
3837 x = build3 (COND_EXPR, void_type_node,
3838 build2 (NE_EXPR, boolean_type_node,
3839 new_storedi, old_vali), NULL_TREE, NULL_TREE);
3840 bsi_insert_before (&bsi, x, BSI_SAME_STMT);
3842 /* Update cfg. */
3843 e = single_succ_edge (store_bb);
3844 e->flags &= ~EDGE_FALLTHRU;
3845 e->flags |= EDGE_FALSE_VALUE;
3847 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
3849 /* Copy the new value to loaded_val (we already did that before the condition
3850 if we are not in SSA). */
3851 if (gimple_in_ssa_p (cfun))
3853 phi = phi_nodes (loop_header);
3854 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_stored);
3857 /* Remove OMP_ATOMIC_STORE. */
3858 bsi_remove (&bsi, true);
3860 if (gimple_in_ssa_p (cfun))
3861 update_ssa (TODO_update_ssa_no_phi);
3863 return true;
3866 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
3868 GOMP_atomic_start ();
3869 *addr = rhs;
3870 GOMP_atomic_end ();
3872 The result is not globally atomic, but works so long as all parallel
3873 references are within #pragma omp atomic directives. According to
3874 responses received from omp@openmp.org, this appears to be within
3875 spec, which makes sense, since that's how several other compilers
3876 handle this situation as well.
3877 LOADED_VAL and ADDR are the operands of OMP_ATOMIC_LOAD we're expanding.
3878 STORED_VAL is the operand of the matching OMP_ATOMIC_STORE.
3880 We replace
3881 OMP_ATOMIC_LOAD (loaded_val, addr) with
3882 loaded_val = *addr;
3884 and replace
3885 OMP_ATOMIC_STORE (stored_val) with
3886 *addr = stored_val;
3889 static bool
3890 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
3891 tree addr, tree loaded_val, tree stored_val)
3893 block_stmt_iterator bsi;
3894 tree t;
3896 bsi = bsi_last (load_bb);
3897 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
3899 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
3900 t = build_function_call_expr (t, 0);
3901 force_gimple_operand_bsi (&bsi, t, true, NULL_TREE, true, BSI_SAME_STMT);
3903 t = build_gimple_modify_stmt (loaded_val, build_fold_indirect_ref (addr));
3904 if (gimple_in_ssa_p (cfun))
3905 SSA_NAME_DEF_STMT (loaded_val) = t;
3906 bsi_insert_before (&bsi, t, BSI_SAME_STMT);
3907 bsi_remove (&bsi, true);
3909 bsi = bsi_last (store_bb);
3910 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
3912 t = build_gimple_modify_stmt (build_fold_indirect_ref (unshare_expr (addr)),
3913 stored_val);
3914 bsi_insert_before (&bsi, t, BSI_SAME_STMT);
3916 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
3917 t = build_function_call_expr (t, 0);
3918 force_gimple_operand_bsi (&bsi, t, true, NULL_TREE, true, BSI_SAME_STMT);
3919 bsi_remove (&bsi, true);
3921 if (gimple_in_ssa_p (cfun))
3922 update_ssa (TODO_update_ssa_no_phi);
3923 return true;
3926 /* Expand an OMP_ATOMIC statement. We try to expand
3927 using expand_omp_atomic_fetch_op. If that fails, we try to
3928 call expand_omp_atomic_pipeline, and if that fails too, the
3929 ultimate fallback is wrapping the operation in a mutex
3930 (expand_omp_atomic_mutex). REGION is the atomic region built
3931 by build_omp_regions_1(). */
3933 static void
3934 expand_omp_atomic (struct omp_region *region)
3936 basic_block load_bb = region->entry, store_bb = region->exit;
3937 tree load = last_stmt (load_bb), store = last_stmt (store_bb);
3938 tree loaded_val = TREE_OPERAND (load, 0);
3939 tree addr = TREE_OPERAND (load, 1);
3940 tree stored_val = TREE_OPERAND (store, 0);
3941 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
3942 HOST_WIDE_INT index;
3944 /* Make sure the type is one of the supported sizes. */
3945 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
3946 index = exact_log2 (index);
3947 if (index >= 0 && index <= 4)
3949 unsigned int align = TYPE_ALIGN_UNIT (type);
3951 /* __sync builtins require strict data alignment. */
3952 if (exact_log2 (align) >= index)
3954 /* When possible, use specialized atomic update functions. */
3955 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
3956 && store_bb == single_succ (load_bb))
3958 if (expand_omp_atomic_fetch_op (load_bb, addr,
3959 loaded_val, stored_val, index))
3960 return;
3963 /* If we don't have specialized __sync builtins, try to implement
3964 the operation as a compare-and-swap loop. */
3965 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
3966 loaded_val, stored_val, index))
3967 return;
3971 /* The ultimate fallback is wrapping the operation in a mutex. */
3972 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
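/* A worked example of the size check above (purely illustrative): for a
   4-byte int, TYPE_SIZE_UNIT is 4, exact_log2 (4) == 2, so INDEX is 2 and
   the __sync_*_4 builtins are candidates; a 12-byte type gives
   exact_log2 (12) == -1 and falls straight through to the mutex fallback.
   The alignment test requires TYPE_ALIGN_UNIT to be at least the access
   size, since misaligned __sync operations are not supported.  */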
3976 /* Expand the parallel region tree rooted at REGION. Expansion
3977 proceeds in depth-first order. Innermost regions are expanded
3978 first. This way, parallel regions that require a new function to
3979 be created (e.g., OMP_PARALLEL) can be expanded without having any
3980 internal dependencies in their body. */
3982 static void
3983 expand_omp (struct omp_region *region)
3985 while (region)
3987 /* First, determine whether this is a combined parallel+workshare
3988 region. */
3989 if (region->type == OMP_PARALLEL)
3990 determine_parallel_type (region);
3992 if (region->inner)
3993 expand_omp (region->inner);
3995 switch (region->type)
3997 case OMP_PARALLEL:
3998 expand_omp_parallel (region);
3999 break;
4001 case OMP_FOR:
4002 expand_omp_for (region);
4003 break;
4005 case OMP_SECTIONS:
4006 expand_omp_sections (region);
4007 break;
4009 case OMP_SECTION:
4010 /* Individual omp sections are handled together with their
4011 parent OMP_SECTIONS region. */
4012 break;
4014 case OMP_SINGLE:
4015 expand_omp_single (region);
4016 break;
4018 case OMP_MASTER:
4019 case OMP_ORDERED:
4020 case OMP_CRITICAL:
4021 expand_omp_synch (region);
4022 break;
4024 case OMP_ATOMIC_LOAD:
4025 expand_omp_atomic (region);
4026 break;
4029 default:
4030 gcc_unreachable ();
4033 region = region->next;
4038 /* Helper for build_omp_regions. Scan the dominator tree starting at
4039 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
4040 true, the function ends once a single tree is built (otherwise, the
4041 whole forest of OMP constructs may be built). */
4043 static void
4044 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
4045 bool single_tree)
4047 block_stmt_iterator si;
4048 tree stmt;
4049 basic_block son;
4051 si = bsi_last (bb);
4052 if (!bsi_end_p (si) && OMP_DIRECTIVE_P (bsi_stmt (si)))
4054 struct omp_region *region;
4055 enum tree_code code;
4057 stmt = bsi_stmt (si);
4058 code = TREE_CODE (stmt);
4059 if (code == OMP_RETURN)
4061 /* STMT is the return point out of region PARENT. Mark it
4062 as the exit point and make PARENT the immediately
4063 enclosing region. */
4064 gcc_assert (parent);
4065 region = parent;
4066 region->exit = bb;
4067 parent = parent->outer;
4069 else if (code == OMP_ATOMIC_STORE)
4071 /* OMP_ATOMIC_STORE is analogous to OMP_RETURN, but matches with
4072 OMP_ATOMIC_LOAD. */
4073 gcc_assert (parent);
4074 gcc_assert (parent->type == OMP_ATOMIC_LOAD);
4075 region = parent;
4076 region->exit = bb;
4077 parent = parent->outer;
4080 else if (code == OMP_CONTINUE)
4082 gcc_assert (parent);
4083 parent->cont = bb;
4085 else if (code == OMP_SECTIONS_SWITCH)
4087 /* OMP_SECTIONS_SWITCH is part of OMP_SECTIONS, and we do nothing for
4088 it. */ ;
4090 else
4092 /* Otherwise, this directive becomes the parent for a new
4093 region. */
4094 region = new_omp_region (bb, code, parent);
4095 parent = region;
4099 if (single_tree && !parent)
4100 return;
4102 for (son = first_dom_son (CDI_DOMINATORS, bb);
4103 son;
4104 son = next_dom_son (CDI_DOMINATORS, son))
4105 build_omp_regions_1 (son, parent, single_tree);
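/* For example (illustrative source, not from the compiler itself),

       #pragma omp parallel
       {
         #pragma omp for
         for (i = 0; i < n; i++)
           ...
       }

   produces a region tree in which the OMP_FOR region is the INNER child of
   the OMP_PARALLEL region; the blocks holding OMP_RETURN, OMP_CONTINUE and
   OMP_ATOMIC_STORE close or continue whichever region is PARENT when they
   are reached in the dominator walk.  */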
4108 /* Build the tree of OMP regions rooted at ROOT, storing it in
4109 root_omp_region. */
4111 static void
4112 build_omp_regions_root (basic_block root)
4114 gcc_assert (root_omp_region == NULL);
4115 build_omp_regions_1 (root, NULL, true);
4116 gcc_assert (root_omp_region != NULL);
4119 /* Expand the OMP construct (and its subconstructs) starting in HEAD. */
4121 void
4122 omp_expand_local (basic_block head)
4124 build_omp_regions_root (head);
4125 if (dump_file && (dump_flags & TDF_DETAILS))
4127 fprintf (dump_file, "\nOMP region tree\n\n");
4128 dump_omp_region (dump_file, root_omp_region, 0);
4129 fprintf (dump_file, "\n");
4132 remove_exit_barriers (root_omp_region);
4133 expand_omp (root_omp_region);
4135 free_omp_regions ();
4138 /* Scan the CFG and build a tree of OMP regions, storing it in
4139 root_omp_region. */
4141 static void
4142 build_omp_regions (void)
4144 gcc_assert (root_omp_region == NULL);
4145 calculate_dominance_info (CDI_DOMINATORS);
4146 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
4150 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
4152 static unsigned int
4153 execute_expand_omp (void)
4155 build_omp_regions ();
4157 if (!root_omp_region)
4158 return 0;
4160 if (dump_file)
4162 fprintf (dump_file, "\nOMP region tree\n\n");
4163 dump_omp_region (dump_file, root_omp_region, 0);
4164 fprintf (dump_file, "\n");
4167 remove_exit_barriers (root_omp_region);
4169 expand_omp (root_omp_region);
4171 cleanup_tree_cfg ();
4173 free_omp_regions ();
4175 return 0;
4178 /* OMP expansion in SSA form. For testing purposes only. */
4180 static bool
4181 gate_expand_omp_ssa (void)
4183 return flag_openmp_ssa && flag_openmp != 0 && errorcount == 0;
4186 struct tree_opt_pass pass_expand_omp_ssa =
4188 "ompexpssa", /* name */
4189 gate_expand_omp_ssa, /* gate */
4190 execute_expand_omp, /* execute */
4191 NULL, /* sub */
4192 NULL, /* next */
4193 0, /* static_pass_number */
4194 0, /* tv_id */
4195 PROP_gimple_any, /* properties_required */
4196 PROP_gimple_lomp, /* properties_provided */
4197 0, /* properties_destroyed */
4198 0, /* todo_flags_start */
4199 TODO_dump_func, /* todo_flags_finish */
4200 0 /* letter */
4203 /* OMP expansion -- the default pass, run before creation of SSA form. */
4205 static bool
4206 gate_expand_omp (void)
4208 return ((!flag_openmp_ssa || !optimize)
4209 && flag_openmp != 0 && errorcount == 0);
4212 struct tree_opt_pass pass_expand_omp =
4214 "ompexp", /* name */
4215 gate_expand_omp, /* gate */
4216 execute_expand_omp, /* execute */
4217 NULL, /* sub */
4218 NULL, /* next */
4219 0, /* static_pass_number */
4220 0, /* tv_id */
4221 PROP_gimple_any, /* properties_required */
4222 PROP_gimple_lomp, /* properties_provided */
4223 0, /* properties_destroyed */
4224 0, /* todo_flags_start */
4225 TODO_dump_func, /* todo_flags_finish */
4226 0 /* letter */
4229 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
4231 /* Lower the OpenMP sections directive in *STMT_P. */
4233 static void
4234 lower_omp_sections (tree *stmt_p, omp_context *ctx)
4236 tree new_stmt, stmt, body, bind, block, ilist, olist, new_body, control;
4237 tree t, dlist;
4238 tree_stmt_iterator tsi;
4239 unsigned i, len;
4241 stmt = *stmt_p;
4243 push_gimplify_context ();
4245 dlist = NULL;
4246 ilist = NULL;
4247 lower_rec_input_clauses (OMP_SECTIONS_CLAUSES (stmt), &ilist, &dlist, ctx);
4249 tsi = tsi_start (OMP_SECTIONS_BODY (stmt));
4250 for (len = 0; !tsi_end_p (tsi); len++, tsi_next (&tsi))
4251 continue;
4253 tsi = tsi_start (OMP_SECTIONS_BODY (stmt));
4254 body = alloc_stmt_list ();
4255 for (i = 0; i < len; i++, tsi_next (&tsi))
4257 omp_context *sctx;
4258 tree sec_start, sec_end;
4260 sec_start = tsi_stmt (tsi);
4261 sctx = maybe_lookup_ctx (sec_start);
4262 gcc_assert (sctx);
4264 append_to_statement_list (sec_start, &body);
4266 lower_omp (&OMP_SECTION_BODY (sec_start), sctx);
4267 append_to_statement_list (OMP_SECTION_BODY (sec_start), &body);
4268 OMP_SECTION_BODY (sec_start) = NULL;
4270 if (i == len - 1)
4272 tree l = alloc_stmt_list ();
4273 lower_lastprivate_clauses (OMP_SECTIONS_CLAUSES (stmt), NULL,
4274 &l, ctx);
4275 append_to_statement_list (l, &body);
4276 OMP_SECTION_LAST (sec_start) = 1;
4279 sec_end = make_node (OMP_RETURN);
4280 append_to_statement_list (sec_end, &body);
4283 block = make_node (BLOCK);
4284 bind = build3 (BIND_EXPR, void_type_node, NULL, body, block);
4286 olist = NULL_TREE;
4287 lower_reduction_clauses (OMP_SECTIONS_CLAUSES (stmt), &olist, ctx);
4289 pop_gimplify_context (NULL_TREE);
4290 record_vars_into (ctx->block_vars, ctx->cb.dst_fn);
4292 new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
4293 TREE_SIDE_EFFECTS (new_stmt) = 1;
4295 new_body = alloc_stmt_list ();
4296 append_to_statement_list (ilist, &new_body);
4297 append_to_statement_list (stmt, &new_body);
4298 append_to_statement_list (make_node (OMP_SECTIONS_SWITCH), &new_body);
4299 append_to_statement_list (bind, &new_body);
4301 control = create_tmp_var (unsigned_type_node, ".section");
4302 t = build2 (OMP_CONTINUE, void_type_node, control, control);
4303 OMP_SECTIONS_CONTROL (stmt) = control;
4304 append_to_statement_list (t, &new_body);
4306 append_to_statement_list (olist, &new_body);
4307 append_to_statement_list (dlist, &new_body);
4309 maybe_catch_exception (&new_body);
4311 t = make_node (OMP_RETURN);
4312 OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SECTIONS_CLAUSES (stmt),
4313 OMP_CLAUSE_NOWAIT);
4314 append_to_statement_list (t, &new_body);
4316 BIND_EXPR_BODY (new_stmt) = new_body;
4317 OMP_SECTIONS_BODY (stmt) = NULL;
4319 *stmt_p = new_stmt;
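/* The resulting statement list has roughly this shape (illustrative
   pseudo-GIMPLE; the control variable is the ".section" temporary created
   above):

       <input/firstprivate setup>              <- ILIST
       OMP_SECTIONS <clauses>
       OMP_SECTIONS_SWITCH
       {
         OMP_SECTION  <body 1>  OMP_RETURN
         OMP_SECTION  <body 2>  [lastprivate]  OMP_RETURN
       }
       OMP_CONTINUE (.section, .section)
       <reduction code>  <destructors>         <- OLIST, DLIST
       OMP_RETURN [nowait]

   pass_expand_omp later turns this skeleton into GOMP_sections_* calls and
   a switch over the returned section number.  */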
4323 /* A subroutine of lower_omp_single. Expand the simple form of
4324 an OMP_SINGLE, without a copyprivate clause:
4326 if (GOMP_single_start ())
4327 BODY;
4328 [ GOMP_barrier (); ] -> unless 'nowait' is present.
4330 FIXME. It may be better to delay expanding the logic of this until
4331 pass_expand_omp. The expanded logic may make the job more difficult
4332 for a synchronization analysis pass. */
4334 static void
4335 lower_omp_single_simple (tree single_stmt, tree *pre_p)
4337 tree t;
4339 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_START], 0);
4340 t = build3 (COND_EXPR, void_type_node, t,
4341 OMP_SINGLE_BODY (single_stmt), NULL);
4342 gimplify_and_add (t, pre_p);
4346 /* A subroutine of lower_omp_single. Expand the simple form of
4347 an OMP_SINGLE, with a copyprivate clause:
4349 #pragma omp single copyprivate (a, b, c)
4351 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
4354 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
4356 BODY;
4357 copyout.a = a;
4358 copyout.b = b;
4359 copyout.c = c;
4360 GOMP_single_copy_end (&copyout);
4362 else
4364 a = copyout_p->a;
4365 b = copyout_p->b;
4366 c = copyout_p->c;
4368 GOMP_barrier ();
4371 FIXME. It may be better to delay expanding the logic of this until
4372 pass_expand_omp. The expanded logic may make the job more difficult
4373 for a synchronization analysis pass. */
4375 static void
4376 lower_omp_single_copy (tree single_stmt, tree *pre_p, omp_context *ctx)
4378 tree ptr_type, t, l0, l1, l2, copyin_seq;
4380 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
4382 ptr_type = build_pointer_type (ctx->record_type);
4383 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
4385 l0 = create_artificial_label ();
4386 l1 = create_artificial_label ();
4387 l2 = create_artificial_label ();
4389 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
4390 t = fold_convert (ptr_type, t);
4391 t = build_gimple_modify_stmt (ctx->receiver_decl, t);
4392 gimplify_and_add (t, pre_p);
4394 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
4395 build_int_cst (ptr_type, 0));
4396 t = build3 (COND_EXPR, void_type_node, t,
4397 build_and_jump (&l0), build_and_jump (&l1));
4398 gimplify_and_add (t, pre_p);
4400 t = build1 (LABEL_EXPR, void_type_node, l0);
4401 gimplify_and_add (t, pre_p);
4403 append_to_statement_list (OMP_SINGLE_BODY (single_stmt), pre_p);
4405 copyin_seq = NULL;
4406 lower_copyprivate_clauses (OMP_SINGLE_CLAUSES (single_stmt), pre_p,
4407 &copyin_seq, ctx);
4409 t = build_fold_addr_expr (ctx->sender_decl);
4410 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
4411 gimplify_and_add (t, pre_p);
4413 t = build_and_jump (&l2);
4414 gimplify_and_add (t, pre_p);
4416 t = build1 (LABEL_EXPR, void_type_node, l1);
4417 gimplify_and_add (t, pre_p);
4419 append_to_statement_list (copyin_seq, pre_p);
4421 t = build1 (LABEL_EXPR, void_type_node, l2);
4422 gimplify_and_add (t, pre_p);
4426 /* Expand code for an OpenMP single directive. */
4428 static void
4429 lower_omp_single (tree *stmt_p, omp_context *ctx)
4431 tree t, bind, block, single_stmt = *stmt_p, dlist;
4433 push_gimplify_context ();
4435 block = make_node (BLOCK);
4436 *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
4437 TREE_SIDE_EFFECTS (bind) = 1;
4439 lower_rec_input_clauses (OMP_SINGLE_CLAUSES (single_stmt),
4440 &BIND_EXPR_BODY (bind), &dlist, ctx);
4441 lower_omp (&OMP_SINGLE_BODY (single_stmt), ctx);
4443 append_to_statement_list (single_stmt, &BIND_EXPR_BODY (bind));
4445 if (ctx->record_type)
4446 lower_omp_single_copy (single_stmt, &BIND_EXPR_BODY (bind), ctx);
4447 else
4448 lower_omp_single_simple (single_stmt, &BIND_EXPR_BODY (bind));
4450 OMP_SINGLE_BODY (single_stmt) = NULL;
4452 append_to_statement_list (dlist, &BIND_EXPR_BODY (bind));
4454 maybe_catch_exception (&BIND_EXPR_BODY (bind));
4456 t = make_node (OMP_RETURN);
4457 OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SINGLE_CLAUSES (single_stmt),
4458 OMP_CLAUSE_NOWAIT);
4459 append_to_statement_list (t, &BIND_EXPR_BODY (bind));
4461 pop_gimplify_context (bind);
4463 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
4464 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
4468 /* Expand code for an OpenMP master directive. */
4470 static void
4471 lower_omp_master (tree *stmt_p, omp_context *ctx)
4473 tree bind, block, stmt = *stmt_p, lab = NULL, x;
4475 push_gimplify_context ();
4477 block = make_node (BLOCK);
4478 *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
4479 TREE_SIDE_EFFECTS (bind) = 1;
4481 append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
4483 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4484 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
4485 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
4486 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
4488 lower_omp (&OMP_MASTER_BODY (stmt), ctx);
4489 maybe_catch_exception (&OMP_MASTER_BODY (stmt));
4490 append_to_statement_list (OMP_MASTER_BODY (stmt), &BIND_EXPR_BODY (bind));
4491 OMP_MASTER_BODY (stmt) = NULL;
4493 x = build1 (LABEL_EXPR, void_type_node, lab);
4494 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
4496 x = make_node (OMP_RETURN);
4497 OMP_RETURN_NOWAIT (x) = 1;
4498 append_to_statement_list (x, &BIND_EXPR_BODY (bind));
4500 pop_gimplify_context (bind);
4502 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
4503 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
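/* The lowered form of a master directive therefore looks roughly like
   (illustrative; LAB is the artificial label built above):

       OMP_MASTER
       if (omp_get_thread_num () != 0) goto LAB;
       <master body>
       LAB:
       OMP_RETURN [nowait]

   The nowait flag is always set, since master has no implied barrier.  */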
4507 /* Expand code for an OpenMP ordered directive. */
4509 static void
4510 lower_omp_ordered (tree *stmt_p, omp_context *ctx)
4512 tree bind, block, stmt = *stmt_p, x;
4514 push_gimplify_context ();
4516 block = make_node (BLOCK);
4517 *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
4518 TREE_SIDE_EFFECTS (bind) = 1;
4520 append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
4522 x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
4523 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
4525 lower_omp (&OMP_ORDERED_BODY (stmt), ctx);
4526 maybe_catch_exception (&OMP_ORDERED_BODY (stmt));
4527 append_to_statement_list (OMP_ORDERED_BODY (stmt), &BIND_EXPR_BODY (bind));
4528 OMP_ORDERED_BODY (stmt) = NULL;
4530 x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
4531 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
4533 x = make_node (OMP_RETURN);
4534 OMP_RETURN_NOWAIT (x) = 1;
4535 append_to_statement_list (x, &BIND_EXPR_BODY (bind));
4537 pop_gimplify_context (bind);
4539 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
4540 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
4544 /* Gimplify an OMP_CRITICAL statement. This is a relatively simple
4545 substitution of a couple of function calls. The NAMED case, however,
4546 requires that the languages coordinate on a symbol name. It is therefore
4547 best put here in common code. */
4549 static GTY((param1_is (tree), param2_is (tree)))
4550 splay_tree critical_name_mutexes;
4552 static void
4553 lower_omp_critical (tree *stmt_p, omp_context *ctx)
4555 tree bind, block, stmt = *stmt_p;
4556 tree t, lock, unlock, name;
4558 name = OMP_CRITICAL_NAME (stmt);
4559 if (name)
4561 tree decl;
4562 splay_tree_node n;
4564 if (!critical_name_mutexes)
4565 critical_name_mutexes
4566 = splay_tree_new_ggc (splay_tree_compare_pointers);
4568 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
4569 if (n == NULL)
4571 char *new_str;
4573 decl = create_tmp_var_raw (ptr_type_node, NULL);
4575 new_str = ACONCAT ((".gomp_critical_user_",
4576 IDENTIFIER_POINTER (name), NULL));
4577 DECL_NAME (decl) = get_identifier (new_str);
4578 TREE_PUBLIC (decl) = 1;
4579 TREE_STATIC (decl) = 1;
4580 DECL_COMMON (decl) = 1;
4581 DECL_ARTIFICIAL (decl) = 1;
4582 DECL_IGNORED_P (decl) = 1;
4583 varpool_finalize_decl (decl);
4585 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
4586 (splay_tree_value) decl);
4588 else
4589 decl = (tree) n->value;
4591 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
4592 lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));
4594 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
4595 unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
4597 else
4599 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
4600 lock = build_call_expr (lock, 0);
4602 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
4603 unlock = build_call_expr (unlock, 0);
4606 push_gimplify_context ();
4608 block = make_node (BLOCK);
4609 *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
4610 TREE_SIDE_EFFECTS (bind) = 1;
4612 append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
4614 gimplify_and_add (lock, &BIND_EXPR_BODY (bind));
4616 lower_omp (&OMP_CRITICAL_BODY (stmt), ctx);
4617 maybe_catch_exception (&OMP_CRITICAL_BODY (stmt));
4618 append_to_statement_list (OMP_CRITICAL_BODY (stmt), &BIND_EXPR_BODY (bind));
4619 OMP_CRITICAL_BODY (stmt) = NULL;
4621 gimplify_and_add (unlock, &BIND_EXPR_BODY (bind));
4623 t = make_node (OMP_RETURN);
4624 OMP_RETURN_NOWAIT (t) = 1;
4625 append_to_statement_list (t, &BIND_EXPR_BODY (bind));
4627 pop_gimplify_context (bind);
4628 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
4629 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
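/* For a named critical section, e.g. (illustrative user code)

       #pragma omp critical (io)
       fprintf (...);

   the code above creates one common symbol ".gomp_critical_user_io" shared
   by every translation unit that uses the same name, and the body is
   bracketed by

       GOMP_critical_name_start (&.gomp_critical_user_io);
       ...
       GOMP_critical_name_end (&.gomp_critical_user_io);

   The unnamed form uses the global GOMP_critical_start/end pair instead.  */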
4633 /* A subroutine of lower_omp_for. Generate code to emit the predicate
4634 for a lastprivate clause. Given a loop control predicate of (V
4635 cond N2), we gate the clause on (!(V cond N2)). The lowered form
4636 is appended to *DLIST, and the iterator initialization is appended to
4637 *BODY_P. */
4639 static void
4640 lower_omp_for_lastprivate (struct omp_for_data *fd, tree *body_p,
4641 tree *dlist, struct omp_context *ctx)
4643 tree clauses, cond, stmts, vinit, t;
4644 enum tree_code cond_code;
4646 cond_code = fd->cond_code;
4647 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
4649 /* When possible, use a strict equality expression. This can let VRP-type
4650 optimizations deduce the value and remove a copy. */
4651 if (host_integerp (fd->step, 0))
4653 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->step);
4654 if (step == 1 || step == -1)
4655 cond_code = EQ_EXPR;
4658 cond = build2 (cond_code, boolean_type_node, fd->v, fd->n2);
4660 clauses = OMP_FOR_CLAUSES (fd->for_stmt);
4661 stmts = NULL;
4662 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
4663 if (stmts != NULL)
4665 append_to_statement_list (stmts, dlist);
4667 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
4668 vinit = fd->n1;
4669 if (cond_code == EQ_EXPR
4670 && host_integerp (fd->n2, 0)
4671 && ! integer_zerop (fd->n2))
4672 vinit = build_int_cst (TREE_TYPE (fd->v), 0);
4674 /* Initialize the iterator variable, so that threads that don't execute
4675 any iterations don't execute the lastprivate clauses by accident. */
4676 t = build_gimple_modify_stmt (fd->v, vinit);
4677 gimplify_and_add (t, body_p);
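/* Concretely (an invented example): for

       #pragma omp for lastprivate (x)
       for (i = 0; i < n; i += 1)
         x = f (i);

   the step is 1, so the gate becomes the strict test (i == n), and the
   copy-back "x = x_private;" emitted into *DLIST is guarded by it; the
   extra "i = 0;" emitted into *BODY_P keeps threads that receive no
   iterations from satisfying the predicate by accident.  */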
4682 /* Lower code for an OpenMP loop directive. */
4684 static void
4685 lower_omp_for (tree *stmt_p, omp_context *ctx)
4687 tree t, stmt, ilist, dlist, new_stmt, *body_p, *rhs_p;
4688 struct omp_for_data fd;
4690 stmt = *stmt_p;
4692 push_gimplify_context ();
4694 lower_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
4695 lower_omp (&OMP_FOR_BODY (stmt), ctx);
4697 /* Move the declarations of temporaries in the loop body before we make
4698 the body go away. */
4699 if (TREE_CODE (OMP_FOR_BODY (stmt)) == BIND_EXPR)
4700 record_vars_into (BIND_EXPR_VARS (OMP_FOR_BODY (stmt)), ctx->cb.dst_fn);
4702 new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
4703 TREE_SIDE_EFFECTS (new_stmt) = 1;
4704 body_p = &BIND_EXPR_BODY (new_stmt);
4706 /* The pre-body and input clauses go before the lowered OMP_FOR. */
4707 ilist = NULL;
4708 dlist = NULL;
4709 append_to_statement_list (OMP_FOR_PRE_BODY (stmt), body_p);
4710 lower_rec_input_clauses (OMP_FOR_CLAUSES (stmt), body_p, &dlist, ctx);
4712 /* Lower the header expressions. At this point, we can assume that
4713 the header is of the form:
4715 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
4717 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
4718 using the .omp_data_s mapping, if needed. */
4719 rhs_p = &GIMPLE_STMT_OPERAND (OMP_FOR_INIT (stmt), 1);
4720 if (!is_gimple_min_invariant (*rhs_p))
4721 *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
4723 rhs_p = &TREE_OPERAND (OMP_FOR_COND (stmt), 1);
4724 if (!is_gimple_min_invariant (*rhs_p))
4725 *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
4727 rhs_p = &TREE_OPERAND (GIMPLE_STMT_OPERAND (OMP_FOR_INCR (stmt), 1), 1);
4728 if (!is_gimple_min_invariant (*rhs_p))
4729 *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
4731 /* Once lowered, extract the bounds and clauses. */
4732 extract_omp_for_data (stmt, &fd);
4734 lower_omp_for_lastprivate (&fd, body_p, &dlist, ctx);
4736 append_to_statement_list (stmt, body_p);
4738 append_to_statement_list (OMP_FOR_BODY (stmt), body_p);
4740 t = build2 (OMP_CONTINUE, void_type_node, fd.v, fd.v);
4741 append_to_statement_list (t, body_p);
4743 /* After the loop, add exit clauses. */
4744 lower_reduction_clauses (OMP_FOR_CLAUSES (stmt), body_p, ctx);
4745 append_to_statement_list (dlist, body_p);
4747 maybe_catch_exception (body_p);
4749 /* Region exit marker goes at the end of the loop body. */
4750 t = make_node (OMP_RETURN);
4751 OMP_RETURN_NOWAIT (t) = fd.have_nowait;
4752 append_to_statement_list (t, body_p);
4754 pop_gimplify_context (NULL_TREE);
4755 record_vars_into (ctx->block_vars, ctx->cb.dst_fn);
4757 OMP_FOR_BODY (stmt) = NULL_TREE;
4758 OMP_FOR_PRE_BODY (stmt) = NULL_TREE;
4759 *stmt_p = new_stmt;
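/* After lowering, the new BIND_EXPR body is laid out roughly as follows
   (illustrative):

       <pre-body>  <input clause setup>
       <lastprivate iterator init>
       OMP_FOR (V = N1; V cond N2; V += STEP)
         <loop body>
       OMP_CONTINUE (V, V)
       <reduction code>  <destructors>         <- DLIST
       OMP_RETURN [nowait]

   pass_expand_omp later turns the OMP_FOR/OMP_CONTINUE/OMP_RETURN skeleton
   into explicit control flow and GOMP_loop_* calls.  */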
4762 /* Callback for walk_stmts. Check if *TP only contains OMP_FOR
4763 or OMP_SECTIONS. */
4765 static tree
4766 check_combined_parallel (tree *tp, int *walk_subtrees, void *data)
4768 struct walk_stmt_info *wi = data;
4769 int *info = wi->info;
4771 *walk_subtrees = 0;
4772 switch (TREE_CODE (*tp))
4774 case OMP_FOR:
4775 case OMP_SECTIONS:
4776 *info = *info == 0 ? 1 : -1;
4777 break;
4778 default:
4779 *info = -1;
4780 break;
4782 return NULL;
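/* In other words (illustrative): a parallel body such as

       #pragma omp parallel
       {
         #pragma omp for
         for (...) ...
       }

   leaves *INFO == 1 and the parallel is marked combined, while a body
   containing two workshare constructs, or any other executable statement
   next to the workshare, drives *INFO to -1 and the parallel stays
   ordinary.  */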
4785 /* Lower the OpenMP parallel directive in *STMT_P. CTX holds context
4786 information for the directive. */
4788 static void
4789 lower_omp_parallel (tree *stmt_p, omp_context *ctx)
4791 tree clauses, par_bind, par_body, new_body, bind;
4792 tree olist, ilist, par_olist, par_ilist;
4793 tree stmt, child_fn, t;
4795 stmt = *stmt_p;
4797 clauses = OMP_PARALLEL_CLAUSES (stmt);
4798 par_bind = OMP_PARALLEL_BODY (stmt);
4799 par_body = BIND_EXPR_BODY (par_bind);
4800 child_fn = ctx->cb.dst_fn;
4801 if (!OMP_PARALLEL_COMBINED (stmt))
4803 struct walk_stmt_info wi;
4804 int ws_num = 0;
4806 memset (&wi, 0, sizeof (wi));
4807 wi.callback = check_combined_parallel;
4808 wi.info = &ws_num;
4809 wi.val_only = true;
4810 walk_stmts (&wi, &par_bind);
4811 if (ws_num == 1)
4812 OMP_PARALLEL_COMBINED (stmt) = 1;
4815 push_gimplify_context ();
4817 par_olist = NULL_TREE;
4818 par_ilist = NULL_TREE;
4819 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
4820 lower_omp (&par_body, ctx);
4821 lower_reduction_clauses (clauses, &par_olist, ctx);
4823 /* Declare all the variables created by mapping and the variables
4824 declared in the scope of the parallel body. */
4825 record_vars_into (ctx->block_vars, child_fn);
4826 record_vars_into (BIND_EXPR_VARS (par_bind), child_fn);
4828 if (ctx->record_type)
4830 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_data_o");
4831 OMP_PARALLEL_DATA_ARG (stmt) = ctx->sender_decl;
4834 olist = NULL_TREE;
4835 ilist = NULL_TREE;
4836 lower_send_clauses (clauses, &ilist, &olist, ctx);
4837 lower_send_shared_vars (&ilist, &olist, ctx);
4839 /* Once all the expansions are done, sequence all the different
4840 fragments inside OMP_PARALLEL_BODY. */
4841 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
4842 append_to_statement_list (ilist, &BIND_EXPR_BODY (bind));
4844 new_body = alloc_stmt_list ();
4846 if (ctx->record_type)
4848 t = build_fold_addr_expr (ctx->sender_decl);
4849 /* fixup_child_record_type might have changed receiver_decl's type. */
4850 t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
4851 t = build_gimple_modify_stmt (ctx->receiver_decl, t);
4852 append_to_statement_list (t, &new_body);
4855 append_to_statement_list (par_ilist, &new_body);
4856 append_to_statement_list (par_body, &new_body);
4857 append_to_statement_list (par_olist, &new_body);
4858 maybe_catch_exception (&new_body);
4859 t = make_node (OMP_RETURN);
4860 append_to_statement_list (t, &new_body);
4861 OMP_PARALLEL_BODY (stmt) = new_body;
4863 append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
4864 append_to_statement_list (olist, &BIND_EXPR_BODY (bind));
4866 *stmt_p = bind;
4868 pop_gimplify_context (NULL_TREE);
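/* Sketch of the result (illustrative names; the real field and temporary
   names come from the mapping built during scanning):

       struct .omp_data_s { int *a; int n; } .omp_data_o;

       .omp_data_o.a = &a;                      <- lower_send_shared_vars
       .omp_data_o.n = n;                       <- lower_send_clauses
       OMP_PARALLEL [data arg &.omp_data_o]
         .omp_data_i = (struct .omp_data_s *) &.omp_data_o;
         <parallel body using .omp_data_i->a, .omp_data_i->n>
         OMP_RETURN
       <copy-back / reduction code>             <- OLIST

   pass_expand_omp later outlines the body into the child function and
   replaces OMP_PARALLEL with calls into libgomp.  */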
4872 /* Pass *TP back through the gimplifier within the context determined by WI.
4873 This handles replacement of DECL_VALUE_EXPR, as well as adjusting the
4874 flags on ADDR_EXPR. */
4876 static void
4877 lower_regimplify (tree *tp, struct walk_stmt_info *wi)
4879 enum gimplify_status gs;
4880 tree pre = NULL;
4882 if (wi->is_lhs)
4883 gs = gimplify_expr (tp, &pre, NULL, is_gimple_lvalue, fb_lvalue);
4884 else if (wi->val_only)
4885 gs = gimplify_expr (tp, &pre, NULL, is_gimple_val, fb_rvalue);
4886 else
4887 gs = gimplify_expr (tp, &pre, NULL, is_gimple_formal_tmp_var, fb_rvalue);
4888 gcc_assert (gs == GS_ALL_DONE);
4890 if (pre)
4891 tsi_link_before (&wi->tsi, pre, TSI_SAME_STMT);
4894 /* Copy EXP into a temporary. Insert the initialization statement before TSI. */
4896 static tree
4897 init_tmp_var (tree exp, tree_stmt_iterator *tsi)
4899 tree t, stmt;
4901 t = create_tmp_var (TREE_TYPE (exp), NULL);
4902 DECL_GIMPLE_REG_P (t) = 1;
4903 stmt = build_gimple_modify_stmt (t, exp);
4904 SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi)));
4905 tsi_link_before (tsi, stmt, TSI_SAME_STMT);
4907 return t;
4910 /* Similarly, but copy from the temporary and insert the statement
4911 after the iterator. */
4913 static tree
4914 save_tmp_var (tree exp, tree_stmt_iterator *tsi)
4916 tree t, stmt;
4918 t = create_tmp_var (TREE_TYPE (exp), NULL);
4919 DECL_GIMPLE_REG_P (t) = 1;
4920 stmt = build_gimple_modify_stmt (exp, t);
4921 SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi)));
4922 tsi_link_after (tsi, stmt, TSI_SAME_STMT);
4924 return t;
4927 /* Callback for walk_stmts. Lower the OpenMP directive pointed to by TP. */
4929 static tree
4930 lower_omp_1 (tree *tp, int *walk_subtrees, void *data)
4932 struct walk_stmt_info *wi = data;
4933 omp_context *ctx = wi->info;
4934 tree t = *tp;
4936 /* If we have issued syntax errors, avoid doing any heavy lifting.
4937 Just replace the OpenMP directives with a NOP to avoid
4938 confusing RTL expansion. */
4939 if (errorcount && OMP_DIRECTIVE_P (*tp))
4941 *tp = build_empty_stmt ();
4942 return NULL_TREE;
4945 *walk_subtrees = 0;
4946 switch (TREE_CODE (*tp))
4948 case OMP_PARALLEL:
4949 ctx = maybe_lookup_ctx (t);
4950 lower_omp_parallel (tp, ctx);
4951 break;
4953 case OMP_FOR:
4954 ctx = maybe_lookup_ctx (t);
4955 gcc_assert (ctx);
4956 lower_omp_for (tp, ctx);
4957 break;
4959 case OMP_SECTIONS:
4960 ctx = maybe_lookup_ctx (t);
4961 gcc_assert (ctx);
4962 lower_omp_sections (tp, ctx);
4963 break;
4965 case OMP_SINGLE:
4966 ctx = maybe_lookup_ctx (t);
4967 gcc_assert (ctx);
4968 lower_omp_single (tp, ctx);
4969 break;
4971 case OMP_MASTER:
4972 ctx = maybe_lookup_ctx (t);
4973 gcc_assert (ctx);
4974 lower_omp_master (tp, ctx);
4975 break;
4977 case OMP_ORDERED:
4978 ctx = maybe_lookup_ctx (t);
4979 gcc_assert (ctx);
4980 lower_omp_ordered (tp, ctx);
4981 break;
4983 case OMP_CRITICAL:
4984 ctx = maybe_lookup_ctx (t);
4985 gcc_assert (ctx);
4986 lower_omp_critical (tp, ctx);
4987 break;
4989 case VAR_DECL:
4990 if (ctx && DECL_HAS_VALUE_EXPR_P (t))
4992 lower_regimplify (&t, wi);
4993 if (wi->val_only)
4995 if (wi->is_lhs)
4996 t = save_tmp_var (t, &wi->tsi);
4997 else
4998 t = init_tmp_var (t, &wi->tsi);
5000 *tp = t;
5002 break;
5004 case ADDR_EXPR:
5005 if (ctx)
5006 lower_regimplify (tp, wi);
5007 break;
5009 case ARRAY_REF:
5010 case ARRAY_RANGE_REF:
5011 case REALPART_EXPR:
5012 case IMAGPART_EXPR:
5013 case COMPONENT_REF:
5014 case VIEW_CONVERT_EXPR:
5015 if (ctx)
5016 lower_regimplify (tp, wi);
5017 break;
5019 case INDIRECT_REF:
5020 if (ctx)
5022 wi->is_lhs = false;
5023 wi->val_only = true;
5024 lower_regimplify (&TREE_OPERAND (t, 0), wi);
5026 break;
5028 default:
5029 if (!TYPE_P (t) && !DECL_P (t))
5030 *walk_subtrees = 1;
5031 break;
5034 return NULL_TREE;
5037 static void
5038 lower_omp (tree *stmt_p, omp_context *ctx)
5040 struct walk_stmt_info wi;
5042 memset (&wi, 0, sizeof (wi));
5043 wi.callback = lower_omp_1;
5044 wi.info = ctx;
5045 wi.val_only = true;
5046 wi.want_locations = true;
5048 walk_stmts (&wi, stmt_p);
5051 /* Main entry point. */
5053 static unsigned int
5054 execute_lower_omp (void)
5056 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
5057 delete_omp_context);
5059 scan_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
5060 gcc_assert (parallel_nesting_level == 0);
5062 if (all_contexts->root)
5063 lower_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
5065 if (all_contexts)
5067 splay_tree_delete (all_contexts);
5068 all_contexts = NULL;
5070 return 0;
5073 static bool
5074 gate_lower_omp (void)
5076 return flag_openmp != 0;
5079 struct tree_opt_pass pass_lower_omp =
5081 "omplower", /* name */
5082 gate_lower_omp, /* gate */
5083 execute_lower_omp, /* execute */
5084 NULL, /* sub */
5085 NULL, /* next */
5086 0, /* static_pass_number */
5087 0, /* tv_id */
5088 PROP_gimple_any, /* properties_required */
5089 PROP_gimple_lomp, /* properties_provided */
5090 0, /* properties_destroyed */
5091 0, /* todo_flags_start */
5092 TODO_dump_func, /* todo_flags_finish */
5093 0 /* letter */
5096 /* The following is a utility to diagnose OpenMP structured block violations.
5097 It is not part of the "omplower" pass, as that's invoked too late. It
5098 should be invoked by the respective front ends after gimplification. */
5100 static splay_tree all_labels;
5102 /* Check for mismatched contexts and generate an error if needed. Return
5103 true if an error is detected. */
5105 static bool
5106 diagnose_sb_0 (tree *stmt_p, tree branch_ctx, tree label_ctx)
5108 bool exit_p = true;
5110 if ((label_ctx ? TREE_VALUE (label_ctx) : NULL) == branch_ctx)
5111 return false;
5113 /* Try to avoid confusing the user by producing an error message
5114 with the correct "exit" or "enter" verbiage. We prefer "exit"
5115 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
5116 if (branch_ctx == NULL)
5117 exit_p = false;
5118 else
5120 while (label_ctx)
5122 if (TREE_VALUE (label_ctx) == branch_ctx)
5124 exit_p = false;
5125 break;
5127 label_ctx = TREE_CHAIN (label_ctx);
5131 if (exit_p)
5132 error ("invalid exit from OpenMP structured block");
5133 else
5134 error ("invalid entry to OpenMP structured block");
5136 *stmt_p = build_empty_stmt ();
5137 return true;
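/* A typical rejected case (illustrative user code):

       goto inside;
       #pragma omp critical
       {
       inside:
         ...
       }

   Here the branch context is NULL while the label's context records the
   OMP_CRITICAL, so neither test above matches and an "invalid entry to
   OpenMP structured block" error is produced.  */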
5140 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
5141 where in the tree each label is found. */
5143 static tree
5144 diagnose_sb_1 (tree *tp, int *walk_subtrees, void *data)
5146 struct walk_stmt_info *wi = data;
5147 tree context = (tree) wi->info;
5148 tree inner_context;
5149 tree t = *tp;
5151 *walk_subtrees = 0;
5152 switch (TREE_CODE (t))
5154 case OMP_PARALLEL:
5155 case OMP_SECTIONS:
5156 case OMP_SINGLE:
5157 walk_tree (&OMP_CLAUSES (t), diagnose_sb_1, wi, NULL);
5158 /* FALLTHRU */
5159 case OMP_SECTION:
5160 case OMP_MASTER:
5161 case OMP_ORDERED:
5162 case OMP_CRITICAL:
5163 /* The minimal context here is just a tree of statements. */
5164 inner_context = tree_cons (NULL, t, context);
5165 wi->info = inner_context;
5166 walk_stmts (wi, &OMP_BODY (t));
5167 wi->info = context;
5168 break;
5170 case OMP_FOR:
5171 walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_1, wi, NULL);
5172 inner_context = tree_cons (NULL, t, context);
5173 wi->info = inner_context;
5174 walk_tree (&OMP_FOR_INIT (t), diagnose_sb_1, wi, NULL);
5175 walk_tree (&OMP_FOR_COND (t), diagnose_sb_1, wi, NULL);
5176 walk_tree (&OMP_FOR_INCR (t), diagnose_sb_1, wi, NULL);
5177 walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
5178 walk_stmts (wi, &OMP_FOR_BODY (t));
5179 wi->info = context;
5180 break;
5182 case LABEL_EXPR:
5183 splay_tree_insert (all_labels, (splay_tree_key) LABEL_EXPR_LABEL (t),
5184 (splay_tree_value) context);
5185 break;
5187 default:
5188 break;
5191 return NULL_TREE;
5194 /* Pass 2: Check each branch and see if its context differs from
5195 the destination label's context. */
5197 static tree
5198 diagnose_sb_2 (tree *tp, int *walk_subtrees, void *data)
5200 struct walk_stmt_info *wi = data;
5201 tree context = (tree) wi->info;
5202 splay_tree_node n;
5203 tree t = *tp;
5205 *walk_subtrees = 0;
5206 switch (TREE_CODE (t))
5208 case OMP_PARALLEL:
5209 case OMP_SECTIONS:
5210 case OMP_SINGLE:
5211 walk_tree (&OMP_CLAUSES (t), diagnose_sb_2, wi, NULL);
5212 /* FALLTHRU */
5213 case OMP_SECTION:
5214 case OMP_MASTER:
5215 case OMP_ORDERED:
5216 case OMP_CRITICAL:
5217 wi->info = t;
5218 walk_stmts (wi, &OMP_BODY (t));
5219 wi->info = context;
5220 break;
5222 case OMP_FOR:
5223 walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_2, wi, NULL);
5224 wi->info = t;
5225 walk_tree (&OMP_FOR_INIT (t), diagnose_sb_2, wi, NULL);
5226 walk_tree (&OMP_FOR_COND (t), diagnose_sb_2, wi, NULL);
5227 walk_tree (&OMP_FOR_INCR (t), diagnose_sb_2, wi, NULL);
5228 walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
5229 walk_stmts (wi, &OMP_FOR_BODY (t));
5230 wi->info = context;
5231 break;
5233 case GOTO_EXPR:
5235 tree lab = GOTO_DESTINATION (t);
5236 if (TREE_CODE (lab) != LABEL_DECL)
5237 break;
5239 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
5240 diagnose_sb_0 (tp, context, n ? (tree) n->value : NULL_TREE);
5242 break;
5244 case SWITCH_EXPR:
5246 tree vec = SWITCH_LABELS (t);
5247 int i, len = TREE_VEC_LENGTH (vec);
5248 for (i = 0; i < len; ++i)
5250 tree lab = CASE_LABEL (TREE_VEC_ELT (vec, i));
5251 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
5252 if (diagnose_sb_0 (tp, context, (tree) n->value))
5253 break;
5256 break;
5258 case RETURN_EXPR:
5259 diagnose_sb_0 (tp, context, NULL_TREE);
5260 break;
5262 default:
5263 break;
5266 return NULL_TREE;
5269 void
5270 diagnose_omp_structured_block_errors (tree fndecl)
5272 tree save_current = current_function_decl;
5273 struct walk_stmt_info wi;
5275 current_function_decl = fndecl;
5277 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
5279 memset (&wi, 0, sizeof (wi));
5280 wi.callback = diagnose_sb_1;
5281 walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));
5283 memset (&wi, 0, sizeof (wi));
5284 wi.callback = diagnose_sb_2;
5285 wi.want_locations = true;
5286 wi.want_return_expr = true;
5287 walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));
5289 splay_tree_delete (all_labels);
5290 all_labels = NULL;
5292 current_function_decl = save_current;
5295 #include "gt-omp-low.h"