1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
6 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
7 Free Software Foundation, Inc.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "gimple.h"
32 #include "tree-iterator.h"
33 #include "tree-inline.h"
34 #include "langhooks.h"
35 #include "diagnostic-core.h"
36 #include "tree-flow.h"
37 #include "timevar.h"
38 #include "flags.h"
39 #include "function.h"
40 #include "expr.h"
41 #include "tree-pass.h"
42 #include "ggc.h"
43 #include "except.h"
44 #include "splay-tree.h"
45 #include "optabs.h"
46 #include "cfgloop.h"
49 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
50 phases. The first phase scans the function looking for OMP statements
51 and then for variables that must be replaced to satisfy data sharing
52 clauses. The second phase expands code for the constructs, as well as
53 re-gimplifying things when variables have been replaced with complex
54 expressions.
56 Final code generation is done by pass_expand_omp. The flowgraph is
57 scanned for parallel regions which are then moved to a new
58 function, to be invoked by the thread library. */
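/* As a purely illustrative sketch (the names below are made up), a simple
   parallel region such as

	#pragma omp parallel shared (a)
	  a = a + 1;

   is outlined into a child function and replaced by calls into libgomp,
   roughly:

	.omp_data_o.a = a;
	__builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
	foo._omp_fn.0 (&.omp_data_o);
	__builtin_GOMP_parallel_end ();
	a = .omp_data_o.a;

   where foo._omp_fn.0 receives a pointer to the .omp_data_s record built
   for the data sharing clauses.  */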
60 /* Context structure. Used to store information about each parallel
61 directive in the code. */
63 typedef struct omp_context
65 /* This field must be at the beginning, as we do "inheritance": Some
66 callback functions for tree-inline.c (e.g., omp_copy_decl)
67 receive a copy_body_data pointer that is up-casted to an
68 omp_context pointer. */
69 copy_body_data cb;
71 /* The tree of contexts corresponding to the encountered constructs. */
72 struct omp_context *outer;
73 gimple stmt;
75 /* Map variables to fields in a structure that allows communication
76 between sending and receiving threads. */
77 splay_tree field_map;
78 tree record_type;
79 tree sender_decl;
80 tree receiver_decl;
 82 /* These are used just by task contexts, if the task firstprivate fn is
 83 needed. srecord_type is used to communicate from the thread that
 84 encountered the task construct to the task firstprivate fn;
 85 record_type is allocated by GOMP_task, initialized by the task
 86 firstprivate fn and passed to the task body fn. */
87 splay_tree sfield_map;
88 tree srecord_type;
90 /* A chain of variables to add to the top-level block surrounding the
91 construct. In the case of a parallel, this is in the child function. */
92 tree block_vars;
94 /* What to do with variables with implicitly determined sharing
95 attributes. */
96 enum omp_clause_default_kind default_kind;
98 /* Nesting depth of this context. Used to beautify error messages re
99 invalid gotos. The outermost ctx is depth 1, with depth 0 being
100 reserved for the main body of the function. */
101 int depth;
103 /* True if this parallel directive is nested within another. */
104 bool is_nested;
105 } omp_context;
108 struct omp_for_data_loop
110 tree v, n1, n2, step;
111 enum tree_code cond_code;
114 /* A structure describing the main elements of a parallel loop. */
116 struct omp_for_data
118 struct omp_for_data_loop loop;
119 tree chunk_size;
120 gimple for_stmt;
121 tree pre, iter_type;
122 int collapse;
123 bool have_nowait, have_ordered;
124 enum omp_clause_schedule_kind sched_kind;
125 struct omp_for_data_loop *loops;
129 static splay_tree all_contexts;
130 static int taskreg_nesting_level;
131 struct omp_region *root_omp_region;
132 static bitmap task_shared_vars;
134 static void scan_omp (gimple_seq, omp_context *);
135 static tree scan_omp_1_op (tree *, int *, void *);
137 #define WALK_SUBSTMTS \
138 case GIMPLE_BIND: \
139 case GIMPLE_TRY: \
140 case GIMPLE_CATCH: \
141 case GIMPLE_EH_FILTER: \
142 /* The sub-statements for these should be walked. */ \
143 *handled_ops_p = false; \
144 break;
146 /* Convenience function for calling scan_omp_1_op on tree operands. */
148 static inline tree
149 scan_omp_op (tree *tp, omp_context *ctx)
151 struct walk_stmt_info wi;
153 memset (&wi, 0, sizeof (wi));
154 wi.info = ctx;
155 wi.want_locations = true;
157 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
160 static void lower_omp (gimple_seq, omp_context *);
161 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
162 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
164 /* Find an OpenMP clause of type KIND within CLAUSES. */
166 tree
167 find_omp_clause (tree clauses, enum omp_clause_code kind)
169 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
170 if (OMP_CLAUSE_CODE (clauses) == kind)
171 return clauses;
173 return NULL_TREE;
176 /* Return true if CTX is for an omp parallel. */
178 static inline bool
179 is_parallel_ctx (omp_context *ctx)
181 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
185 /* Return true if CTX is for an omp task. */
187 static inline bool
188 is_task_ctx (omp_context *ctx)
190 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
194 /* Return true if CTX is for an omp parallel or omp task. */
196 static inline bool
197 is_taskreg_ctx (omp_context *ctx)
199 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
200 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
204 /* Return true if REGION is a combined parallel+workshare region. */
206 static inline bool
207 is_combined_parallel (struct omp_region *region)
209 return region->is_combined_parallel;
213 /* Extract the header elements of parallel loop FOR_STMT and store
214 them into *FD. */
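/* As a rough illustration, for a loop such as

	#pragma omp for
	for (i = 0; i <= n; i += 4)

   the corresponding omp_for_data_loop ends up with

	v = i, n1 = 0, n2 = n + 1, step = 4, cond_code = LT_EXPR

   because LE_EXPR/GE_EXPR conditions are canonicalized below to
   LT_EXPR/GT_EXPR by adjusting the final value by one.  */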
216 static void
217 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
218 struct omp_for_data_loop *loops)
220 tree t, var, *collapse_iter, *collapse_count;
221 tree count = NULL_TREE, iter_type = long_integer_type_node;
222 struct omp_for_data_loop *loop;
223 int i;
224 struct omp_for_data_loop dummy_loop;
225 location_t loc = gimple_location (for_stmt);
227 fd->for_stmt = for_stmt;
228 fd->pre = NULL;
229 fd->collapse = gimple_omp_for_collapse (for_stmt);
230 if (fd->collapse > 1)
231 fd->loops = loops;
232 else
233 fd->loops = &fd->loop;
235 fd->have_nowait = fd->have_ordered = false;
236 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
237 fd->chunk_size = NULL_TREE;
238 collapse_iter = NULL;
239 collapse_count = NULL;
241 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
242 switch (OMP_CLAUSE_CODE (t))
244 case OMP_CLAUSE_NOWAIT:
245 fd->have_nowait = true;
246 break;
247 case OMP_CLAUSE_ORDERED:
248 fd->have_ordered = true;
249 break;
250 case OMP_CLAUSE_SCHEDULE:
251 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
252 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
253 break;
254 case OMP_CLAUSE_COLLAPSE:
255 if (fd->collapse > 1)
257 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
258 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
260 default:
261 break;
264 /* FIXME: for now map schedule(auto) to schedule(static).
265 There should be analysis to determine whether all iterations
266 are approximately the same amount of work (then schedule(static)
267 is best) or if it varies (then schedule(dynamic,N) is better). */
268 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
270 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
271 gcc_assert (fd->chunk_size == NULL);
273 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
274 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
275 gcc_assert (fd->chunk_size == NULL);
276 else if (fd->chunk_size == NULL)
278 /* We only need to compute a default chunk size for ordered
279 static loops and dynamic loops. */
280 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
281 || fd->have_ordered
282 || fd->collapse > 1)
283 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
284 ? integer_zero_node : integer_one_node;
287 for (i = 0; i < fd->collapse; i++)
289 if (fd->collapse == 1)
290 loop = &fd->loop;
291 else if (loops != NULL)
292 loop = loops + i;
293 else
294 loop = &dummy_loop;
297 loop->v = gimple_omp_for_index (for_stmt, i);
298 gcc_assert (SSA_VAR_P (loop->v));
299 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
300 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
301 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
302 loop->n1 = gimple_omp_for_initial (for_stmt, i);
304 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
305 loop->n2 = gimple_omp_for_final (for_stmt, i);
306 switch (loop->cond_code)
308 case LT_EXPR:
309 case GT_EXPR:
310 break;
311 case LE_EXPR:
312 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
313 loop->n2 = fold_build2_loc (loc,
314 POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
315 loop->n2, size_one_node);
316 else
317 loop->n2 = fold_build2_loc (loc,
318 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
319 build_int_cst (TREE_TYPE (loop->n2), 1));
320 loop->cond_code = LT_EXPR;
321 break;
322 case GE_EXPR:
323 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
324 loop->n2 = fold_build2_loc (loc,
325 POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
326 loop->n2, size_int (-1));
327 else
328 loop->n2 = fold_build2_loc (loc,
329 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
330 build_int_cst (TREE_TYPE (loop->n2), 1));
331 loop->cond_code = GT_EXPR;
332 break;
333 default:
334 gcc_unreachable ();
337 t = gimple_omp_for_incr (for_stmt, i);
338 gcc_assert (TREE_OPERAND (t, 0) == var);
339 switch (TREE_CODE (t))
341 case PLUS_EXPR:
342 case POINTER_PLUS_EXPR:
343 loop->step = TREE_OPERAND (t, 1);
344 break;
345 case MINUS_EXPR:
346 loop->step = TREE_OPERAND (t, 1);
347 loop->step = fold_build1_loc (loc,
348 NEGATE_EXPR, TREE_TYPE (loop->step),
349 loop->step);
350 break;
351 default:
352 gcc_unreachable ();
355 if (iter_type != long_long_unsigned_type_node)
357 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
358 iter_type = long_long_unsigned_type_node;
359 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
360 && TYPE_PRECISION (TREE_TYPE (loop->v))
361 >= TYPE_PRECISION (iter_type))
363 tree n;
365 if (loop->cond_code == LT_EXPR)
366 n = fold_build2_loc (loc,
367 PLUS_EXPR, TREE_TYPE (loop->v),
368 loop->n2, loop->step);
369 else
370 n = loop->n1;
371 if (TREE_CODE (n) != INTEGER_CST
372 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
373 iter_type = long_long_unsigned_type_node;
375 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
376 > TYPE_PRECISION (iter_type))
378 tree n1, n2;
380 if (loop->cond_code == LT_EXPR)
382 n1 = loop->n1;
383 n2 = fold_build2_loc (loc,
384 PLUS_EXPR, TREE_TYPE (loop->v),
385 loop->n2, loop->step);
387 else
389 n1 = fold_build2_loc (loc,
390 MINUS_EXPR, TREE_TYPE (loop->v),
391 loop->n2, loop->step);
392 n2 = loop->n1;
394 if (TREE_CODE (n1) != INTEGER_CST
395 || TREE_CODE (n2) != INTEGER_CST
396 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
397 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
398 iter_type = long_long_unsigned_type_node;
402 if (collapse_count && *collapse_count == NULL)
404 if ((i == 0 || count != NULL_TREE)
405 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
406 && TREE_CONSTANT (loop->n1)
407 && TREE_CONSTANT (loop->n2)
408 && TREE_CODE (loop->step) == INTEGER_CST)
410 tree itype = TREE_TYPE (loop->v);
412 if (POINTER_TYPE_P (itype))
413 itype
414 = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
415 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
416 t = fold_build2_loc (loc,
417 PLUS_EXPR, itype,
418 fold_convert_loc (loc, itype, loop->step), t);
419 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
420 fold_convert_loc (loc, itype, loop->n2));
421 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
422 fold_convert_loc (loc, itype, loop->n1));
423 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
424 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
425 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
426 fold_build1_loc (loc, NEGATE_EXPR, itype,
427 fold_convert_loc (loc, itype,
428 loop->step)));
429 else
430 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
431 fold_convert_loc (loc, itype, loop->step));
432 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
433 if (count != NULL_TREE)
434 count = fold_build2_loc (loc,
435 MULT_EXPR, long_long_unsigned_type_node,
436 count, t);
437 else
438 count = t;
439 if (TREE_CODE (count) != INTEGER_CST)
440 count = NULL_TREE;
442 else
443 count = NULL_TREE;
447 if (count)
449 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
450 iter_type = long_long_unsigned_type_node;
451 else
452 iter_type = long_integer_type_node;
454 else if (collapse_iter && *collapse_iter != NULL)
455 iter_type = TREE_TYPE (*collapse_iter);
456 fd->iter_type = iter_type;
457 if (collapse_iter && *collapse_iter == NULL)
458 *collapse_iter = create_tmp_var (iter_type, ".iter");
459 if (collapse_count && *collapse_count == NULL)
461 if (count)
462 *collapse_count = fold_convert_loc (loc, iter_type, count);
463 else
464 *collapse_count = create_tmp_var (iter_type, ".count");
467 if (fd->collapse > 1)
469 fd->loop.v = *collapse_iter;
470 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
471 fd->loop.n2 = *collapse_count;
472 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
473 fd->loop.cond_code = LT_EXPR;
478 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
479 is the immediate dominator of PAR_ENTRY_BB, return true if there
480 are no data dependencies that would prevent expanding the parallel
481 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
483 When expanding a combined parallel+workshare region, the call to
484 the child function may need additional arguments in the case of
485 GIMPLE_OMP_FOR regions. In some cases, these arguments are
486 computed out of variables passed in from the parent to the child
487 via 'struct .omp_data_s'. For instance:
489 #pragma omp parallel for schedule (guided, i * 4)
490 for (j ...)
492 Is lowered into:
494 # BLOCK 2 (PAR_ENTRY_BB)
495 .omp_data_o.i = i;
 496 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]
498 # BLOCK 3 (WS_ENTRY_BB)
499 .omp_data_i = &.omp_data_o;
500 D.1667 = .omp_data_i->i;
501 D.1598 = D.1667 * 4;
502 #pragma omp for schedule (guided, D.1598)
504 When we outline the parallel region, the call to the child function
505 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
506 that value is computed *after* the call site. So, in principle we
507 cannot do the transformation.
509 To see whether the code in WS_ENTRY_BB blocks the combined
510 parallel+workshare call, we collect all the variables used in the
 511 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
512 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
513 call.
515 FIXME. If we had the SSA form built at this point, we could merely
516 hoist the code in block 3 into block 2 and be done with it. But at
517 this point we don't have dataflow information and though we could
518 hack something up here, it is really not worth the aggravation. */
520 static bool
521 workshare_safe_to_combine_p (basic_block ws_entry_bb)
523 struct omp_for_data fd;
524 gimple ws_stmt = last_stmt (ws_entry_bb);
526 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
527 return true;
529 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
531 extract_omp_for_data (ws_stmt, &fd, NULL);
533 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
534 return false;
535 if (fd.iter_type != long_integer_type_node)
536 return false;
538 /* FIXME. We give up too easily here. If any of these arguments
539 are not constants, they will likely involve variables that have
540 been mapped into fields of .omp_data_s for sharing with the child
541 function. With appropriate data flow, it would be possible to
542 see through this. */
543 if (!is_gimple_min_invariant (fd.loop.n1)
544 || !is_gimple_min_invariant (fd.loop.n2)
545 || !is_gimple_min_invariant (fd.loop.step)
546 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
547 return false;
549 return true;
553 /* Collect additional arguments needed to emit a combined
554 parallel+workshare call. WS_STMT is the workshare directive being
555 expanded. */
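/* As a sketch of how these extra arguments are consumed (the exact entry
   point lives in libgomp), a combined

	#pragma omp parallel for schedule (dynamic, 4)

   ends up invoking something like

	GOMP_parallel_loop_dynamic_start (fn, data, 0, n1, n2, step, 4);

   i.e. the loop bounds, step and chunk size collected here are appended
   to the usual (fn, data, num_threads) arguments of the parallel call.  */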
557 static VEC(tree,gc) *
558 get_ws_args_for (gimple ws_stmt)
560 tree t;
561 location_t loc = gimple_location (ws_stmt);
562 VEC(tree,gc) *ws_args;
564 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
566 struct omp_for_data fd;
568 extract_omp_for_data (ws_stmt, &fd, NULL);
570 ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));
572 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
573 VEC_quick_push (tree, ws_args, t);
575 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
576 VEC_quick_push (tree, ws_args, t);
578 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
579 VEC_quick_push (tree, ws_args, t);
581 if (fd.chunk_size)
583 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
584 VEC_quick_push (tree, ws_args, t);
587 return ws_args;
589 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
591 /* Number of sections is equal to the number of edges from the
592 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
593 the exit of the sections region. */
594 basic_block bb = single_succ (gimple_bb (ws_stmt));
595 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
596 ws_args = VEC_alloc (tree, gc, 1);
597 VEC_quick_push (tree, ws_args, t);
598 return ws_args;
601 gcc_unreachable ();
605 /* Discover whether REGION is a combined parallel+workshare region. */
607 static void
608 determine_parallel_type (struct omp_region *region)
610 basic_block par_entry_bb, par_exit_bb;
611 basic_block ws_entry_bb, ws_exit_bb;
613 if (region == NULL || region->inner == NULL
614 || region->exit == NULL || region->inner->exit == NULL
615 || region->inner->cont == NULL)
616 return;
618 /* We only support parallel+for and parallel+sections. */
619 if (region->type != GIMPLE_OMP_PARALLEL
620 || (region->inner->type != GIMPLE_OMP_FOR
621 && region->inner->type != GIMPLE_OMP_SECTIONS))
622 return;
624 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
625 WS_EXIT_BB -> PAR_EXIT_BB. */
626 par_entry_bb = region->entry;
627 par_exit_bb = region->exit;
628 ws_entry_bb = region->inner->entry;
629 ws_exit_bb = region->inner->exit;
631 if (single_succ (par_entry_bb) == ws_entry_bb
632 && single_succ (ws_exit_bb) == par_exit_bb
633 && workshare_safe_to_combine_p (ws_entry_bb)
634 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
635 || (last_and_only_stmt (ws_entry_bb)
636 && last_and_only_stmt (par_exit_bb))))
638 gimple ws_stmt = last_stmt (ws_entry_bb);
640 if (region->inner->type == GIMPLE_OMP_FOR)
642 /* If this is a combined parallel loop, we need to determine
643 whether or not to use the combined library calls. There
644 are two cases where we do not apply the transformation:
645 static loops and any kind of ordered loop. In the first
646 case, we already open code the loop so there is no need
647 to do anything else. In the latter case, the combined
648 parallel loop call would still need extra synchronization
649 to implement ordered semantics, so there would not be any
650 gain in using the combined call. */
651 tree clauses = gimple_omp_for_clauses (ws_stmt);
652 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
653 if (c == NULL
654 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
655 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
657 region->is_combined_parallel = false;
658 region->inner->is_combined_parallel = false;
659 return;
663 region->is_combined_parallel = true;
664 region->inner->is_combined_parallel = true;
665 region->ws_args = get_ws_args_for (ws_stmt);
670 /* Return true if EXPR is variable sized. */
672 static inline bool
673 is_variable_sized (const_tree expr)
675 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
678 /* Return true if DECL is a reference type. */
680 static inline bool
681 is_reference (tree decl)
683 return lang_hooks.decls.omp_privatize_by_reference (decl);
686 /* Lookup variables in the decl or field splay trees. The "maybe" form
687 allows for the variable form to not have been entered, otherwise we
688 assert that the variable must have been entered. */
690 static inline tree
691 lookup_decl (tree var, omp_context *ctx)
693 tree *n;
694 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
695 return *n;
698 static inline tree
699 maybe_lookup_decl (const_tree var, omp_context *ctx)
701 tree *n;
702 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
703 return n ? *n : NULL_TREE;
706 static inline tree
707 lookup_field (tree var, omp_context *ctx)
709 splay_tree_node n;
710 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
711 return (tree) n->value;
714 static inline tree
715 lookup_sfield (tree var, omp_context *ctx)
717 splay_tree_node n;
718 n = splay_tree_lookup (ctx->sfield_map
719 ? ctx->sfield_map : ctx->field_map,
720 (splay_tree_key) var);
721 return (tree) n->value;
724 static inline tree
725 maybe_lookup_field (tree var, omp_context *ctx)
727 splay_tree_node n;
728 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
729 return n ? (tree) n->value : NULL_TREE;
732 /* Return true if DECL should be copied by pointer. SHARED_CTX is
733 the parallel context if DECL is to be shared. */
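/* Roughly: aggregates, addressable variables and variables whose
   location may be visible to another thread are passed by pointer (the
   .omp_data_s field holds the address of DECL), while a small scalar
   whose location is not otherwise accessible can simply be copied into
   and out of the record (copy-in/copy-out).  */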
735 static bool
736 use_pointer_for_field (tree decl, omp_context *shared_ctx)
738 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
739 return true;
741 /* We can only use copy-in/copy-out semantics for shared variables
742 when we know the value is not accessible from an outer scope. */
743 if (shared_ctx)
745 /* ??? Trivially accessible from anywhere. But why would we even
746 be passing an address in this case? Should we simply assert
747 this to be false, or should we have a cleanup pass that removes
748 these from the list of mappings? */
749 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
750 return true;
752 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
753 without analyzing the expression whether or not its location
754 is accessible to anyone else. In the case of nested parallel
755 regions it certainly may be. */
756 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
757 return true;
759 /* Do not use copy-in/copy-out for variables that have their
760 address taken. */
761 if (TREE_ADDRESSABLE (decl))
762 return true;
764 /* Disallow copy-in/out in nested parallel if
765 decl is shared in outer parallel, otherwise
766 each thread could store the shared variable
767 in its own copy-in location, making the
768 variable no longer really shared. */
769 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
771 omp_context *up;
773 for (up = shared_ctx->outer; up; up = up->outer)
774 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
775 break;
777 if (up)
779 tree c;
781 for (c = gimple_omp_taskreg_clauses (up->stmt);
782 c; c = OMP_CLAUSE_CHAIN (c))
783 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
784 && OMP_CLAUSE_DECL (c) == decl)
785 break;
787 if (c)
788 goto maybe_mark_addressable_and_ret;
792 /* For tasks avoid using copy-in/out, unless they are readonly
793 (in which case just copy-in is used). As tasks can be
 794 deferred or executed in a different thread, when GOMP_task
795 returns, the task hasn't necessarily terminated. */
796 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
798 tree outer;
799 maybe_mark_addressable_and_ret:
800 outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
801 if (is_gimple_reg (outer))
803 /* Taking address of OUTER in lower_send_shared_vars
804 might need regimplification of everything that uses the
805 variable. */
806 if (!task_shared_vars)
807 task_shared_vars = BITMAP_ALLOC (NULL);
808 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
809 TREE_ADDRESSABLE (outer) = 1;
811 return true;
815 return false;
818 /* Create a new VAR_DECL and copy information from VAR to it. */
820 tree
821 copy_var_decl (tree var, tree name, tree type)
823 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
825 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
826 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
827 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
828 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
829 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
830 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
831 TREE_USED (copy) = 1;
832 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
834 return copy;
837 /* Construct a new automatic decl similar to VAR. */
839 static tree
840 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
842 tree copy = copy_var_decl (var, name, type);
844 DECL_CONTEXT (copy) = current_function_decl;
845 DECL_CHAIN (copy) = ctx->block_vars;
846 ctx->block_vars = copy;
848 return copy;
851 static tree
852 omp_copy_decl_1 (tree var, omp_context *ctx)
854 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
857 /* Build tree nodes to access the field for VAR on the receiver side. */
859 static tree
860 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
862 tree x, field = lookup_field (var, ctx);
864 /* If the receiver record type was remapped in the child function,
865 remap the field into the new record type. */
866 x = maybe_lookup_field (field, ctx);
867 if (x != NULL)
868 field = x;
870 x = build_simple_mem_ref (ctx->receiver_decl);
871 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
872 if (by_ref)
873 x = build_simple_mem_ref (x);
875 return x;
878 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
879 of a parallel, this is a component reference; for workshare constructs
880 this is some variable. */
882 static tree
883 build_outer_var_ref (tree var, omp_context *ctx)
885 tree x;
887 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
888 x = var;
889 else if (is_variable_sized (var))
891 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
892 x = build_outer_var_ref (x, ctx);
893 x = build_simple_mem_ref (x);
895 else if (is_taskreg_ctx (ctx))
897 bool by_ref = use_pointer_for_field (var, NULL);
898 x = build_receiver_ref (var, by_ref, ctx);
900 else if (ctx->outer)
901 x = lookup_decl (var, ctx->outer);
902 else if (is_reference (var))
903 /* This can happen with orphaned constructs. If var is reference, it is
904 possible it is shared and as such valid. */
905 x = var;
906 else
907 gcc_unreachable ();
909 if (is_reference (var))
910 x = build_simple_mem_ref (x);
912 return x;
915 /* Build tree nodes to access the field for VAR on the sender side. */
917 static tree
918 build_sender_ref (tree var, omp_context *ctx)
920 tree field = lookup_sfield (var, ctx);
921 return build3 (COMPONENT_REF, TREE_TYPE (field),
922 ctx->sender_decl, field, NULL);
925 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
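/* As used by the callers below, MASK selects which record(s) receive the
   new field: the 1 bit requests a field in the receiver record
   (ctx->record_type, mapped in ctx->field_map), the 2 bit a field in the
   sender record used by task constructs (ctx->srecord_type, mapped in
   ctx->sfield_map), and a MASK of 3 installs the field in both.  */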
927 static void
928 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
930 tree field, type, sfield = NULL_TREE;
932 gcc_assert ((mask & 1) == 0
933 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
934 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
935 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
937 type = TREE_TYPE (var);
938 if (by_ref)
939 type = build_pointer_type (type);
940 else if ((mask & 3) == 1 && is_reference (var))
941 type = TREE_TYPE (type);
943 field = build_decl (DECL_SOURCE_LOCATION (var),
944 FIELD_DECL, DECL_NAME (var), type);
946 /* Remember what variable this field was created for. This does have a
947 side effect of making dwarf2out ignore this member, so for helpful
948 debugging we clear it later in delete_omp_context. */
949 DECL_ABSTRACT_ORIGIN (field) = var;
950 if (type == TREE_TYPE (var))
952 DECL_ALIGN (field) = DECL_ALIGN (var);
953 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
954 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
956 else
957 DECL_ALIGN (field) = TYPE_ALIGN (type);
959 if ((mask & 3) == 3)
961 insert_field_into_struct (ctx->record_type, field);
962 if (ctx->srecord_type)
964 sfield = build_decl (DECL_SOURCE_LOCATION (var),
965 FIELD_DECL, DECL_NAME (var), type);
966 DECL_ABSTRACT_ORIGIN (sfield) = var;
967 DECL_ALIGN (sfield) = DECL_ALIGN (field);
968 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
969 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
970 insert_field_into_struct (ctx->srecord_type, sfield);
973 else
975 if (ctx->srecord_type == NULL_TREE)
977 tree t;
979 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
980 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
981 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
983 sfield = build_decl (DECL_SOURCE_LOCATION (var),
984 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
985 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
986 insert_field_into_struct (ctx->srecord_type, sfield);
987 splay_tree_insert (ctx->sfield_map,
988 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
989 (splay_tree_value) sfield);
992 sfield = field;
993 insert_field_into_struct ((mask & 1) ? ctx->record_type
994 : ctx->srecord_type, field);
997 if (mask & 1)
998 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
999 (splay_tree_value) field);
1000 if ((mask & 2) && ctx->sfield_map)
1001 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
1002 (splay_tree_value) sfield);
1005 static tree
1006 install_var_local (tree var, omp_context *ctx)
1008 tree new_var = omp_copy_decl_1 (var, ctx);
1009 insert_decl_map (&ctx->cb, var, new_var);
1010 return new_var;
1013 /* Adjust the replacement for DECL in CTX for the new context. This means
1014 copying the DECL_VALUE_EXPR, and fixing up the type. */
1016 static void
1017 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1019 tree new_decl, size;
1021 new_decl = lookup_decl (decl, ctx);
1023 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1025 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1026 && DECL_HAS_VALUE_EXPR_P (decl))
1028 tree ve = DECL_VALUE_EXPR (decl);
1029 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1030 SET_DECL_VALUE_EXPR (new_decl, ve);
1031 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1034 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1036 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1037 if (size == error_mark_node)
1038 size = TYPE_SIZE (TREE_TYPE (new_decl));
1039 DECL_SIZE (new_decl) = size;
1041 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1042 if (size == error_mark_node)
1043 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1044 DECL_SIZE_UNIT (new_decl) = size;
1048 /* The callback for remap_decl. Search all containing contexts for a
1049 mapping of the variable; this avoids having to duplicate the splay
1050 tree ahead of time. We know a mapping doesn't already exist in the
1051 given context. Create new mappings to implement default semantics. */
1053 static tree
1054 omp_copy_decl (tree var, copy_body_data *cb)
1056 omp_context *ctx = (omp_context *) cb;
1057 tree new_var;
1059 if (TREE_CODE (var) == LABEL_DECL)
1061 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1062 DECL_CONTEXT (new_var) = current_function_decl;
1063 insert_decl_map (&ctx->cb, var, new_var);
1064 return new_var;
1067 while (!is_taskreg_ctx (ctx))
1069 ctx = ctx->outer;
1070 if (ctx == NULL)
1071 return var;
1072 new_var = maybe_lookup_decl (var, ctx);
1073 if (new_var)
1074 return new_var;
1077 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1078 return var;
1080 return error_mark_node;
1084 /* Return the parallel region associated with STMT. */
1086 /* Debugging dumps for parallel regions. */
1087 void dump_omp_region (FILE *, struct omp_region *, int);
1088 void debug_omp_region (struct omp_region *);
1089 void debug_all_omp_regions (void);
1091 /* Dump the parallel region tree rooted at REGION. */
1093 void
1094 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1096 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1097 gimple_code_name[region->type]);
1099 if (region->inner)
1100 dump_omp_region (file, region->inner, indent + 4);
1102 if (region->cont)
1104 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1105 region->cont->index);
1108 if (region->exit)
1109 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1110 region->exit->index);
1111 else
1112 fprintf (file, "%*s[no exit marker]\n", indent, "");
1114 if (region->next)
1115 dump_omp_region (file, region->next, indent);
1118 DEBUG_FUNCTION void
1119 debug_omp_region (struct omp_region *region)
1121 dump_omp_region (stderr, region, 0);
1124 DEBUG_FUNCTION void
1125 debug_all_omp_regions (void)
1127 dump_omp_region (stderr, root_omp_region, 0);
1131 /* Create a new parallel region starting at STMT inside region PARENT. */
1133 struct omp_region *
1134 new_omp_region (basic_block bb, enum gimple_code type,
1135 struct omp_region *parent)
1137 struct omp_region *region = XCNEW (struct omp_region);
1139 region->outer = parent;
1140 region->entry = bb;
1141 region->type = type;
1143 if (parent)
1145 /* This is a nested region. Add it to the list of inner
1146 regions in PARENT. */
1147 region->next = parent->inner;
1148 parent->inner = region;
1150 else
1152 /* This is a toplevel region. Add it to the list of toplevel
1153 regions in ROOT_OMP_REGION. */
1154 region->next = root_omp_region;
1155 root_omp_region = region;
1158 return region;
1161 /* Release the memory associated with the region tree rooted at REGION. */
1163 static void
1164 free_omp_region_1 (struct omp_region *region)
1166 struct omp_region *i, *n;
1168 for (i = region->inner; i ; i = n)
1170 n = i->next;
1171 free_omp_region_1 (i);
1174 free (region);
1177 /* Release the memory for the entire omp region tree. */
1179 void
1180 free_omp_regions (void)
1182 struct omp_region *r, *n;
1183 for (r = root_omp_region; r ; r = n)
1185 n = r->next;
1186 free_omp_region_1 (r);
1188 root_omp_region = NULL;
1192 /* Create a new context, with OUTER_CTX being the surrounding context. */
1194 static omp_context *
1195 new_omp_context (gimple stmt, omp_context *outer_ctx)
1197 omp_context *ctx = XCNEW (omp_context);
1199 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1200 (splay_tree_value) ctx);
1201 ctx->stmt = stmt;
1203 if (outer_ctx)
1205 ctx->outer = outer_ctx;
1206 ctx->cb = outer_ctx->cb;
1207 ctx->cb.block = NULL;
1208 ctx->depth = outer_ctx->depth + 1;
1210 else
1212 ctx->cb.src_fn = current_function_decl;
1213 ctx->cb.dst_fn = current_function_decl;
1214 ctx->cb.src_node = cgraph_node (current_function_decl);
1215 ctx->cb.dst_node = ctx->cb.src_node;
1216 ctx->cb.src_cfun = cfun;
1217 ctx->cb.copy_decl = omp_copy_decl;
1218 ctx->cb.eh_lp_nr = 0;
1219 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1220 ctx->depth = 1;
1223 ctx->cb.decl_map = pointer_map_create ();
1225 return ctx;
1228 static gimple_seq maybe_catch_exception (gimple_seq);
1230 /* Finalize task copyfn. */
1232 static void
1233 finalize_task_copyfn (gimple task_stmt)
1235 struct function *child_cfun;
1236 tree child_fn, old_fn;
1237 gimple_seq seq, new_seq;
1238 gimple bind;
1240 child_fn = gimple_omp_task_copy_fn (task_stmt);
1241 if (child_fn == NULL_TREE)
1242 return;
1244 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1246 /* Inform the callgraph about the new function. */
1247 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1248 = cfun->curr_properties;
1250 old_fn = current_function_decl;
1251 push_cfun (child_cfun);
1252 current_function_decl = child_fn;
1253 bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
1254 seq = gimple_seq_alloc ();
1255 gimple_seq_add_stmt (&seq, bind);
1256 new_seq = maybe_catch_exception (seq);
1257 if (new_seq != seq)
1259 bind = gimple_build_bind (NULL, new_seq, NULL);
1260 seq = gimple_seq_alloc ();
1261 gimple_seq_add_stmt (&seq, bind);
1263 gimple_set_body (child_fn, seq);
1264 pop_cfun ();
1265 current_function_decl = old_fn;
1267 cgraph_add_new_function (child_fn, false);
 1270 /* Destroy an omp_context data structure. Called through the splay tree
1271 value delete callback. */
1273 static void
1274 delete_omp_context (splay_tree_value value)
1276 omp_context *ctx = (omp_context *) value;
1278 pointer_map_destroy (ctx->cb.decl_map);
1280 if (ctx->field_map)
1281 splay_tree_delete (ctx->field_map);
1282 if (ctx->sfield_map)
1283 splay_tree_delete (ctx->sfield_map);
1285 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1286 it produces corrupt debug information. */
1287 if (ctx->record_type)
1289 tree t;
1290 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1291 DECL_ABSTRACT_ORIGIN (t) = NULL;
1293 if (ctx->srecord_type)
1295 tree t;
1296 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1297 DECL_ABSTRACT_ORIGIN (t) = NULL;
1300 if (is_task_ctx (ctx))
1301 finalize_task_copyfn (ctx->stmt);
1303 XDELETE (ctx);
1306 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1307 context. */
1309 static void
1310 fixup_child_record_type (omp_context *ctx)
1312 tree f, type = ctx->record_type;
1314 /* ??? It isn't sufficient to just call remap_type here, because
1315 variably_modified_type_p doesn't work the way we expect for
1316 record types. Testing each field for whether it needs remapping
1317 and creating a new record by hand works, however. */
1318 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1319 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1320 break;
1321 if (f)
1323 tree name, new_fields = NULL;
1325 type = lang_hooks.types.make_type (RECORD_TYPE);
1326 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1327 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1328 TYPE_DECL, name, type);
1329 TYPE_NAME (type) = name;
1331 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1333 tree new_f = copy_node (f);
1334 DECL_CONTEXT (new_f) = type;
1335 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1336 DECL_CHAIN (new_f) = new_fields;
1337 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1338 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1339 &ctx->cb, NULL);
1340 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1341 &ctx->cb, NULL);
1342 new_fields = new_f;
1344 /* Arrange to be able to look up the receiver field
1345 given the sender field. */
1346 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1347 (splay_tree_value) new_f);
1349 TYPE_FIELDS (type) = nreverse (new_fields);
1350 layout_type (type);
1353 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1356 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1357 specified by CLAUSES. */
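/* For instance (a rough sketch), for

	#pragma omp parallel shared (a) firstprivate (b) private (c)

   this installs fields for "a" and "b" in the .omp_data_s record so
   their values can cross into the child function, plus local
   replacement decls for all three inside the child; "c" gets no field
   because its value never crosses the boundary.  */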
1359 static void
1360 scan_sharing_clauses (tree clauses, omp_context *ctx)
1362 tree c, decl;
1363 bool scan_array_reductions = false;
1365 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1367 bool by_ref;
1369 switch (OMP_CLAUSE_CODE (c))
1371 case OMP_CLAUSE_PRIVATE:
1372 decl = OMP_CLAUSE_DECL (c);
1373 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1374 goto do_private;
1375 else if (!is_variable_sized (decl))
1376 install_var_local (decl, ctx);
1377 break;
1379 case OMP_CLAUSE_SHARED:
1380 gcc_assert (is_taskreg_ctx (ctx));
1381 decl = OMP_CLAUSE_DECL (c);
1382 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1383 || !is_variable_sized (decl));
1384 /* Global variables don't need to be copied,
1385 the receiver side will use them directly. */
1386 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1387 break;
1388 by_ref = use_pointer_for_field (decl, ctx);
1389 if (! TREE_READONLY (decl)
1390 || TREE_ADDRESSABLE (decl)
1391 || by_ref
1392 || is_reference (decl))
1394 install_var_field (decl, by_ref, 3, ctx);
1395 install_var_local (decl, ctx);
1396 break;
1398 /* We don't need to copy const scalar vars back. */
1399 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1400 goto do_private;
1402 case OMP_CLAUSE_LASTPRIVATE:
1403 /* Let the corresponding firstprivate clause create
1404 the variable. */
1405 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1406 break;
1407 /* FALLTHRU */
1409 case OMP_CLAUSE_FIRSTPRIVATE:
1410 case OMP_CLAUSE_REDUCTION:
1411 decl = OMP_CLAUSE_DECL (c);
1412 do_private:
1413 if (is_variable_sized (decl))
1415 if (is_task_ctx (ctx))
1416 install_var_field (decl, false, 1, ctx);
1417 break;
1419 else if (is_taskreg_ctx (ctx))
1421 bool global
1422 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1423 by_ref = use_pointer_for_field (decl, NULL);
1425 if (is_task_ctx (ctx)
1426 && (global || by_ref || is_reference (decl)))
1428 install_var_field (decl, false, 1, ctx);
1429 if (!global)
1430 install_var_field (decl, by_ref, 2, ctx);
1432 else if (!global)
1433 install_var_field (decl, by_ref, 3, ctx);
1435 install_var_local (decl, ctx);
1436 break;
1438 case OMP_CLAUSE_COPYPRIVATE:
1439 case OMP_CLAUSE_COPYIN:
1440 decl = OMP_CLAUSE_DECL (c);
1441 by_ref = use_pointer_for_field (decl, NULL);
1442 install_var_field (decl, by_ref, 3, ctx);
1443 break;
1445 case OMP_CLAUSE_DEFAULT:
1446 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1447 break;
1449 case OMP_CLAUSE_IF:
1450 case OMP_CLAUSE_NUM_THREADS:
1451 case OMP_CLAUSE_SCHEDULE:
1452 if (ctx->outer)
1453 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1454 break;
1456 case OMP_CLAUSE_NOWAIT:
1457 case OMP_CLAUSE_ORDERED:
1458 case OMP_CLAUSE_COLLAPSE:
1459 case OMP_CLAUSE_UNTIED:
1460 break;
1462 default:
1463 gcc_unreachable ();
1467 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1469 switch (OMP_CLAUSE_CODE (c))
1471 case OMP_CLAUSE_LASTPRIVATE:
1472 /* Let the corresponding firstprivate clause create
1473 the variable. */
1474 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1475 scan_array_reductions = true;
1476 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1477 break;
1478 /* FALLTHRU */
1480 case OMP_CLAUSE_PRIVATE:
1481 case OMP_CLAUSE_FIRSTPRIVATE:
1482 case OMP_CLAUSE_REDUCTION:
1483 decl = OMP_CLAUSE_DECL (c);
1484 if (is_variable_sized (decl))
1485 install_var_local (decl, ctx);
1486 fixup_remapped_decl (decl, ctx,
1487 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1488 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1489 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1490 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1491 scan_array_reductions = true;
1492 break;
1494 case OMP_CLAUSE_SHARED:
1495 decl = OMP_CLAUSE_DECL (c);
1496 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1497 fixup_remapped_decl (decl, ctx, false);
1498 break;
1500 case OMP_CLAUSE_COPYPRIVATE:
1501 case OMP_CLAUSE_COPYIN:
1502 case OMP_CLAUSE_DEFAULT:
1503 case OMP_CLAUSE_IF:
1504 case OMP_CLAUSE_NUM_THREADS:
1505 case OMP_CLAUSE_SCHEDULE:
1506 case OMP_CLAUSE_NOWAIT:
1507 case OMP_CLAUSE_ORDERED:
1508 case OMP_CLAUSE_COLLAPSE:
1509 case OMP_CLAUSE_UNTIED:
1510 break;
1512 default:
1513 gcc_unreachable ();
1517 if (scan_array_reductions)
1518 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1519 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1520 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1522 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1523 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1525 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1526 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1527 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1530 /* Create a new name for omp child function. Returns an identifier. */
1532 static GTY(()) unsigned int tmp_ompfn_id_num;
1534 static tree
1535 create_omp_child_function_name (bool task_copy)
1537 return (clone_function_name (current_function_decl,
1538 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1541 /* Build a decl for the omp child function. It'll not contain a body
1542 yet, just the bare decl. */
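/* The resulting decl has, roughly, the shape

	static void foo._omp_fn.N (void *.omp_data_i);

   for a parallel or task body, and

	static void foo._omp_cpyfn.N (void *.omp_data_o, void *.omp_data_i);

   for a task copy function; the suffixes come from
   create_omp_child_function_name above.  */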
1544 static void
1545 create_omp_child_function (omp_context *ctx, bool task_copy)
1547 tree decl, type, name, t;
1549 name = create_omp_child_function_name (task_copy);
1550 if (task_copy)
1551 type = build_function_type_list (void_type_node, ptr_type_node,
1552 ptr_type_node, NULL_TREE);
1553 else
1554 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1556 decl = build_decl (gimple_location (ctx->stmt),
1557 FUNCTION_DECL, name, type);
1559 if (!task_copy)
1560 ctx->cb.dst_fn = decl;
1561 else
1562 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1564 TREE_STATIC (decl) = 1;
1565 TREE_USED (decl) = 1;
1566 DECL_ARTIFICIAL (decl) = 1;
1567 DECL_NAMELESS (decl) = 1;
1568 DECL_IGNORED_P (decl) = 0;
1569 TREE_PUBLIC (decl) = 0;
1570 DECL_UNINLINABLE (decl) = 1;
1571 DECL_EXTERNAL (decl) = 0;
1572 DECL_CONTEXT (decl) = NULL_TREE;
1573 DECL_INITIAL (decl) = make_node (BLOCK);
1575 t = build_decl (DECL_SOURCE_LOCATION (decl),
1576 RESULT_DECL, NULL_TREE, void_type_node);
1577 DECL_ARTIFICIAL (t) = 1;
1578 DECL_IGNORED_P (t) = 1;
1579 DECL_CONTEXT (t) = decl;
1580 DECL_RESULT (decl) = t;
1582 t = build_decl (DECL_SOURCE_LOCATION (decl),
1583 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1584 DECL_ARTIFICIAL (t) = 1;
1585 DECL_NAMELESS (t) = 1;
1586 DECL_ARG_TYPE (t) = ptr_type_node;
1587 DECL_CONTEXT (t) = current_function_decl;
1588 TREE_USED (t) = 1;
1589 DECL_ARGUMENTS (decl) = t;
1590 if (!task_copy)
1591 ctx->receiver_decl = t;
1592 else
1594 t = build_decl (DECL_SOURCE_LOCATION (decl),
1595 PARM_DECL, get_identifier (".omp_data_o"),
1596 ptr_type_node);
1597 DECL_ARTIFICIAL (t) = 1;
1598 DECL_NAMELESS (t) = 1;
1599 DECL_ARG_TYPE (t) = ptr_type_node;
1600 DECL_CONTEXT (t) = current_function_decl;
1601 TREE_USED (t) = 1;
1602 TREE_ADDRESSABLE (t) = 1;
1603 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1604 DECL_ARGUMENTS (decl) = t;
1607 /* Allocate memory for the function structure. The call to
1608 allocate_struct_function clobbers CFUN, so we need to restore
1609 it afterward. */
1610 push_struct_function (decl);
1611 cfun->function_end_locus = gimple_location (ctx->stmt);
1612 pop_cfun ();
1616 /* Scan an OpenMP parallel directive. */
1618 static void
1619 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1621 omp_context *ctx;
1622 tree name;
1623 gimple stmt = gsi_stmt (*gsi);
1625 /* Ignore parallel directives with empty bodies, unless there
1626 are copyin clauses. */
1627 if (optimize > 0
1628 && empty_body_p (gimple_omp_body (stmt))
1629 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1630 OMP_CLAUSE_COPYIN) == NULL)
1632 gsi_replace (gsi, gimple_build_nop (), false);
1633 return;
1636 ctx = new_omp_context (stmt, outer_ctx);
1637 if (taskreg_nesting_level > 1)
1638 ctx->is_nested = true;
1639 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1640 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1641 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1642 name = create_tmp_var_name (".omp_data_s");
1643 name = build_decl (gimple_location (stmt),
1644 TYPE_DECL, name, ctx->record_type);
1645 DECL_ARTIFICIAL (name) = 1;
1646 DECL_NAMELESS (name) = 1;
1647 TYPE_NAME (ctx->record_type) = name;
1648 create_omp_child_function (ctx, false);
1649 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1651 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1652 scan_omp (gimple_omp_body (stmt), ctx);
1654 if (TYPE_FIELDS (ctx->record_type) == NULL)
1655 ctx->record_type = ctx->receiver_decl = NULL;
1656 else
1658 layout_type (ctx->record_type);
1659 fixup_child_record_type (ctx);
1663 /* Scan an OpenMP task directive. */
1665 static void
1666 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1668 omp_context *ctx;
1669 tree name, t;
1670 gimple stmt = gsi_stmt (*gsi);
1671 location_t loc = gimple_location (stmt);
1673 /* Ignore task directives with empty bodies. */
1674 if (optimize > 0
1675 && empty_body_p (gimple_omp_body (stmt)))
1677 gsi_replace (gsi, gimple_build_nop (), false);
1678 return;
1681 ctx = new_omp_context (stmt, outer_ctx);
1682 if (taskreg_nesting_level > 1)
1683 ctx->is_nested = true;
1684 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1685 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1686 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1687 name = create_tmp_var_name (".omp_data_s");
1688 name = build_decl (gimple_location (stmt),
1689 TYPE_DECL, name, ctx->record_type);
1690 DECL_ARTIFICIAL (name) = 1;
1691 DECL_NAMELESS (name) = 1;
1692 TYPE_NAME (ctx->record_type) = name;
1693 create_omp_child_function (ctx, false);
1694 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1696 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1698 if (ctx->srecord_type)
1700 name = create_tmp_var_name (".omp_data_a");
1701 name = build_decl (gimple_location (stmt),
1702 TYPE_DECL, name, ctx->srecord_type);
1703 DECL_ARTIFICIAL (name) = 1;
1704 DECL_NAMELESS (name) = 1;
1705 TYPE_NAME (ctx->srecord_type) = name;
1706 create_omp_child_function (ctx, true);
1709 scan_omp (gimple_omp_body (stmt), ctx);
1711 if (TYPE_FIELDS (ctx->record_type) == NULL)
1713 ctx->record_type = ctx->receiver_decl = NULL;
1714 t = build_int_cst (long_integer_type_node, 0);
1715 gimple_omp_task_set_arg_size (stmt, t);
1716 t = build_int_cst (long_integer_type_node, 1);
1717 gimple_omp_task_set_arg_align (stmt, t);
1719 else
1721 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1722 /* Move VLA fields to the end. */
1723 p = &TYPE_FIELDS (ctx->record_type);
1724 while (*p)
1725 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1726 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1728 *q = *p;
1729 *p = TREE_CHAIN (*p);
1730 TREE_CHAIN (*q) = NULL_TREE;
1731 q = &TREE_CHAIN (*q);
1733 else
1734 p = &DECL_CHAIN (*p);
1735 *p = vla_fields;
1736 layout_type (ctx->record_type);
1737 fixup_child_record_type (ctx);
1738 if (ctx->srecord_type)
1739 layout_type (ctx->srecord_type);
1740 t = fold_convert_loc (loc, long_integer_type_node,
1741 TYPE_SIZE_UNIT (ctx->record_type));
1742 gimple_omp_task_set_arg_size (stmt, t);
1743 t = build_int_cst (long_integer_type_node,
1744 TYPE_ALIGN_UNIT (ctx->record_type));
1745 gimple_omp_task_set_arg_align (stmt, t);
1750 /* Scan an OpenMP loop directive. */
1752 static void
1753 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1755 omp_context *ctx;
1756 size_t i;
1758 ctx = new_omp_context (stmt, outer_ctx);
1760 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1762 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1763 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1765 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1766 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1767 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1768 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1770 scan_omp (gimple_omp_body (stmt), ctx);
1773 /* Scan an OpenMP sections directive. */
1775 static void
1776 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1778 omp_context *ctx;
1780 ctx = new_omp_context (stmt, outer_ctx);
1781 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1782 scan_omp (gimple_omp_body (stmt), ctx);
1785 /* Scan an OpenMP single directive. */
1787 static void
1788 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1790 omp_context *ctx;
1791 tree name;
1793 ctx = new_omp_context (stmt, outer_ctx);
1794 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1795 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1796 name = create_tmp_var_name (".omp_copy_s");
1797 name = build_decl (gimple_location (stmt),
1798 TYPE_DECL, name, ctx->record_type);
1799 TYPE_NAME (ctx->record_type) = name;
1801 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1802 scan_omp (gimple_omp_body (stmt), ctx);
1804 if (TYPE_FIELDS (ctx->record_type) == NULL)
1805 ctx->record_type = NULL;
1806 else
1807 layout_type (ctx->record_type);
1811 /* Check OpenMP nesting restrictions. */
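/* For example (illustrative), a work-sharing construct closely nested
   inside another work-sharing region with no intervening parallel, as in

	#pragma omp for
	for (i = 0; i < n; i++)
	  {
	    #pragma omp for
	    for (j = 0; j < n; j++)
	      body;
	  }

   is diagnosed with the warning below.  */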
1812 static void
1813 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1815 switch (gimple_code (stmt))
1817 case GIMPLE_OMP_FOR:
1818 case GIMPLE_OMP_SECTIONS:
1819 case GIMPLE_OMP_SINGLE:
1820 case GIMPLE_CALL:
1821 for (; ctx != NULL; ctx = ctx->outer)
1822 switch (gimple_code (ctx->stmt))
1824 case GIMPLE_OMP_FOR:
1825 case GIMPLE_OMP_SECTIONS:
1826 case GIMPLE_OMP_SINGLE:
1827 case GIMPLE_OMP_ORDERED:
1828 case GIMPLE_OMP_MASTER:
1829 case GIMPLE_OMP_TASK:
1830 if (is_gimple_call (stmt))
1832 warning (0, "barrier region may not be closely nested inside "
1833 "of work-sharing, critical, ordered, master or "
1834 "explicit task region");
1835 return;
1837 warning (0, "work-sharing region may not be closely nested inside "
1838 "of work-sharing, critical, ordered, master or explicit "
1839 "task region");
1840 return;
1841 case GIMPLE_OMP_PARALLEL:
1842 return;
1843 default:
1844 break;
1846 break;
1847 case GIMPLE_OMP_MASTER:
1848 for (; ctx != NULL; ctx = ctx->outer)
1849 switch (gimple_code (ctx->stmt))
1851 case GIMPLE_OMP_FOR:
1852 case GIMPLE_OMP_SECTIONS:
1853 case GIMPLE_OMP_SINGLE:
1854 case GIMPLE_OMP_TASK:
1855 warning (0, "master region may not be closely nested inside "
1856 "of work-sharing or explicit task region");
1857 return;
1858 case GIMPLE_OMP_PARALLEL:
1859 return;
1860 default:
1861 break;
1863 break;
1864 case GIMPLE_OMP_ORDERED:
1865 for (; ctx != NULL; ctx = ctx->outer)
1866 switch (gimple_code (ctx->stmt))
1868 case GIMPLE_OMP_CRITICAL:
1869 case GIMPLE_OMP_TASK:
1870 warning (0, "ordered region may not be closely nested inside "
1871 "of critical or explicit task region");
1872 return;
1873 case GIMPLE_OMP_FOR:
1874 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1875 OMP_CLAUSE_ORDERED) == NULL)
1876 warning (0, "ordered region must be closely nested inside "
1877 "a loop region with an ordered clause");
1878 return;
1879 case GIMPLE_OMP_PARALLEL:
1880 return;
1881 default:
1882 break;
1884 break;
1885 case GIMPLE_OMP_CRITICAL:
1886 for (; ctx != NULL; ctx = ctx->outer)
1887 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1888 && (gimple_omp_critical_name (stmt)
1889 == gimple_omp_critical_name (ctx->stmt)))
1891 warning (0, "critical region may not be nested inside a critical "
1892 "region with the same name");
1893 return;
1895 break;
1896 default:
1897 break;
1902 /* Helper function scan_omp.
1904 Callback for walk_tree or operators in walk_gimple_stmt used to
1905 scan for OpenMP directives in TP. */
1907 static tree
1908 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1910 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1911 omp_context *ctx = (omp_context *) wi->info;
1912 tree t = *tp;
1914 switch (TREE_CODE (t))
1916 case VAR_DECL:
1917 case PARM_DECL:
1918 case LABEL_DECL:
1919 case RESULT_DECL:
1920 if (ctx)
1921 *tp = remap_decl (t, &ctx->cb);
1922 break;
1924 default:
1925 if (ctx && TYPE_P (t))
1926 *tp = remap_type (t, &ctx->cb);
1927 else if (!DECL_P (t))
1929 *walk_subtrees = 1;
1930 if (ctx)
1932 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1933 if (tem != TREE_TYPE (t))
1935 if (TREE_CODE (t) == INTEGER_CST)
1936 *tp = build_int_cst_wide (tem,
1937 TREE_INT_CST_LOW (t),
1938 TREE_INT_CST_HIGH (t));
1939 else
1940 TREE_TYPE (t) = tem;
1944 break;
1947 return NULL_TREE;
1951 /* Helper function for scan_omp.
1953 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1954 the current statement in GSI. */
1956 static tree
1957 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1958 struct walk_stmt_info *wi)
1960 gimple stmt = gsi_stmt (*gsi);
1961 omp_context *ctx = (omp_context *) wi->info;
1963 if (gimple_has_location (stmt))
1964 input_location = gimple_location (stmt);
1966 /* Check the OpenMP nesting restrictions. */
1967 if (ctx != NULL)
1969 if (is_gimple_omp (stmt))
1970 check_omp_nesting_restrictions (stmt, ctx);
1971 else if (is_gimple_call (stmt))
1973 tree fndecl = gimple_call_fndecl (stmt);
1974 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1975 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1976 check_omp_nesting_restrictions (stmt, ctx);
1980 *handled_ops_p = true;
1982 switch (gimple_code (stmt))
1984 case GIMPLE_OMP_PARALLEL:
1985 taskreg_nesting_level++;
1986 scan_omp_parallel (gsi, ctx);
1987 taskreg_nesting_level--;
1988 break;
1990 case GIMPLE_OMP_TASK:
1991 taskreg_nesting_level++;
1992 scan_omp_task (gsi, ctx);
1993 taskreg_nesting_level--;
1994 break;
1996 case GIMPLE_OMP_FOR:
1997 scan_omp_for (stmt, ctx);
1998 break;
2000 case GIMPLE_OMP_SECTIONS:
2001 scan_omp_sections (stmt, ctx);
2002 break;
2004 case GIMPLE_OMP_SINGLE:
2005 scan_omp_single (stmt, ctx);
2006 break;
2008 case GIMPLE_OMP_SECTION:
2009 case GIMPLE_OMP_MASTER:
2010 case GIMPLE_OMP_ORDERED:
2011 case GIMPLE_OMP_CRITICAL:
2012 ctx = new_omp_context (stmt, ctx);
2013 scan_omp (gimple_omp_body (stmt), ctx);
2014 break;
2016 case GIMPLE_BIND:
2018 tree var;
2020 *handled_ops_p = false;
2021 if (ctx)
2022 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2023 insert_decl_map (&ctx->cb, var, var);
2025 break;
2026 default:
2027 *handled_ops_p = false;
2028 break;
2031 return NULL_TREE;
2035 /* Scan all the statements starting at the current statement. CTX
2036 contains context information about the OpenMP directives and
2037 clauses found during the scan. */
2039 static void
2040 scan_omp (gimple_seq body, omp_context *ctx)
2042 location_t saved_location;
2043 struct walk_stmt_info wi;
2045 memset (&wi, 0, sizeof (wi));
2046 wi.info = ctx;
2047 wi.want_locations = true;
2049 saved_location = input_location;
2050 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2051 input_location = saved_location;
2054 /* Re-gimplification and code generation routines. */
2056 /* Build a call to GOMP_barrier. */
2058 static tree
2059 build_omp_barrier (void)
2061 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
2064 /* If a context was created for STMT when it was scanned, return it. */
2066 static omp_context *
2067 maybe_lookup_ctx (gimple stmt)
2069 splay_tree_node n;
2070 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2071 return n ? (omp_context *) n->value : NULL;
2075 /* Find the mapping for DECL in CTX or the immediately enclosing
2076 context that has a mapping for DECL.
2078 If CTX is a nested parallel directive, we may have to use the decl
2079 mappings created in CTX's parent context. Suppose that we have the
2080 following parallel nesting (variable UIDs showed for clarity):
2082 iD.1562 = 0;
2083 #omp parallel shared(iD.1562) -> outer parallel
2084 iD.1562 = iD.1562 + 1;
2086 #omp parallel shared (iD.1562) -> inner parallel
2087 iD.1562 = iD.1562 - 1;
2089 Each parallel structure will create a distinct .omp_data_s structure
2090 for copying iD.1562 in/out of the directive:
2092 outer parallel .omp_data_s.1.i -> iD.1562
2093 inner parallel .omp_data_s.2.i -> iD.1562
2095 A shared variable mapping will produce a copy-out operation before
2096 the parallel directive and a copy-in operation after it. So, in
2097 this case we would have:
2099 iD.1562 = 0;
2100 .omp_data_o.1.i = iD.1562;
2101 #omp parallel shared(iD.1562) -> outer parallel
2102 .omp_data_i.1 = &.omp_data_o.1
2103 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2105 .omp_data_o.2.i = iD.1562; -> **
2106 #omp parallel shared(iD.1562) -> inner parallel
2107 .omp_data_i.2 = &.omp_data_o.2
2108 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2111 ** This is a problem. The symbol iD.1562 cannot be referenced
2112 inside the body of the outer parallel region. But since we are
2113 emitting this copy operation while expanding the inner parallel
2114 directive, we need to access the CTX structure of the outer
2115 parallel directive to get the correct mapping:
2117 .omp_data_o.2.i = .omp_data_i.1->i
2119 Since there may be other workshare or parallel directives enclosing
2120 the parallel directive, it may be necessary to walk up the context
2121 parent chain. This is not a problem in general because nested
2122 parallelism happens only rarely. */
2124 static tree
2125 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2127 tree t;
2128 omp_context *up;
2130 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2131 t = maybe_lookup_decl (decl, up);
2133 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2135 return t ? t : decl;
2139 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2140 in outer contexts. */
2142 static tree
2143 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2145 tree t = NULL;
2146 omp_context *up;
2148 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2149 t = maybe_lookup_decl (decl, up);
2151 return t ? t : decl;
2155 /* Construct the initialization value for reduction CLAUSE. */
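/* For illustration, roughly which identity value each reduction operator
   receives below (mirroring the switch in omp_reduction_init):

       reduction(+), (-), (|), (^), (||), (!=)  ->  0
       reduction(*), (&&), (==)                 ->  1
       reduction(&)                             ->  ~0 (all bits set)
       reduction(max)  ->  minimum of the type (or -inf when honored)
       reduction(min)  ->  maximum of the type (or +inf when honored)  */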
2157 tree
2158 omp_reduction_init (tree clause, tree type)
2160 location_t loc = OMP_CLAUSE_LOCATION (clause);
2161 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2163 case PLUS_EXPR:
2164 case MINUS_EXPR:
2165 case BIT_IOR_EXPR:
2166 case BIT_XOR_EXPR:
2167 case TRUTH_OR_EXPR:
2168 case TRUTH_ORIF_EXPR:
2169 case TRUTH_XOR_EXPR:
2170 case NE_EXPR:
2171 return build_zero_cst (type);
2173 case MULT_EXPR:
2174 case TRUTH_AND_EXPR:
2175 case TRUTH_ANDIF_EXPR:
2176 case EQ_EXPR:
2177 return fold_convert_loc (loc, type, integer_one_node);
2179 case BIT_AND_EXPR:
2180 return fold_convert_loc (loc, type, integer_minus_one_node);
2182 case MAX_EXPR:
2183 if (SCALAR_FLOAT_TYPE_P (type))
2185 REAL_VALUE_TYPE max, min;
2186 if (HONOR_INFINITIES (TYPE_MODE (type)))
2188 real_inf (&max);
2189 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2191 else
2192 real_maxval (&min, 1, TYPE_MODE (type));
2193 return build_real (type, min);
2195 else
2197 gcc_assert (INTEGRAL_TYPE_P (type));
2198 return TYPE_MIN_VALUE (type);
2201 case MIN_EXPR:
2202 if (SCALAR_FLOAT_TYPE_P (type))
2204 REAL_VALUE_TYPE max;
2205 if (HONOR_INFINITIES (TYPE_MODE (type)))
2206 real_inf (&max);
2207 else
2208 real_maxval (&max, 0, TYPE_MODE (type));
2209 return build_real (type, max);
2211 else
2213 gcc_assert (INTEGRAL_TYPE_P (type));
2214 return TYPE_MAX_VALUE (type);
2217 default:
2218 gcc_unreachable ();
2222 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2223 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2224 private variables. Initialization statements go in ILIST, while calls
2225 to destructors go in DLIST. */
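/* Schematically, for a child body with firstprivate(x) and copyin(t)
   (variable names illustrative only), the receiver-side sequence built
   here looks roughly like:

       x = .omp_data_i->x;                        <- firstprivate copy-in
       if (__builtin_omp_get_thread_num () != 0)
         t = master's t;                          <- copyin sequence
       GOMP_barrier ();                           <- only when a copyin is
                                                     passed by reference  */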
2227 static void
2228 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2229 omp_context *ctx)
2231 gimple_stmt_iterator diter;
2232 tree c, dtor, copyin_seq, x, ptr;
2233 bool copyin_by_ref = false;
2234 bool lastprivate_firstprivate = false;
2235 int pass;
2237 *dlist = gimple_seq_alloc ();
2238 diter = gsi_start (*dlist);
2239 copyin_seq = NULL;
2241 /* Do all the fixed sized types in the first pass, and the variable sized
2242 types in the second pass. This makes sure that the scalar arguments to
2243 the variable sized types are processed before we use them in the
2244 variable sized operations. */
2245 for (pass = 0; pass < 2; ++pass)
2247 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2249 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2250 tree var, new_var;
2251 bool by_ref;
2252 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2254 switch (c_kind)
2256 case OMP_CLAUSE_PRIVATE:
2257 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2258 continue;
2259 break;
2260 case OMP_CLAUSE_SHARED:
2261 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2263 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2264 continue;
2266 case OMP_CLAUSE_FIRSTPRIVATE:
2267 case OMP_CLAUSE_COPYIN:
2268 case OMP_CLAUSE_REDUCTION:
2269 break;
2270 case OMP_CLAUSE_LASTPRIVATE:
2271 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2273 lastprivate_firstprivate = true;
2274 if (pass != 0)
2275 continue;
2277 break;
2278 default:
2279 continue;
2282 new_var = var = OMP_CLAUSE_DECL (c);
2283 if (c_kind != OMP_CLAUSE_COPYIN)
2284 new_var = lookup_decl (var, ctx);
2286 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2288 if (pass != 0)
2289 continue;
2291 else if (is_variable_sized (var))
2293 /* For variable sized types, we need to allocate the
2294 actual storage here. Call alloca and store the
2295 result in the pointer decl that we created elsewhere. */
2296 if (pass == 0)
2297 continue;
2299 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2301 gimple stmt;
2302 tree tmp;
2304 ptr = DECL_VALUE_EXPR (new_var);
2305 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2306 ptr = TREE_OPERAND (ptr, 0);
2307 gcc_assert (DECL_P (ptr));
2308 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2310 /* void *tmp = __builtin_alloca */
2311 stmt
2312 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2313 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2314 gimple_add_tmp_var (tmp);
2315 gimple_call_set_lhs (stmt, tmp);
2317 gimple_seq_add_stmt (ilist, stmt);
2319 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2320 gimplify_assign (ptr, x, ilist);
2323 else if (is_reference (var))
2325 /* For references that are being privatized for Fortran,
2326 allocate new backing storage for the new pointer
2327 variable. This allows us to avoid changing all the
2328 code that expects a pointer to something that expects
2329 a direct variable. Note that this doesn't apply to
2330 C++, since reference types are disallowed in data
2331 sharing clauses there, except for NRV optimized
2332 return values. */
2333 if (pass == 0)
2334 continue;
2336 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2337 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2339 x = build_receiver_ref (var, false, ctx);
2340 x = build_fold_addr_expr_loc (clause_loc, x);
2342 else if (TREE_CONSTANT (x))
2344 const char *name = NULL;
2345 if (DECL_NAME (var))
2346 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2348 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2349 name);
2350 gimple_add_tmp_var (x);
2351 TREE_ADDRESSABLE (x) = 1;
2352 x = build_fold_addr_expr_loc (clause_loc, x);
2354 else
2356 x = build_call_expr_loc (clause_loc,
2357 built_in_decls[BUILT_IN_ALLOCA], 1, x);
2360 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2361 gimplify_assign (new_var, x, ilist);
2363 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2365 else if (c_kind == OMP_CLAUSE_REDUCTION
2366 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2368 if (pass == 0)
2369 continue;
2371 else if (pass != 0)
2372 continue;
2374 switch (OMP_CLAUSE_CODE (c))
2376 case OMP_CLAUSE_SHARED:
2377 /* Shared global vars are just accessed directly. */
2378 if (is_global_var (new_var))
2379 break;
2380 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2381 needs to be delayed until after fixup_child_record_type so
2382 that we get the correct type during the dereference. */
2383 by_ref = use_pointer_for_field (var, ctx);
2384 x = build_receiver_ref (var, by_ref, ctx);
2385 SET_DECL_VALUE_EXPR (new_var, x);
2386 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2388 /* ??? If VAR is not passed by reference, and the variable
2389 hasn't been initialized yet, then we'll get a warning for
2390 the store into the omp_data_s structure. Ideally, we'd be
2391 able to notice this and not store anything at all, but
2392 we're generating code too early. Suppress the warning. */
2393 if (!by_ref)
2394 TREE_NO_WARNING (var) = 1;
2395 break;
2397 case OMP_CLAUSE_LASTPRIVATE:
2398 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2399 break;
2400 /* FALLTHRU */
2402 case OMP_CLAUSE_PRIVATE:
2403 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2404 x = build_outer_var_ref (var, ctx);
2405 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2407 if (is_task_ctx (ctx))
2408 x = build_receiver_ref (var, false, ctx);
2409 else
2410 x = build_outer_var_ref (var, ctx);
2412 else
2413 x = NULL;
2414 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2415 if (x)
2416 gimplify_and_add (x, ilist);
2417 /* FALLTHRU */
2419 do_dtor:
2420 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2421 if (x)
2423 gimple_seq tseq = NULL;
2425 dtor = x;
2426 gimplify_stmt (&dtor, &tseq);
2427 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2429 break;
2431 case OMP_CLAUSE_FIRSTPRIVATE:
2432 if (is_task_ctx (ctx))
2434 if (is_reference (var) || is_variable_sized (var))
2435 goto do_dtor;
2436 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2437 ctx))
2438 || use_pointer_for_field (var, NULL))
2440 x = build_receiver_ref (var, false, ctx);
2441 SET_DECL_VALUE_EXPR (new_var, x);
2442 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2443 goto do_dtor;
2446 x = build_outer_var_ref (var, ctx);
2447 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2448 gimplify_and_add (x, ilist);
2449 goto do_dtor;
2450 break;
2452 case OMP_CLAUSE_COPYIN:
2453 by_ref = use_pointer_for_field (var, NULL);
2454 x = build_receiver_ref (var, by_ref, ctx);
2455 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2456 append_to_statement_list (x, &copyin_seq);
2457 copyin_by_ref |= by_ref;
2458 break;
2460 case OMP_CLAUSE_REDUCTION:
2461 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2463 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2464 x = build_outer_var_ref (var, ctx);
2466 if (is_reference (var))
2467 x = build_fold_addr_expr_loc (clause_loc, x);
2468 SET_DECL_VALUE_EXPR (placeholder, x);
2469 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2470 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2471 gimple_seq_add_seq (ilist,
2472 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2473 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2474 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2476 else
2478 x = omp_reduction_init (c, TREE_TYPE (new_var));
2479 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2480 gimplify_assign (new_var, x, ilist);
2482 break;
2484 default:
2485 gcc_unreachable ();
2490 /* The copyin sequence is not to be executed by the main thread, since
2491 that would result in self-copies. That may not matter for scalars,
2492 but it certainly does for C++ operator=. */
2493 if (copyin_seq)
2495 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2496 x = build2 (NE_EXPR, boolean_type_node, x,
2497 build_int_cst (TREE_TYPE (x), 0));
2498 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2499 gimplify_and_add (x, ilist);
2502 /* If any copyin variable is passed by reference, we must ensure the
2503 master thread doesn't modify it before it is copied over in all
2504 threads. Similarly for variables in both firstprivate and
2505 lastprivate clauses we need to ensure the lastprivate copying
2506 happens after firstprivate copying in all threads. */
2507 if (copyin_by_ref || lastprivate_firstprivate)
2508 gimplify_and_add (build_omp_barrier (), ilist);
2512 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2513 both parallel and workshare constructs. PREDICATE may be NULL if it's
2514 always true. */
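/* Schematically, with PREDICATE being the "this was the last iteration"
   test, the sequence appended here looks roughly like (x illustrative):

       if (PREDICATE)
         {
           ...;                      <- OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ
           original x = private x;
         }  */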
2516 static void
2517 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2518 omp_context *ctx)
2520 tree x, c, label = NULL;
2521 bool par_clauses = false;
2523 /* Early exit if there are no lastprivate clauses. */
2524 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2525 if (clauses == NULL)
2527 /* If this was a workshare clause, see if it had been combined
2528 with its parallel. In that case, look for the clauses on the
2529 parallel statement itself. */
2530 if (is_parallel_ctx (ctx))
2531 return;
2533 ctx = ctx->outer;
2534 if (ctx == NULL || !is_parallel_ctx (ctx))
2535 return;
2537 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2538 OMP_CLAUSE_LASTPRIVATE);
2539 if (clauses == NULL)
2540 return;
2541 par_clauses = true;
2544 if (predicate)
2546 gimple stmt;
2547 tree label_true, arm1, arm2;
2549 label = create_artificial_label (UNKNOWN_LOCATION);
2550 label_true = create_artificial_label (UNKNOWN_LOCATION);
2551 arm1 = TREE_OPERAND (predicate, 0);
2552 arm2 = TREE_OPERAND (predicate, 1);
2553 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2554 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2555 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2556 label_true, label);
2557 gimple_seq_add_stmt (stmt_list, stmt);
2558 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2561 for (c = clauses; c ;)
2563 tree var, new_var;
2564 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2566 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2568 var = OMP_CLAUSE_DECL (c);
2569 new_var = lookup_decl (var, ctx);
2571 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2573 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2574 gimple_seq_add_seq (stmt_list,
2575 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2577 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2579 x = build_outer_var_ref (var, ctx);
2580 if (is_reference (var))
2581 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2582 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2583 gimplify_and_add (x, stmt_list);
2585 c = OMP_CLAUSE_CHAIN (c);
2586 if (c == NULL && !par_clauses)
2588 /* If this was a workshare clause, see if it had been combined
2589 with its parallel. In that case, continue looking for the
2590 clauses also on the parallel statement itself. */
2591 if (is_parallel_ctx (ctx))
2592 break;
2594 ctx = ctx->outer;
2595 if (ctx == NULL || !is_parallel_ctx (ctx))
2596 break;
2598 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2599 OMP_CLAUSE_LASTPRIVATE);
2600 par_clauses = true;
2604 if (label)
2605 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2609 /* Generate code to implement the REDUCTION clauses. */
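/* Schematically, when there is exactly one scalar reduction clause the
   code below emits a single OMP_ATOMIC update, roughly

       shared_s = shared_s OP private_s;   <- performed atomically

   whereas with several clauses, or with placeholder (array) reductions,
   the merge statements are wrapped in GOMP_atomic_start () and
   GOMP_atomic_end ().  */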
2611 static void
2612 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2614 gimple_seq sub_seq = NULL;
2615 gimple stmt;
2616 tree x, c;
2617 int count = 0;
2619 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2620 update in that case, otherwise use a lock. */
2621 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2622 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2624 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2626 /* Never use OMP_ATOMIC for array reductions. */
2627 count = -1;
2628 break;
2630 count++;
2633 if (count == 0)
2634 return;
2636 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2638 tree var, ref, new_var;
2639 enum tree_code code;
2640 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2642 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2643 continue;
2645 var = OMP_CLAUSE_DECL (c);
2646 new_var = lookup_decl (var, ctx);
2647 if (is_reference (var))
2648 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2649 ref = build_outer_var_ref (var, ctx);
2650 code = OMP_CLAUSE_REDUCTION_CODE (c);
2652 /* reduction(-:var) sums up the partial results, so it acts
2653 identically to reduction(+:var). */
2654 if (code == MINUS_EXPR)
2655 code = PLUS_EXPR;
2657 if (count == 1)
2659 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2661 addr = save_expr (addr);
2662 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2663 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2664 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2665 gimplify_and_add (x, stmt_seqp);
2666 return;
2669 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2671 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2673 if (is_reference (var))
2674 ref = build_fold_addr_expr_loc (clause_loc, ref);
2675 SET_DECL_VALUE_EXPR (placeholder, ref);
2676 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2677 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2678 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2679 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2680 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2682 else
2684 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2685 ref = build_outer_var_ref (var, ctx);
2686 gimplify_assign (ref, x, &sub_seq);
2690 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2691 gimple_seq_add_stmt (stmt_seqp, stmt);
2693 gimple_seq_add_seq (stmt_seqp, sub_seq);
2695 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2696 gimple_seq_add_stmt (stmt_seqp, stmt);
2700 /* Generate code to implement the COPYPRIVATE clauses. */
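/* Schematically, for "#pragma omp single copyprivate(x)" (x illustrative)
   the thread that executed the single region stores x, or its address,
   into the communication record (SLIST), and every other thread assigns
   its own private x from that record (RLIST).  */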
2702 static void
2703 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2704 omp_context *ctx)
2706 tree c;
2708 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2710 tree var, new_var, ref, x;
2711 bool by_ref;
2712 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2714 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2715 continue;
2717 var = OMP_CLAUSE_DECL (c);
2718 by_ref = use_pointer_for_field (var, NULL);
2720 ref = build_sender_ref (var, ctx);
2721 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2722 if (by_ref)
2724 x = build_fold_addr_expr_loc (clause_loc, new_var);
2725 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2727 gimplify_assign (ref, x, slist);
2729 ref = build_receiver_ref (var, false, ctx);
2730 if (by_ref)
2732 ref = fold_convert_loc (clause_loc,
2733 build_pointer_type (TREE_TYPE (new_var)),
2734 ref);
2735 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2737 if (is_reference (var))
2739 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2740 ref = build_simple_mem_ref_loc (clause_loc, ref);
2741 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2743 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2744 gimplify_and_add (x, rlist);
2749 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2750 and REDUCTION from the sender (aka parent) side. */
2752 static void
2753 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2754 omp_context *ctx)
2756 tree c;
2758 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2760 tree val, ref, x, var;
2761 bool by_ref, do_in = false, do_out = false;
2762 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2764 switch (OMP_CLAUSE_CODE (c))
2766 case OMP_CLAUSE_PRIVATE:
2767 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2768 break;
2769 continue;
2770 case OMP_CLAUSE_FIRSTPRIVATE:
2771 case OMP_CLAUSE_COPYIN:
2772 case OMP_CLAUSE_LASTPRIVATE:
2773 case OMP_CLAUSE_REDUCTION:
2774 break;
2775 default:
2776 continue;
2779 val = OMP_CLAUSE_DECL (c);
2780 var = lookup_decl_in_outer_ctx (val, ctx);
2782 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2783 && is_global_var (var))
2784 continue;
2785 if (is_variable_sized (val))
2786 continue;
2787 by_ref = use_pointer_for_field (val, NULL);
2789 switch (OMP_CLAUSE_CODE (c))
2791 case OMP_CLAUSE_PRIVATE:
2792 case OMP_CLAUSE_FIRSTPRIVATE:
2793 case OMP_CLAUSE_COPYIN:
2794 do_in = true;
2795 break;
2797 case OMP_CLAUSE_LASTPRIVATE:
2798 if (by_ref || is_reference (val))
2800 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2801 continue;
2802 do_in = true;
2804 else
2806 do_out = true;
2807 if (lang_hooks.decls.omp_private_outer_ref (val))
2808 do_in = true;
2810 break;
2812 case OMP_CLAUSE_REDUCTION:
2813 do_in = true;
2814 do_out = !(by_ref || is_reference (val));
2815 break;
2817 default:
2818 gcc_unreachable ();
2821 if (do_in)
2823 ref = build_sender_ref (val, ctx);
2824 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2825 gimplify_assign (ref, x, ilist);
2826 if (is_task_ctx (ctx))
2827 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2830 if (do_out)
2832 ref = build_sender_ref (val, ctx);
2833 gimplify_assign (var, ref, olist);
2838 /* Generate code to implement SHARED from the sender (aka parent)
2839 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2840 list things that got automatically shared. */
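/* Schematically, for a shared variable x (illustrative name) the parent
   emits, around the parallel call,

       .omp_data_o.x = x;       <- or "= &x" when passed by reference
       ... GOMP_parallel_start / child fn / GOMP_parallel_end ...
       x = .omp_data_o.x;       <- only if x is writable and not a
                                   by-reference RESULT_DECL/PARM_DECL  */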
2842 static void
2843 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2845 tree var, ovar, nvar, f, x, record_type;
2847 if (ctx->record_type == NULL)
2848 return;
2850 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2851 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
2853 ovar = DECL_ABSTRACT_ORIGIN (f);
2854 nvar = maybe_lookup_decl (ovar, ctx);
2855 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2856 continue;
2858 /* If CTX is a nested parallel directive, find the immediately
2859 enclosing parallel or workshare construct that contains a
2860 mapping for OVAR. */
2861 var = lookup_decl_in_outer_ctx (ovar, ctx);
2863 if (use_pointer_for_field (ovar, ctx))
2865 x = build_sender_ref (ovar, ctx);
2866 var = build_fold_addr_expr (var);
2867 gimplify_assign (x, var, ilist);
2869 else
2871 x = build_sender_ref (ovar, ctx);
2872 gimplify_assign (x, var, ilist);
2874 if (!TREE_READONLY (var)
2875 /* We don't need to receive a new reference to a result
2876 or parm decl. In fact we may not store to it as we will
2877 invalidate any pending RSO and generate wrong gimple
2878 during inlining. */
2879 && !((TREE_CODE (var) == RESULT_DECL
2880 || TREE_CODE (var) == PARM_DECL)
2881 && DECL_BY_REFERENCE (var)))
2883 x = build_sender_ref (ovar, ctx);
2884 gimplify_assign (var, x, olist);
2891 /* A convenience function to build an empty GIMPLE_COND with just the
2892 condition. */
2894 static gimple
2895 gimple_build_cond_empty (tree cond)
2897 enum tree_code pred_code;
2898 tree lhs, rhs;
2900 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2901 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2905 /* Build the function calls to GOMP_parallel_start etc to actually
2906 generate the parallel operation. REGION is the parallel region
2907 being expanded. BB is the block where the code is to be inserted. WS_ARGS
2908 will be set if this is a call to a combined parallel+workshare
2909 construct; it contains the list of additional arguments needed by
2910 the workshare construct. */
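/* The emitted sequence is roughly (for a plain, non-combined region):

       GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
       child_fn (&.omp_data_o);
       GOMP_parallel_end ();

   For combined parallel+workshare regions a GOMP_parallel_loop_*_start
   or GOMP_parallel_sections_start entry point is used instead, with the
   extra WS_ARGS appended to the call.  */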
2912 static void
2913 expand_parallel_call (struct omp_region *region, basic_block bb,
2914 gimple entry_stmt, VEC(tree,gc) *ws_args)
2916 tree t, t1, t2, val, cond, c, clauses;
2917 gimple_stmt_iterator gsi;
2918 gimple stmt;
2919 int start_ix;
2920 location_t clause_loc;
2921 VEC(tree,gc) *args;
2923 clauses = gimple_omp_parallel_clauses (entry_stmt);
2925 /* Determine what flavor of GOMP_parallel_start we will be
2926 emitting. */
2927 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2928 if (is_combined_parallel (region))
2930 switch (region->inner->type)
2932 case GIMPLE_OMP_FOR:
2933 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2934 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2935 + (region->inner->sched_kind
2936 == OMP_CLAUSE_SCHEDULE_RUNTIME
2937 ? 3 : region->inner->sched_kind);
2938 break;
2939 case GIMPLE_OMP_SECTIONS:
2940 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2941 break;
2942 default:
2943 gcc_unreachable ();
2947 /* By default, the value of NUM_THREADS is zero (selected at run time)
2948 and there is no conditional. */
2949 cond = NULL_TREE;
2950 val = build_int_cst (unsigned_type_node, 0);
2952 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2953 if (c)
2954 cond = OMP_CLAUSE_IF_EXPR (c);
2956 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2957 if (c)
2959 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2960 clause_loc = OMP_CLAUSE_LOCATION (c);
2962 else
2963 clause_loc = gimple_location (entry_stmt);
2965 /* Ensure 'val' is of the correct type. */
2966 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
2968 /* If we found the clause 'if (cond)', build either
2969 (cond != 0) or (cond ? val : 1u). */
2970 if (cond)
2972 gimple_stmt_iterator gsi;
2974 cond = gimple_boolify (cond);
2976 if (integer_zerop (val))
2977 val = fold_build2_loc (clause_loc,
2978 EQ_EXPR, unsigned_type_node, cond,
2979 build_int_cst (TREE_TYPE (cond), 0));
2980 else
2982 basic_block cond_bb, then_bb, else_bb;
2983 edge e, e_then, e_else;
2984 tree tmp_then, tmp_else, tmp_join, tmp_var;
2986 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2987 if (gimple_in_ssa_p (cfun))
2989 tmp_then = make_ssa_name (tmp_var, NULL);
2990 tmp_else = make_ssa_name (tmp_var, NULL);
2991 tmp_join = make_ssa_name (tmp_var, NULL);
2993 else
2995 tmp_then = tmp_var;
2996 tmp_else = tmp_var;
2997 tmp_join = tmp_var;
3000 e = split_block (bb, NULL);
3001 cond_bb = e->src;
3002 bb = e->dest;
3003 remove_edge (e);
3005 then_bb = create_empty_bb (cond_bb);
3006 else_bb = create_empty_bb (then_bb);
3007 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3008 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3010 stmt = gimple_build_cond_empty (cond);
3011 gsi = gsi_start_bb (cond_bb);
3012 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3014 gsi = gsi_start_bb (then_bb);
3015 stmt = gimple_build_assign (tmp_then, val);
3016 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3018 gsi = gsi_start_bb (else_bb);
3019 stmt = gimple_build_assign
3020 (tmp_else, build_int_cst (unsigned_type_node, 1));
3021 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3023 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3024 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3025 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3026 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3028 if (gimple_in_ssa_p (cfun))
3030 gimple phi = create_phi_node (tmp_join, bb);
3031 SSA_NAME_DEF_STMT (tmp_join) = phi;
3032 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3033 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3036 val = tmp_join;
3039 gsi = gsi_start_bb (bb);
3040 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3041 false, GSI_CONTINUE_LINKING);
3044 gsi = gsi_last_bb (bb);
3045 t = gimple_omp_parallel_data_arg (entry_stmt);
3046 if (t == NULL)
3047 t1 = null_pointer_node;
3048 else
3049 t1 = build_fold_addr_expr (t);
3050 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3052 args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
3053 VEC_quick_push (tree, args, t2);
3054 VEC_quick_push (tree, args, t1);
3055 VEC_quick_push (tree, args, val);
3056 VEC_splice (tree, args, ws_args);
3058 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3059 built_in_decls[start_ix], args);
3061 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3062 false, GSI_CONTINUE_LINKING);
3064 t = gimple_omp_parallel_data_arg (entry_stmt);
3065 if (t == NULL)
3066 t = null_pointer_node;
3067 else
3068 t = build_fold_addr_expr (t);
3069 t = build_call_expr_loc (gimple_location (entry_stmt),
3070 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3071 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3072 false, GSI_CONTINUE_LINKING);
3074 t = build_call_expr_loc (gimple_location (entry_stmt),
3075 built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3076 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3077 false, GSI_CONTINUE_LINKING);
3081 /* Build the function call to GOMP_task to actually
3082 generate the task operation. BB is the block where the code is to be inserted. */
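/* The emitted call is roughly

       GOMP_task (child_fn, &.omp_data_o, copy_fn,
                  arg_size, arg_align, if_clause_cond, flags);

   where FLAGS currently only encodes the "untied" clause.  */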
3084 static void
3085 expand_task_call (basic_block bb, gimple entry_stmt)
3087 tree t, t1, t2, t3, flags, cond, c, clauses;
3088 gimple_stmt_iterator gsi;
3089 location_t loc = gimple_location (entry_stmt);
3091 clauses = gimple_omp_task_clauses (entry_stmt);
3093 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3094 if (c)
3095 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3096 else
3097 cond = boolean_true_node;
3099 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3100 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3102 gsi = gsi_last_bb (bb);
3103 t = gimple_omp_task_data_arg (entry_stmt);
3104 if (t == NULL)
3105 t2 = null_pointer_node;
3106 else
3107 t2 = build_fold_addr_expr_loc (loc, t);
3108 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3109 t = gimple_omp_task_copy_fn (entry_stmt);
3110 if (t == NULL)
3111 t3 = null_pointer_node;
3112 else
3113 t3 = build_fold_addr_expr_loc (loc, t);
3115 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3116 gimple_omp_task_arg_size (entry_stmt),
3117 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3119 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3120 false, GSI_CONTINUE_LINKING);
3124 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3125 catch handler and return it. This prevents programs from violating the
3126 structured block semantics with throws. */
3128 static gimple_seq
3129 maybe_catch_exception (gimple_seq body)
3131 gimple g;
3132 tree decl;
3134 if (!flag_exceptions)
3135 return body;
3137 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3138 decl = lang_hooks.eh_protect_cleanup_actions ();
3139 else
3140 decl = built_in_decls[BUILT_IN_TRAP];
3142 g = gimple_build_eh_must_not_throw (decl);
3143 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3144 GIMPLE_TRY_CATCH);
3146 return gimple_seq_alloc_with_stmt (g);
3149 /* Chain all the DECLs in the vector V by their DECL_CHAIN fields. */
3151 static tree
3152 vec2chain (VEC(tree,gc) *v)
3154 tree chain = NULL_TREE, t;
3155 unsigned ix;
3157 FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
3159 DECL_CHAIN (t) = chain;
3160 chain = t;
3163 return chain;
3167 /* Remove barriers in REGION->EXIT's block. Note that this is only
3168 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3169 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3170 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
3171 removed. */
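/* For example (illustrative):

       #pragma omp parallel
         {
           #pragma omp for      <- its implicit end-of-workshare barrier
             for (...) ...         can be dropped, since the barrier at
         }                         the end of the parallel suffices  */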
3173 static void
3174 remove_exit_barrier (struct omp_region *region)
3176 gimple_stmt_iterator gsi;
3177 basic_block exit_bb;
3178 edge_iterator ei;
3179 edge e;
3180 gimple stmt;
3181 int any_addressable_vars = -1;
3183 exit_bb = region->exit;
3185 /* If the parallel region doesn't return, we don't have REGION->EXIT
3186 block at all. */
3187 if (! exit_bb)
3188 return;
3190 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3191 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3192 statements that can appear in between are extremely limited -- no
3193 memory operations at all. Here, we allow nothing at all, so the
3194 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3195 gsi = gsi_last_bb (exit_bb);
3196 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3197 gsi_prev (&gsi);
3198 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3199 return;
3201 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3203 gsi = gsi_last_bb (e->src);
3204 if (gsi_end_p (gsi))
3205 continue;
3206 stmt = gsi_stmt (gsi);
3207 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3208 && !gimple_omp_return_nowait_p (stmt))
3210 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3211 in many cases. If there could be tasks queued, the barrier
3212 might be needed to let the tasks run before some local
3213 variable of the parallel that the task uses as shared
3214 runs out of scope. The task can be spawned either
3215 from within the current function (this would be easy to check)
3216 or from some function it calls and gets passed an address
3217 of such a variable. */
3218 if (any_addressable_vars < 0)
3220 gimple parallel_stmt = last_stmt (region->entry);
3221 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3222 tree local_decls, block, decl;
3223 unsigned ix;
3225 any_addressable_vars = 0;
3226 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3227 if (TREE_ADDRESSABLE (decl))
3229 any_addressable_vars = 1;
3230 break;
3232 for (block = gimple_block (stmt);
3233 !any_addressable_vars
3234 && block
3235 && TREE_CODE (block) == BLOCK;
3236 block = BLOCK_SUPERCONTEXT (block))
3238 for (local_decls = BLOCK_VARS (block);
3239 local_decls;
3240 local_decls = DECL_CHAIN (local_decls))
3241 if (TREE_ADDRESSABLE (local_decls))
3243 any_addressable_vars = 1;
3244 break;
3246 if (block == gimple_block (parallel_stmt))
3247 break;
3250 if (!any_addressable_vars)
3251 gimple_omp_return_set_nowait (stmt);
3256 static void
3257 remove_exit_barriers (struct omp_region *region)
3259 if (region->type == GIMPLE_OMP_PARALLEL)
3260 remove_exit_barrier (region);
3262 if (region->inner)
3264 region = region->inner;
3265 remove_exit_barriers (region);
3266 while (region->next)
3268 region = region->next;
3269 remove_exit_barriers (region);
3274 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3275 calls. These can't be declared as const functions, but
3276 within one parallel body they are constant, so they can be
3277 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3278 which are declared const. Similarly for a task body, except
3279 that in an untied task omp_get_thread_num () can change at any task
3280 scheduling point. */
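/* For example, inside a parallel body a call such as

       n = omp_get_num_threads ();

   is redirected to __builtin_omp_get_num_threads (), which is declared
   const and can therefore be CSE'd by later passes.  */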
3282 static void
3283 optimize_omp_library_calls (gimple entry_stmt)
3285 basic_block bb;
3286 gimple_stmt_iterator gsi;
3287 tree thr_num_id
3288 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3289 tree num_thr_id
3290 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3291 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3292 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3293 OMP_CLAUSE_UNTIED) != NULL);
3295 FOR_EACH_BB (bb)
3296 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3298 gimple call = gsi_stmt (gsi);
3299 tree decl;
3301 if (is_gimple_call (call)
3302 && (decl = gimple_call_fndecl (call))
3303 && DECL_EXTERNAL (decl)
3304 && TREE_PUBLIC (decl)
3305 && DECL_INITIAL (decl) == NULL)
3307 tree built_in;
3309 if (DECL_NAME (decl) == thr_num_id)
3311 /* In #pragma omp task untied omp_get_thread_num () can change
3312 during the execution of the task region. */
3313 if (untied_task)
3314 continue;
3315 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3317 else if (DECL_NAME (decl) == num_thr_id)
3318 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3319 else
3320 continue;
3322 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3323 || gimple_call_num_args (call) != 0)
3324 continue;
3326 if (flag_exceptions && !TREE_NOTHROW (decl))
3327 continue;
3329 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3330 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3331 TREE_TYPE (TREE_TYPE (built_in))))
3332 continue;
3334 gimple_call_set_fndecl (call, built_in);
3339 /* Expand the OpenMP parallel or task directive starting at REGION. */
3341 static void
3342 expand_omp_taskreg (struct omp_region *region)
3344 basic_block entry_bb, exit_bb, new_bb;
3345 struct function *child_cfun;
3346 tree child_fn, block, t;
3347 tree save_current;
3348 gimple_stmt_iterator gsi;
3349 gimple entry_stmt, stmt;
3350 edge e;
3351 VEC(tree,gc) *ws_args;
3353 entry_stmt = last_stmt (region->entry);
3354 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3355 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3356 /* If this function has been already instrumented, make sure
3357 the child function isn't instrumented again. */
3358 child_cfun->after_tree_profile = cfun->after_tree_profile;
3360 entry_bb = region->entry;
3361 exit_bb = region->exit;
3363 if (is_combined_parallel (region))
3364 ws_args = region->ws_args;
3365 else
3366 ws_args = NULL;
3368 if (child_cfun->cfg)
3370 /* Due to inlining, it may happen that we have already outlined
3371 the region, in which case all we need to do is make the
3372 sub-graph unreachable and emit the parallel call. */
3373 edge entry_succ_e, exit_succ_e;
3374 gimple_stmt_iterator gsi;
3376 entry_succ_e = single_succ_edge (entry_bb);
3378 gsi = gsi_last_bb (entry_bb);
3379 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3380 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3381 gsi_remove (&gsi, true);
3383 new_bb = entry_bb;
3384 if (exit_bb)
3386 exit_succ_e = single_succ_edge (exit_bb);
3387 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3389 remove_edge_and_dominated_blocks (entry_succ_e);
3391 else
3393 unsigned srcidx, dstidx, num;
3395 /* If the parallel region needs data sent from the parent
3396 function, then the very first statement (except possible
3397 tree profile counter updates) of the parallel body
3398 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3399 &.OMP_DATA_O is passed as an argument to the child function,
3400 we need to replace it with the argument as seen by the child
3401 function.
3403 In most cases, this will end up being the identity assignment
3404 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3405 a function call that has been inlined, the original PARM_DECL
3406 .OMP_DATA_I may have been converted into a different local
3407 variable, in which case we need to keep the assignment. */
3408 if (gimple_omp_taskreg_data_arg (entry_stmt))
3410 basic_block entry_succ_bb = single_succ (entry_bb);
3411 gimple_stmt_iterator gsi;
3412 tree arg, narg;
3413 gimple parcopy_stmt = NULL;
3415 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3417 gimple stmt;
3419 gcc_assert (!gsi_end_p (gsi));
3420 stmt = gsi_stmt (gsi);
3421 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3422 continue;
3424 if (gimple_num_ops (stmt) == 2)
3426 tree arg = gimple_assign_rhs1 (stmt);
3428 /* We ignore the subcode because we're
3429 effectively doing a STRIP_NOPS. */
3431 if (TREE_CODE (arg) == ADDR_EXPR
3432 && TREE_OPERAND (arg, 0)
3433 == gimple_omp_taskreg_data_arg (entry_stmt))
3435 parcopy_stmt = stmt;
3436 break;
3441 gcc_assert (parcopy_stmt != NULL);
3442 arg = DECL_ARGUMENTS (child_fn);
3444 if (!gimple_in_ssa_p (cfun))
3446 if (gimple_assign_lhs (parcopy_stmt) == arg)
3447 gsi_remove (&gsi, true);
3448 else
3450 /* ?? Is setting the subcode really necessary ?? */
3451 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3452 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3455 else
3457 /* If we are in ssa form, we must load the value from the default
3458 definition of the argument. That should not be defined now,
3459 since the argument is not used uninitialized. */
3460 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3461 narg = make_ssa_name (arg, gimple_build_nop ());
3462 set_default_def (arg, narg);
3463 /* ?? Is setting the subcode really necessary ?? */
3464 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3465 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3466 update_stmt (parcopy_stmt);
3470 /* Declare local variables needed in CHILD_CFUN. */
3471 block = DECL_INITIAL (child_fn);
3472 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3473 /* The gimplifier could record temporaries in parallel/task block
3474 rather than in containing function's local_decls chain,
3475 which would mean cgraph missed finalizing them. Do it now. */
3476 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3477 if (TREE_CODE (t) == VAR_DECL
3478 && TREE_STATIC (t)
3479 && !DECL_EXTERNAL (t))
3480 varpool_finalize_decl (t);
3481 DECL_SAVED_TREE (child_fn) = NULL;
3482 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3483 TREE_USED (block) = 1;
3485 /* Reset DECL_CONTEXT on function arguments. */
3486 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3487 DECL_CONTEXT (t) = child_fn;
3489 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3490 so that it can be moved to the child function. */
3491 gsi = gsi_last_bb (entry_bb);
3492 stmt = gsi_stmt (gsi);
3493 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3494 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3495 gsi_remove (&gsi, true);
3496 e = split_block (entry_bb, stmt);
3497 entry_bb = e->dest;
3498 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3500 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3501 if (exit_bb)
3503 gsi = gsi_last_bb (exit_bb);
3504 gcc_assert (!gsi_end_p (gsi)
3505 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3506 stmt = gimple_build_return (NULL);
3507 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3508 gsi_remove (&gsi, true);
3511 /* Move the parallel region into CHILD_CFUN. */
3513 if (gimple_in_ssa_p (cfun))
3515 push_cfun (child_cfun);
3516 init_tree_ssa (child_cfun);
3517 init_ssa_operands ();
3518 cfun->gimple_df->in_ssa_p = true;
3519 pop_cfun ();
3520 block = NULL_TREE;
3522 else
3523 block = gimple_block (entry_stmt);
3525 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3526 if (exit_bb)
3527 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3529 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3530 num = VEC_length (tree, child_cfun->local_decls);
3531 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3533 t = VEC_index (tree, child_cfun->local_decls, srcidx);
3534 if (DECL_CONTEXT (t) == cfun->decl)
3535 continue;
3536 if (srcidx != dstidx)
3537 VEC_replace (tree, child_cfun->local_decls, dstidx, t);
3538 dstidx++;
3540 if (dstidx != num)
3541 VEC_truncate (tree, child_cfun->local_decls, dstidx);
3543 /* Inform the callgraph about the new function. */
3544 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3545 = cfun->curr_properties;
3546 cgraph_add_new_function (child_fn, true);
3548 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3549 fixed in a following pass. */
3550 push_cfun (child_cfun);
3551 save_current = current_function_decl;
3552 current_function_decl = child_fn;
3553 if (optimize)
3554 optimize_omp_library_calls (entry_stmt);
3555 rebuild_cgraph_edges ();
3557 /* Some EH regions might become dead, see PR34608. If
3558 pass_cleanup_cfg isn't the first pass to happen with the
3559 new child, these dead EH edges might cause problems.
3560 Clean them up now. */
3561 if (flag_exceptions)
3563 basic_block bb;
3564 bool changed = false;
3566 FOR_EACH_BB (bb)
3567 changed |= gimple_purge_dead_eh_edges (bb);
3568 if (changed)
3569 cleanup_tree_cfg ();
3571 if (gimple_in_ssa_p (cfun))
3572 update_ssa (TODO_update_ssa);
3573 current_function_decl = save_current;
3574 pop_cfun ();
3577 /* Emit a library call to launch the children threads. */
3578 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3579 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3580 else
3581 expand_task_call (new_bb, entry_stmt);
3582 update_ssa (TODO_update_ssa_only_virtuals);
3586 /* A subroutine of expand_omp_for. Generate code for a parallel
3587 loop with any schedule. Given parameters:
3589 for (V = N1; V cond N2; V += STEP) BODY;
3591 where COND is "<" or ">", we generate pseudocode
3593 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3594 if (more) goto L0; else goto L3;
3596 V = istart0;
3597 iend = iend0;
3599 BODY;
3600 V += STEP;
3601 if (V cond iend) goto L1; else goto L2;
3603 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3606 If this is a combined omp parallel loop, instead of the call to
3607 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3609 For collapsed loops, given parameters:
3610 collapse(3)
3611 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3612 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3613 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3614 BODY;
3616 we generate pseudocode
3618 if (cond3 is <)
3619 adj = STEP3 - 1;
3620 else
3621 adj = STEP3 + 1;
3622 count3 = (adj + N32 - N31) / STEP3;
3623 if (cond2 is <)
3624 adj = STEP2 - 1;
3625 else
3626 adj = STEP2 + 1;
3627 count2 = (adj + N22 - N21) / STEP2;
3628 if (cond1 is <)
3629 adj = STEP1 - 1;
3630 else
3631 adj = STEP1 + 1;
3632 count1 = (adj + N12 - N11) / STEP1;
3633 count = count1 * count2 * count3;
3634 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3635 if (more) goto L0; else goto L3;
3637 V = istart0;
3638 T = V;
3639 V3 = N31 + (T % count3) * STEP3;
3640 T = T / count3;
3641 V2 = N21 + (T % count2) * STEP2;
3642 T = T / count2;
3643 V1 = N11 + T * STEP1;
3644 iend = iend0;
3646 BODY;
3647 V += 1;
3648 if (V < iend) goto L10; else goto L2;
3649 L10:
3650 V3 += STEP3;
3651 if (V3 cond3 N32) goto L1; else goto L11;
3652 L11:
3653 V3 = N31;
3654 V2 += STEP2;
3655 if (V2 cond2 N22) goto L1; else goto L12;
3656 L12:
3657 V2 = N21;
3658 V1 += STEP1;
3659 goto L1;
3661 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3666 static void
3667 expand_omp_for_generic (struct omp_region *region,
3668 struct omp_for_data *fd,
3669 enum built_in_function start_fn,
3670 enum built_in_function next_fn)
3672 tree type, istart0, iend0, iend;
3673 tree t, vmain, vback, bias = NULL_TREE;
3674 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3675 basic_block l2_bb = NULL, l3_bb = NULL;
3676 gimple_stmt_iterator gsi;
3677 gimple stmt;
3678 bool in_combined_parallel = is_combined_parallel (region);
3679 bool broken_loop = region->cont == NULL;
3680 edge e, ne;
3681 tree *counts = NULL;
3682 int i;
3684 gcc_assert (!broken_loop || !in_combined_parallel);
3685 gcc_assert (fd->iter_type == long_integer_type_node
3686 || !in_combined_parallel);
3688 type = TREE_TYPE (fd->loop.v);
3689 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3690 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3691 TREE_ADDRESSABLE (istart0) = 1;
3692 TREE_ADDRESSABLE (iend0) = 1;
3693 if (gimple_in_ssa_p (cfun))
3695 add_referenced_var (istart0);
3696 add_referenced_var (iend0);
3699 /* See if we need to bias by LLONG_MIN. */
3700 if (fd->iter_type == long_long_unsigned_type_node
3701 && TREE_CODE (type) == INTEGER_TYPE
3702 && !TYPE_UNSIGNED (type))
3704 tree n1, n2;
3706 if (fd->loop.cond_code == LT_EXPR)
3708 n1 = fd->loop.n1;
3709 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3711 else
3713 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3714 n2 = fd->loop.n1;
3716 if (TREE_CODE (n1) != INTEGER_CST
3717 || TREE_CODE (n2) != INTEGER_CST
3718 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3719 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3722 entry_bb = region->entry;
3723 cont_bb = region->cont;
3724 collapse_bb = NULL;
3725 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3726 gcc_assert (broken_loop
3727 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3728 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3729 l1_bb = single_succ (l0_bb);
3730 if (!broken_loop)
3732 l2_bb = create_empty_bb (cont_bb);
3733 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3734 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3736 else
3737 l2_bb = NULL;
3738 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3739 exit_bb = region->exit;
3741 gsi = gsi_last_bb (entry_bb);
3743 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3744 if (fd->collapse > 1)
3746 /* collapsed loops need work for expansion in SSA form. */
3747 gcc_assert (!gimple_in_ssa_p (cfun));
3748 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3749 for (i = 0; i < fd->collapse; i++)
3751 tree itype = TREE_TYPE (fd->loops[i].v);
3753 if (POINTER_TYPE_P (itype))
3754 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3755 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3756 ? -1 : 1));
3757 t = fold_build2 (PLUS_EXPR, itype,
3758 fold_convert (itype, fd->loops[i].step), t);
3759 t = fold_build2 (PLUS_EXPR, itype, t,
3760 fold_convert (itype, fd->loops[i].n2));
3761 t = fold_build2 (MINUS_EXPR, itype, t,
3762 fold_convert (itype, fd->loops[i].n1));
3763 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3764 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3765 fold_build1 (NEGATE_EXPR, itype, t),
3766 fold_build1 (NEGATE_EXPR, itype,
3767 fold_convert (itype,
3768 fd->loops[i].step)));
3769 else
3770 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3771 fold_convert (itype, fd->loops[i].step));
3772 t = fold_convert (type, t);
3773 if (TREE_CODE (t) == INTEGER_CST)
3774 counts[i] = t;
3775 else
3777 counts[i] = create_tmp_var (type, ".count");
3778 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3779 true, GSI_SAME_STMT);
3780 stmt = gimple_build_assign (counts[i], t);
3781 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3783 if (SSA_VAR_P (fd->loop.n2))
3785 if (i == 0)
3786 t = counts[0];
3787 else
3789 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3790 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3791 true, GSI_SAME_STMT);
3793 stmt = gimple_build_assign (fd->loop.n2, t);
3794 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3798 if (in_combined_parallel)
3800 /* In a combined parallel loop, emit a call to
3801 GOMP_loop_foo_next. */
3802 t = build_call_expr (built_in_decls[next_fn], 2,
3803 build_fold_addr_expr (istart0),
3804 build_fold_addr_expr (iend0));
3806 else
3808 tree t0, t1, t2, t3, t4;
3809 /* If this is not a combined parallel loop, emit a call to
3810 GOMP_loop_foo_start in ENTRY_BB. */
3811 t4 = build_fold_addr_expr (iend0);
3812 t3 = build_fold_addr_expr (istart0);
3813 t2 = fold_convert (fd->iter_type, fd->loop.step);
3814 if (POINTER_TYPE_P (type)
3815 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3817 /* Avoid casting pointers to integer of a different size. */
3818 tree itype
3819 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3820 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3821 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3823 else
3825 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3826 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3828 if (bias)
3830 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3831 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3833 if (fd->iter_type == long_integer_type_node)
3835 if (fd->chunk_size)
3837 t = fold_convert (fd->iter_type, fd->chunk_size);
3838 t = build_call_expr (built_in_decls[start_fn], 6,
3839 t0, t1, t2, t, t3, t4);
3841 else
3842 t = build_call_expr (built_in_decls[start_fn], 5,
3843 t0, t1, t2, t3, t4);
3845 else
3847 tree t5;
3848 tree c_bool_type;
3850 /* The GOMP_loop_ull_*start functions have an additional boolean
3851 argument, true for < loops and false for > loops.
3852 In Fortran, the C bool type can be different from
3853 boolean_type_node. */
3854 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3855 t5 = build_int_cst (c_bool_type,
3856 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3857 if (fd->chunk_size)
3859 t = fold_convert (fd->iter_type, fd->chunk_size);
3860 t = build_call_expr (built_in_decls[start_fn], 7,
3861 t5, t0, t1, t2, t, t3, t4);
3863 else
3864 t = build_call_expr (built_in_decls[start_fn], 6,
3865 t5, t0, t1, t2, t3, t4);
3868 if (TREE_TYPE (t) != boolean_type_node)
3869 t = fold_build2 (NE_EXPR, boolean_type_node,
3870 t, build_int_cst (TREE_TYPE (t), 0));
3871 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3872 true, GSI_SAME_STMT);
3873 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3875 /* Remove the GIMPLE_OMP_FOR statement. */
3876 gsi_remove (&gsi, true);
3878 /* Iteration setup for sequential loop goes in L0_BB. */
3879 gsi = gsi_start_bb (l0_bb);
3880 t = istart0;
3881 if (bias)
3882 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3883 if (POINTER_TYPE_P (type))
3884 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3885 0), t);
3886 t = fold_convert (type, t);
3887 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3888 false, GSI_CONTINUE_LINKING);
3889 stmt = gimple_build_assign (fd->loop.v, t);
3890 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3892 t = iend0;
3893 if (bias)
3894 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3895 if (POINTER_TYPE_P (type))
3896 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3897 0), t);
3898 t = fold_convert (type, t);
3899 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3900 false, GSI_CONTINUE_LINKING);
3901 if (fd->collapse > 1)
3903 tree tem = create_tmp_var (type, ".tem");
3905 stmt = gimple_build_assign (tem, fd->loop.v);
3906 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3907 for (i = fd->collapse - 1; i >= 0; i--)
3909 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3910 itype = vtype;
3911 if (POINTER_TYPE_P (vtype))
3912 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3913 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3914 t = fold_convert (itype, t);
3915 t = fold_build2 (MULT_EXPR, itype, t,
3916 fold_convert (itype, fd->loops[i].step));
3917 if (POINTER_TYPE_P (vtype))
3918 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3919 fd->loops[i].n1, fold_convert (sizetype, t));
3920 else
3921 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3922 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3923 false, GSI_CONTINUE_LINKING);
3924 stmt = gimple_build_assign (fd->loops[i].v, t);
3925 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3926 if (i != 0)
3928 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3929 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3930 false, GSI_CONTINUE_LINKING);
3931 stmt = gimple_build_assign (tem, t);
3932 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3937 if (!broken_loop)
3939 /* Code to control the increment and predicate for the sequential
3940 loop goes in the CONT_BB. */
3941 gsi = gsi_last_bb (cont_bb);
3942 stmt = gsi_stmt (gsi);
3943 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3944 vmain = gimple_omp_continue_control_use (stmt);
3945 vback = gimple_omp_continue_control_def (stmt);
3947 if (POINTER_TYPE_P (type))
3948 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3949 fold_convert (sizetype, fd->loop.step));
3950 else
3951 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3952 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3953 true, GSI_SAME_STMT);
3954 stmt = gimple_build_assign (vback, t);
3955 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3957 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3958 stmt = gimple_build_cond_empty (t);
3959 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3961 /* Remove GIMPLE_OMP_CONTINUE. */
3962 gsi_remove (&gsi, true);
3964 if (fd->collapse > 1)
3966 basic_block last_bb, bb;
3968 last_bb = cont_bb;
3969 for (i = fd->collapse - 1; i >= 0; i--)
3971 tree vtype = TREE_TYPE (fd->loops[i].v);
3973 bb = create_empty_bb (last_bb);
3974 gsi = gsi_start_bb (bb);
3976 if (i < fd->collapse - 1)
3978 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3979 e->probability = REG_BR_PROB_BASE / 8;
3981 t = fd->loops[i + 1].n1;
3982 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3983 false, GSI_CONTINUE_LINKING);
3984 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3985 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3987 else
3988 collapse_bb = bb;
3990 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3992 if (POINTER_TYPE_P (vtype))
3993 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3994 fd->loops[i].v,
3995 fold_convert (sizetype, fd->loops[i].step));
3996 else
3997 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3998 fd->loops[i].step);
3999 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4000 false, GSI_CONTINUE_LINKING);
4001 stmt = gimple_build_assign (fd->loops[i].v, t);
4002 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4004 if (i > 0)
4006 t = fd->loops[i].n2;
4007 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4008 false, GSI_CONTINUE_LINKING);
4009 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
4010 fd->loops[i].v, t);
4011 stmt = gimple_build_cond_empty (t);
4012 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4013 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4014 e->probability = REG_BR_PROB_BASE * 7 / 8;
4016 else
4017 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4018 last_bb = bb;
4022 /* Emit code to get the next parallel iteration in L2_BB. */
4023 gsi = gsi_start_bb (l2_bb);
4025 t = build_call_expr (built_in_decls[next_fn], 2,
4026 build_fold_addr_expr (istart0),
4027 build_fold_addr_expr (iend0));
4028 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4029 false, GSI_CONTINUE_LINKING);
4030 if (TREE_TYPE (t) != boolean_type_node)
4031 t = fold_build2 (NE_EXPR, boolean_type_node,
4032 t, build_int_cst (TREE_TYPE (t), 0));
4033 stmt = gimple_build_cond_empty (t);
4034 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4037 /* Add the loop cleanup function. */
4038 gsi = gsi_last_bb (exit_bb);
4039 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4040 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
4041 else
4042 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
4043 stmt = gimple_build_call (t, 0);
4044 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4045 gsi_remove (&gsi, true);
4047 /* Connect the new blocks. */
4048 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4049 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4051 if (!broken_loop)
4053 gimple_seq phis;
4055 e = find_edge (cont_bb, l3_bb);
4056 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4058 phis = phi_nodes (l3_bb);
4059 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4061 gimple phi = gsi_stmt (gsi);
4062 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4063 PHI_ARG_DEF_FROM_EDGE (phi, e));
4065 remove_edge (e);
4067 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4068 if (fd->collapse > 1)
4070 e = find_edge (cont_bb, l1_bb);
4071 remove_edge (e);
4072 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4074 else
4076 e = find_edge (cont_bb, l1_bb);
4077 e->flags = EDGE_TRUE_VALUE;
4079 e->probability = REG_BR_PROB_BASE * 7 / 8;
4080 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4081 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4083 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4084 recompute_dominator (CDI_DOMINATORS, l2_bb));
4085 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4086 recompute_dominator (CDI_DOMINATORS, l3_bb));
4087 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4088 recompute_dominator (CDI_DOMINATORS, l0_bb));
4089 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4090 recompute_dominator (CDI_DOMINATORS, l1_bb));
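/* Illustrative sketch (not part of this file): for a simple "<" loop over
   longs, the blocks connected above implement roughly the following driver
   at the source level, shown here for schedule(dynamic).  The GOMP_loop_*
   prototypes below are assumptions inferred from the builtins this function
   references; the authoritative declarations live in libgomp.  */

extern _Bool GOMP_loop_dynamic_start (long, long, long, long, long *, long *);
extern _Bool GOMP_loop_dynamic_next (long *, long *);
extern void GOMP_loop_end (void);

static void
omp_for_generic_sketch (long n1, long n2, long step, long chunk)
{
  long istart, iend, v;

  if (GOMP_loop_dynamic_start (n1, n2, step, chunk, &istart, &iend))
    do
      for (v = istart; v < iend; v += step)
	{
	  /* BODY would run here.  */
	}
    while (GOMP_loop_dynamic_next (&istart, &iend));
  GOMP_loop_end ();
}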
4095 /* A subroutine of expand_omp_for. Generate code for a parallel
4096 loop with static schedule and no specified chunk size. Given
4097 parameters:
4099 for (V = N1; V cond N2; V += STEP) BODY;
4101 where COND is "<" or ">", we generate pseudocode
4103 if (cond is <)
4104 adj = STEP - 1;
4105 else
4106 adj = STEP + 1;
4107 if ((__typeof (V)) -1 > 0 && cond is >)
4108 n = -(adj + N2 - N1) / -STEP;
4109 else
4110 n = (adj + N2 - N1) / STEP;
4111 q = n / nthreads;
4112 q += (q * nthreads != n);
4113 s0 = q * threadid;
4114 e0 = min(s0 + q, n);
4115 V = s0 * STEP + N1;
4116 if (s0 >= e0) goto L2; else goto L0;
4118 e = e0 * STEP + N1;
4120 BODY;
4121 V += STEP;
4122 if (V cond e) goto L1;
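/* Illustrative sketch (not part of this file): assuming the common case of
   an incrementing loop "for (i = 0; i < n; i++)", the partitioning above
   reduces to the plain C below; nthreads and threadid stand for the values
   of omp_get_num_threads () and omp_get_thread_num ().  Each thread runs
   one contiguous block of ceil (n / nthreads) iterations.  */

static void
static_nochunk_sketch (long n, long nthreads, long threadid)
{
  long q, s0, e0, i;

  q = n / nthreads;
  q += (q * nthreads != n);	/* round up: q = ceil (n / nthreads) */
  s0 = q * threadid;
  e0 = s0 + q;
  if (e0 > n)
    e0 = n;			/* e0 = min (s0 + q, n) */
  for (i = s0; i < e0; i++)
    {
      /* BODY would run here.  */
    }
}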
4126 static void
4127 expand_omp_for_static_nochunk (struct omp_region *region,
4128 struct omp_for_data *fd)
4130 tree n, q, s0, e0, e, t, nthreads, threadid;
4131 tree type, itype, vmain, vback;
4132 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4133 basic_block fin_bb;
4134 gimple_stmt_iterator gsi;
4135 gimple stmt;
4137 itype = type = TREE_TYPE (fd->loop.v);
4138 if (POINTER_TYPE_P (type))
4139 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4141 entry_bb = region->entry;
4142 cont_bb = region->cont;
4143 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4144 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4145 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4146 body_bb = single_succ (seq_start_bb);
4147 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4148 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4149 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4150 exit_bb = region->exit;
4152 /* Iteration space partitioning goes in ENTRY_BB. */
4153 gsi = gsi_last_bb (entry_bb);
4154 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4156 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4157 t = fold_convert (itype, t);
4158 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4159 true, GSI_SAME_STMT);
4161 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4162 t = fold_convert (itype, t);
4163 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4164 true, GSI_SAME_STMT);
4166 fd->loop.n1
4167 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4168 true, NULL_TREE, true, GSI_SAME_STMT);
4169 fd->loop.n2
4170 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4171 true, NULL_TREE, true, GSI_SAME_STMT);
4172 fd->loop.step
4173 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4174 true, NULL_TREE, true, GSI_SAME_STMT);
4176 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4177 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4178 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4179 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4180 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4181 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4182 fold_build1 (NEGATE_EXPR, itype, t),
4183 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4184 else
4185 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4186 t = fold_convert (itype, t);
4187 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4189 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4190 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4192 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4193 t = fold_build2 (NE_EXPR, itype, t, n);
4194 t = fold_build2 (PLUS_EXPR, itype, q, t);
4195 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4197 t = build2 (MULT_EXPR, itype, q, threadid);
4198 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4200 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4201 t = fold_build2 (MIN_EXPR, itype, t, n);
4202 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4204 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4205 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4207 /* Remove the GIMPLE_OMP_FOR statement. */
4208 gsi_remove (&gsi, true);
4210 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4211 gsi = gsi_start_bb (seq_start_bb);
4213 t = fold_convert (itype, s0);
4214 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4215 if (POINTER_TYPE_P (type))
4216 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4217 fold_convert (sizetype, t));
4218 else
4219 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4220 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4221 false, GSI_CONTINUE_LINKING);
4222 stmt = gimple_build_assign (fd->loop.v, t);
4223 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4225 t = fold_convert (itype, e0);
4226 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4227 if (POINTER_TYPE_P (type))
4228 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4229 fold_convert (sizetype, t));
4230 else
4231 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4232 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4233 false, GSI_CONTINUE_LINKING);
4235 /* The code controlling the sequential loop replaces the
4236 GIMPLE_OMP_CONTINUE. */
4237 gsi = gsi_last_bb (cont_bb);
4238 stmt = gsi_stmt (gsi);
4239 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4240 vmain = gimple_omp_continue_control_use (stmt);
4241 vback = gimple_omp_continue_control_def (stmt);
4243 if (POINTER_TYPE_P (type))
4244 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4245 fold_convert (sizetype, fd->loop.step));
4246 else
4247 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4248 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4249 true, GSI_SAME_STMT);
4250 stmt = gimple_build_assign (vback, t);
4251 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4253 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4254 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4256 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4257 gsi_remove (&gsi, true);
4259 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4260 gsi = gsi_last_bb (exit_bb);
4261 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4262 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4263 false, GSI_SAME_STMT);
4264 gsi_remove (&gsi, true);
4266 /* Connect all the blocks. */
4267 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4268 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4270 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4271 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4273 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4274 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4275 recompute_dominator (CDI_DOMINATORS, body_bb));
4276 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4277 recompute_dominator (CDI_DOMINATORS, fin_bb));
4281 /* A subroutine of expand_omp_for. Generate code for a parallel
4282 loop with static schedule and a specified chunk size. Given
4283 parameters:
4285 for (V = N1; V cond N2; V += STEP) BODY;
4287 where COND is "<" or ">", we generate pseudocode
4289 if (cond is <)
4290 adj = STEP - 1;
4291 else
4292 adj = STEP + 1;
4293 if ((__typeof (V)) -1 > 0 && cond is >)
4294 n = -(adj + N2 - N1) / -STEP;
4295 else
4296 n = (adj + N2 - N1) / STEP;
4297 trip = 0;
4298 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4299 here so that V is defined
4300 if the loop is not entered
4302 s0 = (trip * nthreads + threadid) * CHUNK;
4303 e0 = min(s0 + CHUNK, n);
4304 if (s0 < n) goto L1; else goto L4;
4306 V = s0 * STEP + N1;
4307 e = e0 * STEP + N1;
4309 BODY;
4310 V += STEP;
4311 if (V cond e) goto L2; else goto L3;
4313 trip += 1;
4314 goto L0;
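/* Illustrative sketch (not part of this file): for the same simple loop
   "for (i = 0; i < n; i++)" with schedule(static, chunk), the trip-based
   partitioning above behaves like the plain C below; nthreads, threadid and
   chunk are assumed inputs.  Chunks are handed out round robin, so thread T
   runs chunks T, T + nthreads, T + 2*nthreads, ...  */

static void
static_chunk_sketch (long n, long nthreads, long threadid, long chunk)
{
  long trip, s0, e0, i;

  for (trip = 0; ; trip++)
    {
      s0 = (trip * nthreads + threadid) * chunk;
      if (s0 >= n)
	break;			/* no chunk left for this thread */
      e0 = s0 + chunk;
      if (e0 > n)
	e0 = n;			/* e0 = min (s0 + chunk, n) */
      for (i = s0; i < e0; i++)
	{
	  /* BODY would run here.  */
	}
    }
}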
4318 static void
4319 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4321 tree n, s0, e0, e, t;
4322 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4323 tree type, itype, v_main, v_back, v_extra;
4324 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4325 basic_block trip_update_bb, cont_bb, fin_bb;
4326 gimple_stmt_iterator si;
4327 gimple stmt;
4328 edge se;
4330 itype = type = TREE_TYPE (fd->loop.v);
4331 if (POINTER_TYPE_P (type))
4332 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4334 entry_bb = region->entry;
4335 se = split_block (entry_bb, last_stmt (entry_bb));
4336 entry_bb = se->src;
4337 iter_part_bb = se->dest;
4338 cont_bb = region->cont;
4339 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4340 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4341 == FALLTHRU_EDGE (cont_bb)->dest);
4342 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4343 body_bb = single_succ (seq_start_bb);
4344 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4345 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4346 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4347 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4348 exit_bb = region->exit;
4350 /* Trip and adjustment setup goes in ENTRY_BB. */
4351 si = gsi_last_bb (entry_bb);
4352 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4354 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4355 t = fold_convert (itype, t);
4356 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4357 true, GSI_SAME_STMT);
4359 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4360 t = fold_convert (itype, t);
4361 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4362 true, GSI_SAME_STMT);
4364 fd->loop.n1
4365 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4366 true, NULL_TREE, true, GSI_SAME_STMT);
4367 fd->loop.n2
4368 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4369 true, NULL_TREE, true, GSI_SAME_STMT);
4370 fd->loop.step
4371 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4372 true, NULL_TREE, true, GSI_SAME_STMT);
4373 fd->chunk_size
4374 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4375 true, NULL_TREE, true, GSI_SAME_STMT);
4377 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4378 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4379 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4380 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4381 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4382 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4383 fold_build1 (NEGATE_EXPR, itype, t),
4384 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4385 else
4386 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4387 t = fold_convert (itype, t);
4388 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4389 true, GSI_SAME_STMT);
4391 trip_var = create_tmp_var (itype, ".trip");
4392 if (gimple_in_ssa_p (cfun))
4394 add_referenced_var (trip_var);
4395 trip_init = make_ssa_name (trip_var, NULL);
4396 trip_main = make_ssa_name (trip_var, NULL);
4397 trip_back = make_ssa_name (trip_var, NULL);
4399 else
4401 trip_init = trip_var;
4402 trip_main = trip_var;
4403 trip_back = trip_var;
4406 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4407 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4409 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4410 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4411 if (POINTER_TYPE_P (type))
4412 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4413 fold_convert (sizetype, t));
4414 else
4415 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4416 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4417 true, GSI_SAME_STMT);
4419 /* Remove the GIMPLE_OMP_FOR. */
4420 gsi_remove (&si, true);
4422 /* Iteration space partitioning goes in ITER_PART_BB. */
4423 si = gsi_last_bb (iter_part_bb);
4425 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4426 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4427 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4428 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4429 false, GSI_CONTINUE_LINKING);
4431 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4432 t = fold_build2 (MIN_EXPR, itype, t, n);
4433 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4434 false, GSI_CONTINUE_LINKING);
4436 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4437 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4439 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4440 si = gsi_start_bb (seq_start_bb);
4442 t = fold_convert (itype, s0);
4443 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4444 if (POINTER_TYPE_P (type))
4445 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4446 fold_convert (sizetype, t));
4447 else
4448 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4449 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4450 false, GSI_CONTINUE_LINKING);
4451 stmt = gimple_build_assign (fd->loop.v, t);
4452 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4454 t = fold_convert (itype, e0);
4455 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4456 if (POINTER_TYPE_P (type))
4457 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4458 fold_convert (sizetype, t));
4459 else
4460 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4461 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4462 false, GSI_CONTINUE_LINKING);
4464 /* The code controlling the sequential loop goes in CONT_BB,
4465 replacing the GIMPLE_OMP_CONTINUE. */
4466 si = gsi_last_bb (cont_bb);
4467 stmt = gsi_stmt (si);
4468 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4469 v_main = gimple_omp_continue_control_use (stmt);
4470 v_back = gimple_omp_continue_control_def (stmt);
4472 if (POINTER_TYPE_P (type))
4473 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4474 fold_convert (sizetype, fd->loop.step));
4475 else
4476 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4477 stmt = gimple_build_assign (v_back, t);
4478 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4480 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4481 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4483 /* Remove GIMPLE_OMP_CONTINUE. */
4484 gsi_remove (&si, true);
4486 /* Trip update code goes into TRIP_UPDATE_BB. */
4487 si = gsi_start_bb (trip_update_bb);
4489 t = build_int_cst (itype, 1);
4490 t = build2 (PLUS_EXPR, itype, trip_main, t);
4491 stmt = gimple_build_assign (trip_back, t);
4492 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4494 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4495 si = gsi_last_bb (exit_bb);
4496 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4497 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4498 false, GSI_SAME_STMT);
4499 gsi_remove (&si, true);
4501 /* Connect the new blocks. */
4502 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4503 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4505 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4506 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4508 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4510 if (gimple_in_ssa_p (cfun))
4512 gimple_stmt_iterator psi;
4513 gimple phi;
4514 edge re, ene;
4515 edge_var_map_vector head;
4516 edge_var_map *vm;
4517 size_t i;
4519 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4520 remove arguments of the phi nodes in fin_bb. We need to create
4521 appropriate phi nodes in iter_part_bb instead. */
4522 se = single_pred_edge (fin_bb);
4523 re = single_succ_edge (trip_update_bb);
4524 head = redirect_edge_var_map_vector (re);
4525 ene = single_succ_edge (entry_bb);
4527 psi = gsi_start_phis (fin_bb);
4528 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4529 gsi_next (&psi), ++i)
4531 gimple nphi;
4532 source_location locus;
4534 phi = gsi_stmt (psi);
4535 t = gimple_phi_result (phi);
4536 gcc_assert (t == redirect_edge_var_map_result (vm));
4537 nphi = create_phi_node (t, iter_part_bb);
4538 SSA_NAME_DEF_STMT (t) = nphi;
4540 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4541 locus = gimple_phi_arg_location_from_edge (phi, se);
4543 /* A special case -- fd->loop.v is not yet computed in
4544 iter_part_bb; we need to use v_extra instead. */
4545 if (t == fd->loop.v)
4546 t = v_extra;
4547 add_phi_arg (nphi, t, ene, locus);
4548 locus = redirect_edge_var_map_location (vm);
4549 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4551 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4552 redirect_edge_var_map_clear (re);
4553 while (1)
4555 psi = gsi_start_phis (fin_bb);
4556 if (gsi_end_p (psi))
4557 break;
4558 remove_phi_node (&psi, false);
4561 /* Make phi node for trip. */
4562 phi = create_phi_node (trip_main, iter_part_bb);
4563 SSA_NAME_DEF_STMT (trip_main) = phi;
4564 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4565 UNKNOWN_LOCATION);
4566 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4567 UNKNOWN_LOCATION);
4570 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4571 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4572 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4573 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4574 recompute_dominator (CDI_DOMINATORS, fin_bb));
4575 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4576 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4577 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4578 recompute_dominator (CDI_DOMINATORS, body_bb));
4582 /* Expand the OpenMP loop defined by REGION. */
4584 static void
4585 expand_omp_for (struct omp_region *region)
4587 struct omp_for_data fd;
4588 struct omp_for_data_loop *loops;
4590 loops
4591 = (struct omp_for_data_loop *)
4592 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4593 * sizeof (struct omp_for_data_loop));
4594 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4595 region->sched_kind = fd.sched_kind;
4597 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4598 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4599 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4600 if (region->cont)
4602 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4603 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4604 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4607 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4608 && !fd.have_ordered
4609 && fd.collapse == 1
4610 && region->cont != NULL)
4612 if (fd.chunk_size == NULL)
4613 expand_omp_for_static_nochunk (region, &fd);
4614 else
4615 expand_omp_for_static_chunk (region, &fd);
4617 else
4619 int fn_index, start_ix, next_ix;
4621 if (fd.chunk_size == NULL
4622 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
4623 fd.chunk_size = integer_zero_node;
4624 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4625 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4626 ? 3 : fd.sched_kind;
4627 fn_index += fd.have_ordered * 4;
4628 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4629 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
4630 if (fd.iter_type == long_long_unsigned_type_node)
4632 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4633 - BUILT_IN_GOMP_LOOP_STATIC_START;
4634 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4635 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4637 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4638 (enum built_in_function) next_ix);
4641 update_ssa (TODO_update_ssa_only_virtuals);
4645 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4647 v = GOMP_sections_start (n);
4649 switch (v)
4651 case 0:
4652 goto L2;
4653 case 1:
4654 section 1;
4655 goto L1;
4656 case 2:
4658 case n:
4660 default:
4661 abort ();
4664 v = GOMP_sections_next ();
4665 goto L0;
4667 reduction;
4669 If this is a combined parallel sections region, replace the call to
4670 GOMP_sections_start with a call to GOMP_sections_next. */
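/* Illustrative sketch (not part of this file): the dispatch built below is
   roughly the following plain C for a construct with two sections.  The
   GOMP_sections_* prototypes are assumptions inferred from the builtins used
   in this function; GOMP_sections_start and GOMP_sections_next return the
   1-based index of the next section this thread should run, or 0 when no
   work remains.  */

extern unsigned GOMP_sections_start (unsigned);
extern unsigned GOMP_sections_next (void);
extern void GOMP_sections_end (void);

static void
sections_sketch (void)
{
  unsigned v;

  for (v = GOMP_sections_start (2); v != 0; v = GOMP_sections_next ())
    switch (v)
      {
      case 1:
	/* section 1 body */
	break;
      case 2:
	/* section 2 body */
	break;
      default:
	__builtin_trap ();	/* mirrors the DEFAULT_BB error path */
      }
  GOMP_sections_end ();
}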
4672 static void
4673 expand_omp_sections (struct omp_region *region)
4675 tree t, u, vin = NULL, vmain, vnext, l2;
4676 VEC (tree,heap) *label_vec;
4677 unsigned len;
4678 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4679 gimple_stmt_iterator si, switch_si;
4680 gimple sections_stmt, stmt, cont;
4681 edge_iterator ei;
4682 edge e;
4683 struct omp_region *inner;
4684 unsigned i, casei;
4685 bool exit_reachable = region->cont != NULL;
4687 gcc_assert (exit_reachable == (region->exit != NULL));
4688 entry_bb = region->entry;
4689 l0_bb = single_succ (entry_bb);
4690 l1_bb = region->cont;
4691 l2_bb = region->exit;
4692 if (exit_reachable)
4694 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
4695 l2 = gimple_block_label (l2_bb);
4696 else
4698 /* This can happen if there are reductions. */
4699 len = EDGE_COUNT (l0_bb->succs);
4700 gcc_assert (len > 0);
4701 e = EDGE_SUCC (l0_bb, len - 1);
4702 si = gsi_last_bb (e->dest);
4703 l2 = NULL_TREE;
4704 if (gsi_end_p (si)
4705 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4706 l2 = gimple_block_label (e->dest);
4707 else
4708 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4710 si = gsi_last_bb (e->dest);
4711 if (gsi_end_p (si)
4712 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4714 l2 = gimple_block_label (e->dest);
4715 break;
4719 default_bb = create_empty_bb (l1_bb->prev_bb);
4721 else
4723 default_bb = create_empty_bb (l0_bb);
4724 l2 = gimple_block_label (default_bb);
4727 /* We will build a switch() with enough cases for all the
4728 GIMPLE_OMP_SECTION regions, a '0' case taken when no more work remains
4729 and a default case to abort if something goes wrong. */
4730 len = EDGE_COUNT (l0_bb->succs);
4732 /* Use VEC_quick_push on label_vec throughout, since we know the size
4733 in advance. */
4734 label_vec = VEC_alloc (tree, heap, len);
4736 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4737 GIMPLE_OMP_SECTIONS statement. */
4738 si = gsi_last_bb (entry_bb);
4739 sections_stmt = gsi_stmt (si);
4740 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4741 vin = gimple_omp_sections_control (sections_stmt);
4742 if (!is_combined_parallel (region))
4744 /* If we are not inside a combined parallel+sections region,
4745 call GOMP_sections_start. */
4746 t = build_int_cst (unsigned_type_node,
4747 exit_reachable ? len - 1 : len);
4748 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4749 stmt = gimple_build_call (u, 1, t);
4751 else
4753 /* Otherwise, call GOMP_sections_next. */
4754 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4755 stmt = gimple_build_call (u, 0);
4757 gimple_call_set_lhs (stmt, vin);
4758 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4759 gsi_remove (&si, true);
4761 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4762 L0_BB. */
4763 switch_si = gsi_last_bb (l0_bb);
4764 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4765 if (exit_reachable)
4767 cont = last_stmt (l1_bb);
4768 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4769 vmain = gimple_omp_continue_control_use (cont);
4770 vnext = gimple_omp_continue_control_def (cont);
4772 else
4774 vmain = vin;
4775 vnext = NULL_TREE;
4778 i = 0;
4779 if (exit_reachable)
4781 t = build3 (CASE_LABEL_EXPR, void_type_node,
4782 build_int_cst (unsigned_type_node, 0), NULL, l2);
4783 VEC_quick_push (tree, label_vec, t);
4784 i++;
4787 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4788 for (inner = region->inner, casei = 1;
4789 inner;
4790 inner = inner->next, i++, casei++)
4792 basic_block s_entry_bb, s_exit_bb;
4794 /* Skip optional reduction region. */
4795 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4797 --i;
4798 --casei;
4799 continue;
4802 s_entry_bb = inner->entry;
4803 s_exit_bb = inner->exit;
4805 t = gimple_block_label (s_entry_bb);
4806 u = build_int_cst (unsigned_type_node, casei);
4807 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
4808 VEC_quick_push (tree, label_vec, u);
4810 si = gsi_last_bb (s_entry_bb);
4811 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4812 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4813 gsi_remove (&si, true);
4814 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4816 if (s_exit_bb == NULL)
4817 continue;
4819 si = gsi_last_bb (s_exit_bb);
4820 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4821 gsi_remove (&si, true);
4823 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4826 /* Error handling code goes in DEFAULT_BB. */
4827 t = gimple_block_label (default_bb);
4828 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
4829 make_edge (l0_bb, default_bb, 0);
4831 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4832 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4833 gsi_remove (&switch_si, true);
4834 VEC_free (tree, heap, label_vec);
4836 si = gsi_start_bb (default_bb);
4837 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4838 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4840 if (exit_reachable)
4842 /* Code to get the next section goes in L1_BB. */
4843 si = gsi_last_bb (l1_bb);
4844 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4846 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4847 gimple_call_set_lhs (stmt, vnext);
4848 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4849 gsi_remove (&si, true);
4851 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4853 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4854 si = gsi_last_bb (l2_bb);
4855 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4856 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4857 else
4858 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4859 stmt = gimple_build_call (t, 0);
4860 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4861 gsi_remove (&si, true);
4864 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4868 /* Expand code for an OpenMP single directive. We've already expanded
4869 much of the code; here we simply place the GOMP_barrier call. */
4871 static void
4872 expand_omp_single (struct omp_region *region)
4874 basic_block entry_bb, exit_bb;
4875 gimple_stmt_iterator si;
4876 bool need_barrier = false;
4878 entry_bb = region->entry;
4879 exit_bb = region->exit;
4881 si = gsi_last_bb (entry_bb);
4882 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4883 be removed. We need to ensure that the thread that entered the single
4884 does not exit before the data is copied out by the other threads. */
4885 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4886 OMP_CLAUSE_COPYPRIVATE))
4887 need_barrier = true;
4888 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4889 gsi_remove (&si, true);
4890 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4892 si = gsi_last_bb (exit_bb);
4893 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4894 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4895 false, GSI_SAME_STMT);
4896 gsi_remove (&si, true);
4897 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4901 /* Generic expansion for OpenMP synchronization directives: master,
4902 ordered and critical. All we need to do here is remove the entry
4903 and exit markers for REGION. */
4905 static void
4906 expand_omp_synch (struct omp_region *region)
4908 basic_block entry_bb, exit_bb;
4909 gimple_stmt_iterator si;
4911 entry_bb = region->entry;
4912 exit_bb = region->exit;
4914 si = gsi_last_bb (entry_bb);
4915 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4916 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4917 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4918 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4919 gsi_remove (&si, true);
4920 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4922 if (exit_bb)
4924 si = gsi_last_bb (exit_bb);
4925 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4926 gsi_remove (&si, true);
4927 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4931 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4932 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4933 size of the data type, and thus usable to find the index of the builtin
4934 decl. Returns false if the expression is not of the proper form. */
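/* Illustrative sketch (not part of this file): a source-level update such as
   "#pragma omp atomic" followed by "x += 1;" on a suitably aligned int
   matches the fetch-op shape described below and is expanded into a single
   __sync builtin, roughly:  */

static void
atomic_fetch_op_sketch (int *x)
{
  (void) __sync_fetch_and_add (x, 1);	/* result discarded, side effect kept */
}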
4936 static bool
4937 expand_omp_atomic_fetch_op (basic_block load_bb,
4938 tree addr, tree loaded_val,
4939 tree stored_val, int index)
4941 enum built_in_function base;
4942 tree decl, itype, call;
4943 direct_optab optab;
4944 tree rhs;
4945 basic_block store_bb = single_succ (load_bb);
4946 gimple_stmt_iterator gsi;
4947 gimple stmt;
4948 location_t loc;
4950 /* We expect to find the following sequences:
4952 load_bb:
4953 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
4955 store_bb:
4956 val = tmp OP something; (or: something OP tmp)
4957 GIMPLE_OMP_ATOMIC_STORE (val)
4959 ???FIXME: Allow a more flexible sequence.
4960 Perhaps use data flow to pick the statements.
4964 gsi = gsi_after_labels (store_bb);
4965 stmt = gsi_stmt (gsi);
4966 loc = gimple_location (stmt);
4967 if (!is_gimple_assign (stmt))
4968 return false;
4969 gsi_next (&gsi);
4970 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
4971 return false;
4973 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
4974 return false;
4976 /* Check for one of the supported fetch-op operations. */
4977 switch (gimple_assign_rhs_code (stmt))
4979 case PLUS_EXPR:
4980 case POINTER_PLUS_EXPR:
4981 base = BUILT_IN_FETCH_AND_ADD_N;
4982 optab = sync_add_optab;
4983 break;
4984 case MINUS_EXPR:
4985 base = BUILT_IN_FETCH_AND_SUB_N;
4986 optab = sync_add_optab;
4987 break;
4988 case BIT_AND_EXPR:
4989 base = BUILT_IN_FETCH_AND_AND_N;
4990 optab = sync_and_optab;
4991 break;
4992 case BIT_IOR_EXPR:
4993 base = BUILT_IN_FETCH_AND_OR_N;
4994 optab = sync_ior_optab;
4995 break;
4996 case BIT_XOR_EXPR:
4997 base = BUILT_IN_FETCH_AND_XOR_N;
4998 optab = sync_xor_optab;
4999 break;
5000 default:
5001 return false;
5003 /* Make sure the expression is of the proper form. */
5004 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5005 rhs = gimple_assign_rhs2 (stmt);
5006 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5007 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5008 rhs = gimple_assign_rhs1 (stmt);
5009 else
5010 return false;
5012 decl = built_in_decls[base + index + 1];
5013 if (decl == NULL_TREE)
5014 return false;
5015 itype = TREE_TYPE (TREE_TYPE (decl));
5017 if (direct_optab_handler (optab, TYPE_MODE (itype)) == CODE_FOR_nothing)
5018 return false;
5020 gsi = gsi_last_bb (load_bb);
5021 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
5022 call = build_call_expr_loc (loc,
5023 decl, 2, addr,
5024 fold_convert_loc (loc, itype, rhs));
5025 call = fold_convert_loc (loc, void_type_node, call);
5026 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5027 gsi_remove (&gsi, true);
5029 gsi = gsi_last_bb (store_bb);
5030 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5031 gsi_remove (&gsi, true);
5032 gsi = gsi_last_bb (store_bb);
5033 gsi_remove (&gsi, true);
5035 if (gimple_in_ssa_p (cfun))
5036 update_ssa (TODO_update_ssa_no_phi);
5038 return true;
5041 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5043 oldval = *addr;
5044 repeat:
5045 newval = rhs; // with oldval replacing *addr in rhs
5046 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
5047 if (oldval != newval)
5048 goto repeat;
5050 INDEX is log2 of the size of the data type, and thus usable to find the
5051 index of the builtin decl. */
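/* Illustrative sketch (not part of this file): for an update with no
   matching fetch-op builtin, e.g. "#pragma omp atomic" with "x *= 3;" on an
   int, the loop described above is roughly equivalent to:  */

static void
atomic_cas_sketch (int *x)
{
  int oldval = *x;

  for (;;)
    {
      int newval = oldval * 3;
      int prev = __sync_val_compare_and_swap (x, oldval, newval);
      if (prev == oldval)
	break;			/* the swap happened; we are done */
      oldval = prev;		/* somebody raced us; retry with the new value */
    }
}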
5053 static bool
5054 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5055 tree addr, tree loaded_val, tree stored_val,
5056 int index)
5058 tree loadedi, storedi, initial, new_storedi, old_vali;
5059 tree type, itype, cmpxchg, iaddr;
5060 gimple_stmt_iterator si;
5061 basic_block loop_header = single_succ (load_bb);
5062 gimple phi, stmt;
5063 edge e;
5065 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
5066 if (cmpxchg == NULL_TREE)
5067 return false;
5068 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5069 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5071 if (direct_optab_handler (sync_compare_and_swap_optab, TYPE_MODE (itype))
5072 == CODE_FOR_nothing)
5073 return false;
5075 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5076 si = gsi_last_bb (load_bb);
5077 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5079 /* For floating-point values, we'll need to view-convert them to integers
5080 so that we can perform the atomic compare and swap. Simplify the
5081 following code by always setting up the "i"ntegral variables. */
5082 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5084 tree iaddr_val;
5086 iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
5087 true), NULL);
5088 iaddr_val
5089 = force_gimple_operand_gsi (&si,
5090 fold_convert (TREE_TYPE (iaddr), addr),
5091 false, NULL_TREE, true, GSI_SAME_STMT);
5092 stmt = gimple_build_assign (iaddr, iaddr_val);
5093 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5094 loadedi = create_tmp_var (itype, NULL);
5095 if (gimple_in_ssa_p (cfun))
5097 add_referenced_var (iaddr);
5098 add_referenced_var (loadedi);
5099 loadedi = make_ssa_name (loadedi, NULL);
5102 else
5104 iaddr = addr;
5105 loadedi = loaded_val;
5108 initial
5109 = force_gimple_operand_gsi (&si,
5110 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5111 iaddr,
5112 build_int_cst (TREE_TYPE (iaddr), 0)),
5113 true, NULL_TREE, true, GSI_SAME_STMT);
5115 /* Move the value to the LOADEDI temporary. */
5116 if (gimple_in_ssa_p (cfun))
5118 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5119 phi = create_phi_node (loadedi, loop_header);
5120 SSA_NAME_DEF_STMT (loadedi) = phi;
5121 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5122 initial);
5124 else
5125 gsi_insert_before (&si,
5126 gimple_build_assign (loadedi, initial),
5127 GSI_SAME_STMT);
5128 if (loadedi != loaded_val)
5130 gimple_stmt_iterator gsi2;
5131 tree x;
5133 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5134 gsi2 = gsi_start_bb (loop_header);
5135 if (gimple_in_ssa_p (cfun))
5137 gimple stmt;
5138 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5139 true, GSI_SAME_STMT);
5140 stmt = gimple_build_assign (loaded_val, x);
5141 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5143 else
5145 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5146 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5147 true, GSI_SAME_STMT);
5150 gsi_remove (&si, true);
5152 si = gsi_last_bb (store_bb);
5153 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5155 if (iaddr == addr)
5156 storedi = stored_val;
5157 else
5158 storedi =
5159 force_gimple_operand_gsi (&si,
5160 build1 (VIEW_CONVERT_EXPR, itype,
5161 stored_val), true, NULL_TREE, true,
5162 GSI_SAME_STMT);
5164 /* Build the compare&swap statement. */
5165 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5166 new_storedi = force_gimple_operand_gsi (&si,
5167 fold_convert (TREE_TYPE (loadedi),
5168 new_storedi),
5169 true, NULL_TREE,
5170 true, GSI_SAME_STMT);
5172 if (gimple_in_ssa_p (cfun))
5173 old_vali = loadedi;
5174 else
5176 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5177 if (gimple_in_ssa_p (cfun))
5178 add_referenced_var (old_vali);
5179 stmt = gimple_build_assign (old_vali, loadedi);
5180 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5182 stmt = gimple_build_assign (loadedi, new_storedi);
5183 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5186 /* Note that we always perform the comparison as an integer, even for
5187 floating point. This allows the atomic operation to properly
5188 succeed even with NaNs and -0.0. */
5189 stmt = gimple_build_cond_empty
5190 (build2 (NE_EXPR, boolean_type_node,
5191 new_storedi, old_vali));
5192 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5194 /* Update cfg. */
5195 e = single_succ_edge (store_bb);
5196 e->flags &= ~EDGE_FALLTHRU;
5197 e->flags |= EDGE_FALSE_VALUE;
5199 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5201 /* Copy the new value to loadedi (we already did that before the condition
5202 if we are not in SSA). */
5203 if (gimple_in_ssa_p (cfun))
5205 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5206 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5209 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5210 gsi_remove (&si, true);
5212 if (gimple_in_ssa_p (cfun))
5213 update_ssa (TODO_update_ssa_no_phi);
5215 return true;
5218 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5220 GOMP_atomic_start ();
5221 *addr = rhs;
5222 GOMP_atomic_end ();
5224 The result is not globally atomic, but works so long as all parallel
5225 references are within #pragma omp atomic directives. According to
5226 responses received from omp@openmp.org, this appears to be within spec,
5227 which makes sense, since that's how several other compilers handle
5228 this situation as well.
5229 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5230 expanding. STORED_VAL is the operand of the matching
5231 GIMPLE_OMP_ATOMIC_STORE.
5233 We replace
5234 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5235 loaded_val = *addr;
5237 and replace
5238 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5239 *addr = stored_val;
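/* Illustrative sketch (not part of this file): for a type with no usable
   __sync support the update is simply bracketed by the libgomp global lock;
   the prototypes below are assumptions inferred from the builtins used in
   this function.  */

extern void GOMP_atomic_start (void);
extern void GOMP_atomic_end (void);

static void
atomic_mutex_sketch (long double *x, long double rhs)
{
  GOMP_atomic_start ();
  *x = *x + rhs;
  GOMP_atomic_end ();
}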
5242 static bool
5243 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5244 tree addr, tree loaded_val, tree stored_val)
5246 gimple_stmt_iterator si;
5247 gimple stmt;
5248 tree t;
5250 si = gsi_last_bb (load_bb);
5251 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5253 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5254 t = build_call_expr (t, 0);
5255 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5257 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
5258 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5259 gsi_remove (&si, true);
5261 si = gsi_last_bb (store_bb);
5262 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5264 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5265 stored_val);
5266 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5268 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5269 t = build_call_expr (t, 0);
5270 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5271 gsi_remove (&si, true);
5273 if (gimple_in_ssa_p (cfun))
5274 update_ssa (TODO_update_ssa_no_phi);
5275 return true;
5278 /* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand it
5279 using expand_omp_atomic_fetch_op. If that fails, we try to
5280 call expand_omp_atomic_pipeline, and if it fails too, the
5281 ultimate fallback is wrapping the operation in a mutex
5282 (expand_omp_atomic_mutex). REGION is the atomic region built
5283 by build_omp_regions_1(). */
5285 static void
5286 expand_omp_atomic (struct omp_region *region)
5288 basic_block load_bb = region->entry, store_bb = region->exit;
5289 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5290 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5291 tree addr = gimple_omp_atomic_load_rhs (load);
5292 tree stored_val = gimple_omp_atomic_store_val (store);
5293 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5294 HOST_WIDE_INT index;
5296 /* Make sure the type is one of the supported sizes. */
5297 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5298 index = exact_log2 (index);
5299 if (index >= 0 && index <= 4)
5301 unsigned int align = TYPE_ALIGN_UNIT (type);
5303 /* __sync builtins require strict data alignment. */
5304 if (exact_log2 (align) >= index)
5306 /* When possible, use specialized atomic update functions. */
5307 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5308 && store_bb == single_succ (load_bb))
5310 if (expand_omp_atomic_fetch_op (load_bb, addr,
5311 loaded_val, stored_val, index))
5312 return;
5315 /* If we don't have specialized __sync builtins, try to implement
5316 it as a compare-and-swap loop. */
5317 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5318 loaded_val, stored_val, index))
5319 return;
5323 /* The ultimate fallback is wrapping the operation in a mutex. */
5324 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5328 /* Expand the parallel region tree rooted at REGION. Expansion
5329 proceeds in depth-first order. Innermost regions are expanded
5330 first. This way, parallel regions that require a new function to
5331 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5332 internal dependencies in their body. */
5334 static void
5335 expand_omp (struct omp_region *region)
5337 while (region)
5339 location_t saved_location;
5341 /* First, determine whether this is a combined parallel+workshare
5342 region. */
5343 if (region->type == GIMPLE_OMP_PARALLEL)
5344 determine_parallel_type (region);
5346 if (region->inner)
5347 expand_omp (region->inner);
5349 saved_location = input_location;
5350 if (gimple_has_location (last_stmt (region->entry)))
5351 input_location = gimple_location (last_stmt (region->entry));
5353 switch (region->type)
5355 case GIMPLE_OMP_PARALLEL:
5356 case GIMPLE_OMP_TASK:
5357 expand_omp_taskreg (region);
5358 break;
5360 case GIMPLE_OMP_FOR:
5361 expand_omp_for (region);
5362 break;
5364 case GIMPLE_OMP_SECTIONS:
5365 expand_omp_sections (region);
5366 break;
5368 case GIMPLE_OMP_SECTION:
5369 /* Individual omp sections are handled together with their
5370 parent GIMPLE_OMP_SECTIONS region. */
5371 break;
5373 case GIMPLE_OMP_SINGLE:
5374 expand_omp_single (region);
5375 break;
5377 case GIMPLE_OMP_MASTER:
5378 case GIMPLE_OMP_ORDERED:
5379 case GIMPLE_OMP_CRITICAL:
5380 expand_omp_synch (region);
5381 break;
5383 case GIMPLE_OMP_ATOMIC_LOAD:
5384 expand_omp_atomic (region);
5385 break;
5387 default:
5388 gcc_unreachable ();
5391 input_location = saved_location;
5392 region = region->next;
5397 /* Helper for build_omp_regions. Scan the dominator tree starting at
5398 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5399 true, the function ends once a single tree is built (otherwise, the whole
5400 forest of OMP constructs may be built). */
5402 static void
5403 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5404 bool single_tree)
5406 gimple_stmt_iterator gsi;
5407 gimple stmt;
5408 basic_block son;
5410 gsi = gsi_last_bb (bb);
5411 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5413 struct omp_region *region;
5414 enum gimple_code code;
5416 stmt = gsi_stmt (gsi);
5417 code = gimple_code (stmt);
5418 if (code == GIMPLE_OMP_RETURN)
5420 /* STMT is the return point out of region PARENT. Mark it
5421 as the exit point and make PARENT the immediately
5422 enclosing region. */
5423 gcc_assert (parent);
5424 region = parent;
5425 region->exit = bb;
5426 parent = parent->outer;
5428 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5430 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5431 GIMPLE_OMP_RETURN, but matches with
5432 GIMPLE_OMP_ATOMIC_LOAD. */
5433 gcc_assert (parent);
5434 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5435 region = parent;
5436 region->exit = bb;
5437 parent = parent->outer;
5440 else if (code == GIMPLE_OMP_CONTINUE)
5442 gcc_assert (parent);
5443 parent->cont = bb;
5445 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5447 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5448 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5451 else
5453 /* Otherwise, this directive becomes the parent for a new
5454 region. */
5455 region = new_omp_region (bb, code, parent);
5456 parent = region;
5460 if (single_tree && !parent)
5461 return;
5463 for (son = first_dom_son (CDI_DOMINATORS, bb);
5464 son;
5465 son = next_dom_son (CDI_DOMINATORS, son))
5466 build_omp_regions_1 (son, parent, single_tree);
5469 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5470 root_omp_region. */
5472 static void
5473 build_omp_regions_root (basic_block root)
5475 gcc_assert (root_omp_region == NULL);
5476 build_omp_regions_1 (root, NULL, true);
5477 gcc_assert (root_omp_region != NULL);
5480 /* Expands omp construct (and its subconstructs) starting in HEAD. */
5482 void
5483 omp_expand_local (basic_block head)
5485 build_omp_regions_root (head);
5486 if (dump_file && (dump_flags & TDF_DETAILS))
5488 fprintf (dump_file, "\nOMP region tree\n\n");
5489 dump_omp_region (dump_file, root_omp_region, 0);
5490 fprintf (dump_file, "\n");
5493 remove_exit_barriers (root_omp_region);
5494 expand_omp (root_omp_region);
5496 free_omp_regions ();
5499 /* Scan the CFG and build a tree of OMP regions, storing the root of
5500 the OMP region tree in root_omp_region. */
5502 static void
5503 build_omp_regions (void)
5505 gcc_assert (root_omp_region == NULL);
5506 calculate_dominance_info (CDI_DOMINATORS);
5507 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5510 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5512 static unsigned int
5513 execute_expand_omp (void)
5515 build_omp_regions ();
5517 if (!root_omp_region)
5518 return 0;
5520 if (dump_file)
5522 fprintf (dump_file, "\nOMP region tree\n\n");
5523 dump_omp_region (dump_file, root_omp_region, 0);
5524 fprintf (dump_file, "\n");
5527 remove_exit_barriers (root_omp_region);
5529 expand_omp (root_omp_region);
5531 cleanup_tree_cfg ();
5533 free_omp_regions ();
5535 return 0;
5538 /* OMP expansion -- the default pass, run before creation of SSA form. */
5540 static bool
5541 gate_expand_omp (void)
5543 return (flag_openmp != 0 && !seen_error ());
5546 struct gimple_opt_pass pass_expand_omp =
5549 GIMPLE_PASS,
5550 "ompexp", /* name */
5551 gate_expand_omp, /* gate */
5552 execute_expand_omp, /* execute */
5553 NULL, /* sub */
5554 NULL, /* next */
5555 0, /* static_pass_number */
5556 TV_NONE, /* tv_id */
5557 PROP_gimple_any, /* properties_required */
5558 0, /* properties_provided */
5559 0, /* properties_destroyed */
5560 0, /* todo_flags_start */
5561 TODO_dump_func /* todo_flags_finish */
5565 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5567 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5568 CTX is the enclosing OMP context for the current statement. */
5570 static void
5571 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5573 tree block, control;
5574 gimple_stmt_iterator tgsi;
5575 unsigned i, len;
5576 gimple stmt, new_stmt, bind, t;
5577 gimple_seq ilist, dlist, olist, new_body, body;
5578 struct gimplify_ctx gctx;
5580 stmt = gsi_stmt (*gsi_p);
5582 push_gimplify_context (&gctx);
5584 dlist = NULL;
5585 ilist = NULL;
5586 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5587 &ilist, &dlist, ctx);
5589 tgsi = gsi_start (gimple_omp_body (stmt));
5590 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5591 continue;
5593 tgsi = gsi_start (gimple_omp_body (stmt));
5594 body = NULL;
5595 for (i = 0; i < len; i++, gsi_next (&tgsi))
5597 omp_context *sctx;
5598 gimple sec_start;
5600 sec_start = gsi_stmt (tgsi);
5601 sctx = maybe_lookup_ctx (sec_start);
5602 gcc_assert (sctx);
5604 gimple_seq_add_stmt (&body, sec_start);
5606 lower_omp (gimple_omp_body (sec_start), sctx);
5607 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5608 gimple_omp_set_body (sec_start, NULL);
5610 if (i == len - 1)
5612 gimple_seq l = NULL;
5613 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5614 &l, ctx);
5615 gimple_seq_add_seq (&body, l);
5616 gimple_omp_section_set_last (sec_start);
5619 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5622 block = make_node (BLOCK);
5623 bind = gimple_build_bind (NULL, body, block);
5625 olist = NULL;
5626 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5628 block = make_node (BLOCK);
5629 new_stmt = gimple_build_bind (NULL, NULL, block);
5631 pop_gimplify_context (new_stmt);
5632 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5633 BLOCK_VARS (block) = gimple_bind_vars (bind);
5634 if (BLOCK_VARS (block))
5635 TREE_USED (block) = 1;
5637 new_body = NULL;
5638 gimple_seq_add_seq (&new_body, ilist);
5639 gimple_seq_add_stmt (&new_body, stmt);
5640 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5641 gimple_seq_add_stmt (&new_body, bind);
5643 control = create_tmp_var (unsigned_type_node, ".section");
5644 t = gimple_build_omp_continue (control, control);
5645 gimple_omp_sections_set_control (stmt, control);
5646 gimple_seq_add_stmt (&new_body, t);
5648 gimple_seq_add_seq (&new_body, olist);
5649 gimple_seq_add_seq (&new_body, dlist);
5651 new_body = maybe_catch_exception (new_body);
5653 t = gimple_build_omp_return
5654 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5655 OMP_CLAUSE_NOWAIT));
5656 gimple_seq_add_stmt (&new_body, t);
5658 gimple_bind_set_body (new_stmt, new_body);
5659 gimple_omp_set_body (stmt, NULL);
5661 gsi_replace (gsi_p, new_stmt, true);
5665 /* A subroutine of lower_omp_single. Expand the simple form of
5666 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5668 if (GOMP_single_start ())
5669 BODY;
5670 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5672 FIXME. It may be better to delay expanding the logic of this until
5673 pass_expand_omp. The expanded logic may make the job more difficult
5674 for a synchronization analysis pass. */
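/* Illustrative sketch (not part of this file): at the source level the
   simple form corresponds to the following, where GOMP_single_start returns
   true in exactly one thread of the team; the prototype is an assumption
   inferred from the builtin used below.  */

extern _Bool GOMP_single_start (void);

static void
single_simple_sketch (void)
{
  if (GOMP_single_start ())
    {
      /* BODY: executed by exactly one thread.  */
    }
  /* A GOMP_barrier () call would follow here unless nowait was given.  */
}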
5676 static void
5677 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5679 location_t loc = gimple_location (single_stmt);
5680 tree tlabel = create_artificial_label (loc);
5681 tree flabel = create_artificial_label (loc);
5682 gimple call, cond;
5683 tree lhs, decl;
5685 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5686 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5687 call = gimple_build_call (decl, 0);
5688 gimple_call_set_lhs (call, lhs);
5689 gimple_seq_add_stmt (pre_p, call);
5691 cond = gimple_build_cond (EQ_EXPR, lhs,
5692 fold_convert_loc (loc, TREE_TYPE (lhs),
5693 boolean_true_node),
5694 tlabel, flabel);
5695 gimple_seq_add_stmt (pre_p, cond);
5696 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5697 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5698 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5702 /* A subroutine of lower_omp_single. Expand the simple form of
5703 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
5705 #pragma omp single copyprivate (a, b, c)
5707 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5710 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5712 BODY;
5713 copyout.a = a;
5714 copyout.b = b;
5715 copyout.c = c;
5716 GOMP_single_copy_end (&copyout);
5718 else
5720 a = copyout_p->a;
5721 b = copyout_p->b;
5722 c = copyout_p->c;
5724 GOMP_barrier ();
5727 FIXME. It may be better to delay expanding the logic of this until
5728 pass_expand_omp. The expanded logic may make the job more difficult
5729 for a synchronization analysis pass. */
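/* Illustrative sketch (not part of this file): for
   "#pragma omp single copyprivate (a)" the expansion above corresponds
   roughly to the following; the GOMP_single_copy_* prototypes and the
   copyout struct are assumptions made for the example.  */

extern void *GOMP_single_copy_start (void);
extern void GOMP_single_copy_end (void *);

struct copyout_sketch { int a; };

static void
single_copy_sketch (int *a)
{
  struct copyout_sketch out, *p;

  p = (struct copyout_sketch *) GOMP_single_copy_start ();
  if (p == 0)
    {
      /* BODY runs in the chosen thread, which then publishes its data.  */
      out.a = *a;
      GOMP_single_copy_end (&out);
    }
  else
    *a = p->a;			/* the other threads copy the data in */
  /* An implicit GOMP_barrier () follows; it must not be removed.  */
}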
5731 static void
5732 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5734 tree ptr_type, t, l0, l1, l2;
5735 gimple_seq copyin_seq;
5736 location_t loc = gimple_location (single_stmt);
5738 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5740 ptr_type = build_pointer_type (ctx->record_type);
5741 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5743 l0 = create_artificial_label (loc);
5744 l1 = create_artificial_label (loc);
5745 l2 = create_artificial_label (loc);
5747 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5748 t = fold_convert_loc (loc, ptr_type, t);
5749 gimplify_assign (ctx->receiver_decl, t, pre_p);
5751 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5752 build_int_cst (ptr_type, 0));
5753 t = build3 (COND_EXPR, void_type_node, t,
5754 build_and_jump (&l0), build_and_jump (&l1));
5755 gimplify_and_add (t, pre_p);
5757 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5759 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5761 copyin_seq = NULL;
5762 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5763 &copyin_seq, ctx);
5765 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
5766 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END],
5767 1, t);
5768 gimplify_and_add (t, pre_p);
5770 t = build_and_jump (&l2);
5771 gimplify_and_add (t, pre_p);
5773 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5775 gimple_seq_add_seq (pre_p, copyin_seq);
5777 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5781 /* Expand code for an OpenMP single directive. */
5783 static void
5784 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5786 tree block;
5787 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5788 gimple_seq bind_body, dlist;
5789 struct gimplify_ctx gctx;
5791 push_gimplify_context (&gctx);
5793 bind_body = NULL;
5794 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5795 &bind_body, &dlist, ctx);
5796 lower_omp (gimple_omp_body (single_stmt), ctx);
5798 gimple_seq_add_stmt (&bind_body, single_stmt);
5800 if (ctx->record_type)
5801 lower_omp_single_copy (single_stmt, &bind_body, ctx);
5802 else
5803 lower_omp_single_simple (single_stmt, &bind_body);
5805 gimple_omp_set_body (single_stmt, NULL);
5807 gimple_seq_add_seq (&bind_body, dlist);
5809 bind_body = maybe_catch_exception (bind_body);
5811 t = gimple_build_omp_return
5812 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5813 OMP_CLAUSE_NOWAIT));
5814 gimple_seq_add_stmt (&bind_body, t);
5816 block = make_node (BLOCK);
5817 bind = gimple_build_bind (NULL, bind_body, block);
5819 pop_gimplify_context (bind);
5821 gimple_bind_append_vars (bind, ctx->block_vars);
5822 BLOCK_VARS (block) = ctx->block_vars;
5823 gsi_replace (gsi_p, bind, true);
5824 if (BLOCK_VARS (block))
5825 TREE_USED (block) = 1;
5829 /* Expand code for an OpenMP master directive. */
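/* Illustrative sketch only (the label name is schematic):

       #pragma omp master
         x = compute ();

   becomes approximately

       if (omp_get_thread_num () != 0) goto lab;
       x = compute ();
     lab:

   followed by a GIMPLE_OMP_RETURN marked nowait, since 'master' implies
   no barrier.  */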
5831 static void
5832 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5834 tree block, lab = NULL, x;
5835 gimple stmt = gsi_stmt (*gsi_p), bind;
5836 location_t loc = gimple_location (stmt);
5837 gimple_seq tseq;
5838 struct gimplify_ctx gctx;
5840 push_gimplify_context (&gctx);
5842 block = make_node (BLOCK);
5843 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5844 block);
5846 x = build_call_expr_loc (loc, built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5847 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5848 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
5849 tseq = NULL;
5850 gimplify_and_add (x, &tseq);
5851 gimple_bind_add_seq (bind, tseq);
5853 lower_omp (gimple_omp_body (stmt), ctx);
5854 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5855 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5856 gimple_omp_set_body (stmt, NULL);
5858 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5860 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5862 pop_gimplify_context (bind);
5864 gimple_bind_append_vars (bind, ctx->block_vars);
5865 BLOCK_VARS (block) = ctx->block_vars;
5866 gsi_replace (gsi_p, bind, true);
5870 /* Expand code for an OpenMP ordered directive. */
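/* Illustrative sketch only:

       #pragma omp ordered
         body;

   becomes approximately

       GOMP_ordered_start ();
       body;
       GOMP_ordered_end ();

   followed by a GIMPLE_OMP_RETURN marked nowait.  */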
5872 static void
5873 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5875 tree block;
5876 gimple stmt = gsi_stmt (*gsi_p), bind, x;
5877 struct gimplify_ctx gctx;
5879 push_gimplify_context (&gctx);
5881 block = make_node (BLOCK);
5882 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5883 block);
5885 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5886 gimple_bind_add_stmt (bind, x);
5888 lower_omp (gimple_omp_body (stmt), ctx);
5889 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5890 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5891 gimple_omp_set_body (stmt, NULL);
5893 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5894 gimple_bind_add_stmt (bind, x);
5896 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5898 pop_gimplify_context (bind);
5900 gimple_bind_append_vars (bind, ctx->block_vars);
5901 BLOCK_VARS (block) = gimple_bind_vars (bind);
5902 gsi_replace (gsi_p, bind, true);
5906 /* Lower a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
5907 substitution of a couple of function calls. But in the NAMED case, it
5908 requires that the languages coordinate on a symbol name. It is therefore
5909 best put here in common code. */
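/* Illustrative sketch only (the critical name "lock1" is schematic):

       #pragma omp critical (lock1)
         body;

   becomes approximately

       GOMP_critical_name_start (&.gomp_critical_user_lock1);
       body;
       GOMP_critical_name_end (&.gomp_critical_user_lock1);

   where .gomp_critical_user_lock1 is the common, TREE_PUBLIC variable
   created below, so every translation unit using the same name shares one
   mutex.  An unnamed critical simply calls GOMP_critical_start and
   GOMP_critical_end.  */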
5911 static GTY((param1_is (tree), param2_is (tree)))
5912 splay_tree critical_name_mutexes;
5914 static void
5915 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5917 tree block;
5918 tree name, lock, unlock;
5919 gimple stmt = gsi_stmt (*gsi_p), bind;
5920 location_t loc = gimple_location (stmt);
5921 gimple_seq tbody;
5922 struct gimplify_ctx gctx;
5924 name = gimple_omp_critical_name (stmt);
5925 if (name)
5927 tree decl;
5928 splay_tree_node n;
5930 if (!critical_name_mutexes)
5931 critical_name_mutexes
5932 = splay_tree_new_ggc (splay_tree_compare_pointers,
5933 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
5934 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
5936 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5937 if (n == NULL)
5939 char *new_str;
5941 decl = create_tmp_var_raw (ptr_type_node, NULL);
5943 new_str = ACONCAT ((".gomp_critical_user_",
5944 IDENTIFIER_POINTER (name), NULL));
5945 DECL_NAME (decl) = get_identifier (new_str);
5946 TREE_PUBLIC (decl) = 1;
5947 TREE_STATIC (decl) = 1;
5948 DECL_COMMON (decl) = 1;
5949 DECL_ARTIFICIAL (decl) = 1;
5950 DECL_IGNORED_P (decl) = 1;
5951 varpool_finalize_decl (decl);
5953 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5954 (splay_tree_value) decl);
5956 else
5957 decl = (tree) n->value;
5959 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5960 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
5962 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5963 unlock = build_call_expr_loc (loc, unlock, 1,
5964 build_fold_addr_expr_loc (loc, decl));
5966 else
5968 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5969 lock = build_call_expr_loc (loc, lock, 0);
5971 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
5972 unlock = build_call_expr_loc (loc, unlock, 0);
5975 push_gimplify_context (&gctx);
5977 block = make_node (BLOCK);
5978 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
5980 tbody = gimple_bind_body (bind);
5981 gimplify_and_add (lock, &tbody);
5982 gimple_bind_set_body (bind, tbody);
5984 lower_omp (gimple_omp_body (stmt), ctx);
5985 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5986 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5987 gimple_omp_set_body (stmt, NULL);
5989 tbody = gimple_bind_body (bind);
5990 gimplify_and_add (unlock, &tbody);
5991 gimple_bind_set_body (bind, tbody);
5993 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5995 pop_gimplify_context (bind);
5996 gimple_bind_append_vars (bind, ctx->block_vars);
5997 BLOCK_VARS (block) = gimple_bind_vars (bind);
5998 gsi_replace (gsi_p, bind, true);
6002 /* A subroutine of lower_omp_for. Generate code to emit the predicate
6003 for a lastprivate clause. Given a loop control predicate of (V
6004 cond N2), we gate the clause on (!(V cond N2)). The lowered form
6005 is appended to *DLIST, and the iterator initialization is appended to
6006 *BODY_P. */
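/* Illustrative sketch only (X, X_priv and the constants are schematic):

       #pragma omp for lastprivate (x)
       for (v = 1; v < 100; v++)
         ...

   roughly yields

       v = 0;             appended to *BODY_P, so threads that execute no
                          iteration cannot satisfy the gate below
       ... the loop ...
       if (v == 100)      EQ_EXPR is chosen because the step is +1
         x = x_priv;      appended to *DLIST by lower_lastprivate_clauses

   For a non-unit step the gate falls back to (v >= 100) or (v <= 100),
   depending on the loop direction.  */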
6008 static void
6009 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
6010 gimple_seq *dlist, struct omp_context *ctx)
6012 tree clauses, cond, vinit;
6013 enum tree_code cond_code;
6014 gimple_seq stmts;
6016 cond_code = fd->loop.cond_code;
6017 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6019 /* When possible, use a strict equality expression. This can let
6020 VRP-type optimizations deduce the value and remove a copy. */
6021 if (host_integerp (fd->loop.step, 0))
6023 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
6024 if (step == 1 || step == -1)
6025 cond_code = EQ_EXPR;
6028 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
6030 clauses = gimple_omp_for_clauses (fd->for_stmt);
6031 stmts = NULL;
6032 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
6033 if (!gimple_seq_empty_p (stmts))
6035 gimple_seq_add_seq (&stmts, *dlist);
6036 *dlist = stmts;
6038 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
6039 vinit = fd->loop.n1;
6040 if (cond_code == EQ_EXPR
6041 && host_integerp (fd->loop.n2, 0)
6042 && ! integer_zerop (fd->loop.n2))
6043 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
6045 /* Initialize the iterator variable, so that threads that don't execute
6046 any iterations don't execute the lastprivate clauses by accident. */
6047 gimplify_assign (fd->loop.v, vinit, body_p);
6052 /* Lower code for an OpenMP loop directive. */
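/* Illustrative outline only of the sequence assembled below, in order:

       <private/firstprivate/reduction setup>     lower_rec_input_clauses
       <pre-body statements>
       <temporaries for VAL1, VAL2 and VAL3>
       <lastprivate iterator pre-initialization>
       GIMPLE_OMP_FOR
       <loop body>
       GIMPLE_OMP_CONTINUE (V, V)
       <reduction combining code>                 lower_reduction_clauses
       <lastprivate copies and destructors>       dlist
       GIMPLE_OMP_RETURN                          nowait iff the clause is
                                                  present  */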
6054 static void
6055 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6057 tree *rhs_p, block;
6058 struct omp_for_data fd;
6059 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6060 gimple_seq omp_for_body, body, dlist;
6061 size_t i;
6062 struct gimplify_ctx gctx;
6064 push_gimplify_context (&gctx);
6066 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
6067 lower_omp (gimple_omp_body (stmt), ctx);
6069 block = make_node (BLOCK);
6070 new_stmt = gimple_build_bind (NULL, NULL, block);
6072 /* Move the declarations of temporaries in the loop body before we make
6073 it go away. */
6074 omp_for_body = gimple_omp_body (stmt);
6075 if (!gimple_seq_empty_p (omp_for_body)
6076 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6078 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6079 gimple_bind_append_vars (new_stmt, vars);
6082 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
6083 dlist = NULL;
6084 body = NULL;
6085 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6086 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6088 /* Lower the header expressions. At this point, we can assume that
6089 the header is of the form:
6091 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6093 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6094 using the .omp_data_s mapping, if needed. */
6095 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6097 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6098 if (!is_gimple_min_invariant (*rhs_p))
6099 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6101 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6102 if (!is_gimple_min_invariant (*rhs_p))
6103 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6105 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6106 if (!is_gimple_min_invariant (*rhs_p))
6107 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6110 /* Once lowered, extract the bounds and clauses. */
6111 extract_omp_for_data (stmt, &fd, NULL);
6113 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6115 gimple_seq_add_stmt (&body, stmt);
6116 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6118 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6119 fd.loop.v));
6121 /* After the loop, add exit clauses. */
6122 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6123 gimple_seq_add_seq (&body, dlist);
6125 body = maybe_catch_exception (body);
6127 /* Region exit marker goes at the end of the loop body. */
6128 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6130 pop_gimplify_context (new_stmt);
6132 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6133 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6134 if (BLOCK_VARS (block))
6135 TREE_USED (block) = 1;
6137 gimple_bind_set_body (new_stmt, body);
6138 gimple_omp_set_body (stmt, NULL);
6139 gimple_omp_for_set_pre_body (stmt, NULL);
6140 gsi_replace (gsi_p, new_stmt, true);
6143 /* Callback for walk_stmts. Check whether the parallel body consists of
6144 a single GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS and nothing else. */
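/* Illustrative examples only:

       #pragma omp parallel              the body is a single workshare, so
       #pragma omp for                   WS_NUM ends up 1 and the parallel
       for (i = 0; i < n; i++) ...       is marked combined

       #pragma omp parallel
         {
           setup ();                     a statement beside the workshare
       #pragma omp for                   drives WS_NUM to -1, so no
           for (i = 0; i < n; i++) ...   combined marking happens
         }                                                               */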
6146 static tree
6147 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6148 bool *handled_ops_p,
6149 struct walk_stmt_info *wi)
6151 int *info = (int *) wi->info;
6152 gimple stmt = gsi_stmt (*gsi_p);
6154 *handled_ops_p = true;
6155 switch (gimple_code (stmt))
6157 WALK_SUBSTMTS;
6159 case GIMPLE_OMP_FOR:
6160 case GIMPLE_OMP_SECTIONS:
6161 *info = *info == 0 ? 1 : -1;
6162 break;
6163 default:
6164 *info = -1;
6165 break;
6167 return NULL;
6170 struct omp_taskcopy_context
6172 /* This field must be at the beginning, as we do "inheritance": Some
6173 callback functions for tree-inline.c (e.g., omp_copy_decl)
6174 receive a copy_body_data pointer that is up-casted to an
6175 omp_context pointer. */
6176 copy_body_data cb;
6177 omp_context *ctx;
6180 static tree
6181 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6183 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6185 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6186 return create_tmp_var (TREE_TYPE (var), NULL);
6188 return var;
6191 static tree
6192 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6194 tree name, new_fields = NULL, type, f;
6196 type = lang_hooks.types.make_type (RECORD_TYPE);
6197 name = DECL_NAME (TYPE_NAME (orig_type));
6198 name = build_decl (gimple_location (tcctx->ctx->stmt),
6199 TYPE_DECL, name, type);
6200 TYPE_NAME (type) = name;
6202 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6204 tree new_f = copy_node (f);
6205 DECL_CONTEXT (new_f) = type;
6206 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6207 TREE_CHAIN (new_f) = new_fields;
6208 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6209 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6210 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6211 &tcctx->cb, NULL);
6212 new_fields = new_f;
6213 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6215 TYPE_FIELDS (type) = nreverse (new_fields);
6216 layout_type (type);
6217 return type;
6220 /* Create the task copy function (copyfn), called by GOMP_task to initialize the task's firstprivate data. */
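/* Illustrative sketch only (type and field names are schematic).  For

       #pragma omp task firstprivate (a) shared (b)

   GOMP_task is handed this copyfn and calls it with ARG pointing at the
   task's own data block and SARG pointing at the block filled in by the
   encountering thread; the generated body amounts to

       arg->b = sarg->b;                          copy the shared pointer
       arg->a = copy of sarg->a;                  via omp_clause_copy_ctor

   and VLA firstprivates additionally get their pointer field redirected
   to the copy placed inside *ARG.  */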
6222 static void
6223 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6225 struct function *child_cfun;
6226 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6227 tree record_type, srecord_type, bind, list;
6228 bool record_needs_remap = false, srecord_needs_remap = false;
6229 splay_tree_node n;
6230 struct omp_taskcopy_context tcctx;
6231 struct gimplify_ctx gctx;
6232 location_t loc = gimple_location (task_stmt);
6234 child_fn = gimple_omp_task_copy_fn (task_stmt);
6235 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6236 gcc_assert (child_cfun->cfg == NULL);
6237 child_cfun->dont_save_pending_sizes_p = 1;
6238 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6240 /* Reset DECL_CONTEXT on function arguments. */
6241 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
6242 DECL_CONTEXT (t) = child_fn;
6244 /* Populate the function. */
6245 push_gimplify_context (&gctx);
6246 current_function_decl = child_fn;
6248 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6249 TREE_SIDE_EFFECTS (bind) = 1;
6250 list = NULL;
6251 DECL_SAVED_TREE (child_fn) = bind;
6252 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6254 /* Remap src and dst argument types if needed. */
6255 record_type = ctx->record_type;
6256 srecord_type = ctx->srecord_type;
6257 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
6258 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6260 record_needs_remap = true;
6261 break;
6263 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
6264 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6266 srecord_needs_remap = true;
6267 break;
6270 if (record_needs_remap || srecord_needs_remap)
6272 memset (&tcctx, '\0', sizeof (tcctx));
6273 tcctx.cb.src_fn = ctx->cb.src_fn;
6274 tcctx.cb.dst_fn = child_fn;
6275 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6276 tcctx.cb.dst_node = tcctx.cb.src_node;
6277 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6278 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6279 tcctx.cb.eh_lp_nr = 0;
6280 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6281 tcctx.cb.decl_map = pointer_map_create ();
6282 tcctx.ctx = ctx;
6284 if (record_needs_remap)
6285 record_type = task_copyfn_remap_type (&tcctx, record_type);
6286 if (srecord_needs_remap)
6287 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6289 else
6290 tcctx.cb.decl_map = NULL;
6292 push_cfun (child_cfun);
6294 arg = DECL_ARGUMENTS (child_fn);
6295 TREE_TYPE (arg) = build_pointer_type (record_type);
6296 sarg = DECL_CHAIN (arg);
6297 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6299 /* First pass: initialize temporaries used in record_type and srecord_type
6300 sizes and field offsets. */
6301 if (tcctx.cb.decl_map)
6302 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6303 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6305 tree *p;
6307 decl = OMP_CLAUSE_DECL (c);
6308 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6309 if (p == NULL)
6310 continue;
6311 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6312 sf = (tree) n->value;
6313 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6314 src = build_simple_mem_ref_loc (loc, sarg);
6315 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6316 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6317 append_to_statement_list (t, &list);
6320 /* Second pass: copy shared var pointers and copy-construct non-VLA
6321 firstprivate vars. */
6322 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6323 switch (OMP_CLAUSE_CODE (c))
6325 case OMP_CLAUSE_SHARED:
6326 decl = OMP_CLAUSE_DECL (c);
6327 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6328 if (n == NULL)
6329 break;
6330 f = (tree) n->value;
6331 if (tcctx.cb.decl_map)
6332 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6333 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6334 sf = (tree) n->value;
6335 if (tcctx.cb.decl_map)
6336 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6337 src = build_simple_mem_ref_loc (loc, sarg);
6338 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6339 dst = build_simple_mem_ref_loc (loc, arg);
6340 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6341 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6342 append_to_statement_list (t, &list);
6343 break;
6344 case OMP_CLAUSE_FIRSTPRIVATE:
6345 decl = OMP_CLAUSE_DECL (c);
6346 if (is_variable_sized (decl))
6347 break;
6348 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6349 if (n == NULL)
6350 break;
6351 f = (tree) n->value;
6352 if (tcctx.cb.decl_map)
6353 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6354 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6355 if (n != NULL)
6357 sf = (tree) n->value;
6358 if (tcctx.cb.decl_map)
6359 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6360 src = build_simple_mem_ref_loc (loc, sarg);
6361 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6362 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6363 src = build_simple_mem_ref_loc (loc, src);
6365 else
6366 src = decl;
6367 dst = build_simple_mem_ref_loc (loc, arg);
6368 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6369 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6370 append_to_statement_list (t, &list);
6371 break;
6372 case OMP_CLAUSE_PRIVATE:
6373 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6374 break;
6375 decl = OMP_CLAUSE_DECL (c);
6376 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6377 f = (tree) n->value;
6378 if (tcctx.cb.decl_map)
6379 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6380 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6381 if (n != NULL)
6383 sf = (tree) n->value;
6384 if (tcctx.cb.decl_map)
6385 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6386 src = build_simple_mem_ref_loc (loc, sarg);
6387 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6388 if (use_pointer_for_field (decl, NULL))
6389 src = build_simple_mem_ref_loc (loc, src);
6391 else
6392 src = decl;
6393 dst = build_simple_mem_ref_loc (loc, arg);
6394 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6395 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6396 append_to_statement_list (t, &list);
6397 break;
6398 default:
6399 break;
6402 /* Last pass: handle VLA firstprivates. */
6403 if (tcctx.cb.decl_map)
6404 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6405 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6407 tree ind, ptr, df;
6409 decl = OMP_CLAUSE_DECL (c);
6410 if (!is_variable_sized (decl))
6411 continue;
6412 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6413 if (n == NULL)
6414 continue;
6415 f = (tree) n->value;
6416 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6417 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6418 ind = DECL_VALUE_EXPR (decl);
6419 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6420 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6421 n = splay_tree_lookup (ctx->sfield_map,
6422 (splay_tree_key) TREE_OPERAND (ind, 0));
6423 sf = (tree) n->value;
6424 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6425 src = build_simple_mem_ref_loc (loc, sarg);
6426 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6427 src = build_simple_mem_ref_loc (loc, src);
6428 dst = build_simple_mem_ref_loc (loc, arg);
6429 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6430 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6431 append_to_statement_list (t, &list);
6432 n = splay_tree_lookup (ctx->field_map,
6433 (splay_tree_key) TREE_OPERAND (ind, 0));
6434 df = (tree) n->value;
6435 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6436 ptr = build_simple_mem_ref_loc (loc, arg);
6437 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6438 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6439 build_fold_addr_expr_loc (loc, dst));
6440 append_to_statement_list (t, &list);
6443 t = build1 (RETURN_EXPR, void_type_node, NULL);
6444 append_to_statement_list (t, &list);
6446 if (tcctx.cb.decl_map)
6447 pointer_map_destroy (tcctx.cb.decl_map);
6448 pop_gimplify_context (NULL);
6449 BIND_EXPR_BODY (bind) = list;
6450 pop_cfun ();
6451 current_function_decl = ctx->cb.src_fn;
6454 /* Lower the OpenMP parallel or task directive in the current statement
6455 in GSI_P. CTX holds context information for the directive. */
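/* Illustrative outline only (data block and field names are schematic; the
   field holds &x or x depending on use_pointer_for_field).  For

       #pragma omp parallel shared (x)
         body;

   the statement is rewritten roughly as

       .omp_data_o.x = &x;                   sender setup (ilist)
       GIMPLE_OMP_PARALLEL [child function]
         .omp_data_i = &.omp_data_o;         receiver fixup in the new body
         <private/firstprivate setup>
         body;
         <reductions and cleanups>
         GIMPLE_OMP_RETURN
       <copy-back and destructor code>       olist  */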
6457 static void
6458 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6460 tree clauses;
6461 tree child_fn, t;
6462 gimple stmt = gsi_stmt (*gsi_p);
6463 gimple par_bind, bind;
6464 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6465 struct gimplify_ctx gctx;
6466 location_t loc = gimple_location (stmt);
6468 clauses = gimple_omp_taskreg_clauses (stmt);
6469 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6470 par_body = gimple_bind_body (par_bind);
6471 child_fn = ctx->cb.dst_fn;
6472 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6473 && !gimple_omp_parallel_combined_p (stmt))
6475 struct walk_stmt_info wi;
6476 int ws_num = 0;
6478 memset (&wi, 0, sizeof (wi));
6479 wi.info = &ws_num;
6480 wi.val_only = true;
6481 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6482 if (ws_num == 1)
6483 gimple_omp_parallel_set_combined_p (stmt, true);
6485 if (ctx->srecord_type)
6486 create_task_copyfn (stmt, ctx);
6488 push_gimplify_context (&gctx);
6490 par_olist = NULL;
6491 par_ilist = NULL;
6492 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6493 lower_omp (par_body, ctx);
6494 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6495 lower_reduction_clauses (clauses, &par_olist, ctx);
6497 /* Declare all the variables created by mapping and the variables
6498 declared in the scope of the parallel body. */
6499 record_vars_into (ctx->block_vars, child_fn);
6500 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6502 if (ctx->record_type)
6504 ctx->sender_decl
6505 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6506 : ctx->record_type, ".omp_data_o");
6507 DECL_NAMELESS (ctx->sender_decl) = 1;
6508 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6509 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6512 olist = NULL;
6513 ilist = NULL;
6514 lower_send_clauses (clauses, &ilist, &olist, ctx);
6515 lower_send_shared_vars (&ilist, &olist, ctx);
6517 /* Once all the expansions are done, sequence all the different
6518 fragments inside gimple_omp_body. */
6520 new_body = NULL;
6522 if (ctx->record_type)
6524 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6525 /* fixup_child_record_type might have changed receiver_decl's type. */
6526 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
6527 gimple_seq_add_stmt (&new_body,
6528 gimple_build_assign (ctx->receiver_decl, t));
6531 gimple_seq_add_seq (&new_body, par_ilist);
6532 gimple_seq_add_seq (&new_body, par_body);
6533 gimple_seq_add_seq (&new_body, par_olist);
6534 new_body = maybe_catch_exception (new_body);
6535 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6536 gimple_omp_set_body (stmt, new_body);
6538 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6539 gimple_bind_add_stmt (bind, stmt);
6540 if (ilist || olist)
6542 gimple_seq_add_stmt (&ilist, bind);
6543 gimple_seq_add_seq (&ilist, olist);
6544 bind = gimple_build_bind (NULL, ilist, NULL);
6547 gsi_replace (gsi_p, bind, true);
6549 pop_gimplify_context (NULL);
6552 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6553 regimplified. If DATA is non-NULL, lower_omp_1 is outside
6554 of an OpenMP context, but with task_shared_vars set. */
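/* For illustration only: once a variable A has been remapped so that
   DECL_VALUE_EXPR (A) refers into the receiver block (something along the
   lines of .omp_data_i->a), or once A is recorded in TASK_SHARED_VARS,
   any statement still mentioning A directly must be regimplified so the
   value expression is expanded; likewise ADDR_EXPRs may need their
   invariant-ness recomputed after privatization.  */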
6556 static tree
6557 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6558 void *data)
6560 tree t = *tp;
6562 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6563 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6564 return t;
6566 if (task_shared_vars
6567 && DECL_P (t)
6568 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6569 return t;
6571 /* If a global variable has been privatized, TREE_CONSTANT on
6572 ADDR_EXPR might be wrong. */
6573 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6574 recompute_tree_invariant_for_addr_expr (t);
6576 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6577 return NULL_TREE;
6580 static void
6581 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6583 gimple stmt = gsi_stmt (*gsi_p);
6584 struct walk_stmt_info wi;
6586 if (gimple_has_location (stmt))
6587 input_location = gimple_location (stmt);
6589 if (task_shared_vars)
6590 memset (&wi, '\0', sizeof (wi));
6592 /* If we have issued syntax errors, avoid doing any heavy lifting.
6593 Just replace the OpenMP directives with a NOP to avoid
6594 confusing RTL expansion. */
6595 if (seen_error () && is_gimple_omp (stmt))
6597 gsi_replace (gsi_p, gimple_build_nop (), true);
6598 return;
6601 switch (gimple_code (stmt))
6603 case GIMPLE_COND:
6604 if ((ctx || task_shared_vars)
6605 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6606 ctx ? NULL : &wi, NULL)
6607 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6608 ctx ? NULL : &wi, NULL)))
6609 gimple_regimplify_operands (stmt, gsi_p);
6610 break;
6611 case GIMPLE_CATCH:
6612 lower_omp (gimple_catch_handler (stmt), ctx);
6613 break;
6614 case GIMPLE_EH_FILTER:
6615 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6616 break;
6617 case GIMPLE_TRY:
6618 lower_omp (gimple_try_eval (stmt), ctx);
6619 lower_omp (gimple_try_cleanup (stmt), ctx);
6620 break;
6621 case GIMPLE_BIND:
6622 lower_omp (gimple_bind_body (stmt), ctx);
6623 break;
6624 case GIMPLE_OMP_PARALLEL:
6625 case GIMPLE_OMP_TASK:
6626 ctx = maybe_lookup_ctx (stmt);
6627 lower_omp_taskreg (gsi_p, ctx);
6628 break;
6629 case GIMPLE_OMP_FOR:
6630 ctx = maybe_lookup_ctx (stmt);
6631 gcc_assert (ctx);
6632 lower_omp_for (gsi_p, ctx);
6633 break;
6634 case GIMPLE_OMP_SECTIONS:
6635 ctx = maybe_lookup_ctx (stmt);
6636 gcc_assert (ctx);
6637 lower_omp_sections (gsi_p, ctx);
6638 break;
6639 case GIMPLE_OMP_SINGLE:
6640 ctx = maybe_lookup_ctx (stmt);
6641 gcc_assert (ctx);
6642 lower_omp_single (gsi_p, ctx);
6643 break;
6644 case GIMPLE_OMP_MASTER:
6645 ctx = maybe_lookup_ctx (stmt);
6646 gcc_assert (ctx);
6647 lower_omp_master (gsi_p, ctx);
6648 break;
6649 case GIMPLE_OMP_ORDERED:
6650 ctx = maybe_lookup_ctx (stmt);
6651 gcc_assert (ctx);
6652 lower_omp_ordered (gsi_p, ctx);
6653 break;
6654 case GIMPLE_OMP_CRITICAL:
6655 ctx = maybe_lookup_ctx (stmt);
6656 gcc_assert (ctx);
6657 lower_omp_critical (gsi_p, ctx);
6658 break;
6659 case GIMPLE_OMP_ATOMIC_LOAD:
6660 if ((ctx || task_shared_vars)
6661 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6662 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6663 gimple_regimplify_operands (stmt, gsi_p);
6664 break;
6665 default:
6666 if ((ctx || task_shared_vars)
6667 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6668 ctx ? NULL : &wi))
6669 gimple_regimplify_operands (stmt, gsi_p);
6670 break;
6674 static void
6675 lower_omp (gimple_seq body, omp_context *ctx)
6677 location_t saved_location = input_location;
6678 gimple_stmt_iterator gsi = gsi_start (body);
6679 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6680 lower_omp_1 (&gsi, ctx);
6681 input_location = saved_location;
6684 /* Main entry point. */
6686 static unsigned int
6687 execute_lower_omp (void)
6689 gimple_seq body;
6691 /* This pass always runs, to provide PROP_gimple_lomp.
6692 But there is nothing to do unless -fopenmp is given. */
6693 if (flag_openmp == 0)
6694 return 0;
6696 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6697 delete_omp_context);
6699 body = gimple_body (current_function_decl);
6700 scan_omp (body, NULL);
6701 gcc_assert (taskreg_nesting_level == 0);
6703 if (all_contexts->root)
6705 struct gimplify_ctx gctx;
6707 if (task_shared_vars)
6708 push_gimplify_context (&gctx);
6709 lower_omp (body, NULL);
6710 if (task_shared_vars)
6711 pop_gimplify_context (NULL);
6714 if (all_contexts)
6716 splay_tree_delete (all_contexts);
6717 all_contexts = NULL;
6719 BITMAP_FREE (task_shared_vars);
6720 return 0;
6723 struct gimple_opt_pass pass_lower_omp =
6726 GIMPLE_PASS,
6727 "omplower", /* name */
6728 NULL, /* gate */
6729 execute_lower_omp, /* execute */
6730 NULL, /* sub */
6731 NULL, /* next */
6732 0, /* static_pass_number */
6733 TV_NONE, /* tv_id */
6734 PROP_gimple_any, /* properties_required */
6735 PROP_gimple_lomp, /* properties_provided */
6736 0, /* properties_destroyed */
6737 0, /* todo_flags_start */
6738 TODO_dump_func /* todo_flags_finish */
6742 /* The following is a utility to diagnose OpenMP structured block violations.
6743 It is not part of the "omplower" pass, as that's invoked too late. It
6744 should be invoked by the respective front ends after gimplification. */
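/* Illustrative example only of the kind of violation these walks catch:

       goto inside;
       #pragma omp parallel
         {
         inside: ;
         }

   Pass 1 records that the label "inside" lives inside the
   GIMPLE_OMP_PARALLEL; pass 2 then sees the goto with a NULL context, so
   diagnose_sb_0 reports "invalid entry to OpenMP structured block" and
   replaces the branch with a nop.  */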
6746 static splay_tree all_labels;
6748 /* Check for mismatched contexts and generate an error if needed. Return
6749 true if an error is detected. */
6751 static bool
6752 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6753 gimple branch_ctx, gimple label_ctx)
6755 if (label_ctx == branch_ctx)
6756 return false;
6760 Previously we kept track of the label's entire context in diagnose_sb_[12]
6761 so we could traverse it and issue a correct "exit" or "enter" error
6762 message upon a structured block violation.
6764 We built the context as a list via tree_cons'ing, but there is
6765 no easy counterpart in gimple tuples. It seems like far too much work
6766 for issuing exit/enter error messages. If someone really misses the
6767 distinct error message... patches welcome.
6770 #if 0
6771 /* Try to avoid confusing the user by producing an error message
6772 with correct "exit" or "enter" verbiage. We prefer "exit"
6773 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6774 if (branch_ctx == NULL)
6775 exit_p = false;
6776 else
6778 while (label_ctx)
6780 if (TREE_VALUE (label_ctx) == branch_ctx)
6782 exit_p = false;
6783 break;
6785 label_ctx = TREE_CHAIN (label_ctx);
6789 if (exit_p)
6790 error ("invalid exit from OpenMP structured block");
6791 else
6792 error ("invalid entry to OpenMP structured block");
6793 #endif
6795 /* If it's obvious we have an invalid entry, be specific about the error. */
6796 if (branch_ctx == NULL)
6797 error ("invalid entry to OpenMP structured block");
6798 else
6799 /* Otherwise, be vague and lazy, but efficient. */
6800 error ("invalid branch to/from an OpenMP structured block");
6802 gsi_replace (gsi_p, gimple_build_nop (), false);
6803 return true;
6806 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
6807 where each label is found. */
6809 static tree
6810 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6811 struct walk_stmt_info *wi)
6813 gimple context = (gimple) wi->info;
6814 gimple inner_context;
6815 gimple stmt = gsi_stmt (*gsi_p);
6817 *handled_ops_p = true;
6819 switch (gimple_code (stmt))
6821 WALK_SUBSTMTS;
6823 case GIMPLE_OMP_PARALLEL:
6824 case GIMPLE_OMP_TASK:
6825 case GIMPLE_OMP_SECTIONS:
6826 case GIMPLE_OMP_SINGLE:
6827 case GIMPLE_OMP_SECTION:
6828 case GIMPLE_OMP_MASTER:
6829 case GIMPLE_OMP_ORDERED:
6830 case GIMPLE_OMP_CRITICAL:
6831 /* The minimal context here is just the current OMP construct. */
6832 inner_context = stmt;
6833 wi->info = inner_context;
6834 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6835 wi->info = context;
6836 break;
6838 case GIMPLE_OMP_FOR:
6839 inner_context = stmt;
6840 wi->info = inner_context;
6841 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6842 walk them. */
6843 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6844 diagnose_sb_1, NULL, wi);
6845 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6846 wi->info = context;
6847 break;
6849 case GIMPLE_LABEL:
6850 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
6851 (splay_tree_value) context);
6852 break;
6854 default:
6855 break;
6858 return NULL_TREE;
6861 /* Pass 2: Check each branch and see if its context differs from that of
6862 the destination label's context. */
6864 static tree
6865 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6866 struct walk_stmt_info *wi)
6868 gimple context = (gimple) wi->info;
6869 splay_tree_node n;
6870 gimple stmt = gsi_stmt (*gsi_p);
6872 *handled_ops_p = true;
6874 switch (gimple_code (stmt))
6876 WALK_SUBSTMTS;
6878 case GIMPLE_OMP_PARALLEL:
6879 case GIMPLE_OMP_TASK:
6880 case GIMPLE_OMP_SECTIONS:
6881 case GIMPLE_OMP_SINGLE:
6882 case GIMPLE_OMP_SECTION:
6883 case GIMPLE_OMP_MASTER:
6884 case GIMPLE_OMP_ORDERED:
6885 case GIMPLE_OMP_CRITICAL:
6886 wi->info = stmt;
6887 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6888 wi->info = context;
6889 break;
6891 case GIMPLE_OMP_FOR:
6892 wi->info = stmt;
6893 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6894 walk them. */
6895 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6896 diagnose_sb_2, NULL, wi);
6897 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6898 wi->info = context;
6899 break;
6901 case GIMPLE_COND:
6903 tree lab = gimple_cond_true_label (stmt);
6904 if (lab)
6906 n = splay_tree_lookup (all_labels,
6907 (splay_tree_key) lab);
6908 diagnose_sb_0 (gsi_p, context,
6909 n ? (gimple) n->value : NULL);
6911 lab = gimple_cond_false_label (stmt);
6912 if (lab)
6914 n = splay_tree_lookup (all_labels,
6915 (splay_tree_key) lab);
6916 diagnose_sb_0 (gsi_p, context,
6917 n ? (gimple) n->value : NULL);
6920 break;
6922 case GIMPLE_GOTO:
6924 tree lab = gimple_goto_dest (stmt);
6925 if (TREE_CODE (lab) != LABEL_DECL)
6926 break;
6928 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6929 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
6931 break;
6933 case GIMPLE_SWITCH:
6935 unsigned int i;
6936 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
6938 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
6939 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6940 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
6941 break;
6944 break;
6946 case GIMPLE_RETURN:
6947 diagnose_sb_0 (gsi_p, context, NULL);
6948 break;
6950 default:
6951 break;
6954 return NULL_TREE;
6957 static unsigned int
6958 diagnose_omp_structured_block_errors (void)
6960 struct walk_stmt_info wi;
6961 gimple_seq body = gimple_body (current_function_decl);
6963 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
6965 memset (&wi, 0, sizeof (wi));
6966 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
6968 memset (&wi, 0, sizeof (wi));
6969 wi.want_locations = true;
6970 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
6972 splay_tree_delete (all_labels);
6973 all_labels = NULL;
6975 return 0;
6978 static bool
6979 gate_diagnose_omp_blocks (void)
6981 return flag_openmp != 0;
6984 struct gimple_opt_pass pass_diagnose_omp_blocks =
6987 GIMPLE_PASS,
6988 "*diagnose_omp_blocks", /* name */
6989 gate_diagnose_omp_blocks, /* gate */
6990 diagnose_omp_structured_block_errors, /* execute */
6991 NULL, /* sub */
6992 NULL, /* next */
6993 0, /* static_pass_number */
6994 TV_NONE, /* tv_id */
6995 PROP_gimple_any, /* properties_required */
6996 0, /* properties_provided */
6997 0, /* properties_destroyed */
6998 0, /* todo_flags_start */
6999 0, /* todo_flags_finish */
7003 #include "gt-omp-low.h"