PR tree-optimization/47427
[official-gcc.git] / gcc / omp-low.c
1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
6 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
7 Free Software Foundation, Inc.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "gimple.h"
32 #include "tree-iterator.h"
33 #include "tree-inline.h"
34 #include "langhooks.h"
35 #include "diagnostic-core.h"
36 #include "tree-flow.h"
37 #include "timevar.h"
38 #include "flags.h"
39 #include "function.h"
40 #include "expr.h"
41 #include "tree-pass.h"
42 #include "ggc.h"
43 #include "except.h"
44 #include "splay-tree.h"
45 #include "optabs.h"
46 #include "cfgloop.h"
49 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
50 phases. The first phase scans the function looking for OMP statements
51 and then for variables that must be replaced to satisfy data sharing
52 clauses. The second phase expands code for the constructs, as well as
53 re-gimplifying things when variables have been replaced with complex
54 expressions.
56 Final code generation is done by pass_expand_omp. The flowgraph is
57 scanned for parallel regions which are then moved to a new
58 function, to be invoked by the thread library. */
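/* Illustrative sketch, not code from this pass: assuming an input like

       #pragma omp parallel shared (a)
         a[i] = foo (i);

   lowering and expansion conceptually produce an outlined child function
   plus libgomp calls roughly of the form

       main._omp_fn.0 (struct .omp_data_s * .omp_data_i)
       {
         (*.omp_data_i->a)[i] = foo (i);
       }

       .omp_data_o.a = &a;
       __builtin_GOMP_parallel_start (main._omp_fn.0, &.omp_data_o, 0);
       main._omp_fn.0 (&.omp_data_o);
       __builtin_GOMP_parallel_end ();

   The exact calls and the layout of .omp_data_s depend on the clauses
   involved; this is only meant as orientation for the code below.  */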
60 /* Context structure. Used to store information about each parallel
61 directive in the code. */
63 typedef struct omp_context
65 /* This field must be at the beginning, as we do "inheritance": Some
66 callback functions for tree-inline.c (e.g., omp_copy_decl)
67 receive a copy_body_data pointer that is up-casted to an
68 omp_context pointer. */
69 copy_body_data cb;
71 /* The tree of contexts corresponding to the encountered constructs. */
72 struct omp_context *outer;
73 gimple stmt;
75 /* Map variables to fields in a structure that allows communication
76 between sending and receiving threads. */
77 splay_tree field_map;
78 tree record_type;
79 tree sender_decl;
80 tree receiver_decl;
82 /* These are used just by task contexts, if the task firstprivate fn is
83 needed. srecord_type is used to communicate from the thread that
84 encountered the task construct to the task firstprivate fn;
85 record_type is allocated by GOMP_task, initialized by the task
86 firstprivate fn and passed to the task body fn. */
87 splay_tree sfield_map;
88 tree srecord_type;
90 /* A chain of variables to add to the top-level block surrounding the
91 construct. In the case of a parallel, this is in the child function. */
92 tree block_vars;
94 /* What to do with variables with implicitly determined sharing
95 attributes. */
96 enum omp_clause_default_kind default_kind;
98 /* Nesting depth of this context. Used to beautify error messages regarding
99 invalid gotos. The outermost ctx is depth 1, with depth 0 being
100 reserved for the main body of the function. */
101 int depth;
103 /* True if this parallel directive is nested within another. */
104 bool is_nested;
105 } omp_context;
108 struct omp_for_data_loop
110 tree v, n1, n2, step;
111 enum tree_code cond_code;
114 /* A structure describing the main elements of a parallel loop. */
116 struct omp_for_data
118 struct omp_for_data_loop loop;
119 tree chunk_size;
120 gimple for_stmt;
121 tree pre, iter_type;
122 int collapse;
123 bool have_nowait, have_ordered;
124 enum omp_clause_schedule_kind sched_kind;
125 struct omp_for_data_loop *loops;
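/* Illustrative example, assumed here only for orientation: for a loop such as

       #pragma omp for schedule (dynamic, 4)
       for (i = 0; i < n; i += 2)

   extract_omp_for_data below fills this structure in roughly as

       loop.v = i, loop.n1 = 0, loop.n2 = n, loop.step = 2,
       loop.cond_code = LT_EXPR, sched_kind = OMP_CLAUSE_SCHEDULE_DYNAMIC,
       chunk_size = 4, collapse = 1, have_nowait = have_ordered = false.  */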
129 static splay_tree all_contexts;
130 static int taskreg_nesting_level;
131 struct omp_region *root_omp_region;
132 static bitmap task_shared_vars;
134 static void scan_omp (gimple_seq, omp_context *);
135 static tree scan_omp_1_op (tree *, int *, void *);
137 #define WALK_SUBSTMTS \
138 case GIMPLE_BIND: \
139 case GIMPLE_TRY: \
140 case GIMPLE_CATCH: \
141 case GIMPLE_EH_FILTER: \
142 /* The sub-statements for these should be walked. */ \
143 *handled_ops_p = false; \
144 break;
146 /* Convenience function for calling scan_omp_1_op on tree operands. */
148 static inline tree
149 scan_omp_op (tree *tp, omp_context *ctx)
151 struct walk_stmt_info wi;
153 memset (&wi, 0, sizeof (wi));
154 wi.info = ctx;
155 wi.want_locations = true;
157 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
160 static void lower_omp (gimple_seq, omp_context *);
161 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
162 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
164 /* Find an OpenMP clause of type KIND within CLAUSES. */
166 tree
167 find_omp_clause (tree clauses, enum omp_clause_code kind)
169 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
170 if (OMP_CLAUSE_CODE (clauses) == kind)
171 return clauses;
173 return NULL_TREE;
176 /* Return true if CTX is for an omp parallel. */
178 static inline bool
179 is_parallel_ctx (omp_context *ctx)
181 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
185 /* Return true if CTX is for an omp task. */
187 static inline bool
188 is_task_ctx (omp_context *ctx)
190 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
194 /* Return true if CTX is for an omp parallel or omp task. */
196 static inline bool
197 is_taskreg_ctx (omp_context *ctx)
199 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
200 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
204 /* Return true if REGION is a combined parallel+workshare region. */
206 static inline bool
207 is_combined_parallel (struct omp_region *region)
209 return region->is_combined_parallel;
213 /* Extract the header elements of parallel loop FOR_STMT and store
214 them into *FD. */
216 static void
217 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
218 struct omp_for_data_loop *loops)
220 tree t, var, *collapse_iter, *collapse_count;
221 tree count = NULL_TREE, iter_type = long_integer_type_node;
222 struct omp_for_data_loop *loop;
223 int i;
224 struct omp_for_data_loop dummy_loop;
225 location_t loc = gimple_location (for_stmt);
227 fd->for_stmt = for_stmt;
228 fd->pre = NULL;
229 fd->collapse = gimple_omp_for_collapse (for_stmt);
230 if (fd->collapse > 1)
231 fd->loops = loops;
232 else
233 fd->loops = &fd->loop;
235 fd->have_nowait = fd->have_ordered = false;
236 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
237 fd->chunk_size = NULL_TREE;
238 collapse_iter = NULL;
239 collapse_count = NULL;
241 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
242 switch (OMP_CLAUSE_CODE (t))
244 case OMP_CLAUSE_NOWAIT:
245 fd->have_nowait = true;
246 break;
247 case OMP_CLAUSE_ORDERED:
248 fd->have_ordered = true;
249 break;
250 case OMP_CLAUSE_SCHEDULE:
251 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
252 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
253 break;
254 case OMP_CLAUSE_COLLAPSE:
255 if (fd->collapse > 1)
257 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
258 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
260 default:
261 break;
264 /* FIXME: for now map schedule(auto) to schedule(static).
265 There should be analysis to determine whether all iterations
266 are approximately the same amount of work (then schedule(static)
267 is best) or if it varies (then schedule(dynamic,N) is better). */
268 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
270 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
271 gcc_assert (fd->chunk_size == NULL);
273 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
274 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
275 gcc_assert (fd->chunk_size == NULL);
276 else if (fd->chunk_size == NULL)
278 /* We only need to compute a default chunk size for ordered
279 static loops and dynamic loops. */
280 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
281 || fd->have_ordered
282 || fd->collapse > 1)
283 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
284 ? integer_zero_node : integer_one_node;
287 for (i = 0; i < fd->collapse; i++)
289 if (fd->collapse == 1)
290 loop = &fd->loop;
291 else if (loops != NULL)
292 loop = loops + i;
293 else
294 loop = &dummy_loop;
297 loop->v = gimple_omp_for_index (for_stmt, i);
298 gcc_assert (SSA_VAR_P (loop->v));
299 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
300 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
301 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
302 loop->n1 = gimple_omp_for_initial (for_stmt, i);
304 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
305 loop->n2 = gimple_omp_for_final (for_stmt, i);
306 switch (loop->cond_code)
308 case LT_EXPR:
309 case GT_EXPR:
310 break;
311 case LE_EXPR:
312 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
313 loop->n2 = fold_build2_loc (loc,
314 POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
315 loop->n2, size_one_node);
316 else
317 loop->n2 = fold_build2_loc (loc,
318 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
319 build_int_cst (TREE_TYPE (loop->n2), 1));
320 loop->cond_code = LT_EXPR;
321 break;
322 case GE_EXPR:
323 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
324 loop->n2 = fold_build2_loc (loc,
325 POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
326 loop->n2, size_int (-1));
327 else
328 loop->n2 = fold_build2_loc (loc,
329 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
330 build_int_cst (TREE_TYPE (loop->n2), 1));
331 loop->cond_code = GT_EXPR;
332 break;
333 default:
334 gcc_unreachable ();
337 t = gimple_omp_for_incr (for_stmt, i);
338 gcc_assert (TREE_OPERAND (t, 0) == var);
339 switch (TREE_CODE (t))
341 case PLUS_EXPR:
342 case POINTER_PLUS_EXPR:
343 loop->step = TREE_OPERAND (t, 1);
344 break;
345 case MINUS_EXPR:
346 loop->step = TREE_OPERAND (t, 1);
347 loop->step = fold_build1_loc (loc,
348 NEGATE_EXPR, TREE_TYPE (loop->step),
349 loop->step);
350 break;
351 default:
352 gcc_unreachable ();
355 if (iter_type != long_long_unsigned_type_node)
357 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
358 iter_type = long_long_unsigned_type_node;
359 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
360 && TYPE_PRECISION (TREE_TYPE (loop->v))
361 >= TYPE_PRECISION (iter_type))
363 tree n;
365 if (loop->cond_code == LT_EXPR)
366 n = fold_build2_loc (loc,
367 PLUS_EXPR, TREE_TYPE (loop->v),
368 loop->n2, loop->step);
369 else
370 n = loop->n1;
371 if (TREE_CODE (n) != INTEGER_CST
372 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
373 iter_type = long_long_unsigned_type_node;
375 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
376 > TYPE_PRECISION (iter_type))
378 tree n1, n2;
380 if (loop->cond_code == LT_EXPR)
382 n1 = loop->n1;
383 n2 = fold_build2_loc (loc,
384 PLUS_EXPR, TREE_TYPE (loop->v),
385 loop->n2, loop->step);
387 else
389 n1 = fold_build2_loc (loc,
390 MINUS_EXPR, TREE_TYPE (loop->v),
391 loop->n2, loop->step);
392 n2 = loop->n1;
394 if (TREE_CODE (n1) != INTEGER_CST
395 || TREE_CODE (n2) != INTEGER_CST
396 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
397 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
398 iter_type = long_long_unsigned_type_node;
402 if (collapse_count && *collapse_count == NULL)
404 if ((i == 0 || count != NULL_TREE)
405 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
406 && TREE_CONSTANT (loop->n1)
407 && TREE_CONSTANT (loop->n2)
408 && TREE_CODE (loop->step) == INTEGER_CST)
410 tree itype = TREE_TYPE (loop->v);
412 if (POINTER_TYPE_P (itype))
413 itype
414 = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
415 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
416 t = fold_build2_loc (loc,
417 PLUS_EXPR, itype,
418 fold_convert_loc (loc, itype, loop->step), t);
419 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
420 fold_convert_loc (loc, itype, loop->n2));
421 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
422 fold_convert_loc (loc, itype, loop->n1));
423 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
424 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
425 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
426 fold_build1_loc (loc, NEGATE_EXPR, itype,
427 fold_convert_loc (loc, itype,
428 loop->step)));
429 else
430 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
431 fold_convert_loc (loc, itype, loop->step));
432 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
433 if (count != NULL_TREE)
434 count = fold_build2_loc (loc,
435 MULT_EXPR, long_long_unsigned_type_node,
436 count, t);
437 else
438 count = t;
439 if (TREE_CODE (count) != INTEGER_CST)
440 count = NULL_TREE;
442 else
443 count = NULL_TREE;
447 if (count)
449 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
450 iter_type = long_long_unsigned_type_node;
451 else
452 iter_type = long_integer_type_node;
454 else if (collapse_iter && *collapse_iter != NULL)
455 iter_type = TREE_TYPE (*collapse_iter);
456 fd->iter_type = iter_type;
457 if (collapse_iter && *collapse_iter == NULL)
458 *collapse_iter = create_tmp_var (iter_type, ".iter");
459 if (collapse_count && *collapse_count == NULL)
461 if (count)
462 *collapse_count = fold_convert_loc (loc, iter_type, count);
463 else
464 *collapse_count = create_tmp_var (iter_type, ".count");
467 if (fd->collapse > 1)
469 fd->loop.v = *collapse_iter;
470 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
471 fd->loop.n2 = *collapse_count;
472 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
473 fd->loop.cond_code = LT_EXPR;
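/* For example (illustrative only): with collapse(2) over

       for (i = 0; i < 4; i++)
         for (j = 0; j < 8; j++)

   the two loops form a single logical iteration space; *collapse_count
   is the constant 32 and fd->loop runs from 0 below 32 with step 1,
   the original iterators being recomputed from that single counter
   later during expansion.  */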
478 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
479 is the immediate dominator of PAR_ENTRY_BB, return true if there
480 are no data dependencies that would prevent expanding the parallel
481 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
483 When expanding a combined parallel+workshare region, the call to
484 the child function may need additional arguments in the case of
485 GIMPLE_OMP_FOR regions. In some cases, these arguments are
486 computed out of variables passed in from the parent to the child
487 via 'struct .omp_data_s'. For instance:
489 #pragma omp parallel for schedule (guided, i * 4)
490 for (j ...)
492 Is lowered into:
494 # BLOCK 2 (PAR_ENTRY_BB)
495 .omp_data_o.i = i;
496 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
498 # BLOCK 3 (WS_ENTRY_BB)
499 .omp_data_i = &.omp_data_o;
500 D.1667 = .omp_data_i->i;
501 D.1598 = D.1667 * 4;
502 #pragma omp for schedule (guided, D.1598)
504 When we outline the parallel region, the call to the child function
505 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
506 that value is computed *after* the call site. So, in principle we
507 cannot do the transformation.
509 To see whether the code in WS_ENTRY_BB blocks the combined
510 parallel+workshare call, we collect all the variables used in the
511 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
512 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
513 call.
515 FIXME. If we had the SSA form built at this point, we could merely
516 hoist the code in block 3 into block 2 and be done with it. But at
517 this point we don't have dataflow information and though we could
518 hack something up here, it is really not worth the aggravation. */
520 static bool
521 workshare_safe_to_combine_p (basic_block ws_entry_bb)
523 struct omp_for_data fd;
524 gimple ws_stmt = last_stmt (ws_entry_bb);
526 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
527 return true;
529 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
531 extract_omp_for_data (ws_stmt, &fd, NULL);
533 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
534 return false;
535 if (fd.iter_type != long_integer_type_node)
536 return false;
538 /* FIXME. We give up too easily here. If any of these arguments
539 are not constants, they will likely involve variables that have
540 been mapped into fields of .omp_data_s for sharing with the child
541 function. With appropriate data flow, it would be possible to
542 see through this. */
543 if (!is_gimple_min_invariant (fd.loop.n1)
544 || !is_gimple_min_invariant (fd.loop.n2)
545 || !is_gimple_min_invariant (fd.loop.step)
546 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
547 return false;
549 return true;
553 /* Collect additional arguments needed to emit a combined
554 parallel+workshare call. WS_STMT is the workshare directive being
555 expanded. */
557 static VEC(tree,gc) *
558 get_ws_args_for (gimple ws_stmt)
560 tree t;
561 location_t loc = gimple_location (ws_stmt);
562 VEC(tree,gc) *ws_args;
564 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
566 struct omp_for_data fd;
568 extract_omp_for_data (ws_stmt, &fd, NULL);
570 ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));
572 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
573 VEC_quick_push (tree, ws_args, t);
575 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
576 VEC_quick_push (tree, ws_args, t);
578 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
579 VEC_quick_push (tree, ws_args, t);
581 if (fd.chunk_size)
583 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
584 VEC_quick_push (tree, ws_args, t);
587 return ws_args;
589 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
591 /* Number of sections is equal to the number of edges from the
592 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
593 the exit of the sections region. */
594 basic_block bb = single_succ (gimple_bb (ws_stmt));
595 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
596 ws_args = VEC_alloc (tree, gc, 1);
597 VEC_quick_push (tree, ws_args, t);
598 return ws_args;
601 gcc_unreachable ();
605 /* Discover whether REGION is a combined parallel+workshare region. */
607 static void
608 determine_parallel_type (struct omp_region *region)
610 basic_block par_entry_bb, par_exit_bb;
611 basic_block ws_entry_bb, ws_exit_bb;
613 if (region == NULL || region->inner == NULL
614 || region->exit == NULL || region->inner->exit == NULL
615 || region->inner->cont == NULL)
616 return;
618 /* We only support parallel+for and parallel+sections. */
619 if (region->type != GIMPLE_OMP_PARALLEL
620 || (region->inner->type != GIMPLE_OMP_FOR
621 && region->inner->type != GIMPLE_OMP_SECTIONS))
622 return;
624 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
625 WS_EXIT_BB -> PAR_EXIT_BB. */
626 par_entry_bb = region->entry;
627 par_exit_bb = region->exit;
628 ws_entry_bb = region->inner->entry;
629 ws_exit_bb = region->inner->exit;
631 if (single_succ (par_entry_bb) == ws_entry_bb
632 && single_succ (ws_exit_bb) == par_exit_bb
633 && workshare_safe_to_combine_p (ws_entry_bb)
634 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
635 || (last_and_only_stmt (ws_entry_bb)
636 && last_and_only_stmt (par_exit_bb))))
638 gimple ws_stmt = last_stmt (ws_entry_bb);
640 if (region->inner->type == GIMPLE_OMP_FOR)
642 /* If this is a combined parallel loop, we need to determine
643 whether or not to use the combined library calls. There
644 are two cases where we do not apply the transformation:
645 static loops and any kind of ordered loop. In the first
646 case, we already open code the loop so there is no need
647 to do anything else. In the latter case, the combined
648 parallel loop call would still need extra synchronization
649 to implement ordered semantics, so there would not be any
650 gain in using the combined call. */
651 tree clauses = gimple_omp_for_clauses (ws_stmt);
652 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
653 if (c == NULL
654 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
655 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
657 region->is_combined_parallel = false;
658 region->inner->is_combined_parallel = false;
659 return;
663 region->is_combined_parallel = true;
664 region->inner->is_combined_parallel = true;
665 region->ws_args = get_ws_args_for (ws_stmt);
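/* Illustrative sketch of the effect; the exact libgomp entry points are
   an assumption here, given only for orientation.  A combined region like

       #pragma omp parallel for schedule (dynamic, 8)
       for (i = 0; i < n; i++) ...

   can be expanded with a single combined call, roughly

       GOMP_parallel_loop_dynamic_start (fn, &.omp_data_o, 0, 0, n, 1, 8);

   whereas the non-combined form uses GOMP_parallel_start plus a separate
   GOMP_loop_dynamic_start inside the child function.  Static and ordered
   schedules keep the non-combined form, as explained above.  */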
670 /* Return true if EXPR is variable sized. */
672 static inline bool
673 is_variable_sized (const_tree expr)
675 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
678 /* Return true if DECL is a reference type. */
680 static inline bool
681 is_reference (tree decl)
683 return lang_hooks.decls.omp_privatize_by_reference (decl);
686 /* Lookup variables in the decl or field splay trees. The "maybe" form
687 allows the variable to not have been entered; otherwise we
688 assert that the variable must have been entered. */
690 static inline tree
691 lookup_decl (tree var, omp_context *ctx)
693 tree *n;
694 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
695 return *n;
698 static inline tree
699 maybe_lookup_decl (const_tree var, omp_context *ctx)
701 tree *n;
702 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
703 return n ? *n : NULL_TREE;
706 static inline tree
707 lookup_field (tree var, omp_context *ctx)
709 splay_tree_node n;
710 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
711 return (tree) n->value;
714 static inline tree
715 lookup_sfield (tree var, omp_context *ctx)
717 splay_tree_node n;
718 n = splay_tree_lookup (ctx->sfield_map
719 ? ctx->sfield_map : ctx->field_map,
720 (splay_tree_key) var);
721 return (tree) n->value;
724 static inline tree
725 maybe_lookup_field (tree var, omp_context *ctx)
727 splay_tree_node n;
728 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
729 return n ? (tree) n->value : NULL_TREE;
732 /* Return true if DECL should be copied by pointer. SHARED_CTX is
733 the parallel context if DECL is to be shared. */
735 static bool
736 use_pointer_for_field (tree decl, omp_context *shared_ctx)
738 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
739 return true;
741 /* We can only use copy-in/copy-out semantics for shared variables
742 when we know the value is not accessible from an outer scope. */
743 if (shared_ctx)
745 /* ??? Trivially accessible from anywhere. But why would we even
746 be passing an address in this case? Should we simply assert
747 this to be false, or should we have a cleanup pass that removes
748 these from the list of mappings? */
749 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
750 return true;
752 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
753 without analyzing the expression whether or not its location
754 is accessible to anyone else. In the case of nested parallel
755 regions it certainly may be. */
756 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
757 return true;
759 /* Do not use copy-in/copy-out for variables that have their
760 address taken. */
761 if (TREE_ADDRESSABLE (decl))
762 return true;
764 /* Disallow copy-in/out in nested parallel if
765 decl is shared in outer parallel, otherwise
766 each thread could store the shared variable
767 in its own copy-in location, making the
768 variable no longer really shared. */
769 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
771 omp_context *up;
773 for (up = shared_ctx->outer; up; up = up->outer)
774 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
775 break;
777 if (up)
779 tree c;
781 for (c = gimple_omp_taskreg_clauses (up->stmt);
782 c; c = OMP_CLAUSE_CHAIN (c))
783 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
784 && OMP_CLAUSE_DECL (c) == decl)
785 break;
787 if (c)
788 return true;
792 /* For tasks avoid using copy-in/out, unless they are readonly
793 (in which case just copy-in is used). As tasks can be
794 deferred or executed in a different thread, the task need not
795 have terminated by the time GOMP_task returns.
796 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
798 tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
799 if (is_gimple_reg (outer))
801 /* Taking address of OUTER in lower_send_shared_vars
802 might need regimplification of everything that uses the
803 variable. */
804 if (!task_shared_vars)
805 task_shared_vars = BITMAP_ALLOC (NULL);
806 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
807 TREE_ADDRESSABLE (outer) = 1;
809 return true;
813 return false;
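/* For example (illustrative only), given

       int n = 0;  int buf[16];  static int counter;
       #pragma omp parallel shared (n, buf, counter)

   n is a non-addressable scalar and is copied in and out of .omp_data_s
   by value, while buf (an aggregate) and counter (TREE_STATIC) are passed
   by address so that every thread sees the same object.  */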
816 /* Create a new VAR_DECL and copy information from VAR to it. */
818 tree
819 copy_var_decl (tree var, tree name, tree type)
821 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
823 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
824 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
825 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
826 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
827 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
828 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
829 TREE_USED (copy) = 1;
830 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
832 return copy;
835 /* Construct a new automatic decl similar to VAR. */
837 static tree
838 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
840 tree copy = copy_var_decl (var, name, type);
842 DECL_CONTEXT (copy) = current_function_decl;
843 DECL_CHAIN (copy) = ctx->block_vars;
844 ctx->block_vars = copy;
846 return copy;
849 static tree
850 omp_copy_decl_1 (tree var, omp_context *ctx)
852 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
855 /* Build tree nodes to access the field for VAR on the receiver side. */
857 static tree
858 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
860 tree x, field = lookup_field (var, ctx);
862 /* If the receiver record type was remapped in the child function,
863 remap the field into the new record type. */
864 x = maybe_lookup_field (field, ctx);
865 if (x != NULL)
866 field = x;
868 x = build_simple_mem_ref (ctx->receiver_decl);
869 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
870 if (by_ref)
871 x = build_simple_mem_ref (x);
873 return x;
876 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
877 of a parallel, this is a component reference; for workshare constructs
878 this is some variable. */
880 static tree
881 build_outer_var_ref (tree var, omp_context *ctx)
883 tree x;
885 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
886 x = var;
887 else if (is_variable_sized (var))
889 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
890 x = build_outer_var_ref (x, ctx);
891 x = build_simple_mem_ref (x);
893 else if (is_taskreg_ctx (ctx))
895 bool by_ref = use_pointer_for_field (var, NULL);
896 x = build_receiver_ref (var, by_ref, ctx);
898 else if (ctx->outer)
899 x = lookup_decl (var, ctx->outer);
900 else if (is_reference (var))
901 /* This can happen with orphaned constructs. If var is a reference, it
902 may be shared and as such valid. */
903 x = var;
904 else
905 gcc_unreachable ();
907 if (is_reference (var))
908 x = build_simple_mem_ref (x);
910 return x;
913 /* Build tree nodes to access the field for VAR on the sender side. */
915 static tree
916 build_sender_ref (tree var, omp_context *ctx)
918 tree field = lookup_sfield (var, ctx);
919 return build3 (COMPONENT_REF, TREE_TYPE (field),
920 ctx->sender_decl, field, NULL);
923 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
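/* A note on MASK, inferred from the uses below and given here only for
   orientation: bit 0 requests a field in ctx->record_type (the receiver
   side) and an entry in ctx->field_map, bit 1 requests a field in
   ctx->srecord_type (the sender side used by task constructs) and an
   entry in ctx->sfield_map; callers pass 3 when both are needed.  */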
925 static void
926 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
928 tree field, type, sfield = NULL_TREE;
930 gcc_assert ((mask & 1) == 0
931 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
932 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
933 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
935 type = TREE_TYPE (var);
936 if (by_ref)
937 type = build_pointer_type (type);
938 else if ((mask & 3) == 1 && is_reference (var))
939 type = TREE_TYPE (type);
941 field = build_decl (DECL_SOURCE_LOCATION (var),
942 FIELD_DECL, DECL_NAME (var), type);
944 /* Remember what variable this field was created for. This does have a
945 side effect of making dwarf2out ignore this member, so for helpful
946 debugging we clear it later in delete_omp_context. */
947 DECL_ABSTRACT_ORIGIN (field) = var;
948 if (type == TREE_TYPE (var))
950 DECL_ALIGN (field) = DECL_ALIGN (var);
951 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
952 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
954 else
955 DECL_ALIGN (field) = TYPE_ALIGN (type);
957 if ((mask & 3) == 3)
959 insert_field_into_struct (ctx->record_type, field);
960 if (ctx->srecord_type)
962 sfield = build_decl (DECL_SOURCE_LOCATION (var),
963 FIELD_DECL, DECL_NAME (var), type);
964 DECL_ABSTRACT_ORIGIN (sfield) = var;
965 DECL_ALIGN (sfield) = DECL_ALIGN (field);
966 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
967 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
968 insert_field_into_struct (ctx->srecord_type, sfield);
971 else
973 if (ctx->srecord_type == NULL_TREE)
975 tree t;
977 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
978 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
979 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
981 sfield = build_decl (DECL_SOURCE_LOCATION (var),
982 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
983 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
984 insert_field_into_struct (ctx->srecord_type, sfield);
985 splay_tree_insert (ctx->sfield_map,
986 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
987 (splay_tree_value) sfield);
990 sfield = field;
991 insert_field_into_struct ((mask & 1) ? ctx->record_type
992 : ctx->srecord_type, field);
995 if (mask & 1)
996 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
997 (splay_tree_value) field);
998 if ((mask & 2) && ctx->sfield_map)
999 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
1000 (splay_tree_value) sfield);
1003 static tree
1004 install_var_local (tree var, omp_context *ctx)
1006 tree new_var = omp_copy_decl_1 (var, ctx);
1007 insert_decl_map (&ctx->cb, var, new_var);
1008 return new_var;
1011 /* Adjust the replacement for DECL in CTX for the new context. This means
1012 copying the DECL_VALUE_EXPR, and fixing up the type. */
1014 static void
1015 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1017 tree new_decl, size;
1019 new_decl = lookup_decl (decl, ctx);
1021 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1023 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1024 && DECL_HAS_VALUE_EXPR_P (decl))
1026 tree ve = DECL_VALUE_EXPR (decl);
1027 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1028 SET_DECL_VALUE_EXPR (new_decl, ve);
1029 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1032 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1034 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1035 if (size == error_mark_node)
1036 size = TYPE_SIZE (TREE_TYPE (new_decl));
1037 DECL_SIZE (new_decl) = size;
1039 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1040 if (size == error_mark_node)
1041 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1042 DECL_SIZE_UNIT (new_decl) = size;
1046 /* The callback for remap_decl. Search all containing contexts for a
1047 mapping of the variable; this avoids having to duplicate the splay
1048 tree ahead of time. We know a mapping doesn't already exist in the
1049 given context. Create new mappings to implement default semantics. */
1051 static tree
1052 omp_copy_decl (tree var, copy_body_data *cb)
1054 omp_context *ctx = (omp_context *) cb;
1055 tree new_var;
1057 if (TREE_CODE (var) == LABEL_DECL)
1059 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1060 DECL_CONTEXT (new_var) = current_function_decl;
1061 insert_decl_map (&ctx->cb, var, new_var);
1062 return new_var;
1065 while (!is_taskreg_ctx (ctx))
1067 ctx = ctx->outer;
1068 if (ctx == NULL)
1069 return var;
1070 new_var = maybe_lookup_decl (var, ctx);
1071 if (new_var)
1072 return new_var;
1075 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1076 return var;
1078 return error_mark_node;
1082 /* Return the parallel region associated with STMT. */
1084 /* Debugging dumps for parallel regions. */
1085 void dump_omp_region (FILE *, struct omp_region *, int);
1086 void debug_omp_region (struct omp_region *);
1087 void debug_all_omp_regions (void);
1089 /* Dump the parallel region tree rooted at REGION. */
1091 void
1092 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1094 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1095 gimple_code_name[region->type]);
1097 if (region->inner)
1098 dump_omp_region (file, region->inner, indent + 4);
1100 if (region->cont)
1102 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1103 region->cont->index);
1106 if (region->exit)
1107 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1108 region->exit->index);
1109 else
1110 fprintf (file, "%*s[no exit marker]\n", indent, "");
1112 if (region->next)
1113 dump_omp_region (file, region->next, indent);
1116 DEBUG_FUNCTION void
1117 debug_omp_region (struct omp_region *region)
1119 dump_omp_region (stderr, region, 0);
1122 DEBUG_FUNCTION void
1123 debug_all_omp_regions (void)
1125 dump_omp_region (stderr, root_omp_region, 0);
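/* As an illustration (the basic block numbers are made up), a
   parallel+for region tree is dumped roughly as

       bb 2: GIMPLE_OMP_PARALLEL
           bb 3: GIMPLE_OMP_FOR
           bb 5: GIMPLE_OMP_CONTINUE
           bb 6: GIMPLE_OMP_RETURN
       bb 7: GIMPLE_OMP_RETURN

   following the format strings in dump_omp_region above.  */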
1129 /* Create a new parallel region starting at STMT inside region PARENT. */
1131 struct omp_region *
1132 new_omp_region (basic_block bb, enum gimple_code type,
1133 struct omp_region *parent)
1135 struct omp_region *region = XCNEW (struct omp_region);
1137 region->outer = parent;
1138 region->entry = bb;
1139 region->type = type;
1141 if (parent)
1143 /* This is a nested region. Add it to the list of inner
1144 regions in PARENT. */
1145 region->next = parent->inner;
1146 parent->inner = region;
1148 else
1150 /* This is a toplevel region. Add it to the list of toplevel
1151 regions in ROOT_OMP_REGION. */
1152 region->next = root_omp_region;
1153 root_omp_region = region;
1156 return region;
1159 /* Release the memory associated with the region tree rooted at REGION. */
1161 static void
1162 free_omp_region_1 (struct omp_region *region)
1164 struct omp_region *i, *n;
1166 for (i = region->inner; i ; i = n)
1168 n = i->next;
1169 free_omp_region_1 (i);
1172 free (region);
1175 /* Release the memory for the entire omp region tree. */
1177 void
1178 free_omp_regions (void)
1180 struct omp_region *r, *n;
1181 for (r = root_omp_region; r ; r = n)
1183 n = r->next;
1184 free_omp_region_1 (r);
1186 root_omp_region = NULL;
1190 /* Create a new context, with OUTER_CTX being the surrounding context. */
1192 static omp_context *
1193 new_omp_context (gimple stmt, omp_context *outer_ctx)
1195 omp_context *ctx = XCNEW (omp_context);
1197 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1198 (splay_tree_value) ctx);
1199 ctx->stmt = stmt;
1201 if (outer_ctx)
1203 ctx->outer = outer_ctx;
1204 ctx->cb = outer_ctx->cb;
1205 ctx->cb.block = NULL;
1206 ctx->depth = outer_ctx->depth + 1;
1208 else
1210 ctx->cb.src_fn = current_function_decl;
1211 ctx->cb.dst_fn = current_function_decl;
1212 ctx->cb.src_node = cgraph_node (current_function_decl);
1213 ctx->cb.dst_node = ctx->cb.src_node;
1214 ctx->cb.src_cfun = cfun;
1215 ctx->cb.copy_decl = omp_copy_decl;
1216 ctx->cb.eh_lp_nr = 0;
1217 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1218 ctx->depth = 1;
1221 ctx->cb.decl_map = pointer_map_create ();
1223 return ctx;
1226 static gimple_seq maybe_catch_exception (gimple_seq);
1228 /* Finalize task copyfn. */
1230 static void
1231 finalize_task_copyfn (gimple task_stmt)
1233 struct function *child_cfun;
1234 tree child_fn, old_fn;
1235 gimple_seq seq, new_seq;
1236 gimple bind;
1238 child_fn = gimple_omp_task_copy_fn (task_stmt);
1239 if (child_fn == NULL_TREE)
1240 return;
1242 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1244 /* Inform the callgraph about the new function. */
1245 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1246 = cfun->curr_properties;
1248 old_fn = current_function_decl;
1249 push_cfun (child_cfun);
1250 current_function_decl = child_fn;
1251 bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
1252 seq = gimple_seq_alloc ();
1253 gimple_seq_add_stmt (&seq, bind);
1254 new_seq = maybe_catch_exception (seq);
1255 if (new_seq != seq)
1257 bind = gimple_build_bind (NULL, new_seq, NULL);
1258 seq = gimple_seq_alloc ();
1259 gimple_seq_add_stmt (&seq, bind);
1261 gimple_set_body (child_fn, seq);
1262 pop_cfun ();
1263 current_function_decl = old_fn;
1265 cgraph_add_new_function (child_fn, false);
1268 /* Destroy an omp_context data structure. Called through the splay tree
1269 value delete callback. */
1271 static void
1272 delete_omp_context (splay_tree_value value)
1274 omp_context *ctx = (omp_context *) value;
1276 pointer_map_destroy (ctx->cb.decl_map);
1278 if (ctx->field_map)
1279 splay_tree_delete (ctx->field_map);
1280 if (ctx->sfield_map)
1281 splay_tree_delete (ctx->sfield_map);
1283 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1284 it produces corrupt debug information. */
1285 if (ctx->record_type)
1287 tree t;
1288 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1289 DECL_ABSTRACT_ORIGIN (t) = NULL;
1291 if (ctx->srecord_type)
1293 tree t;
1294 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1295 DECL_ABSTRACT_ORIGIN (t) = NULL;
1298 if (is_task_ctx (ctx))
1299 finalize_task_copyfn (ctx->stmt);
1301 XDELETE (ctx);
1304 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1305 context. */
1307 static void
1308 fixup_child_record_type (omp_context *ctx)
1310 tree f, type = ctx->record_type;
1312 /* ??? It isn't sufficient to just call remap_type here, because
1313 variably_modified_type_p doesn't work the way we expect for
1314 record types. Testing each field for whether it needs remapping
1315 and creating a new record by hand works, however. */
1316 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1317 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1318 break;
1319 if (f)
1321 tree name, new_fields = NULL;
1323 type = lang_hooks.types.make_type (RECORD_TYPE);
1324 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1325 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1326 TYPE_DECL, name, type);
1327 TYPE_NAME (type) = name;
1329 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1331 tree new_f = copy_node (f);
1332 DECL_CONTEXT (new_f) = type;
1333 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1334 DECL_CHAIN (new_f) = new_fields;
1335 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1336 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1337 &ctx->cb, NULL);
1338 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1339 &ctx->cb, NULL);
1340 new_fields = new_f;
1342 /* Arrange to be able to look up the receiver field
1343 given the sender field. */
1344 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1345 (splay_tree_value) new_f);
1347 TYPE_FIELDS (type) = nreverse (new_fields);
1348 layout_type (type);
1351 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
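/* Example of why this is needed (illustrative): when a shared variable
   has a variably modified type, e.g. a variable-length array whose bound
   is itself remapped in the child function, the corresponding field type
   still refers to the original bound, so the receiver record has to be
   rebuilt field by field with the remapped types.  */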
1354 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1355 specified by CLAUSES. */
1357 static void
1358 scan_sharing_clauses (tree clauses, omp_context *ctx)
1360 tree c, decl;
1361 bool scan_array_reductions = false;
1363 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1365 bool by_ref;
1367 switch (OMP_CLAUSE_CODE (c))
1369 case OMP_CLAUSE_PRIVATE:
1370 decl = OMP_CLAUSE_DECL (c);
1371 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1372 goto do_private;
1373 else if (!is_variable_sized (decl))
1374 install_var_local (decl, ctx);
1375 break;
1377 case OMP_CLAUSE_SHARED:
1378 gcc_assert (is_taskreg_ctx (ctx));
1379 decl = OMP_CLAUSE_DECL (c);
1380 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1381 || !is_variable_sized (decl));
1382 /* Global variables don't need to be copied,
1383 the receiver side will use them directly. */
1384 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1385 break;
1386 by_ref = use_pointer_for_field (decl, ctx);
1387 if (! TREE_READONLY (decl)
1388 || TREE_ADDRESSABLE (decl)
1389 || by_ref
1390 || is_reference (decl))
1392 install_var_field (decl, by_ref, 3, ctx);
1393 install_var_local (decl, ctx);
1394 break;
1396 /* We don't need to copy const scalar vars back. */
1397 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1398 goto do_private;
1400 case OMP_CLAUSE_LASTPRIVATE:
1401 /* Let the corresponding firstprivate clause create
1402 the variable. */
1403 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1404 break;
1405 /* FALLTHRU */
1407 case OMP_CLAUSE_FIRSTPRIVATE:
1408 case OMP_CLAUSE_REDUCTION:
1409 decl = OMP_CLAUSE_DECL (c);
1410 do_private:
1411 if (is_variable_sized (decl))
1413 if (is_task_ctx (ctx))
1414 install_var_field (decl, false, 1, ctx);
1415 break;
1417 else if (is_taskreg_ctx (ctx))
1419 bool global
1420 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1421 by_ref = use_pointer_for_field (decl, NULL);
1423 if (is_task_ctx (ctx)
1424 && (global || by_ref || is_reference (decl)))
1426 install_var_field (decl, false, 1, ctx);
1427 if (!global)
1428 install_var_field (decl, by_ref, 2, ctx);
1430 else if (!global)
1431 install_var_field (decl, by_ref, 3, ctx);
1433 install_var_local (decl, ctx);
1434 break;
1436 case OMP_CLAUSE_COPYPRIVATE:
1437 case OMP_CLAUSE_COPYIN:
1438 decl = OMP_CLAUSE_DECL (c);
1439 by_ref = use_pointer_for_field (decl, NULL);
1440 install_var_field (decl, by_ref, 3, ctx);
1441 break;
1443 case OMP_CLAUSE_DEFAULT:
1444 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1445 break;
1447 case OMP_CLAUSE_IF:
1448 case OMP_CLAUSE_NUM_THREADS:
1449 case OMP_CLAUSE_SCHEDULE:
1450 if (ctx->outer)
1451 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1452 break;
1454 case OMP_CLAUSE_NOWAIT:
1455 case OMP_CLAUSE_ORDERED:
1456 case OMP_CLAUSE_COLLAPSE:
1457 case OMP_CLAUSE_UNTIED:
1458 break;
1460 default:
1461 gcc_unreachable ();
1465 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1467 switch (OMP_CLAUSE_CODE (c))
1469 case OMP_CLAUSE_LASTPRIVATE:
1470 /* Let the corresponding firstprivate clause create
1471 the variable. */
1472 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1473 scan_array_reductions = true;
1474 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1475 break;
1476 /* FALLTHRU */
1478 case OMP_CLAUSE_PRIVATE:
1479 case OMP_CLAUSE_FIRSTPRIVATE:
1480 case OMP_CLAUSE_REDUCTION:
1481 decl = OMP_CLAUSE_DECL (c);
1482 if (is_variable_sized (decl))
1483 install_var_local (decl, ctx);
1484 fixup_remapped_decl (decl, ctx,
1485 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1486 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1487 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1488 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1489 scan_array_reductions = true;
1490 break;
1492 case OMP_CLAUSE_SHARED:
1493 decl = OMP_CLAUSE_DECL (c);
1494 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1495 fixup_remapped_decl (decl, ctx, false);
1496 break;
1498 case OMP_CLAUSE_COPYPRIVATE:
1499 case OMP_CLAUSE_COPYIN:
1500 case OMP_CLAUSE_DEFAULT:
1501 case OMP_CLAUSE_IF:
1502 case OMP_CLAUSE_NUM_THREADS:
1503 case OMP_CLAUSE_SCHEDULE:
1504 case OMP_CLAUSE_NOWAIT:
1505 case OMP_CLAUSE_ORDERED:
1506 case OMP_CLAUSE_COLLAPSE:
1507 case OMP_CLAUSE_UNTIED:
1508 break;
1510 default:
1511 gcc_unreachable ();
1515 if (scan_array_reductions)
1516 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1517 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1518 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1520 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1521 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1523 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1524 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1525 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
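/* For orientation (illustrative only): given

       #pragma omp parallel shared (a) firstprivate (b) private (c)

   a non-global, non-read-only A gets both a field in the communication
   record and a local replacement (install_var_field plus
   install_var_local), B likewise gets a field so its value can be copied
   in as well as a local copy, while C only gets a local replacement,
   since nothing needs to be communicated for it.  */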
1528 /* Create a new name for the omp child function. Returns an identifier. */
1530 static GTY(()) unsigned int tmp_ompfn_id_num;
1532 static tree
1533 create_omp_child_function_name (bool task_copy)
1535 return (clone_function_name (current_function_decl,
1536 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1539 /* Build a decl for the omp child function. It will not contain a body
1540 yet, just the bare decl. */
1542 static void
1543 create_omp_child_function (omp_context *ctx, bool task_copy)
1545 tree decl, type, name, t;
1547 name = create_omp_child_function_name (task_copy);
1548 if (task_copy)
1549 type = build_function_type_list (void_type_node, ptr_type_node,
1550 ptr_type_node, NULL_TREE);
1551 else
1552 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1554 decl = build_decl (gimple_location (ctx->stmt),
1555 FUNCTION_DECL, name, type);
1557 if (!task_copy)
1558 ctx->cb.dst_fn = decl;
1559 else
1560 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1562 TREE_STATIC (decl) = 1;
1563 TREE_USED (decl) = 1;
1564 DECL_ARTIFICIAL (decl) = 1;
1565 DECL_NAMELESS (decl) = 1;
1566 DECL_IGNORED_P (decl) = 0;
1567 TREE_PUBLIC (decl) = 0;
1568 DECL_UNINLINABLE (decl) = 1;
1569 DECL_EXTERNAL (decl) = 0;
1570 DECL_CONTEXT (decl) = NULL_TREE;
1571 DECL_INITIAL (decl) = make_node (BLOCK);
1573 t = build_decl (DECL_SOURCE_LOCATION (decl),
1574 RESULT_DECL, NULL_TREE, void_type_node);
1575 DECL_ARTIFICIAL (t) = 1;
1576 DECL_IGNORED_P (t) = 1;
1577 DECL_CONTEXT (t) = decl;
1578 DECL_RESULT (decl) = t;
1580 t = build_decl (DECL_SOURCE_LOCATION (decl),
1581 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1582 DECL_ARTIFICIAL (t) = 1;
1583 DECL_NAMELESS (t) = 1;
1584 DECL_ARG_TYPE (t) = ptr_type_node;
1585 DECL_CONTEXT (t) = current_function_decl;
1586 TREE_USED (t) = 1;
1587 DECL_ARGUMENTS (decl) = t;
1588 if (!task_copy)
1589 ctx->receiver_decl = t;
1590 else
1592 t = build_decl (DECL_SOURCE_LOCATION (decl),
1593 PARM_DECL, get_identifier (".omp_data_o"),
1594 ptr_type_node);
1595 DECL_ARTIFICIAL (t) = 1;
1596 DECL_NAMELESS (t) = 1;
1597 DECL_ARG_TYPE (t) = ptr_type_node;
1598 DECL_CONTEXT (t) = current_function_decl;
1599 TREE_USED (t) = 1;
1600 TREE_ADDRESSABLE (t) = 1;
1601 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1602 DECL_ARGUMENTS (decl) = t;
1605 /* Allocate memory for the function structure. The call to
1606 allocate_struct_function clobbers CFUN, so we need to restore
1607 it afterward. */
1608 push_struct_function (decl);
1609 cfun->function_end_locus = gimple_location (ctx->stmt);
1610 pop_cfun ();
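/* The resulting decl looks roughly like this (illustrative; the names
   follow the conventions used above):

       static void foo._omp_fn.0 (void * .omp_data_i);

   i.e. a static, uninlinable function taking the address of the
   .omp_data_s record.  For a task copy function a second pointer
   argument, .omp_data_o, is added.  */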
1614 /* Scan an OpenMP parallel directive. */
1616 static void
1617 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1619 omp_context *ctx;
1620 tree name;
1621 gimple stmt = gsi_stmt (*gsi);
1623 /* Ignore parallel directives with empty bodies, unless there
1624 are copyin clauses. */
1625 if (optimize > 0
1626 && empty_body_p (gimple_omp_body (stmt))
1627 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1628 OMP_CLAUSE_COPYIN) == NULL)
1630 gsi_replace (gsi, gimple_build_nop (), false);
1631 return;
1634 ctx = new_omp_context (stmt, outer_ctx);
1635 if (taskreg_nesting_level > 1)
1636 ctx->is_nested = true;
1637 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1638 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1639 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1640 name = create_tmp_var_name (".omp_data_s");
1641 name = build_decl (gimple_location (stmt),
1642 TYPE_DECL, name, ctx->record_type);
1643 DECL_ARTIFICIAL (name) = 1;
1644 DECL_NAMELESS (name) = 1;
1645 TYPE_NAME (ctx->record_type) = name;
1646 create_omp_child_function (ctx, false);
1647 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1649 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1650 scan_omp (gimple_omp_body (stmt), ctx);
1652 if (TYPE_FIELDS (ctx->record_type) == NULL)
1653 ctx->record_type = ctx->receiver_decl = NULL;
1654 else
1656 layout_type (ctx->record_type);
1657 fixup_child_record_type (ctx);
1661 /* Scan an OpenMP task directive. */
1663 static void
1664 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1666 omp_context *ctx;
1667 tree name, t;
1668 gimple stmt = gsi_stmt (*gsi);
1669 location_t loc = gimple_location (stmt);
1671 /* Ignore task directives with empty bodies. */
1672 if (optimize > 0
1673 && empty_body_p (gimple_omp_body (stmt)))
1675 gsi_replace (gsi, gimple_build_nop (), false);
1676 return;
1679 ctx = new_omp_context (stmt, outer_ctx);
1680 if (taskreg_nesting_level > 1)
1681 ctx->is_nested = true;
1682 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1683 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1684 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1685 name = create_tmp_var_name (".omp_data_s");
1686 name = build_decl (gimple_location (stmt),
1687 TYPE_DECL, name, ctx->record_type);
1688 DECL_ARTIFICIAL (name) = 1;
1689 DECL_NAMELESS (name) = 1;
1690 TYPE_NAME (ctx->record_type) = name;
1691 create_omp_child_function (ctx, false);
1692 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1694 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1696 if (ctx->srecord_type)
1698 name = create_tmp_var_name (".omp_data_a");
1699 name = build_decl (gimple_location (stmt),
1700 TYPE_DECL, name, ctx->srecord_type);
1701 DECL_ARTIFICIAL (name) = 1;
1702 DECL_NAMELESS (name) = 1;
1703 TYPE_NAME (ctx->srecord_type) = name;
1704 create_omp_child_function (ctx, true);
1707 scan_omp (gimple_omp_body (stmt), ctx);
1709 if (TYPE_FIELDS (ctx->record_type) == NULL)
1711 ctx->record_type = ctx->receiver_decl = NULL;
1712 t = build_int_cst (long_integer_type_node, 0);
1713 gimple_omp_task_set_arg_size (stmt, t);
1714 t = build_int_cst (long_integer_type_node, 1);
1715 gimple_omp_task_set_arg_align (stmt, t);
1717 else
1719 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1720 /* Move VLA fields to the end. */
1721 p = &TYPE_FIELDS (ctx->record_type);
1722 while (*p)
1723 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1724 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1726 *q = *p;
1727 *p = TREE_CHAIN (*p);
1728 TREE_CHAIN (*q) = NULL_TREE;
1729 q = &TREE_CHAIN (*q);
1731 else
1732 p = &DECL_CHAIN (*p);
1733 *p = vla_fields;
1734 layout_type (ctx->record_type);
1735 fixup_child_record_type (ctx);
1736 if (ctx->srecord_type)
1737 layout_type (ctx->srecord_type);
1738 t = fold_convert_loc (loc, long_integer_type_node,
1739 TYPE_SIZE_UNIT (ctx->record_type));
1740 gimple_omp_task_set_arg_size (stmt, t);
1741 t = build_int_cst (long_integer_type_node,
1742 TYPE_ALIGN_UNIT (ctx->record_type));
1743 gimple_omp_task_set_arg_align (stmt, t);
1748 /* Scan an OpenMP loop directive. */
1750 static void
1751 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1753 omp_context *ctx;
1754 size_t i;
1756 ctx = new_omp_context (stmt, outer_ctx);
1758 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1760 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1761 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1763 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1764 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1765 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1766 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1768 scan_omp (gimple_omp_body (stmt), ctx);
1771 /* Scan an OpenMP sections directive. */
1773 static void
1774 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1776 omp_context *ctx;
1778 ctx = new_omp_context (stmt, outer_ctx);
1779 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1780 scan_omp (gimple_omp_body (stmt), ctx);
1783 /* Scan an OpenMP single directive. */
1785 static void
1786 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1788 omp_context *ctx;
1789 tree name;
1791 ctx = new_omp_context (stmt, outer_ctx);
1792 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1793 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1794 name = create_tmp_var_name (".omp_copy_s");
1795 name = build_decl (gimple_location (stmt),
1796 TYPE_DECL, name, ctx->record_type);
1797 TYPE_NAME (ctx->record_type) = name;
1799 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1800 scan_omp (gimple_omp_body (stmt), ctx);
1802 if (TYPE_FIELDS (ctx->record_type) == NULL)
1803 ctx->record_type = NULL;
1804 else
1805 layout_type (ctx->record_type);
1809 /* Check OpenMP nesting restrictions. */
1810 static void
1811 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1813 switch (gimple_code (stmt))
1815 case GIMPLE_OMP_FOR:
1816 case GIMPLE_OMP_SECTIONS:
1817 case GIMPLE_OMP_SINGLE:
1818 case GIMPLE_CALL:
1819 for (; ctx != NULL; ctx = ctx->outer)
1820 switch (gimple_code (ctx->stmt))
1822 case GIMPLE_OMP_FOR:
1823 case GIMPLE_OMP_SECTIONS:
1824 case GIMPLE_OMP_SINGLE:
1825 case GIMPLE_OMP_ORDERED:
1826 case GIMPLE_OMP_MASTER:
1827 case GIMPLE_OMP_TASK:
1828 if (is_gimple_call (stmt))
1830 warning (0, "barrier region may not be closely nested inside "
1831 "of work-sharing, critical, ordered, master or "
1832 "explicit task region");
1833 return;
1835 warning (0, "work-sharing region may not be closely nested inside "
1836 "of work-sharing, critical, ordered, master or explicit "
1837 "task region");
1838 return;
1839 case GIMPLE_OMP_PARALLEL:
1840 return;
1841 default:
1842 break;
1844 break;
1845 case GIMPLE_OMP_MASTER:
1846 for (; ctx != NULL; ctx = ctx->outer)
1847 switch (gimple_code (ctx->stmt))
1849 case GIMPLE_OMP_FOR:
1850 case GIMPLE_OMP_SECTIONS:
1851 case GIMPLE_OMP_SINGLE:
1852 case GIMPLE_OMP_TASK:
1853 warning (0, "master region may not be closely nested inside "
1854 "of work-sharing or explicit task region");
1855 return;
1856 case GIMPLE_OMP_PARALLEL:
1857 return;
1858 default:
1859 break;
1861 break;
1862 case GIMPLE_OMP_ORDERED:
1863 for (; ctx != NULL; ctx = ctx->outer)
1864 switch (gimple_code (ctx->stmt))
1866 case GIMPLE_OMP_CRITICAL:
1867 case GIMPLE_OMP_TASK:
1868 warning (0, "ordered region may not be closely nested inside "
1869 "of critical or explicit task region");
1870 return;
1871 case GIMPLE_OMP_FOR:
1872 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1873 OMP_CLAUSE_ORDERED) == NULL)
1874 warning (0, "ordered region must be closely nested inside "
1875 "a loop region with an ordered clause");
1876 return;
1877 case GIMPLE_OMP_PARALLEL:
1878 return;
1879 default:
1880 break;
1882 break;
1883 case GIMPLE_OMP_CRITICAL:
1884 for (; ctx != NULL; ctx = ctx->outer)
1885 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1886 && (gimple_omp_critical_name (stmt)
1887 == gimple_omp_critical_name (ctx->stmt)))
1889 warning (0, "critical region may not be nested inside a critical "
1890 "region with the same name");
1891 return;
1893 break;
1894 default:
1895 break;
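/* As an illustration of what the warnings above catch: an omp for nested
   directly inside another omp for, with no intervening parallel region,
   is diagnosed as a work-sharing region closely nested inside a
   work-sharing region, e.g.

       #pragma omp for
       for (i = 0; i < n; i++)
         {
           #pragma omp for
           for (j = 0; j < n; j++)
             ...
         }
*/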
1900 /* Helper function for scan_omp.
1902 Callback for walk_tree or operators in walk_gimple_stmt used to
1903 scan for OpenMP directives in TP. */
1905 static tree
1906 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1908 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1909 omp_context *ctx = (omp_context *) wi->info;
1910 tree t = *tp;
1912 switch (TREE_CODE (t))
1914 case VAR_DECL:
1915 case PARM_DECL:
1916 case LABEL_DECL:
1917 case RESULT_DECL:
1918 if (ctx)
1919 *tp = remap_decl (t, &ctx->cb);
1920 break;
1922 default:
1923 if (ctx && TYPE_P (t))
1924 *tp = remap_type (t, &ctx->cb);
1925 else if (!DECL_P (t))
1927 *walk_subtrees = 1;
1928 if (ctx)
1930 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1931 if (tem != TREE_TYPE (t))
1933 if (TREE_CODE (t) == INTEGER_CST)
1934 *tp = build_int_cst_wide (tem,
1935 TREE_INT_CST_LOW (t),
1936 TREE_INT_CST_HIGH (t));
1937 else
1938 TREE_TYPE (t) = tem;
1942 break;
1945 return NULL_TREE;
1949 /* Helper function for scan_omp.
1951 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1952 the current statement in GSI. */
1954 static tree
1955 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1956 struct walk_stmt_info *wi)
1958 gimple stmt = gsi_stmt (*gsi);
1959 omp_context *ctx = (omp_context *) wi->info;
1961 if (gimple_has_location (stmt))
1962 input_location = gimple_location (stmt);
1964 /* Check the OpenMP nesting restrictions. */
1965 if (ctx != NULL)
1967 if (is_gimple_omp (stmt))
1968 check_omp_nesting_restrictions (stmt, ctx);
1969 else if (is_gimple_call (stmt))
1971 tree fndecl = gimple_call_fndecl (stmt);
1972 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1973 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1974 check_omp_nesting_restrictions (stmt, ctx);
1978 *handled_ops_p = true;
1980 switch (gimple_code (stmt))
1982 case GIMPLE_OMP_PARALLEL:
1983 taskreg_nesting_level++;
1984 scan_omp_parallel (gsi, ctx);
1985 taskreg_nesting_level--;
1986 break;
1988 case GIMPLE_OMP_TASK:
1989 taskreg_nesting_level++;
1990 scan_omp_task (gsi, ctx);
1991 taskreg_nesting_level--;
1992 break;
1994 case GIMPLE_OMP_FOR:
1995 scan_omp_for (stmt, ctx);
1996 break;
1998 case GIMPLE_OMP_SECTIONS:
1999 scan_omp_sections (stmt, ctx);
2000 break;
2002 case GIMPLE_OMP_SINGLE:
2003 scan_omp_single (stmt, ctx);
2004 break;
2006 case GIMPLE_OMP_SECTION:
2007 case GIMPLE_OMP_MASTER:
2008 case GIMPLE_OMP_ORDERED:
2009 case GIMPLE_OMP_CRITICAL:
2010 ctx = new_omp_context (stmt, ctx);
2011 scan_omp (gimple_omp_body (stmt), ctx);
2012 break;
2014 case GIMPLE_BIND:
2016 tree var;
2018 *handled_ops_p = false;
2019 if (ctx)
2020 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2021 insert_decl_map (&ctx->cb, var, var);
2023 break;
2024 default:
2025 *handled_ops_p = false;
2026 break;
2029 return NULL_TREE;
2033 /* Scan all the statements starting at the current statement. CTX
2034 contains context information about the OpenMP directives and
2035 clauses found during the scan. */
2037 static void
2038 scan_omp (gimple_seq body, omp_context *ctx)
2040 location_t saved_location;
2041 struct walk_stmt_info wi;
2043 memset (&wi, 0, sizeof (wi));
2044 wi.info = ctx;
2045 wi.want_locations = true;
2047 saved_location = input_location;
2048 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2049 input_location = saved_location;
2052 /* Re-gimplification and code generation routines. */
2054 /* Build a call to GOMP_barrier. */
2056 static tree
2057 build_omp_barrier (void)
2059 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
2062 /* If a context was created for STMT when it was scanned, return it. */
2064 static omp_context *
2065 maybe_lookup_ctx (gimple stmt)
2067 splay_tree_node n;
2068 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2069 return n ? (omp_context *) n->value : NULL;
2073 /* Find the mapping for DECL in CTX or the immediately enclosing
2074 context that has a mapping for DECL.
2076 If CTX is a nested parallel directive, we may have to use the decl
2077 mappings created in CTX's parent context. Suppose that we have the
2078 following parallel nesting (variable UIDs shown for clarity):
2080 iD.1562 = 0;
2081 #omp parallel shared(iD.1562) -> outer parallel
2082 iD.1562 = iD.1562 + 1;
2084 #omp parallel shared (iD.1562) -> inner parallel
2085 iD.1562 = iD.1562 - 1;
2087 Each parallel structure will create a distinct .omp_data_s structure
2088 for copying iD.1562 in/out of the directive:
2090 outer parallel .omp_data_s.1.i -> iD.1562
2091 inner parallel .omp_data_s.2.i -> iD.1562
2093 A shared variable mapping will produce a copy-out operation before
2094 the parallel directive and a copy-in operation after it. So, in
2095 this case we would have:
2097 iD.1562 = 0;
2098 .omp_data_o.1.i = iD.1562;
2099 #omp parallel shared(iD.1562) -> outer parallel
2100 .omp_data_i.1 = &.omp_data_o.1
2101 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2103 .omp_data_o.2.i = iD.1562; -> **
2104 #omp parallel shared(iD.1562) -> inner parallel
2105 .omp_data_i.2 = &.omp_data_o.2
2106 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2109 ** This is a problem. The symbol iD.1562 cannot be referenced
2110 inside the body of the outer parallel region. But since we are
2111 emitting this copy operation while expanding the inner parallel
2112 directive, we need to access the CTX structure of the outer
2113 parallel directive to get the correct mapping:
2115 .omp_data_o.2.i = .omp_data_i.1->i
2117 Since there may be other workshare or parallel directives enclosing
2118 the parallel directive, it may be necessary to walk up the context
2119 parent chain. This is not a problem in general because nested
2120 parallelism happens only rarely. */
2122 static tree
2123 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2125 tree t;
2126 omp_context *up;
2128 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2129 t = maybe_lookup_decl (decl, up);
2131 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2133 return t ? t : decl;
2137 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2138 in outer contexts. */
2140 static tree
2141 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2143 tree t = NULL;
2144 omp_context *up;
2146 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2147 t = maybe_lookup_decl (decl, up);
2149 return t ? t : decl;
2153 /* Construct the initialization value for reduction CLAUSE. */
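/* Illustrative example (not part of the original sources): for
   reduction(+:x) the private copy starts at 0, for reduction(*:x) at 1,
   for reduction(&:x) at ~0, and for reduction(max:x) at the smallest
   value representable in the type (-INF when the floating-point type
   honors infinities), i.e. the identity element of the operator.  */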
2155 tree
2156 omp_reduction_init (tree clause, tree type)
2158 location_t loc = OMP_CLAUSE_LOCATION (clause);
2159 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2161 case PLUS_EXPR:
2162 case MINUS_EXPR:
2163 case BIT_IOR_EXPR:
2164 case BIT_XOR_EXPR:
2165 case TRUTH_OR_EXPR:
2166 case TRUTH_ORIF_EXPR:
2167 case TRUTH_XOR_EXPR:
2168 case NE_EXPR:
2169 return build_zero_cst (type);
2171 case MULT_EXPR:
2172 case TRUTH_AND_EXPR:
2173 case TRUTH_ANDIF_EXPR:
2174 case EQ_EXPR:
2175 return fold_convert_loc (loc, type, integer_one_node);
2177 case BIT_AND_EXPR:
2178 return fold_convert_loc (loc, type, integer_minus_one_node);
2180 case MAX_EXPR:
2181 if (SCALAR_FLOAT_TYPE_P (type))
2183 REAL_VALUE_TYPE max, min;
2184 if (HONOR_INFINITIES (TYPE_MODE (type)))
2186 real_inf (&max);
2187 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2189 else
2190 real_maxval (&min, 1, TYPE_MODE (type));
2191 return build_real (type, min);
2193 else
2195 gcc_assert (INTEGRAL_TYPE_P (type));
2196 return TYPE_MIN_VALUE (type);
2199 case MIN_EXPR:
2200 if (SCALAR_FLOAT_TYPE_P (type))
2202 REAL_VALUE_TYPE max;
2203 if (HONOR_INFINITIES (TYPE_MODE (type)))
2204 real_inf (&max);
2205 else
2206 real_maxval (&max, 0, TYPE_MODE (type));
2207 return build_real (type, max);
2209 else
2211 gcc_assert (INTEGRAL_TYPE_P (type));
2212 return TYPE_MAX_VALUE (type);
2215 default:
2216 gcc_unreachable ();
2220 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2221 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2222 private variables. Initialization statements go in ILIST, while calls
2223 to destructors go in DLIST. */
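/* A small sketch (assumed names, not taken from the sources): for a
   firstprivate int I in a parallel region, the receiver side ends up
   roughly as

     i.priv = .omp_data_i->i;     <- copy-in emitted into ILIST
     ... body uses i.priv ...
     <destructor for i.priv>      <- appended to DLIST (a no-op here)

   where i.priv is the privatized copy visible to the child function.  */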
2225 static void
2226 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2227 omp_context *ctx)
2229 gimple_stmt_iterator diter;
2230 tree c, dtor, copyin_seq, x, ptr;
2231 bool copyin_by_ref = false;
2232 bool lastprivate_firstprivate = false;
2233 int pass;
2235 *dlist = gimple_seq_alloc ();
2236 diter = gsi_start (*dlist);
2237 copyin_seq = NULL;
2239 /* Do all the fixed sized types in the first pass, and the variable sized
2240 types in the second pass. This makes sure that the scalar arguments to
2241 the variable sized types are processed before we use them in the
2242 variable sized operations. */
2243 for (pass = 0; pass < 2; ++pass)
2245 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2247 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2248 tree var, new_var;
2249 bool by_ref;
2250 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2252 switch (c_kind)
2254 case OMP_CLAUSE_PRIVATE:
2255 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2256 continue;
2257 break;
2258 case OMP_CLAUSE_SHARED:
2259 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2261 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2262 continue;
2264 case OMP_CLAUSE_FIRSTPRIVATE:
2265 case OMP_CLAUSE_COPYIN:
2266 case OMP_CLAUSE_REDUCTION:
2267 break;
2268 case OMP_CLAUSE_LASTPRIVATE:
2269 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2271 lastprivate_firstprivate = true;
2272 if (pass != 0)
2273 continue;
2275 break;
2276 default:
2277 continue;
2280 new_var = var = OMP_CLAUSE_DECL (c);
2281 if (c_kind != OMP_CLAUSE_COPYIN)
2282 new_var = lookup_decl (var, ctx);
2284 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2286 if (pass != 0)
2287 continue;
2289 else if (is_variable_sized (var))
2291 /* For variable sized types, we need to allocate the
2292 actual storage here. Call alloca and store the
2293 result in the pointer decl that we created elsewhere. */
2294 if (pass == 0)
2295 continue;
2297 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2299 gimple stmt;
2300 tree tmp;
2302 ptr = DECL_VALUE_EXPR (new_var);
2303 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2304 ptr = TREE_OPERAND (ptr, 0);
2305 gcc_assert (DECL_P (ptr));
2306 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2308 /* void *tmp = __builtin_alloca */
2309 stmt
2310 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2311 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2312 gimple_add_tmp_var (tmp);
2313 gimple_call_set_lhs (stmt, tmp);
2315 gimple_seq_add_stmt (ilist, stmt);
2317 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2318 gimplify_assign (ptr, x, ilist);
2321 else if (is_reference (var))
2323 /* For references that are being privatized for Fortran,
2324 allocate new backing storage for the new pointer
2325 variable. This allows us to avoid changing all the
2326 code that expects a pointer to something that expects
2327 a direct variable. Note that this doesn't apply to
2328 C++, since reference types are disallowed in data
2329 sharing clauses there, except for NRV optimized
2330 return values. */
2331 if (pass == 0)
2332 continue;
2334 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2335 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2337 x = build_receiver_ref (var, false, ctx);
2338 x = build_fold_addr_expr_loc (clause_loc, x);
2340 else if (TREE_CONSTANT (x))
2342 const char *name = NULL;
2343 if (DECL_NAME (var))
2344 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2346 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2347 name);
2348 gimple_add_tmp_var (x);
2349 TREE_ADDRESSABLE (x) = 1;
2350 x = build_fold_addr_expr_loc (clause_loc, x);
2352 else
2354 x = build_call_expr_loc (clause_loc,
2355 built_in_decls[BUILT_IN_ALLOCA], 1, x);
2358 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2359 gimplify_assign (new_var, x, ilist);
2361 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2363 else if (c_kind == OMP_CLAUSE_REDUCTION
2364 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2366 if (pass == 0)
2367 continue;
2369 else if (pass != 0)
2370 continue;
2372 switch (OMP_CLAUSE_CODE (c))
2374 case OMP_CLAUSE_SHARED:
2375 /* Shared global vars are just accessed directly. */
2376 if (is_global_var (new_var))
2377 break;
2378 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2379 needs to be delayed until after fixup_child_record_type so
2380 that we get the correct type during the dereference. */
2381 by_ref = use_pointer_for_field (var, ctx);
2382 x = build_receiver_ref (var, by_ref, ctx);
2383 SET_DECL_VALUE_EXPR (new_var, x);
2384 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2386 /* ??? If VAR is not passed by reference, and the variable
2387 hasn't been initialized yet, then we'll get a warning for
2388 the store into the omp_data_s structure. Ideally, we'd be
2389 able to notice this and not store anything at all, but
2390 we're generating code too early. Suppress the warning. */
2391 if (!by_ref)
2392 TREE_NO_WARNING (var) = 1;
2393 break;
2395 case OMP_CLAUSE_LASTPRIVATE:
2396 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2397 break;
2398 /* FALLTHRU */
2400 case OMP_CLAUSE_PRIVATE:
2401 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2402 x = build_outer_var_ref (var, ctx);
2403 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2405 if (is_task_ctx (ctx))
2406 x = build_receiver_ref (var, false, ctx);
2407 else
2408 x = build_outer_var_ref (var, ctx);
2410 else
2411 x = NULL;
2412 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2413 if (x)
2414 gimplify_and_add (x, ilist);
2415 /* FALLTHRU */
2417 do_dtor:
2418 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2419 if (x)
2421 gimple_seq tseq = NULL;
2423 dtor = x;
2424 gimplify_stmt (&dtor, &tseq);
2425 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2427 break;
2429 case OMP_CLAUSE_FIRSTPRIVATE:
2430 if (is_task_ctx (ctx))
2432 if (is_reference (var) || is_variable_sized (var))
2433 goto do_dtor;
2434 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2435 ctx))
2436 || use_pointer_for_field (var, NULL))
2438 x = build_receiver_ref (var, false, ctx);
2439 SET_DECL_VALUE_EXPR (new_var, x);
2440 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2441 goto do_dtor;
2444 x = build_outer_var_ref (var, ctx);
2445 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2446 gimplify_and_add (x, ilist);
2447 goto do_dtor;
2448 break;
2450 case OMP_CLAUSE_COPYIN:
2451 by_ref = use_pointer_for_field (var, NULL);
2452 x = build_receiver_ref (var, by_ref, ctx);
2453 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2454 append_to_statement_list (x, &copyin_seq);
2455 copyin_by_ref |= by_ref;
2456 break;
2458 case OMP_CLAUSE_REDUCTION:
2459 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2461 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2462 x = build_outer_var_ref (var, ctx);
2464 if (is_reference (var))
2465 x = build_fold_addr_expr_loc (clause_loc, x);
2466 SET_DECL_VALUE_EXPR (placeholder, x);
2467 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2468 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2469 gimple_seq_add_seq (ilist,
2470 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2471 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2472 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2474 else
2476 x = omp_reduction_init (c, TREE_TYPE (new_var));
2477 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2478 gimplify_assign (new_var, x, ilist);
2480 break;
2482 default:
2483 gcc_unreachable ();
2488 /* The copyin sequence is not to be executed by the main thread, since
2489 that would result in self-copies. A self-copy may not be observable
2490 for scalars, but it certainly is for a C++ operator=. */
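/* I.e. the sequence built just below amounts to (illustrative):

     if (__builtin_omp_get_thread_num () != 0)
       <copyin assignments>                                              */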
2491 if (copyin_seq)
2493 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2494 x = build2 (NE_EXPR, boolean_type_node, x,
2495 build_int_cst (TREE_TYPE (x), 0));
2496 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2497 gimplify_and_add (x, ilist);
2500 /* If any copyin variable is passed by reference, we must ensure the
2501 master thread doesn't modify it before it is copied over in all
2502 threads. Similarly for variables in both firstprivate and
2503 lastprivate clauses we need to ensure the lastprivate copying
2504 happens after firstprivate copying in all threads. */
2505 if (copyin_by_ref || lastprivate_firstprivate)
2506 gimplify_and_add (build_omp_barrier (), ilist);
2510 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2511 both parallel and workshare constructs. PREDICATE may be NULL if it's
2512 always true. */
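/* Sketch of the emitted pattern (illustrative only): with a non-NULL
   PREDICATE, e.g. "V == LAST_ITER", the copy-back is guarded:

     if (V == LAST_ITER) goto lab_true; else goto lab_done;
   lab_true:
     orig_var = priv_var;        <- one assignment per lastprivate decl
   lab_done:

   With a NULL PREDICATE the assignments are emitted unconditionally.  */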
2514 static void
2515 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2516 omp_context *ctx)
2518 tree x, c, label = NULL;
2519 bool par_clauses = false;
2521 /* Early exit if there are no lastprivate clauses. */
2522 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2523 if (clauses == NULL)
2525 /* If this was a workshare clause, see if it had been combined
2526 with its parallel. In that case, look for the clauses on the
2527 parallel statement itself. */
2528 if (is_parallel_ctx (ctx))
2529 return;
2531 ctx = ctx->outer;
2532 if (ctx == NULL || !is_parallel_ctx (ctx))
2533 return;
2535 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2536 OMP_CLAUSE_LASTPRIVATE);
2537 if (clauses == NULL)
2538 return;
2539 par_clauses = true;
2542 if (predicate)
2544 gimple stmt;
2545 tree label_true, arm1, arm2;
2547 label = create_artificial_label (UNKNOWN_LOCATION);
2548 label_true = create_artificial_label (UNKNOWN_LOCATION);
2549 arm1 = TREE_OPERAND (predicate, 0);
2550 arm2 = TREE_OPERAND (predicate, 1);
2551 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2552 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2553 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2554 label_true, label);
2555 gimple_seq_add_stmt (stmt_list, stmt);
2556 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2559 for (c = clauses; c ;)
2561 tree var, new_var;
2562 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2564 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2566 var = OMP_CLAUSE_DECL (c);
2567 new_var = lookup_decl (var, ctx);
2569 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2571 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2572 gimple_seq_add_seq (stmt_list,
2573 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2575 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2577 x = build_outer_var_ref (var, ctx);
2578 if (is_reference (var))
2579 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2580 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2581 gimplify_and_add (x, stmt_list);
2583 c = OMP_CLAUSE_CHAIN (c);
2584 if (c == NULL && !par_clauses)
2586 /* If this was a workshare clause, see if it had been combined
2587 with its parallel. In that case, continue looking for the
2588 clauses also on the parallel statement itself. */
2589 if (is_parallel_ctx (ctx))
2590 break;
2592 ctx = ctx->outer;
2593 if (ctx == NULL || !is_parallel_ctx (ctx))
2594 break;
2596 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2597 OMP_CLAUSE_LASTPRIVATE);
2598 par_clauses = true;
2602 if (label)
2603 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2607 /* Generate code to implement the REDUCTION clauses. */
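/* Two strategies are used below (sketched, not verbatim output): a
   single scalar reduction(+:s) clause becomes one atomic update,

     #pragma omp atomic
       s_orig += s_priv;

   while several clauses, or placeholder (array/user-defined) reductions,
   merge the partial results between GOMP_atomic_start () and
   GOMP_atomic_end () calls.  */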
2609 static void
2610 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2612 gimple_seq sub_seq = NULL;
2613 gimple stmt;
2614 tree x, c;
2615 int count = 0;
2617 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2618 update in that case, otherwise use a lock. */
2619 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2620 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2622 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2624 /* Never use OMP_ATOMIC for array reductions. */
2625 count = -1;
2626 break;
2628 count++;
2631 if (count == 0)
2632 return;
2634 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2636 tree var, ref, new_var;
2637 enum tree_code code;
2638 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2640 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2641 continue;
2643 var = OMP_CLAUSE_DECL (c);
2644 new_var = lookup_decl (var, ctx);
2645 if (is_reference (var))
2646 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2647 ref = build_outer_var_ref (var, ctx);
2648 code = OMP_CLAUSE_REDUCTION_CODE (c);
2650 /* reduction(-:var) sums up the partial results, so it acts
2651 identically to reduction(+:var). */
2652 if (code == MINUS_EXPR)
2653 code = PLUS_EXPR;
2655 if (count == 1)
2657 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2659 addr = save_expr (addr);
2660 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2661 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2662 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2663 gimplify_and_add (x, stmt_seqp);
2664 return;
2667 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2669 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2671 if (is_reference (var))
2672 ref = build_fold_addr_expr_loc (clause_loc, ref);
2673 SET_DECL_VALUE_EXPR (placeholder, ref);
2674 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2675 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2676 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2677 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2678 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2680 else
2682 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2683 ref = build_outer_var_ref (var, ctx);
2684 gimplify_assign (ref, x, &sub_seq);
2688 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2689 gimple_seq_add_stmt (stmt_seqp, stmt);
2691 gimple_seq_add_seq (stmt_seqp, sub_seq);
2693 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2694 gimple_seq_add_stmt (stmt_seqp, stmt);
2698 /* Generate code to implement the COPYPRIVATE clauses. */
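/* Rough shape of the generated code (field names illustrative): the
   thread that executed the single region publishes its value (or its
   address) via SLIST, and every thread then copies it back via RLIST:

     .omp_data_o.x = x;          <- sender side (SLIST)
     x = .omp_data_i->x;         <- receiver side (RLIST)                */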
2700 static void
2701 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2702 omp_context *ctx)
2704 tree c;
2706 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2708 tree var, new_var, ref, x;
2709 bool by_ref;
2710 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2712 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2713 continue;
2715 var = OMP_CLAUSE_DECL (c);
2716 by_ref = use_pointer_for_field (var, NULL);
2718 ref = build_sender_ref (var, ctx);
2719 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2720 if (by_ref)
2722 x = build_fold_addr_expr_loc (clause_loc, new_var);
2723 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2725 gimplify_assign (ref, x, slist);
2727 ref = build_receiver_ref (var, false, ctx);
2728 if (by_ref)
2730 ref = fold_convert_loc (clause_loc,
2731 build_pointer_type (TREE_TYPE (new_var)),
2732 ref);
2733 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2735 if (is_reference (var))
2737 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2738 ref = build_simple_mem_ref_loc (clause_loc, ref);
2739 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2741 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2742 gimplify_and_add (x, rlist);
2747 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2748 and REDUCTION from the sender (aka parent) side. */
2750 static void
2751 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2752 omp_context *ctx)
2754 tree c;
2756 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2758 tree val, ref, x, var;
2759 bool by_ref, do_in = false, do_out = false;
2760 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2762 switch (OMP_CLAUSE_CODE (c))
2764 case OMP_CLAUSE_PRIVATE:
2765 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2766 break;
2767 continue;
2768 case OMP_CLAUSE_FIRSTPRIVATE:
2769 case OMP_CLAUSE_COPYIN:
2770 case OMP_CLAUSE_LASTPRIVATE:
2771 case OMP_CLAUSE_REDUCTION:
2772 break;
2773 default:
2774 continue;
2777 val = OMP_CLAUSE_DECL (c);
2778 var = lookup_decl_in_outer_ctx (val, ctx);
2780 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2781 && is_global_var (var))
2782 continue;
2783 if (is_variable_sized (val))
2784 continue;
2785 by_ref = use_pointer_for_field (val, NULL);
2787 switch (OMP_CLAUSE_CODE (c))
2789 case OMP_CLAUSE_PRIVATE:
2790 case OMP_CLAUSE_FIRSTPRIVATE:
2791 case OMP_CLAUSE_COPYIN:
2792 do_in = true;
2793 break;
2795 case OMP_CLAUSE_LASTPRIVATE:
2796 if (by_ref || is_reference (val))
2798 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2799 continue;
2800 do_in = true;
2802 else
2804 do_out = true;
2805 if (lang_hooks.decls.omp_private_outer_ref (val))
2806 do_in = true;
2808 break;
2810 case OMP_CLAUSE_REDUCTION:
2811 do_in = true;
2812 do_out = !(by_ref || is_reference (val));
2813 break;
2815 default:
2816 gcc_unreachable ();
2819 if (do_in)
2821 ref = build_sender_ref (val, ctx);
2822 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2823 gimplify_assign (ref, x, ilist);
2824 if (is_task_ctx (ctx))
2825 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2828 if (do_out)
2830 ref = build_sender_ref (val, ctx);
2831 gimplify_assign (var, ref, olist);
2836 /* Generate code to implement SHARED from the sender (aka parent)
2837 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2838 list things that got automatically shared. */
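/* Illustrative example: for an automatically shared local X the parent
   thread either copies the value or passes its address, depending on
   use_pointer_for_field, e.g.

     .omp_data_o.x = x;     or     .omp_data_o.x = &x;

   and, for writable non-reference variables, copies the value back out
   of the record after the region (the OLIST sequence).  */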
2840 static void
2841 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2843 tree var, ovar, nvar, f, x, record_type;
2845 if (ctx->record_type == NULL)
2846 return;
2848 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2849 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
2851 ovar = DECL_ABSTRACT_ORIGIN (f);
2852 nvar = maybe_lookup_decl (ovar, ctx);
2853 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2854 continue;
2856 /* If CTX is a nested parallel directive, find the immediately
2857 enclosing parallel or workshare construct that contains a
2858 mapping for OVAR. */
2859 var = lookup_decl_in_outer_ctx (ovar, ctx);
2861 if (use_pointer_for_field (ovar, ctx))
2863 x = build_sender_ref (ovar, ctx);
2864 var = build_fold_addr_expr (var);
2865 gimplify_assign (x, var, ilist);
2867 else
2869 x = build_sender_ref (ovar, ctx);
2870 gimplify_assign (x, var, ilist);
2872 if (!TREE_READONLY (var)
2873 /* We don't need to receive a new reference to a result
2874 or parm decl. In fact we may not store to it, as that would
2875 invalidate any pending RSO (return slot optimization) and generate
2876 wrong gimple during inlining. */
2877 && !((TREE_CODE (var) == RESULT_DECL
2878 || TREE_CODE (var) == PARM_DECL)
2879 && DECL_BY_REFERENCE (var)))
2881 x = build_sender_ref (ovar, ctx);
2882 gimplify_assign (var, x, olist);
2889 /* A convenience function to build an empty GIMPLE_COND with just the
2890 condition. */
2892 static gimple
2893 gimple_build_cond_empty (tree cond)
2895 enum tree_code pred_code;
2896 tree lhs, rhs;
2898 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2899 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2903 /* Build the function calls to GOMP_parallel_start etc to actually
2904 generate the parallel operation. REGION is the parallel region
2905 being expanded. BB is the block where to insert the code. WS_ARGS
2906 will be set if this is a call to a combined parallel+workshare
2907 construct, it contains the list of additional arguments needed by
2908 the workshare construct. */
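/* Schematically, the calls emitted below are (illustrative only):

     GOMP_parallel_start (child_fn, &.omp_data_o, nthreads [, ws_args]);
     child_fn (&.omp_data_o);
     GOMP_parallel_end ();

   where the start function may instead be one of the combined
   GOMP_parallel_loop_*_start or GOMP_parallel_sections_start entry
   points for combined parallel+workshare regions.  */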
2910 static void
2911 expand_parallel_call (struct omp_region *region, basic_block bb,
2912 gimple entry_stmt, VEC(tree,gc) *ws_args)
2914 tree t, t1, t2, val, cond, c, clauses;
2915 gimple_stmt_iterator gsi;
2916 gimple stmt;
2917 int start_ix;
2918 location_t clause_loc;
2919 VEC(tree,gc) *args;
2921 clauses = gimple_omp_parallel_clauses (entry_stmt);
2923 /* Determine what flavor of GOMP_parallel_start we will be
2924 emitting. */
2925 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2926 if (is_combined_parallel (region))
2928 switch (region->inner->type)
2930 case GIMPLE_OMP_FOR:
2931 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2932 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2933 + (region->inner->sched_kind
2934 == OMP_CLAUSE_SCHEDULE_RUNTIME
2935 ? 3 : region->inner->sched_kind);
2936 break;
2937 case GIMPLE_OMP_SECTIONS:
2938 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2939 break;
2940 default:
2941 gcc_unreachable ();
2945 /* By default, the value of NUM_THREADS is zero (selected at run time)
2946 and there is no conditional. */
2947 cond = NULL_TREE;
2948 val = build_int_cst (unsigned_type_node, 0);
2950 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2951 if (c)
2952 cond = OMP_CLAUSE_IF_EXPR (c);
2954 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2955 if (c)
2957 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2958 clause_loc = OMP_CLAUSE_LOCATION (c);
2960 else
2961 clause_loc = gimple_location (entry_stmt);
2963 /* Ensure 'val' is of the correct type. */
2964 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
2966 /* If we found the clause 'if (cond)', build either
2967 (cond != 0) or (cond ? val : 1u). */
2968 if (cond)
2970 gimple_stmt_iterator gsi;
2972 cond = gimple_boolify (cond);
2974 if (integer_zerop (val))
2975 val = fold_build2_loc (clause_loc,
2976 EQ_EXPR, unsigned_type_node, cond,
2977 build_int_cst (TREE_TYPE (cond), 0));
2978 else
2980 basic_block cond_bb, then_bb, else_bb;
2981 edge e, e_then, e_else;
2982 tree tmp_then, tmp_else, tmp_join, tmp_var;
2984 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2985 if (gimple_in_ssa_p (cfun))
2987 tmp_then = make_ssa_name (tmp_var, NULL);
2988 tmp_else = make_ssa_name (tmp_var, NULL);
2989 tmp_join = make_ssa_name (tmp_var, NULL);
2991 else
2993 tmp_then = tmp_var;
2994 tmp_else = tmp_var;
2995 tmp_join = tmp_var;
2998 e = split_block (bb, NULL);
2999 cond_bb = e->src;
3000 bb = e->dest;
3001 remove_edge (e);
3003 then_bb = create_empty_bb (cond_bb);
3004 else_bb = create_empty_bb (then_bb);
3005 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3006 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3008 stmt = gimple_build_cond_empty (cond);
3009 gsi = gsi_start_bb (cond_bb);
3010 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3012 gsi = gsi_start_bb (then_bb);
3013 stmt = gimple_build_assign (tmp_then, val);
3014 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3016 gsi = gsi_start_bb (else_bb);
3017 stmt = gimple_build_assign
3018 (tmp_else, build_int_cst (unsigned_type_node, 1));
3019 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3021 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3022 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3023 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3024 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3026 if (gimple_in_ssa_p (cfun))
3028 gimple phi = create_phi_node (tmp_join, bb);
3029 SSA_NAME_DEF_STMT (tmp_join) = phi;
3030 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3031 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3034 val = tmp_join;
3037 gsi = gsi_start_bb (bb);
3038 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3039 false, GSI_CONTINUE_LINKING);
3042 gsi = gsi_last_bb (bb);
3043 t = gimple_omp_parallel_data_arg (entry_stmt);
3044 if (t == NULL)
3045 t1 = null_pointer_node;
3046 else
3047 t1 = build_fold_addr_expr (t);
3048 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3050 args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
3051 VEC_quick_push (tree, args, t2);
3052 VEC_quick_push (tree, args, t1);
3053 VEC_quick_push (tree, args, val);
3054 VEC_splice (tree, args, ws_args);
3056 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3057 built_in_decls[start_ix], args);
3059 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3060 false, GSI_CONTINUE_LINKING);
3062 t = gimple_omp_parallel_data_arg (entry_stmt);
3063 if (t == NULL)
3064 t = null_pointer_node;
3065 else
3066 t = build_fold_addr_expr (t);
3067 t = build_call_expr_loc (gimple_location (entry_stmt),
3068 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3069 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3070 false, GSI_CONTINUE_LINKING);
3072 t = build_call_expr_loc (gimple_location (entry_stmt),
3073 built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3074 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3075 false, GSI_CONTINUE_LINKING);
3079 /* Build the function call to GOMP_task to actually
3080 generate the task operation. BB is the block where to insert the code. */
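/* Schematically, the call built below is (illustrative):

     GOMP_task (child_fn, &.omp_data_o, copy_fn, arg_size, arg_align,
                if_cond, flags);

   where FLAGS currently only encodes the untied clause.  */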
3082 static void
3083 expand_task_call (basic_block bb, gimple entry_stmt)
3085 tree t, t1, t2, t3, flags, cond, c, clauses;
3086 gimple_stmt_iterator gsi;
3087 location_t loc = gimple_location (entry_stmt);
3089 clauses = gimple_omp_task_clauses (entry_stmt);
3091 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3092 if (c)
3093 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3094 else
3095 cond = boolean_true_node;
3097 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3098 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3100 gsi = gsi_last_bb (bb);
3101 t = gimple_omp_task_data_arg (entry_stmt);
3102 if (t == NULL)
3103 t2 = null_pointer_node;
3104 else
3105 t2 = build_fold_addr_expr_loc (loc, t);
3106 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3107 t = gimple_omp_task_copy_fn (entry_stmt);
3108 if (t == NULL)
3109 t3 = null_pointer_node;
3110 else
3111 t3 = build_fold_addr_expr_loc (loc, t);
3113 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3114 gimple_omp_task_arg_size (entry_stmt),
3115 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3117 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3118 false, GSI_CONTINUE_LINKING);
3122 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3123 catch handler and return it. This prevents programs from violating the
3124 structured block semantics with throws. */
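/* Conceptually the result is (illustrative):

     try { BODY } catch (...) { MUST_NOT_THROW; }

   so an exception escaping the structured block terminates the program
   instead of unwinding out of the OpenMP region.  */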
3126 static gimple_seq
3127 maybe_catch_exception (gimple_seq body)
3129 gimple g;
3130 tree decl;
3132 if (!flag_exceptions)
3133 return body;
3135 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3136 decl = lang_hooks.eh_protect_cleanup_actions ();
3137 else
3138 decl = built_in_decls[BUILT_IN_TRAP];
3140 g = gimple_build_eh_must_not_throw (decl);
3141 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3142 GIMPLE_TRY_CATCH);
3144 return gimple_seq_alloc_with_stmt (g);
3147 /* Chain all the DECLs in V by their DECL_CHAIN fields. */
3149 static tree
3150 vec2chain (VEC(tree,gc) *v)
3152 tree chain = NULL_TREE, t;
3153 unsigned ix;
3155 FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
3157 DECL_CHAIN (t) = chain;
3158 chain = t;
3161 return chain;
3165 /* Remove barriers in REGION->EXIT's block. Note that this is only
3166 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3167 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3168 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
3169 removed. */
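/* For example (illustrative): in

     #pragma omp parallel
       {
         #pragma omp for
           ...
       }

   the for's implicit barrier immediately precedes the parallel's own
   implicit barrier, so the former is redundant and can be marked
   nowait.  */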
3171 static void
3172 remove_exit_barrier (struct omp_region *region)
3174 gimple_stmt_iterator gsi;
3175 basic_block exit_bb;
3176 edge_iterator ei;
3177 edge e;
3178 gimple stmt;
3179 int any_addressable_vars = -1;
3181 exit_bb = region->exit;
3183 /* If the parallel region doesn't return, we don't have REGION->EXIT
3184 block at all. */
3185 if (! exit_bb)
3186 return;
3188 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3189 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3190 statements that can appear in between are extremely limited -- no
3191 memory operations at all. Here, we allow nothing at all, so the
3192 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3193 gsi = gsi_last_bb (exit_bb);
3194 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3195 gsi_prev (&gsi);
3196 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3197 return;
3199 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3201 gsi = gsi_last_bb (e->src);
3202 if (gsi_end_p (gsi))
3203 continue;
3204 stmt = gsi_stmt (gsi);
3205 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3206 && !gimple_omp_return_nowait_p (stmt))
3208 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3209 in many cases. If there could be tasks queued, the barrier
3210 might be needed to let the tasks run before some local
3211 variable of the parallel that the task uses as shared
3212 runs out of scope. The task can be spawned either
3213 from within the current function (this would be easy to check)
3214 or from some function it calls and gets passed an address
3215 of such a variable. */
3216 if (any_addressable_vars < 0)
3218 gimple parallel_stmt = last_stmt (region->entry);
3219 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3220 tree local_decls, block, decl;
3221 unsigned ix;
3223 any_addressable_vars = 0;
3224 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3225 if (TREE_ADDRESSABLE (decl))
3227 any_addressable_vars = 1;
3228 break;
3230 for (block = gimple_block (stmt);
3231 !any_addressable_vars
3232 && block
3233 && TREE_CODE (block) == BLOCK;
3234 block = BLOCK_SUPERCONTEXT (block))
3236 for (local_decls = BLOCK_VARS (block);
3237 local_decls;
3238 local_decls = DECL_CHAIN (local_decls))
3239 if (TREE_ADDRESSABLE (local_decls))
3241 any_addressable_vars = 1;
3242 break;
3244 if (block == gimple_block (parallel_stmt))
3245 break;
3248 if (!any_addressable_vars)
3249 gimple_omp_return_set_nowait (stmt);
3254 static void
3255 remove_exit_barriers (struct omp_region *region)
3257 if (region->type == GIMPLE_OMP_PARALLEL)
3258 remove_exit_barrier (region);
3260 if (region->inner)
3262 region = region->inner;
3263 remove_exit_barriers (region);
3264 while (region->next)
3266 region = region->next;
3267 remove_exit_barriers (region);
3272 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3273 calls. These can't be declared as const functions, but
3274 within one parallel body they are constant, so they can be
3275 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3276 which are declared const. Similarly for a task body, except
3277 that in an untied task omp_get_thread_num () can change at any task
3278 scheduling point. */
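/* E.g. a call to the external omp_get_thread_num () inside a parallel
   body is redirected to the const __builtin_omp_get_thread_num (), so
   later passes can CSE repeated queries of the thread number.  */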
3280 static void
3281 optimize_omp_library_calls (gimple entry_stmt)
3283 basic_block bb;
3284 gimple_stmt_iterator gsi;
3285 tree thr_num_id
3286 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3287 tree num_thr_id
3288 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3289 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3290 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3291 OMP_CLAUSE_UNTIED) != NULL);
3293 FOR_EACH_BB (bb)
3294 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3296 gimple call = gsi_stmt (gsi);
3297 tree decl;
3299 if (is_gimple_call (call)
3300 && (decl = gimple_call_fndecl (call))
3301 && DECL_EXTERNAL (decl)
3302 && TREE_PUBLIC (decl)
3303 && DECL_INITIAL (decl) == NULL)
3305 tree built_in;
3307 if (DECL_NAME (decl) == thr_num_id)
3309 /* In #pragma omp task untied omp_get_thread_num () can change
3310 during the execution of the task region. */
3311 if (untied_task)
3312 continue;
3313 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3315 else if (DECL_NAME (decl) == num_thr_id)
3316 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3317 else
3318 continue;
3320 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3321 || gimple_call_num_args (call) != 0)
3322 continue;
3324 if (flag_exceptions && !TREE_NOTHROW (decl))
3325 continue;
3327 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3328 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3329 TREE_TYPE (TREE_TYPE (built_in))))
3330 continue;
3332 gimple_call_set_fndecl (call, built_in);
3337 /* Expand the OpenMP parallel or task directive starting at REGION. */
3339 static void
3340 expand_omp_taskreg (struct omp_region *region)
3342 basic_block entry_bb, exit_bb, new_bb;
3343 struct function *child_cfun;
3344 tree child_fn, block, t;
3345 tree save_current;
3346 gimple_stmt_iterator gsi;
3347 gimple entry_stmt, stmt;
3348 edge e;
3349 VEC(tree,gc) *ws_args;
3351 entry_stmt = last_stmt (region->entry);
3352 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3353 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3354 /* If this function has been already instrumented, make sure
3355 the child function isn't instrumented again. */
3356 child_cfun->after_tree_profile = cfun->after_tree_profile;
3358 entry_bb = region->entry;
3359 exit_bb = region->exit;
3361 if (is_combined_parallel (region))
3362 ws_args = region->ws_args;
3363 else
3364 ws_args = NULL;
3366 if (child_cfun->cfg)
3368 /* Due to inlining, it may happen that we have already outlined
3369 the region, in which case all we need to do is make the
3370 sub-graph unreachable and emit the parallel call. */
3371 edge entry_succ_e, exit_succ_e;
3372 gimple_stmt_iterator gsi;
3374 entry_succ_e = single_succ_edge (entry_bb);
3376 gsi = gsi_last_bb (entry_bb);
3377 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3378 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3379 gsi_remove (&gsi, true);
3381 new_bb = entry_bb;
3382 if (exit_bb)
3384 exit_succ_e = single_succ_edge (exit_bb);
3385 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3387 remove_edge_and_dominated_blocks (entry_succ_e);
3389 else
3391 unsigned srcidx, dstidx, num;
3393 /* If the parallel region needs data sent from the parent
3394 function, then the very first statement (except possible
3395 tree profile counter updates) of the parallel body
3396 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3397 &.OMP_DATA_O is passed as an argument to the child function,
3398 we need to replace it with the argument as seen by the child
3399 function.
3401 In most cases, this will end up being the identity assignment
3402 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3403 a function call that has been inlined, the original PARM_DECL
3404 .OMP_DATA_I may have been converted into a different local
3405 variable. In that case, we need to keep the assignment. */
3406 if (gimple_omp_taskreg_data_arg (entry_stmt))
3408 basic_block entry_succ_bb = single_succ (entry_bb);
3409 gimple_stmt_iterator gsi;
3410 tree arg, narg;
3411 gimple parcopy_stmt = NULL;
3413 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3415 gimple stmt;
3417 gcc_assert (!gsi_end_p (gsi));
3418 stmt = gsi_stmt (gsi);
3419 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3420 continue;
3422 if (gimple_num_ops (stmt) == 2)
3424 tree arg = gimple_assign_rhs1 (stmt);
3426 /* We're ignoring the subcode because we're
3427 effectively doing a STRIP_NOPS. */
3429 if (TREE_CODE (arg) == ADDR_EXPR
3430 && TREE_OPERAND (arg, 0)
3431 == gimple_omp_taskreg_data_arg (entry_stmt))
3433 parcopy_stmt = stmt;
3434 break;
3439 gcc_assert (parcopy_stmt != NULL);
3440 arg = DECL_ARGUMENTS (child_fn);
3442 if (!gimple_in_ssa_p (cfun))
3444 if (gimple_assign_lhs (parcopy_stmt) == arg)
3445 gsi_remove (&gsi, true);
3446 else
3448 /* ?? Is setting the subcode really necessary ?? */
3449 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3450 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3453 else
3455 /* If we are in ssa form, we must load the value from the default
3456 definition of the argument. That should not be defined now,
3457 since the argument is not used uninitialized. */
3458 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3459 narg = make_ssa_name (arg, gimple_build_nop ());
3460 set_default_def (arg, narg);
3461 /* ?? Is setting the subcode really necessary ?? */
3462 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3463 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3464 update_stmt (parcopy_stmt);
3468 /* Declare local variables needed in CHILD_CFUN. */
3469 block = DECL_INITIAL (child_fn);
3470 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3471 /* The gimplifier could record temporaries in parallel/task block
3472 rather than in containing function's local_decls chain,
3473 which would mean cgraph missed finalizing them. Do it now. */
3474 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3475 if (TREE_CODE (t) == VAR_DECL
3476 && TREE_STATIC (t)
3477 && !DECL_EXTERNAL (t))
3478 varpool_finalize_decl (t);
3479 DECL_SAVED_TREE (child_fn) = NULL;
3480 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3481 TREE_USED (block) = 1;
3483 /* Reset DECL_CONTEXT on function arguments. */
3484 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3485 DECL_CONTEXT (t) = child_fn;
3487 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3488 so that it can be moved to the child function. */
3489 gsi = gsi_last_bb (entry_bb);
3490 stmt = gsi_stmt (gsi);
3491 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3492 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3493 gsi_remove (&gsi, true);
3494 e = split_block (entry_bb, stmt);
3495 entry_bb = e->dest;
3496 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3498 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3499 if (exit_bb)
3501 gsi = gsi_last_bb (exit_bb);
3502 gcc_assert (!gsi_end_p (gsi)
3503 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3504 stmt = gimple_build_return (NULL);
3505 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3506 gsi_remove (&gsi, true);
3509 /* Move the parallel region into CHILD_CFUN. */
3511 if (gimple_in_ssa_p (cfun))
3513 push_cfun (child_cfun);
3514 init_tree_ssa (child_cfun);
3515 init_ssa_operands ();
3516 cfun->gimple_df->in_ssa_p = true;
3517 pop_cfun ();
3518 block = NULL_TREE;
3520 else
3521 block = gimple_block (entry_stmt);
3523 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3524 if (exit_bb)
3525 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3527 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3528 num = VEC_length (tree, child_cfun->local_decls);
3529 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3531 t = VEC_index (tree, child_cfun->local_decls, srcidx);
3532 if (DECL_CONTEXT (t) == cfun->decl)
3533 continue;
3534 if (srcidx != dstidx)
3535 VEC_replace (tree, child_cfun->local_decls, dstidx, t);
3536 dstidx++;
3538 if (dstidx != num)
3539 VEC_truncate (tree, child_cfun->local_decls, dstidx);
3541 /* Inform the callgraph about the new function. */
3542 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3543 = cfun->curr_properties;
3544 cgraph_add_new_function (child_fn, true);
3546 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3547 fixed in a following pass. */
3548 push_cfun (child_cfun);
3549 save_current = current_function_decl;
3550 current_function_decl = child_fn;
3551 if (optimize)
3552 optimize_omp_library_calls (entry_stmt);
3553 rebuild_cgraph_edges ();
3555 /* Some EH regions might become dead, see PR34608. If
3556 pass_cleanup_cfg isn't the first pass to happen with the
3557 new child, these dead EH edges might cause problems.
3558 Clean them up now. */
3559 if (flag_exceptions)
3561 basic_block bb;
3562 bool changed = false;
3564 FOR_EACH_BB (bb)
3565 changed |= gimple_purge_dead_eh_edges (bb);
3566 if (changed)
3567 cleanup_tree_cfg ();
3569 if (gimple_in_ssa_p (cfun))
3570 update_ssa (TODO_update_ssa);
3571 current_function_decl = save_current;
3572 pop_cfun ();
3575 /* Emit a library call to launch the children threads. */
3576 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3577 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3578 else
3579 expand_task_call (new_bb, entry_stmt);
3580 update_ssa (TODO_update_ssa_only_virtuals);
3584 /* A subroutine of expand_omp_for. Generate code for a parallel
3585 loop with any schedule. Given parameters:
3587 for (V = N1; V cond N2; V += STEP) BODY;
3589 where COND is "<" or ">", we generate pseudocode
3591 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3592 if (more) goto L0; else goto L3;
3594 V = istart0;
3595 iend = iend0;
3597 BODY;
3598 V += STEP;
3599 if (V cond iend) goto L1; else goto L2;
3601 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3604 If this is a combined omp parallel loop, instead of the call to
3605 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3607 For collapsed loops, given parameters:
3608 collapse(3)
3609 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3610 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3611 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3612 BODY;
3614 we generate pseudocode
3616 if (cond3 is <)
3617 adj = STEP3 - 1;
3618 else
3619 adj = STEP3 + 1;
3620 count3 = (adj + N32 - N31) / STEP3;
3621 if (cond2 is <)
3622 adj = STEP2 - 1;
3623 else
3624 adj = STEP2 + 1;
3625 count2 = (adj + N22 - N21) / STEP2;
3626 if (cond1 is <)
3627 adj = STEP1 - 1;
3628 else
3629 adj = STEP1 + 1;
3630 count1 = (adj + N12 - N11) / STEP1;
3631 count = count1 * count2 * count3;
3632 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3633 if (more) goto L0; else goto L3;
3635 V = istart0;
3636 T = V;
3637 V3 = N31 + (T % count3) * STEP3;
3638 T = T / count3;
3639 V2 = N21 + (T % count2) * STEP2;
3640 T = T / count2;
3641 V1 = N11 + T * STEP1;
3642 iend = iend0;
3644 BODY;
3645 V += 1;
3646 if (V < iend) goto L10; else goto L2;
3647 L10:
3648 V3 += STEP3;
3649 if (V3 cond3 N32) goto L1; else goto L11;
3650 L11:
3651 V3 = N31;
3652 V2 += STEP2;
3653 if (V2 cond2 N22) goto L1; else goto L12;
3654 L12:
3655 V2 = N21;
3656 V1 += STEP1;
3657 goto L1;
3659 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3664 static void
3665 expand_omp_for_generic (struct omp_region *region,
3666 struct omp_for_data *fd,
3667 enum built_in_function start_fn,
3668 enum built_in_function next_fn)
3670 tree type, istart0, iend0, iend;
3671 tree t, vmain, vback, bias = NULL_TREE;
3672 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3673 basic_block l2_bb = NULL, l3_bb = NULL;
3674 gimple_stmt_iterator gsi;
3675 gimple stmt;
3676 bool in_combined_parallel = is_combined_parallel (region);
3677 bool broken_loop = region->cont == NULL;
3678 edge e, ne;
3679 tree *counts = NULL;
3680 int i;
3682 gcc_assert (!broken_loop || !in_combined_parallel);
3683 gcc_assert (fd->iter_type == long_integer_type_node
3684 || !in_combined_parallel);
3686 type = TREE_TYPE (fd->loop.v);
3687 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3688 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3689 TREE_ADDRESSABLE (istart0) = 1;
3690 TREE_ADDRESSABLE (iend0) = 1;
3691 if (gimple_in_ssa_p (cfun))
3693 add_referenced_var (istart0);
3694 add_referenced_var (iend0);
3697 /* See if we need to bias by LLONG_MIN. */
3698 if (fd->iter_type == long_long_unsigned_type_node
3699 && TREE_CODE (type) == INTEGER_TYPE
3700 && !TYPE_UNSIGNED (type))
3702 tree n1, n2;
3704 if (fd->loop.cond_code == LT_EXPR)
3706 n1 = fd->loop.n1;
3707 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3709 else
3711 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3712 n2 = fd->loop.n1;
3714 if (TREE_CODE (n1) != INTEGER_CST
3715 || TREE_CODE (n2) != INTEGER_CST
3716 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3717 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3720 entry_bb = region->entry;
3721 cont_bb = region->cont;
3722 collapse_bb = NULL;
3723 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3724 gcc_assert (broken_loop
3725 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3726 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3727 l1_bb = single_succ (l0_bb);
3728 if (!broken_loop)
3730 l2_bb = create_empty_bb (cont_bb);
3731 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3732 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3734 else
3735 l2_bb = NULL;
3736 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3737 exit_bb = region->exit;
3739 gsi = gsi_last_bb (entry_bb);
3741 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3742 if (fd->collapse > 1)
3744 /* collapsed loops need work for expansion in SSA form. */
3745 gcc_assert (!gimple_in_ssa_p (cfun));
3746 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3747 for (i = 0; i < fd->collapse; i++)
3749 tree itype = TREE_TYPE (fd->loops[i].v);
3751 if (POINTER_TYPE_P (itype))
3752 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3753 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3754 ? -1 : 1));
3755 t = fold_build2 (PLUS_EXPR, itype,
3756 fold_convert (itype, fd->loops[i].step), t);
3757 t = fold_build2 (PLUS_EXPR, itype, t,
3758 fold_convert (itype, fd->loops[i].n2));
3759 t = fold_build2 (MINUS_EXPR, itype, t,
3760 fold_convert (itype, fd->loops[i].n1));
3761 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3762 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3763 fold_build1 (NEGATE_EXPR, itype, t),
3764 fold_build1 (NEGATE_EXPR, itype,
3765 fold_convert (itype,
3766 fd->loops[i].step)));
3767 else
3768 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3769 fold_convert (itype, fd->loops[i].step));
3770 t = fold_convert (type, t);
3771 if (TREE_CODE (t) == INTEGER_CST)
3772 counts[i] = t;
3773 else
3775 counts[i] = create_tmp_var (type, ".count");
3776 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3777 true, GSI_SAME_STMT);
3778 stmt = gimple_build_assign (counts[i], t);
3779 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3781 if (SSA_VAR_P (fd->loop.n2))
3783 if (i == 0)
3784 t = counts[0];
3785 else
3787 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3788 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3789 true, GSI_SAME_STMT);
3791 stmt = gimple_build_assign (fd->loop.n2, t);
3792 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3796 if (in_combined_parallel)
3798 /* In a combined parallel loop, emit a call to
3799 GOMP_loop_foo_next. */
3800 t = build_call_expr (built_in_decls[next_fn], 2,
3801 build_fold_addr_expr (istart0),
3802 build_fold_addr_expr (iend0));
3804 else
3806 tree t0, t1, t2, t3, t4;
3807 /* If this is not a combined parallel loop, emit a call to
3808 GOMP_loop_foo_start in ENTRY_BB. */
3809 t4 = build_fold_addr_expr (iend0);
3810 t3 = build_fold_addr_expr (istart0);
3811 t2 = fold_convert (fd->iter_type, fd->loop.step);
3812 if (POINTER_TYPE_P (type)
3813 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3815 /* Avoid casting pointers to an integer of a different size. */
3816 tree itype
3817 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3818 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3819 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3821 else
3823 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3824 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3826 if (bias)
3828 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3829 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3831 if (fd->iter_type == long_integer_type_node)
3833 if (fd->chunk_size)
3835 t = fold_convert (fd->iter_type, fd->chunk_size);
3836 t = build_call_expr (built_in_decls[start_fn], 6,
3837 t0, t1, t2, t, t3, t4);
3839 else
3840 t = build_call_expr (built_in_decls[start_fn], 5,
3841 t0, t1, t2, t3, t4);
3843 else
3845 tree t5;
3846 tree c_bool_type;
3848 /* The GOMP_loop_ull_*start functions have an additional boolean
3849 argument, true for < loops and false for > loops.
3850 In Fortran, the C bool type can be different from
3851 boolean_type_node. */
3852 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3853 t5 = build_int_cst (c_bool_type,
3854 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3855 if (fd->chunk_size)
3857 t = fold_convert (fd->iter_type, fd->chunk_size);
3858 t = build_call_expr (built_in_decls[start_fn], 7,
3859 t5, t0, t1, t2, t, t3, t4);
3861 else
3862 t = build_call_expr (built_in_decls[start_fn], 6,
3863 t5, t0, t1, t2, t3, t4);
3866 if (TREE_TYPE (t) != boolean_type_node)
3867 t = fold_build2 (NE_EXPR, boolean_type_node,
3868 t, build_int_cst (TREE_TYPE (t), 0));
3869 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3870 true, GSI_SAME_STMT);
3871 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3873 /* Remove the GIMPLE_OMP_FOR statement. */
3874 gsi_remove (&gsi, true);
3876 /* Iteration setup for sequential loop goes in L0_BB. */
3877 gsi = gsi_start_bb (l0_bb);
3878 t = istart0;
3879 if (bias)
3880 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3881 if (POINTER_TYPE_P (type))
3882 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3883 0), t);
3884 t = fold_convert (type, t);
3885 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3886 false, GSI_CONTINUE_LINKING);
3887 stmt = gimple_build_assign (fd->loop.v, t);
3888 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3890 t = iend0;
3891 if (bias)
3892 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3893 if (POINTER_TYPE_P (type))
3894 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3895 0), t);
3896 t = fold_convert (type, t);
3897 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3898 false, GSI_CONTINUE_LINKING);
3899 if (fd->collapse > 1)
3901 tree tem = create_tmp_var (type, ".tem");
3903 stmt = gimple_build_assign (tem, fd->loop.v);
3904 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3905 for (i = fd->collapse - 1; i >= 0; i--)
3907 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3908 itype = vtype;
3909 if (POINTER_TYPE_P (vtype))
3910 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3911 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3912 t = fold_convert (itype, t);
3913 t = fold_build2 (MULT_EXPR, itype, t,
3914 fold_convert (itype, fd->loops[i].step));
3915 if (POINTER_TYPE_P (vtype))
3916 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3917 fd->loops[i].n1, fold_convert (sizetype, t));
3918 else
3919 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3920 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3921 false, GSI_CONTINUE_LINKING);
3922 stmt = gimple_build_assign (fd->loops[i].v, t);
3923 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3924 if (i != 0)
3926 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3927 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3928 false, GSI_CONTINUE_LINKING);
3929 stmt = gimple_build_assign (tem, t);
3930 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3935 if (!broken_loop)
3937 /* Code to control the increment and predicate for the sequential
3938 loop goes in the CONT_BB. */
3939 gsi = gsi_last_bb (cont_bb);
3940 stmt = gsi_stmt (gsi);
3941 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3942 vmain = gimple_omp_continue_control_use (stmt);
3943 vback = gimple_omp_continue_control_def (stmt);
3945 if (POINTER_TYPE_P (type))
3946 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3947 fold_convert (sizetype, fd->loop.step));
3948 else
3949 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3950 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3951 true, GSI_SAME_STMT);
3952 stmt = gimple_build_assign (vback, t);
3953 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3955 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3956 stmt = gimple_build_cond_empty (t);
3957 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3959 /* Remove GIMPLE_OMP_CONTINUE. */
3960 gsi_remove (&gsi, true);
3962 if (fd->collapse > 1)
3964 basic_block last_bb, bb;
3966 last_bb = cont_bb;
3967 for (i = fd->collapse - 1; i >= 0; i--)
3969 tree vtype = TREE_TYPE (fd->loops[i].v);
3971 bb = create_empty_bb (last_bb);
3972 gsi = gsi_start_bb (bb);
3974 if (i < fd->collapse - 1)
3976 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3977 e->probability = REG_BR_PROB_BASE / 8;
3979 t = fd->loops[i + 1].n1;
3980 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3981 false, GSI_CONTINUE_LINKING);
3982 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3983 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3985 else
3986 collapse_bb = bb;
3988 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3990 if (POINTER_TYPE_P (vtype))
3991 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3992 fd->loops[i].v,
3993 fold_convert (sizetype, fd->loops[i].step));
3994 else
3995 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3996 fd->loops[i].step);
3997 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3998 false, GSI_CONTINUE_LINKING);
3999 stmt = gimple_build_assign (fd->loops[i].v, t);
4000 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4002 if (i > 0)
4004 t = fd->loops[i].n2;
4005 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4006 false, GSI_CONTINUE_LINKING);
4007 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
4008 fd->loops[i].v, t);
4009 stmt = gimple_build_cond_empty (t);
4010 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4011 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4012 e->probability = REG_BR_PROB_BASE * 7 / 8;
4014 else
4015 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4016 last_bb = bb;
4020 /* Emit code to get the next parallel iteration in L2_BB. */
4021 gsi = gsi_start_bb (l2_bb);
4023 t = build_call_expr (built_in_decls[next_fn], 2,
4024 build_fold_addr_expr (istart0),
4025 build_fold_addr_expr (iend0));
4026 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4027 false, GSI_CONTINUE_LINKING);
4028 if (TREE_TYPE (t) != boolean_type_node)
4029 t = fold_build2 (NE_EXPR, boolean_type_node,
4030 t, build_int_cst (TREE_TYPE (t), 0));
4031 stmt = gimple_build_cond_empty (t);
4032 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4035 /* Add the loop cleanup function. */
4036 gsi = gsi_last_bb (exit_bb);
4037 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4038 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
4039 else
4040 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
4041 stmt = gimple_build_call (t, 0);
4042 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4043 gsi_remove (&gsi, true);
4045 /* Connect the new blocks. */
4046 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4047 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4049 if (!broken_loop)
4051 gimple_seq phis;
4053 e = find_edge (cont_bb, l3_bb);
4054 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4056 phis = phi_nodes (l3_bb);
4057 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4059 gimple phi = gsi_stmt (gsi);
4060 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4061 PHI_ARG_DEF_FROM_EDGE (phi, e));
4063 remove_edge (e);
4065 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4066 if (fd->collapse > 1)
4068 e = find_edge (cont_bb, l1_bb);
4069 remove_edge (e);
4070 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4072 else
4074 e = find_edge (cont_bb, l1_bb);
4075 e->flags = EDGE_TRUE_VALUE;
4077 e->probability = REG_BR_PROB_BASE * 7 / 8;
4078 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4079 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4081 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4082 recompute_dominator (CDI_DOMINATORS, l2_bb));
4083 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4084 recompute_dominator (CDI_DOMINATORS, l3_bb));
4085 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4086 recompute_dominator (CDI_DOMINATORS, l0_bb));
4087 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4088 recompute_dominator (CDI_DOMINATORS, l1_bb));
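/* An illustrative sketch, kept under #if 0 and not compiled: the per-thread
   control flow that expand_omp_for_generic emits for a simple, non-collapsed
   "<" loop.  gomp_chunk_start, gomp_chunk_next and use are hypothetical
   stand-ins for the schedule-specific GOMP_loop_*_start/next entry points
   (selected via START_FN/NEXT_FN) and for the loop body; the real libgomp
   signatures are not reproduced here.  */
#if 0
static void
omp_for_generic_sketch (long n1, long n2, long step)
{
  long istart0, iend0;

  /* ENTRY_BB: ask the runtime for the first range of iterations.  */
  if (gomp_chunk_start (n1, n2, step, &istart0, &iend0))
    do
      {
        /* L0_BB: iteration setup for the sequential sub-loop.  */
        long v = istart0;
        long iend = iend0;
        do
          {
            /* L1_BB: the loop body, followed by the increment and test
               emitted from the GIMPLE_OMP_CONTINUE in CONT_BB.  */
            use (v);
            v += step;
          }
        while (v < iend);
        /* L2_BB: fetch the next range; loop back to L0_BB if there is one.  */
      }
    while (gomp_chunk_next (&istart0, &iend0));

  /* EXIT_BB: GOMP_loop_end or GOMP_loop_end_nowait replaces the
     GIMPLE_OMP_RETURN.  */
}
#endif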
4093 /* A subroutine of expand_omp_for. Generate code for a parallel
4094 loop with static schedule and no specified chunk size. Given
4095 parameters:
4097 for (V = N1; V cond N2; V += STEP) BODY;
4099 where COND is "<" or ">", we generate pseudocode
4101 if (cond is <)
4102 adj = STEP - 1;
4103 else
4104 adj = STEP + 1;
4105 if ((__typeof (V)) -1 > 0 && cond is >)
4106 n = -(adj + N2 - N1) / -STEP;
4107 else
4108 n = (adj + N2 - N1) / STEP;
4109 q = n / nthreads;
4110 q += (q * nthreads != n);
4111 s0 = q * threadid;
4112 e0 = min(s0 + q, n);
4113 V = s0 * STEP + N1;
4114 if (s0 >= e0) goto L2; else goto L0;
4115 L0:
4116 e = e0 * STEP + N1;
4117 L1:
4118 BODY;
4119 V += STEP;
4120 if (V cond e) goto L1;
4121 L2:
4124 static void
4125 expand_omp_for_static_nochunk (struct omp_region *region,
4126 struct omp_for_data *fd)
4128 tree n, q, s0, e0, e, t, nthreads, threadid;
4129 tree type, itype, vmain, vback;
4130 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4131 basic_block fin_bb;
4132 gimple_stmt_iterator gsi;
4133 gimple stmt;
4135 itype = type = TREE_TYPE (fd->loop.v);
4136 if (POINTER_TYPE_P (type))
4137 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4139 entry_bb = region->entry;
4140 cont_bb = region->cont;
4141 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4142 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4143 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4144 body_bb = single_succ (seq_start_bb);
4145 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4146 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4147 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4148 exit_bb = region->exit;
4150 /* Iteration space partitioning goes in ENTRY_BB. */
4151 gsi = gsi_last_bb (entry_bb);
4152 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4154 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4155 t = fold_convert (itype, t);
4156 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4157 true, GSI_SAME_STMT);
4159 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4160 t = fold_convert (itype, t);
4161 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4162 true, GSI_SAME_STMT);
4164 fd->loop.n1
4165 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4166 true, NULL_TREE, true, GSI_SAME_STMT);
4167 fd->loop.n2
4168 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4169 true, NULL_TREE, true, GSI_SAME_STMT);
4170 fd->loop.step
4171 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4172 true, NULL_TREE, true, GSI_SAME_STMT);
4174 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4175 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4176 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4177 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4178 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4179 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4180 fold_build1 (NEGATE_EXPR, itype, t),
4181 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4182 else
4183 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4184 t = fold_convert (itype, t);
4185 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4187 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4188 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4190 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4191 t = fold_build2 (NE_EXPR, itype, t, n);
4192 t = fold_build2 (PLUS_EXPR, itype, q, t);
4193 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4195 t = build2 (MULT_EXPR, itype, q, threadid);
4196 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4198 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4199 t = fold_build2 (MIN_EXPR, itype, t, n);
4200 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4202 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4203 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4205 /* Remove the GIMPLE_OMP_FOR statement. */
4206 gsi_remove (&gsi, true);
4208 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4209 gsi = gsi_start_bb (seq_start_bb);
4211 t = fold_convert (itype, s0);
4212 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4213 if (POINTER_TYPE_P (type))
4214 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4215 fold_convert (sizetype, t));
4216 else
4217 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4218 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4219 false, GSI_CONTINUE_LINKING);
4220 stmt = gimple_build_assign (fd->loop.v, t);
4221 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4223 t = fold_convert (itype, e0);
4224 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4225 if (POINTER_TYPE_P (type))
4226 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4227 fold_convert (sizetype, t));
4228 else
4229 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4230 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4231 false, GSI_CONTINUE_LINKING);
4233 /* The code controlling the sequential loop replaces the
4234 GIMPLE_OMP_CONTINUE. */
4235 gsi = gsi_last_bb (cont_bb);
4236 stmt = gsi_stmt (gsi);
4237 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4238 vmain = gimple_omp_continue_control_use (stmt);
4239 vback = gimple_omp_continue_control_def (stmt);
4241 if (POINTER_TYPE_P (type))
4242 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4243 fold_convert (sizetype, fd->loop.step));
4244 else
4245 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4246 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4247 true, GSI_SAME_STMT);
4248 stmt = gimple_build_assign (vback, t);
4249 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4251 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4252 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4254 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4255 gsi_remove (&gsi, true);
4257 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4258 gsi = gsi_last_bb (exit_bb);
4259 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4260 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4261 false, GSI_SAME_STMT);
4262 gsi_remove (&gsi, true);
4264 /* Connect all the blocks. */
4265 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4266 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4268 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4269 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4271 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4272 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4273 recompute_dominator (CDI_DOMINATORS, body_bb));
4274 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4275 recompute_dominator (CDI_DOMINATORS, fin_bb));
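/* A self-contained sketch, kept under #if 0 and not compiled, of the
   iteration-space partitioning computed above for the unsigned "<" case:
   with n iterations and no chunk size, thread THREADID gets the half-open
   range [s0, e0).  The argument and variable names are chosen only to match
   the pseudocode in the preceding comment.  */
#if 0
static void
static_nochunk_partition (unsigned long n1, unsigned long n2,
                          unsigned long step, unsigned long nthreads,
                          unsigned long threadid,
                          unsigned long *s0p, unsigned long *e0p)
{
  /* Number of iterations, rounded up as the pass does with adj = STEP - 1.  */
  unsigned long n = (step - 1 + n2 - n1) / step;
  /* q = ceil (n / nthreads): every thread gets at most q iterations.  */
  unsigned long q = n / nthreads;
  unsigned long s0, e0;

  q += (q * nthreads != n);
  s0 = q * threadid;
  e0 = s0 + q < n ? s0 + q : n;	/* min (s0 + q, n) */
  *s0p = s0;
  *e0p = e0;
  /* The emitted code then runs V from s0 * STEP + N1 while V cond
     e0 * STEP + N1, or skips straight to the exit when s0 >= e0.  */
}
#endif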
4279 /* A subroutine of expand_omp_for. Generate code for a parallel
4280 loop with static schedule and a specified chunk size. Given
4281 parameters:
4283 for (V = N1; V cond N2; V += STEP) BODY;
4285 where COND is "<" or ">", we generate pseudocode
4287 if (cond is <)
4288 adj = STEP - 1;
4289 else
4290 adj = STEP + 1;
4291 if ((__typeof (V)) -1 > 0 && cond is >)
4292 n = -(adj + N2 - N1) / -STEP;
4293 else
4294 n = (adj + N2 - N1) / STEP;
4295 trip = 0;
4296 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4297 here so that V is defined
4298 if the loop is not entered
4299 L0:
4300 s0 = (trip * nthreads + threadid) * CHUNK;
4301 e0 = min(s0 + CHUNK, n);
4302 if (s0 < n) goto L1; else goto L4;
4303 L1:
4304 V = s0 * STEP + N1;
4305 e = e0 * STEP + N1;
4306 L2:
4307 BODY;
4308 V += STEP;
4309 if (V cond e) goto L2; else goto L3;
4310 L3:
4311 trip += 1;
4312 goto L0;
4313 L4:
4316 static void
4317 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4319 tree n, s0, e0, e, t;
4320 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4321 tree type, itype, v_main, v_back, v_extra;
4322 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4323 basic_block trip_update_bb, cont_bb, fin_bb;
4324 gimple_stmt_iterator si;
4325 gimple stmt;
4326 edge se;
4328 itype = type = TREE_TYPE (fd->loop.v);
4329 if (POINTER_TYPE_P (type))
4330 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4332 entry_bb = region->entry;
4333 se = split_block (entry_bb, last_stmt (entry_bb));
4334 entry_bb = se->src;
4335 iter_part_bb = se->dest;
4336 cont_bb = region->cont;
4337 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4338 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4339 == FALLTHRU_EDGE (cont_bb)->dest);
4340 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4341 body_bb = single_succ (seq_start_bb);
4342 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4343 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4344 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4345 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4346 exit_bb = region->exit;
4348 /* Trip and adjustment setup goes in ENTRY_BB. */
4349 si = gsi_last_bb (entry_bb);
4350 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4352 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4353 t = fold_convert (itype, t);
4354 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4355 true, GSI_SAME_STMT);
4357 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4358 t = fold_convert (itype, t);
4359 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4360 true, GSI_SAME_STMT);
4362 fd->loop.n1
4363 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4364 true, NULL_TREE, true, GSI_SAME_STMT);
4365 fd->loop.n2
4366 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4367 true, NULL_TREE, true, GSI_SAME_STMT);
4368 fd->loop.step
4369 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4370 true, NULL_TREE, true, GSI_SAME_STMT);
4371 fd->chunk_size
4372 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4373 true, NULL_TREE, true, GSI_SAME_STMT);
4375 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4376 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4377 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4378 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4379 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4380 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4381 fold_build1 (NEGATE_EXPR, itype, t),
4382 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4383 else
4384 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4385 t = fold_convert (itype, t);
4386 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4387 true, GSI_SAME_STMT);
4389 trip_var = create_tmp_var (itype, ".trip");
4390 if (gimple_in_ssa_p (cfun))
4392 add_referenced_var (trip_var);
4393 trip_init = make_ssa_name (trip_var, NULL);
4394 trip_main = make_ssa_name (trip_var, NULL);
4395 trip_back = make_ssa_name (trip_var, NULL);
4397 else
4399 trip_init = trip_var;
4400 trip_main = trip_var;
4401 trip_back = trip_var;
4404 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4405 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4407 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4408 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4409 if (POINTER_TYPE_P (type))
4410 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4411 fold_convert (sizetype, t));
4412 else
4413 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4414 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4415 true, GSI_SAME_STMT);
4417 /* Remove the GIMPLE_OMP_FOR. */
4418 gsi_remove (&si, true);
4420 /* Iteration space partitioning goes in ITER_PART_BB. */
4421 si = gsi_last_bb (iter_part_bb);
4423 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4424 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4425 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4426 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4427 false, GSI_CONTINUE_LINKING);
4429 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4430 t = fold_build2 (MIN_EXPR, itype, t, n);
4431 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4432 false, GSI_CONTINUE_LINKING);
4434 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4435 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4437 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4438 si = gsi_start_bb (seq_start_bb);
4440 t = fold_convert (itype, s0);
4441 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4442 if (POINTER_TYPE_P (type))
4443 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4444 fold_convert (sizetype, t));
4445 else
4446 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4447 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4448 false, GSI_CONTINUE_LINKING);
4449 stmt = gimple_build_assign (fd->loop.v, t);
4450 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4452 t = fold_convert (itype, e0);
4453 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4454 if (POINTER_TYPE_P (type))
4455 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4456 fold_convert (sizetype, t));
4457 else
4458 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4459 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4460 false, GSI_CONTINUE_LINKING);
4462 /* The code controlling the sequential loop goes in CONT_BB,
4463 replacing the GIMPLE_OMP_CONTINUE. */
4464 si = gsi_last_bb (cont_bb);
4465 stmt = gsi_stmt (si);
4466 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4467 v_main = gimple_omp_continue_control_use (stmt);
4468 v_back = gimple_omp_continue_control_def (stmt);
4470 if (POINTER_TYPE_P (type))
4471 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4472 fold_convert (sizetype, fd->loop.step));
4473 else
4474 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4475 stmt = gimple_build_assign (v_back, t);
4476 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4478 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4479 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4481 /* Remove GIMPLE_OMP_CONTINUE. */
4482 gsi_remove (&si, true);
4484 /* Trip update code goes into TRIP_UPDATE_BB. */
4485 si = gsi_start_bb (trip_update_bb);
4487 t = build_int_cst (itype, 1);
4488 t = build2 (PLUS_EXPR, itype, trip_main, t);
4489 stmt = gimple_build_assign (trip_back, t);
4490 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4492 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4493 si = gsi_last_bb (exit_bb);
4494 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4495 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4496 false, GSI_SAME_STMT);
4497 gsi_remove (&si, true);
4499 /* Connect the new blocks. */
4500 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4501 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4503 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4504 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4506 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4508 if (gimple_in_ssa_p (cfun))
4510 gimple_stmt_iterator psi;
4511 gimple phi;
4512 edge re, ene;
4513 edge_var_map_vector head;
4514 edge_var_map *vm;
4515 size_t i;
4517 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4518 remove arguments of the phi nodes in fin_bb. We need to create
4519 appropriate phi nodes in iter_part_bb instead. */
4520 se = single_pred_edge (fin_bb);
4521 re = single_succ_edge (trip_update_bb);
4522 head = redirect_edge_var_map_vector (re);
4523 ene = single_succ_edge (entry_bb);
4525 psi = gsi_start_phis (fin_bb);
4526 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4527 gsi_next (&psi), ++i)
4529 gimple nphi;
4530 source_location locus;
4532 phi = gsi_stmt (psi);
4533 t = gimple_phi_result (phi);
4534 gcc_assert (t == redirect_edge_var_map_result (vm));
4535 nphi = create_phi_node (t, iter_part_bb);
4536 SSA_NAME_DEF_STMT (t) = nphi;
4538 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4539 locus = gimple_phi_arg_location_from_edge (phi, se);
4541 /* A special case -- fd->loop.v is not yet computed in
4542 iter_part_bb, we need to use v_extra instead. */
4543 if (t == fd->loop.v)
4544 t = v_extra;
4545 add_phi_arg (nphi, t, ene, locus);
4546 locus = redirect_edge_var_map_location (vm);
4547 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4549 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4550 redirect_edge_var_map_clear (re);
4551 while (1)
4553 psi = gsi_start_phis (fin_bb);
4554 if (gsi_end_p (psi))
4555 break;
4556 remove_phi_node (&psi, false);
4559 /* Make phi node for trip. */
4560 phi = create_phi_node (trip_main, iter_part_bb);
4561 SSA_NAME_DEF_STMT (trip_main) = phi;
4562 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4563 UNKNOWN_LOCATION);
4564 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4565 UNKNOWN_LOCATION);
4568 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4569 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4570 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4571 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4572 recompute_dominator (CDI_DOMINATORS, fin_bb));
4573 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4574 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4575 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4576 recompute_dominator (CDI_DOMINATORS, body_bb));
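/* A sketch, kept under #if 0 and not compiled, of the trip loop described in
   the comment before expand_omp_for_static_chunk, for the "<" case: thread
   THREADID repeatedly claims the chunk [s0, e0) and bumps TRIP until s0 runs
   past n.  use () is a hypothetical stand-in for the loop body.  */
#if 0
static void
static_chunk_sketch (unsigned long n, unsigned long chunk,
                     unsigned long nthreads, unsigned long threadid)
{
  unsigned long trip = 0;
  unsigned long s0, e0, v;

  for (;;)
    {
      /* ITER_PART_BB: the chunk this thread owns on trip TRIP.  */
      s0 = (trip * nthreads + threadid) * chunk;
      if (s0 >= n)
        break;				/* FIN_BB */
      e0 = s0 + chunk < n ? s0 + chunk : n;	/* min (s0 + chunk, n) */
      /* SEQ_START_BB / BODY_BB / CONT_BB: run iterations [s0, e0).  */
      for (v = s0; v < e0; v++)
        use (v);			/* hypothetical loop body */
      /* TRIP_UPDATE_BB: advance to this thread's next chunk.  */
      trip++;
    }
}
#endif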
4580 /* Expand the OpenMP loop defined by REGION. */
4582 static void
4583 expand_omp_for (struct omp_region *region)
4585 struct omp_for_data fd;
4586 struct omp_for_data_loop *loops;
4588 loops
4589 = (struct omp_for_data_loop *)
4590 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4591 * sizeof (struct omp_for_data_loop));
4592 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4593 region->sched_kind = fd.sched_kind;
4595 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4596 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4597 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4598 if (region->cont)
4600 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4601 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4602 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4605 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4606 && !fd.have_ordered
4607 && fd.collapse == 1
4608 && region->cont != NULL)
4610 if (fd.chunk_size == NULL)
4611 expand_omp_for_static_nochunk (region, &fd);
4612 else
4613 expand_omp_for_static_chunk (region, &fd);
4615 else
4617 int fn_index, start_ix, next_ix;
4619 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4620 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4621 ? 3 : fd.sched_kind;
4622 fn_index += fd.have_ordered * 4;
4623 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4624 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
4625 if (fd.iter_type == long_long_unsigned_type_node)
4627 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4628 - BUILT_IN_GOMP_LOOP_STATIC_START;
4629 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4630 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4632 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4633 (enum built_in_function) next_ix);
4636 update_ssa (TODO_update_ssa_only_virtuals);
4640 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4642 v = GOMP_sections_start (n);
4643 L0:
4644 switch (v)
4646 case 0:
4647 goto L2;
4648 case 1:
4649 section 1;
4650 goto L1;
4651 case 2:
4652 ...
4653 case n:
4654 ...
4655 default:
4656 abort ();
4658 L1:
4659 v = GOMP_sections_next ();
4660 goto L0;
4661 L2:
4662 reduction;
4664 If this is a combined parallel+sections region, replace the call to
4665 GOMP_sections_start with a call to GOMP_sections_next. */
4667 static void
4668 expand_omp_sections (struct omp_region *region)
4670 tree t, u, vin = NULL, vmain, vnext, l2;
4671 VEC (tree,heap) *label_vec;
4672 unsigned len;
4673 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4674 gimple_stmt_iterator si, switch_si;
4675 gimple sections_stmt, stmt, cont;
4676 edge_iterator ei;
4677 edge e;
4678 struct omp_region *inner;
4679 unsigned i, casei;
4680 bool exit_reachable = region->cont != NULL;
4682 gcc_assert (exit_reachable == (region->exit != NULL));
4683 entry_bb = region->entry;
4684 l0_bb = single_succ (entry_bb);
4685 l1_bb = region->cont;
4686 l2_bb = region->exit;
4687 if (exit_reachable)
4689 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
4690 l2 = gimple_block_label (l2_bb);
4691 else
4693 /* This can happen if there are reductions. */
4694 len = EDGE_COUNT (l0_bb->succs);
4695 gcc_assert (len > 0);
4696 e = EDGE_SUCC (l0_bb, len - 1);
4697 si = gsi_last_bb (e->dest);
4698 l2 = NULL_TREE;
4699 if (gsi_end_p (si)
4700 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4701 l2 = gimple_block_label (e->dest);
4702 else
4703 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4705 si = gsi_last_bb (e->dest);
4706 if (gsi_end_p (si)
4707 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4709 l2 = gimple_block_label (e->dest);
4710 break;
4714 default_bb = create_empty_bb (l1_bb->prev_bb);
4716 else
4718 default_bb = create_empty_bb (l0_bb);
4719 l2 = gimple_block_label (default_bb);
4722 /* We will build a switch() with enough cases for all the
4723 GIMPLE_OMP_SECTION regions, a '0' case taken when no more work remains,
4724 and a default case to abort if something goes wrong. */
4725 len = EDGE_COUNT (l0_bb->succs);
4727 /* Use VEC_quick_push on label_vec throughout, since we know the size
4728 in advance. */
4729 label_vec = VEC_alloc (tree, heap, len);
4731 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4732 GIMPLE_OMP_SECTIONS statement. */
4733 si = gsi_last_bb (entry_bb);
4734 sections_stmt = gsi_stmt (si);
4735 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4736 vin = gimple_omp_sections_control (sections_stmt);
4737 if (!is_combined_parallel (region))
4739 /* If we are not inside a combined parallel+sections region,
4740 call GOMP_sections_start. */
4741 t = build_int_cst (unsigned_type_node,
4742 exit_reachable ? len - 1 : len);
4743 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4744 stmt = gimple_build_call (u, 1, t);
4746 else
4748 /* Otherwise, call GOMP_sections_next. */
4749 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4750 stmt = gimple_build_call (u, 0);
4752 gimple_call_set_lhs (stmt, vin);
4753 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4754 gsi_remove (&si, true);
4756 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4757 L0_BB. */
4758 switch_si = gsi_last_bb (l0_bb);
4759 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4760 if (exit_reachable)
4762 cont = last_stmt (l1_bb);
4763 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4764 vmain = gimple_omp_continue_control_use (cont);
4765 vnext = gimple_omp_continue_control_def (cont);
4767 else
4769 vmain = vin;
4770 vnext = NULL_TREE;
4773 i = 0;
4774 if (exit_reachable)
4776 t = build3 (CASE_LABEL_EXPR, void_type_node,
4777 build_int_cst (unsigned_type_node, 0), NULL, l2);
4778 VEC_quick_push (tree, label_vec, t);
4779 i++;
4782 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4783 for (inner = region->inner, casei = 1;
4784 inner;
4785 inner = inner->next, i++, casei++)
4787 basic_block s_entry_bb, s_exit_bb;
4789 /* Skip optional reduction region. */
4790 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4792 --i;
4793 --casei;
4794 continue;
4797 s_entry_bb = inner->entry;
4798 s_exit_bb = inner->exit;
4800 t = gimple_block_label (s_entry_bb);
4801 u = build_int_cst (unsigned_type_node, casei);
4802 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
4803 VEC_quick_push (tree, label_vec, u);
4805 si = gsi_last_bb (s_entry_bb);
4806 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4807 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4808 gsi_remove (&si, true);
4809 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4811 if (s_exit_bb == NULL)
4812 continue;
4814 si = gsi_last_bb (s_exit_bb);
4815 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4816 gsi_remove (&si, true);
4818 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4821 /* Error handling code goes in DEFAULT_BB. */
4822 t = gimple_block_label (default_bb);
4823 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
4824 make_edge (l0_bb, default_bb, 0);
4826 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4827 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4828 gsi_remove (&switch_si, true);
4829 VEC_free (tree, heap, label_vec);
4831 si = gsi_start_bb (default_bb);
4832 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4833 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4835 if (exit_reachable)
4837 /* Code to get the next section goes in L1_BB. */
4838 si = gsi_last_bb (l1_bb);
4839 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4841 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4842 gimple_call_set_lhs (stmt, vnext);
4843 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4844 gsi_remove (&si, true);
4846 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4848 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4849 si = gsi_last_bb (l2_bb);
4850 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4851 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4852 else
4853 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4854 stmt = gimple_build_call (t, 0);
4855 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4856 gsi_remove (&si, true);
4859 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
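/* A sketch, kept under #if 0 and not compiled, of the dispatch loop outlined
   in the comment before expand_omp_sections, here for two sections.  The
   GOMP_sections_start/next prototypes and the section1/section2 bodies are
   assumptions made only for this illustration.  */
#if 0
extern unsigned GOMP_sections_start (unsigned);	/* assumed prototype */
extern unsigned GOMP_sections_next (void);	/* assumed prototype */

static void
sections_dispatch_sketch (void)
{
  unsigned v = GOMP_sections_start (2);

  for (;;)
    switch (v)
      {
      case 0:
        /* L2: no more work; GOMP_sections_end[_nowait] would follow.  */
        return;
      case 1:
        section1 ();			/* hypothetical section body */
        v = GOMP_sections_next ();	/* L1 */
        break;
      case 2:
        section2 ();			/* hypothetical section body */
        v = GOMP_sections_next ();
        break;
      default:
        __builtin_trap ();		/* DEFAULT_BB: something went wrong */
      }
}
#endif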
4863 /* Expand code for an OpenMP single directive. We've already expanded
4864 much of the code; here we simply place the GOMP_barrier call. */
4866 static void
4867 expand_omp_single (struct omp_region *region)
4869 basic_block entry_bb, exit_bb;
4870 gimple_stmt_iterator si;
4871 bool need_barrier = false;
4873 entry_bb = region->entry;
4874 exit_bb = region->exit;
4876 si = gsi_last_bb (entry_bb);
4877 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4878 be removed. We need to ensure that the thread that entered the single
4879 does not exit before the data is copied out by the other threads. */
4880 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4881 OMP_CLAUSE_COPYPRIVATE))
4882 need_barrier = true;
4883 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4884 gsi_remove (&si, true);
4885 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4887 si = gsi_last_bb (exit_bb);
4888 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4889 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4890 false, GSI_SAME_STMT);
4891 gsi_remove (&si, true);
4892 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4896 /* Generic expansion for OpenMP synchronization directives: master,
4897 ordered and critical. All we need to do here is remove the entry
4898 and exit markers for REGION. */
4900 static void
4901 expand_omp_synch (struct omp_region *region)
4903 basic_block entry_bb, exit_bb;
4904 gimple_stmt_iterator si;
4906 entry_bb = region->entry;
4907 exit_bb = region->exit;
4909 si = gsi_last_bb (entry_bb);
4910 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4911 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4912 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4913 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4914 gsi_remove (&si, true);
4915 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4917 if (exit_bb)
4919 si = gsi_last_bb (exit_bb);
4920 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4921 gsi_remove (&si, true);
4922 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4926 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4927 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4928 size of the data type, and thus usable to find the index of the builtin
4929 decl. Returns false if the expression is not of the proper form. */
4931 static bool
4932 expand_omp_atomic_fetch_op (basic_block load_bb,
4933 tree addr, tree loaded_val,
4934 tree stored_val, int index)
4936 enum built_in_function base;
4937 tree decl, itype, call;
4938 direct_optab optab;
4939 tree rhs;
4940 basic_block store_bb = single_succ (load_bb);
4941 gimple_stmt_iterator gsi;
4942 gimple stmt;
4943 location_t loc;
4945 /* We expect to find the following sequences:
4947 load_bb:
4948 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
4950 store_bb:
4951 val = tmp OP something; (or: something OP tmp)
4952 GIMPLE_OMP_ATOMIC_STORE (val)
4954 ???FIXME: Allow a more flexible sequence.
4955 Perhaps use data flow to pick the statements.
4959 gsi = gsi_after_labels (store_bb);
4960 stmt = gsi_stmt (gsi);
4961 loc = gimple_location (stmt);
4962 if (!is_gimple_assign (stmt))
4963 return false;
4964 gsi_next (&gsi);
4965 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
4966 return false;
4968 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
4969 return false;
4971 /* Check for one of the supported fetch-op operations. */
4972 switch (gimple_assign_rhs_code (stmt))
4974 case PLUS_EXPR:
4975 case POINTER_PLUS_EXPR:
4976 base = BUILT_IN_FETCH_AND_ADD_N;
4977 optab = sync_add_optab;
4978 break;
4979 case MINUS_EXPR:
4980 base = BUILT_IN_FETCH_AND_SUB_N;
4981 optab = sync_add_optab;
4982 break;
4983 case BIT_AND_EXPR:
4984 base = BUILT_IN_FETCH_AND_AND_N;
4985 optab = sync_and_optab;
4986 break;
4987 case BIT_IOR_EXPR:
4988 base = BUILT_IN_FETCH_AND_OR_N;
4989 optab = sync_ior_optab;
4990 break;
4991 case BIT_XOR_EXPR:
4992 base = BUILT_IN_FETCH_AND_XOR_N;
4993 optab = sync_xor_optab;
4994 break;
4995 default:
4996 return false;
4998 /* Make sure the expression is of the proper form. */
4999 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5000 rhs = gimple_assign_rhs2 (stmt);
5001 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5002 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5003 rhs = gimple_assign_rhs1 (stmt);
5004 else
5005 return false;
5007 decl = built_in_decls[base + index + 1];
5008 itype = TREE_TYPE (TREE_TYPE (decl));
5010 if (direct_optab_handler (optab, TYPE_MODE (itype)) == CODE_FOR_nothing)
5011 return false;
5013 gsi = gsi_last_bb (load_bb);
5014 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
5015 call = build_call_expr_loc (loc,
5016 decl, 2, addr,
5017 fold_convert_loc (loc, itype, rhs));
5018 call = fold_convert_loc (loc, void_type_node, call);
5019 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5020 gsi_remove (&gsi, true);
5022 gsi = gsi_last_bb (store_bb);
5023 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5024 gsi_remove (&gsi, true);
5025 gsi = gsi_last_bb (store_bb);
5026 gsi_remove (&gsi, true);
5028 if (gimple_in_ssa_p (cfun))
5029 update_ssa (TODO_update_ssa_no_phi);
5031 return true;
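/* An example, kept under #if 0 and not compiled, of the transformation
   above.  For a supported fetch-op pattern such as

	#pragma omp atomic
	x += v;

   the atomic load/store pair collapses into a single __sync builtin call,
   roughly as follows.  */
#if 0
static void
atomic_fetch_op_example (long *x, long v)
{
  /* GIMPLE_OMP_ATOMIC_LOAD, the update statement and
     GIMPLE_OMP_ATOMIC_STORE become one atomic read-modify-write.  */
  __sync_fetch_and_add (x, v);
}
#endif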
5034 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5036 oldval = *addr;
5037 repeat:
5038 newval = rhs; // with oldval replacing *addr in rhs
5039 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
5040 if (oldval != newval)
5041 goto repeat;
5043 INDEX is log2 of the size of the data type, and thus usable to find the
5044 index of the builtin decl. */
5046 static bool
5047 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5048 tree addr, tree loaded_val, tree stored_val,
5049 int index)
5051 tree loadedi, storedi, initial, new_storedi, old_vali;
5052 tree type, itype, cmpxchg, iaddr;
5053 gimple_stmt_iterator si;
5054 basic_block loop_header = single_succ (load_bb);
5055 gimple phi, stmt;
5056 edge e;
5058 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
5059 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5060 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5062 if (direct_optab_handler (sync_compare_and_swap_optab, TYPE_MODE (itype))
5063 == CODE_FOR_nothing)
5064 return false;
5066 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5067 si = gsi_last_bb (load_bb);
5068 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5070 /* For floating-point values, we'll need to view-convert them to integers
5071 so that we can perform the atomic compare and swap. Simplify the
5072 following code by always setting up the "i"ntegral variables. */
5073 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5075 tree iaddr_val;
5077 iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
5078 true), NULL);
5079 iaddr_val
5080 = force_gimple_operand_gsi (&si,
5081 fold_convert (TREE_TYPE (iaddr), addr),
5082 false, NULL_TREE, true, GSI_SAME_STMT);
5083 stmt = gimple_build_assign (iaddr, iaddr_val);
5084 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5085 loadedi = create_tmp_var (itype, NULL);
5086 if (gimple_in_ssa_p (cfun))
5088 add_referenced_var (iaddr);
5089 add_referenced_var (loadedi);
5090 loadedi = make_ssa_name (loadedi, NULL);
5093 else
5095 iaddr = addr;
5096 loadedi = loaded_val;
5099 initial
5100 = force_gimple_operand_gsi (&si,
5101 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5102 iaddr,
5103 build_int_cst (TREE_TYPE (iaddr), 0)),
5104 true, NULL_TREE, true, GSI_SAME_STMT);
5106 /* Move the value to the LOADEDI temporary. */
5107 if (gimple_in_ssa_p (cfun))
5109 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5110 phi = create_phi_node (loadedi, loop_header);
5111 SSA_NAME_DEF_STMT (loadedi) = phi;
5112 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5113 initial);
5115 else
5116 gsi_insert_before (&si,
5117 gimple_build_assign (loadedi, initial),
5118 GSI_SAME_STMT);
5119 if (loadedi != loaded_val)
5121 gimple_stmt_iterator gsi2;
5122 tree x;
5124 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5125 gsi2 = gsi_start_bb (loop_header);
5126 if (gimple_in_ssa_p (cfun))
5128 gimple stmt;
5129 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5130 true, GSI_SAME_STMT);
5131 stmt = gimple_build_assign (loaded_val, x);
5132 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5134 else
5136 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5137 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5138 true, GSI_SAME_STMT);
5141 gsi_remove (&si, true);
5143 si = gsi_last_bb (store_bb);
5144 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5146 if (iaddr == addr)
5147 storedi = stored_val;
5148 else
5149 storedi =
5150 force_gimple_operand_gsi (&si,
5151 build1 (VIEW_CONVERT_EXPR, itype,
5152 stored_val), true, NULL_TREE, true,
5153 GSI_SAME_STMT);
5155 /* Build the compare&swap statement. */
5156 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5157 new_storedi = force_gimple_operand_gsi (&si,
5158 fold_convert (TREE_TYPE (loadedi),
5159 new_storedi),
5160 true, NULL_TREE,
5161 true, GSI_SAME_STMT);
5163 if (gimple_in_ssa_p (cfun))
5164 old_vali = loadedi;
5165 else
5167 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5168 if (gimple_in_ssa_p (cfun))
5169 add_referenced_var (old_vali);
5170 stmt = gimple_build_assign (old_vali, loadedi);
5171 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5173 stmt = gimple_build_assign (loadedi, new_storedi);
5174 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5177 /* Note that we always perform the comparison as an integer, even for
5178 floating point. This allows the atomic operation to properly
5179 succeed even with NaNs and -0.0. */
5180 stmt = gimple_build_cond_empty
5181 (build2 (NE_EXPR, boolean_type_node,
5182 new_storedi, old_vali));
5183 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5185 /* Update cfg. */
5186 e = single_succ_edge (store_bb);
5187 e->flags &= ~EDGE_FALLTHRU;
5188 e->flags |= EDGE_FALSE_VALUE;
5190 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5192 /* Copy the new value to loadedi (we already did that before the condition
5193 if we are not in SSA). */
5194 if (gimple_in_ssa_p (cfun))
5196 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5197 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5200 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5201 gsi_remove (&si, true);
5203 if (gimple_in_ssa_p (cfun))
5204 update_ssa (TODO_update_ssa_no_phi);
5206 return true;
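/* A sketch, kept under #if 0 and not compiled, of the compare-and-swap loop
   described in the comment above, for a plain integral type (the
   floating-point case additionally view-converts the values to an integer of
   the same size).  compute_rhs is a hypothetical stand-in for the user's
   update expression with the loaded value substituted in.  */
#if 0
static void
atomic_cas_sketch (long *addr)
{
  long oldval = *addr;			/* initial load in LOAD_BB */

  for (;;)
    {
      long newval = compute_rhs (oldval);	/* hypothetical RHS */
      long prev = __sync_val_compare_and_swap (addr, oldval, newval);
      if (prev == oldval)
        break;				/* the swap succeeded */
      oldval = prev;			/* retry with the value we observed */
    }
}
#endif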
5209 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5211 GOMP_atomic_start ();
5212 *addr = rhs;
5213 GOMP_atomic_end ();
5215 The result is not globally atomic, but works so long as all parallel
5216 references are within #pragma omp atomic directives. According to
5217 responses received from omp@openmp.org, this appears to be within spec,
5218 which makes sense, since that's how several other compilers handle
5219 this situation as well.
5220 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5221 expanding. STORED_VAL is the operand of the matching
5222 GIMPLE_OMP_ATOMIC_STORE.
5224 We replace
5225 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5226 loaded_val = *addr;
5228 and replace
5229 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5230 *addr = stored_val;
5233 static bool
5234 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5235 tree addr, tree loaded_val, tree stored_val)
5237 gimple_stmt_iterator si;
5238 gimple stmt;
5239 tree t;
5241 si = gsi_last_bb (load_bb);
5242 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5244 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5245 t = build_call_expr (t, 0);
5246 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5248 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
5249 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5250 gsi_remove (&si, true);
5252 si = gsi_last_bb (store_bb);
5253 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5255 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5256 stored_val);
5257 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5259 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5260 t = build_call_expr (t, 0);
5261 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5262 gsi_remove (&si, true);
5264 if (gimple_in_ssa_p (cfun))
5265 update_ssa (TODO_update_ssa_no_phi);
5266 return true;
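/* A sketch, kept under #if 0 and not compiled, of the mutex fallback for an
   atomic update of a type the __sync builtins cannot handle.  The
   GOMP_atomic_start/end prototypes below are assumptions made only for this
   illustration.  */
#if 0
extern void GOMP_atomic_start (void);	/* assumed prototype */
extern void GOMP_atomic_end (void);	/* assumed prototype */

static void
atomic_mutex_sketch (long double *addr, long double rhs)
{
  GOMP_atomic_start ();
  *addr = rhs;				/* the guarded load/store pair */
  GOMP_atomic_end ();
}
#endif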
5269 /* Expand a GIMPLE_OMP_ATOMIC statement. We first try to expand it
5270 using expand_omp_atomic_fetch_op. If that fails, we try
5271 expand_omp_atomic_pipeline, and if that fails too, the
5272 ultimate fallback is wrapping the operation in a mutex
5273 (expand_omp_atomic_mutex). REGION is the atomic region built
5274 by build_omp_regions_1(). */
5276 static void
5277 expand_omp_atomic (struct omp_region *region)
5279 basic_block load_bb = region->entry, store_bb = region->exit;
5280 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5281 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5282 tree addr = gimple_omp_atomic_load_rhs (load);
5283 tree stored_val = gimple_omp_atomic_store_val (store);
5284 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5285 HOST_WIDE_INT index;
5287 /* Make sure the type is one of the supported sizes. */
5288 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5289 index = exact_log2 (index);
5290 if (index >= 0 && index <= 4)
5292 unsigned int align = TYPE_ALIGN_UNIT (type);
5294 /* __sync builtins require strict data alignment. */
5295 if (exact_log2 (align) >= index)
5297 /* When possible, use specialized atomic update functions. */
5298 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5299 && store_bb == single_succ (load_bb))
5301 if (expand_omp_atomic_fetch_op (load_bb, addr,
5302 loaded_val, stored_val, index))
5303 return;
5306 /* If we don't have specialized __sync builtins, try and implement
5307 as a compare and swap loop. */
5308 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5309 loaded_val, stored_val, index))
5310 return;
5314 /* The ultimate fallback is wrapping the operation in a mutex. */
5315 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5319 /* Expand the parallel region tree rooted at REGION. Expansion
5320 proceeds in depth-first order. Innermost regions are expanded
5321 first. This way, parallel regions that require a new function to
5322 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5323 internal dependencies in their body. */
5325 static void
5326 expand_omp (struct omp_region *region)
5328 while (region)
5330 location_t saved_location;
5332 /* First, determine whether this is a combined parallel+workshare
5333 region. */
5334 if (region->type == GIMPLE_OMP_PARALLEL)
5335 determine_parallel_type (region);
5337 if (region->inner)
5338 expand_omp (region->inner);
5340 saved_location = input_location;
5341 if (gimple_has_location (last_stmt (region->entry)))
5342 input_location = gimple_location (last_stmt (region->entry));
5344 switch (region->type)
5346 case GIMPLE_OMP_PARALLEL:
5347 case GIMPLE_OMP_TASK:
5348 expand_omp_taskreg (region);
5349 break;
5351 case GIMPLE_OMP_FOR:
5352 expand_omp_for (region);
5353 break;
5355 case GIMPLE_OMP_SECTIONS:
5356 expand_omp_sections (region);
5357 break;
5359 case GIMPLE_OMP_SECTION:
5360 /* Individual omp sections are handled together with their
5361 parent GIMPLE_OMP_SECTIONS region. */
5362 break;
5364 case GIMPLE_OMP_SINGLE:
5365 expand_omp_single (region);
5366 break;
5368 case GIMPLE_OMP_MASTER:
5369 case GIMPLE_OMP_ORDERED:
5370 case GIMPLE_OMP_CRITICAL:
5371 expand_omp_synch (region);
5372 break;
5374 case GIMPLE_OMP_ATOMIC_LOAD:
5375 expand_omp_atomic (region);
5376 break;
5378 default:
5379 gcc_unreachable ();
5382 input_location = saved_location;
5383 region = region->next;
5388 /* Helper for build_omp_regions. Scan the dominator tree starting at
5389 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5390 true, the function ends once a single tree is built (otherwise, a whole
5391 forest of OMP constructs may be built). */
5393 static void
5394 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5395 bool single_tree)
5397 gimple_stmt_iterator gsi;
5398 gimple stmt;
5399 basic_block son;
5401 gsi = gsi_last_bb (bb);
5402 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5404 struct omp_region *region;
5405 enum gimple_code code;
5407 stmt = gsi_stmt (gsi);
5408 code = gimple_code (stmt);
5409 if (code == GIMPLE_OMP_RETURN)
5411 /* STMT is the return point out of region PARENT. Mark it
5412 as the exit point and make PARENT the immediately
5413 enclosing region. */
5414 gcc_assert (parent);
5415 region = parent;
5416 region->exit = bb;
5417 parent = parent->outer;
5419 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5421 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5422 GIMPLE_OMP_RETURN, but matches with
5423 GIMPLE_OMP_ATOMIC_LOAD. */
5424 gcc_assert (parent);
5425 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5426 region = parent;
5427 region->exit = bb;
5428 parent = parent->outer;
5431 else if (code == GIMPLE_OMP_CONTINUE)
5433 gcc_assert (parent);
5434 parent->cont = bb;
5436 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5438 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5439 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5442 else
5444 /* Otherwise, this directive becomes the parent for a new
5445 region. */
5446 region = new_omp_region (bb, code, parent);
5447 parent = region;
5451 if (single_tree && !parent)
5452 return;
5454 for (son = first_dom_son (CDI_DOMINATORS, bb);
5455 son;
5456 son = next_dom_son (CDI_DOMINATORS, son))
5457 build_omp_regions_1 (son, parent, single_tree);
5460 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5461 root_omp_region. */
5463 static void
5464 build_omp_regions_root (basic_block root)
5466 gcc_assert (root_omp_region == NULL);
5467 build_omp_regions_1 (root, NULL, true);
5468 gcc_assert (root_omp_region != NULL);
5471 /* Expands omp construct (and its subconstructs) starting in HEAD. */
5473 void
5474 omp_expand_local (basic_block head)
5476 build_omp_regions_root (head);
5477 if (dump_file && (dump_flags & TDF_DETAILS))
5479 fprintf (dump_file, "\nOMP region tree\n\n");
5480 dump_omp_region (dump_file, root_omp_region, 0);
5481 fprintf (dump_file, "\n");
5484 remove_exit_barriers (root_omp_region);
5485 expand_omp (root_omp_region);
5487 free_omp_regions ();
5490 /* Scan the CFG and build a tree of OMP regions, storing it in
5491 root_omp_region. */
5493 static void
5494 build_omp_regions (void)
5496 gcc_assert (root_omp_region == NULL);
5497 calculate_dominance_info (CDI_DOMINATORS);
5498 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5501 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5503 static unsigned int
5504 execute_expand_omp (void)
5506 build_omp_regions ();
5508 if (!root_omp_region)
5509 return 0;
5511 if (dump_file)
5513 fprintf (dump_file, "\nOMP region tree\n\n");
5514 dump_omp_region (dump_file, root_omp_region, 0);
5515 fprintf (dump_file, "\n");
5518 remove_exit_barriers (root_omp_region);
5520 expand_omp (root_omp_region);
5522 cleanup_tree_cfg ();
5524 free_omp_regions ();
5526 return 0;
5529 /* OMP expansion -- the default pass, run before creation of SSA form. */
5531 static bool
5532 gate_expand_omp (void)
5534 return (flag_openmp != 0 && !seen_error ());
5537 struct gimple_opt_pass pass_expand_omp =
5540 GIMPLE_PASS,
5541 "ompexp", /* name */
5542 gate_expand_omp, /* gate */
5543 execute_expand_omp, /* execute */
5544 NULL, /* sub */
5545 NULL, /* next */
5546 0, /* static_pass_number */
5547 TV_NONE, /* tv_id */
5548 PROP_gimple_any, /* properties_required */
5549 0, /* properties_provided */
5550 0, /* properties_destroyed */
5551 0, /* todo_flags_start */
5552 TODO_dump_func /* todo_flags_finish */
5556 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5558 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5559 CTX is the enclosing OMP context for the current statement. */
5561 static void
5562 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5564 tree block, control;
5565 gimple_stmt_iterator tgsi;
5566 unsigned i, len;
5567 gimple stmt, new_stmt, bind, t;
5568 gimple_seq ilist, dlist, olist, new_body, body;
5569 struct gimplify_ctx gctx;
5571 stmt = gsi_stmt (*gsi_p);
5573 push_gimplify_context (&gctx);
5575 dlist = NULL;
5576 ilist = NULL;
5577 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5578 &ilist, &dlist, ctx);
5580 tgsi = gsi_start (gimple_omp_body (stmt));
5581 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5582 continue;
5584 tgsi = gsi_start (gimple_omp_body (stmt));
5585 body = NULL;
5586 for (i = 0; i < len; i++, gsi_next (&tgsi))
5588 omp_context *sctx;
5589 gimple sec_start;
5591 sec_start = gsi_stmt (tgsi);
5592 sctx = maybe_lookup_ctx (sec_start);
5593 gcc_assert (sctx);
5595 gimple_seq_add_stmt (&body, sec_start);
5597 lower_omp (gimple_omp_body (sec_start), sctx);
5598 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5599 gimple_omp_set_body (sec_start, NULL);
5601 if (i == len - 1)
5603 gimple_seq l = NULL;
5604 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5605 &l, ctx);
5606 gimple_seq_add_seq (&body, l);
5607 gimple_omp_section_set_last (sec_start);
5610 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5613 block = make_node (BLOCK);
5614 bind = gimple_build_bind (NULL, body, block);
5616 olist = NULL;
5617 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5619 block = make_node (BLOCK);
5620 new_stmt = gimple_build_bind (NULL, NULL, block);
5622 pop_gimplify_context (new_stmt);
5623 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5624 BLOCK_VARS (block) = gimple_bind_vars (bind);
5625 if (BLOCK_VARS (block))
5626 TREE_USED (block) = 1;
5628 new_body = NULL;
5629 gimple_seq_add_seq (&new_body, ilist);
5630 gimple_seq_add_stmt (&new_body, stmt);
5631 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5632 gimple_seq_add_stmt (&new_body, bind);
5634 control = create_tmp_var (unsigned_type_node, ".section");
5635 t = gimple_build_omp_continue (control, control);
5636 gimple_omp_sections_set_control (stmt, control);
5637 gimple_seq_add_stmt (&new_body, t);
5639 gimple_seq_add_seq (&new_body, olist);
5640 gimple_seq_add_seq (&new_body, dlist);
5642 new_body = maybe_catch_exception (new_body);
5644 t = gimple_build_omp_return
5645 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5646 OMP_CLAUSE_NOWAIT));
5647 gimple_seq_add_stmt (&new_body, t);
5649 gimple_bind_set_body (new_stmt, new_body);
5650 gimple_omp_set_body (stmt, NULL);
5652 gsi_replace (gsi_p, new_stmt, true);
5656 /* A subroutine of lower_omp_single. Expand the simple form of
5657 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5659 if (GOMP_single_start ())
5660 BODY;
5661 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5663 FIXME. It may be better to delay expanding the logic of this until
5664 pass_expand_omp. The expanded logic may make the job more difficult
5665 for a synchronization analysis pass. */
5667 static void
5668 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5670 location_t loc = gimple_location (single_stmt);
5671 tree tlabel = create_artificial_label (loc);
5672 tree flabel = create_artificial_label (loc);
5673 gimple call, cond;
5674 tree lhs, decl;
5676 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5677 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5678 call = gimple_build_call (decl, 0);
5679 gimple_call_set_lhs (call, lhs);
5680 gimple_seq_add_stmt (pre_p, call);
5682 cond = gimple_build_cond (EQ_EXPR, lhs,
5683 fold_convert_loc (loc, TREE_TYPE (lhs),
5684 boolean_true_node),
5685 tlabel, flabel);
5686 gimple_seq_add_stmt (pre_p, cond);
5687 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5688 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5689 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
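/* A sketch, kept under #if 0 and not compiled, of the lowered form built
   above.  The GOMP_single_start prototype is an assumption made only for
   this illustration, and body () stands in for the single region's body.  */
#if 0
extern int GOMP_single_start (void);	/* assumed prototype */

static void
single_simple_sketch (void)
{
  if (GOMP_single_start ())
    body ();				/* hypothetical single body */
  /* The GIMPLE_OMP_RETURN added by lower_omp_single later becomes a
     GOMP_barrier call unless the nowait clause was given.  */
}
#endif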
5693 /* A subroutine of lower_omp_single. Expand the simple form of
5694 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
5696 #pragma omp single copyprivate (a, b, c)
5698 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5701 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5703 BODY;
5704 copyout.a = a;
5705 copyout.b = b;
5706 copyout.c = c;
5707 GOMP_single_copy_end (&copyout);
5709 else
5711 a = copyout_p->a;
5712 b = copyout_p->b;
5713 c = copyout_p->c;
5715 GOMP_barrier ();
5718 FIXME. It may be better to delay expanding the logic of this until
5719 pass_expand_omp. The expanded logic may make the job more difficult
5720 for a synchronization analysis pass. */
5722 static void
5723 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5725 tree ptr_type, t, l0, l1, l2;
5726 gimple_seq copyin_seq;
5727 location_t loc = gimple_location (single_stmt);
5729 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5731 ptr_type = build_pointer_type (ctx->record_type);
5732 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5734 l0 = create_artificial_label (loc);
5735 l1 = create_artificial_label (loc);
5736 l2 = create_artificial_label (loc);
5738 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5739 t = fold_convert_loc (loc, ptr_type, t);
5740 gimplify_assign (ctx->receiver_decl, t, pre_p);
5742 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5743 build_int_cst (ptr_type, 0));
5744 t = build3 (COND_EXPR, void_type_node, t,
5745 build_and_jump (&l0), build_and_jump (&l1));
5746 gimplify_and_add (t, pre_p);
5748 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5750 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5752 copyin_seq = NULL;
5753 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5754 &copyin_seq, ctx);
5756 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
5757 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END],
5758 1, t);
5759 gimplify_and_add (t, pre_p);
5761 t = build_and_jump (&l2);
5762 gimplify_and_add (t, pre_p);
5764 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5766 gimple_seq_add_seq (pre_p, copyin_seq);
5768 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
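/* Illustrative sketch (hypothetical user code):

       int a;
       #pragma omp single copyprivate (a)
         a = read_input ();

   With the record type built during scanning, the thread for which
   GOMP_single_copy_start returns NULL executes the body, stores 'a' into
   the sender record (.omp_copy_o) and passes its address to
   GOMP_single_copy_end; every other thread receives that address in
   .omp_copy_i and copies the field back into its private 'a' through the
   copyin sequence produced by lower_copyprivate_clauses.  */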
5772 /* Expand code for an OpenMP single directive. */
5774 static void
5775 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5777 tree block;
5778 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5779 gimple_seq bind_body, dlist;
5780 struct gimplify_ctx gctx;
5782 push_gimplify_context (&gctx);
5784 bind_body = NULL;
5785 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5786 &bind_body, &dlist, ctx);
5787 lower_omp (gimple_omp_body (single_stmt), ctx);
5789 gimple_seq_add_stmt (&bind_body, single_stmt);
5791 if (ctx->record_type)
5792 lower_omp_single_copy (single_stmt, &bind_body, ctx);
5793 else
5794 lower_omp_single_simple (single_stmt, &bind_body);
5796 gimple_omp_set_body (single_stmt, NULL);
5798 gimple_seq_add_seq (&bind_body, dlist);
5800 bind_body = maybe_catch_exception (bind_body);
5802 t = gimple_build_omp_return
5803 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5804 OMP_CLAUSE_NOWAIT));
5805 gimple_seq_add_stmt (&bind_body, t);
5807 block = make_node (BLOCK);
5808 bind = gimple_build_bind (NULL, bind_body, block);
5810 pop_gimplify_context (bind);
5812 gimple_bind_append_vars (bind, ctx->block_vars);
5813 BLOCK_VARS (block) = ctx->block_vars;
5814 gsi_replace (gsi_p, bind, true);
5815 if (BLOCK_VARS (block))
5816 TREE_USED (block) = 1;
5820 /* Expand code for an OpenMP master directive. */
5822 static void
5823 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5825 tree block, lab = NULL, x;
5826 gimple stmt = gsi_stmt (*gsi_p), bind;
5827 location_t loc = gimple_location (stmt);
5828 gimple_seq tseq;
5829 struct gimplify_ctx gctx;
5831 push_gimplify_context (&gctx);
5833 block = make_node (BLOCK);
5834 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5835 block);
5837 x = build_call_expr_loc (loc, built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5838 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5839 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
5840 tseq = NULL;
5841 gimplify_and_add (x, &tseq);
5842 gimple_bind_add_seq (bind, tseq);
5844 lower_omp (gimple_omp_body (stmt), ctx);
5845 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5846 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5847 gimple_omp_set_body (stmt, NULL);
5849 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5851 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5853 pop_gimplify_context (bind);
5855 gimple_bind_append_vars (bind, ctx->block_vars);
5856 BLOCK_VARS (block) = ctx->block_vars;
5857 gsi_replace (gsi_p, bind, true);
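/* Illustrative example (hypothetical user code): the lowering above turns

       #pragma omp master
         log_progress ();

   into, roughly,

       if (omp_get_thread_num () != 0) goto lab;
       log_progress ();
     lab:

   so only the thread with id 0 runs the body.  No barrier is implied,
   which is why the GIMPLE_OMP_RETURN is built with nowait set.  */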
5861 /* Expand code for an OpenMP ordered directive. */
5863 static void
5864 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5866 tree block;
5867 gimple stmt = gsi_stmt (*gsi_p), bind, x;
5868 struct gimplify_ctx gctx;
5870 push_gimplify_context (&gctx);
5872 block = make_node (BLOCK);
5873 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5874 block);
5876 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5877 gimple_bind_add_stmt (bind, x);
5879 lower_omp (gimple_omp_body (stmt), ctx);
5880 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5881 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5882 gimple_omp_set_body (stmt, NULL);
5884 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5885 gimple_bind_add_stmt (bind, x);
5887 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5889 pop_gimplify_context (bind);
5891 gimple_bind_append_vars (bind, ctx->block_vars);
5892 BLOCK_VARS (block) = gimple_bind_vars (bind);
5893 gsi_replace (gsi_p, bind, true);
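/* Illustrative example (hypothetical user code): inside an ordered loop,

       #pragma omp ordered
         emit_in_order (i);

   becomes, roughly,

       GOMP_ordered_start ();
       emit_in_order (i);
       GOMP_ordered_end ();

   with the body additionally wrapped by maybe_catch_exception where the
   language needs EH protection.  */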
5897 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
5898 substitution of a couple of function calls.  But the NAMED case
5899 requires that the languages coordinate a symbol name.  It is therefore
5900 best put here in common code.  */
5902 static GTY((param1_is (tree), param2_is (tree)))
5903 splay_tree critical_name_mutexes;
5905 static void
5906 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5908 tree block;
5909 tree name, lock, unlock;
5910 gimple stmt = gsi_stmt (*gsi_p), bind;
5911 location_t loc = gimple_location (stmt);
5912 gimple_seq tbody;
5913 struct gimplify_ctx gctx;
5915 name = gimple_omp_critical_name (stmt);
5916 if (name)
5918 tree decl;
5919 splay_tree_node n;
5921 if (!critical_name_mutexes)
5922 critical_name_mutexes
5923 = splay_tree_new_ggc (splay_tree_compare_pointers,
5924 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
5925 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
5927 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5928 if (n == NULL)
5930 char *new_str;
5932 decl = create_tmp_var_raw (ptr_type_node, NULL);
5934 new_str = ACONCAT ((".gomp_critical_user_",
5935 IDENTIFIER_POINTER (name), NULL));
5936 DECL_NAME (decl) = get_identifier (new_str);
5937 TREE_PUBLIC (decl) = 1;
5938 TREE_STATIC (decl) = 1;
5939 DECL_COMMON (decl) = 1;
5940 DECL_ARTIFICIAL (decl) = 1;
5941 DECL_IGNORED_P (decl) = 1;
5942 varpool_finalize_decl (decl);
5944 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5945 (splay_tree_value) decl);
5947 else
5948 decl = (tree) n->value;
5950 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5951 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
5953 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5954 unlock = build_call_expr_loc (loc, unlock, 1,
5955 build_fold_addr_expr_loc (loc, decl));
5957 else
5959 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5960 lock = build_call_expr_loc (loc, lock, 0);
5962 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
5963 unlock = build_call_expr_loc (loc, unlock, 0);
5966 push_gimplify_context (&gctx);
5968 block = make_node (BLOCK);
5969 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
5971 tbody = gimple_bind_body (bind);
5972 gimplify_and_add (lock, &tbody);
5973 gimple_bind_set_body (bind, tbody);
5975 lower_omp (gimple_omp_body (stmt), ctx);
5976 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5977 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5978 gimple_omp_set_body (stmt, NULL);
5980 tbody = gimple_bind_body (bind);
5981 gimplify_and_add (unlock, &tbody);
5982 gimple_bind_set_body (bind, tbody);
5984 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5986 pop_gimplify_context (bind);
5987 gimple_bind_append_vars (bind, ctx->block_vars);
5988 BLOCK_VARS (block) = gimple_bind_vars (bind);
5989 gsi_replace (gsi_p, bind, true);
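/* Illustrative example (hypothetical user code): a named critical section

       #pragma omp critical (counters)
         ++n_hits;

   is lowered, roughly, to

       GOMP_critical_name_start (&.gomp_critical_user_counters);
       ++n_hits;
       GOMP_critical_name_end (&.gomp_critical_user_counters);

   where the mutex is the common, TREE_PUBLIC decl created above, so all
   translation units naming the same critical section share one lock.
   The unnamed form calls GOMP_critical_start/GOMP_critical_end instead.  */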
5993 /* A subroutine of lower_omp_for. Generate code to emit the predicate
5994 for a lastprivate clause. Given a loop control predicate of (V
5995 cond N2), we gate the clause on (!(V cond N2)). The lowered form
5996 is appended to *DLIST, iterator initialization is appended to
5997 *BODY_P. */
5999 static void
6000 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
6001 gimple_seq *dlist, struct omp_context *ctx)
6003 tree clauses, cond, vinit;
6004 enum tree_code cond_code;
6005 gimple_seq stmts;
6007 cond_code = fd->loop.cond_code;
6008 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6010 /* When possible, use a strict equality expression. This can let VRP
6011 type optimizations deduce the value and remove a copy. */
6012 if (host_integerp (fd->loop.step, 0))
6014 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
6015 if (step == 1 || step == -1)
6016 cond_code = EQ_EXPR;
6019 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
6021 clauses = gimple_omp_for_clauses (fd->for_stmt);
6022 stmts = NULL;
6023 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
6024 if (!gimple_seq_empty_p (stmts))
6026 gimple_seq_add_seq (&stmts, *dlist);
6027 *dlist = stmts;
6029 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
6030 vinit = fd->loop.n1;
6031 if (cond_code == EQ_EXPR
6032 && host_integerp (fd->loop.n2, 0)
6033 && ! integer_zerop (fd->loop.n2))
6034 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
6036 /* Initialize the iterator variable, so that threads that don't execute
6037 any iterations don't execute the lastprivate clauses by accident. */
6038 gimplify_assign (fd->loop.v, vinit, body_p);
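/* Illustrative example (hypothetical user code): for

       #pragma omp for lastprivate (x)
       for (i = 0; i < n; i++)
         x = f (i);

   the copy-back of the private 'x' into the original variable is guarded
   by the negated loop predicate, here "i >= n" (or "i == n" when the step
   is known to be +1 or -1), so it only runs where the iterator reached the
   end value; the gimplify_assign above additionally pre-initializes the
   iterator so that threads executing no iterations do not run the
   lastprivate code by accident.  */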
6043 /* Lower code for an OpenMP loop directive. */
6045 static void
6046 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6048 tree *rhs_p, block;
6049 struct omp_for_data fd;
6050 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6051 gimple_seq omp_for_body, body, dlist;
6052 size_t i;
6053 struct gimplify_ctx gctx;
6055 push_gimplify_context (&gctx);
6057 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
6058 lower_omp (gimple_omp_body (stmt), ctx);
6060 block = make_node (BLOCK);
6061 new_stmt = gimple_build_bind (NULL, NULL, block);
6063 /* Move the declarations of temporaries in the loop body out of it
6064 before we make the body go away.  */
6065 omp_for_body = gimple_omp_body (stmt);
6066 if (!gimple_seq_empty_p (omp_for_body)
6067 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6069 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6070 gimple_bind_append_vars (new_stmt, vars);
6073 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
6074 dlist = NULL;
6075 body = NULL;
6076 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6077 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6079 /* Lower the header expressions. At this point, we can assume that
6080 the header is of the form:
6082 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6084 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6085 using the .omp_data_s mapping, if needed. */
6086 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6088 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6089 if (!is_gimple_min_invariant (*rhs_p))
6090 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6092 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6093 if (!is_gimple_min_invariant (*rhs_p))
6094 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6096 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6097 if (!is_gimple_min_invariant (*rhs_p))
6098 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6101 /* Once lowered, extract the bounds and clauses. */
6102 extract_omp_for_data (stmt, &fd, NULL);
6104 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6106 gimple_seq_add_stmt (&body, stmt);
6107 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6109 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6110 fd.loop.v));
6112 /* After the loop, add exit clauses. */
6113 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6114 gimple_seq_add_seq (&body, dlist);
6116 body = maybe_catch_exception (body);
6118 /* Region exit marker goes at the end of the loop body. */
6119 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6121 pop_gimplify_context (new_stmt);
6123 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6124 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6125 if (BLOCK_VARS (block))
6126 TREE_USED (block) = 1;
6128 gimple_bind_set_body (new_stmt, body);
6129 gimple_omp_set_body (stmt, NULL);
6130 gimple_omp_for_set_pre_body (stmt, NULL);
6131 gsi_replace (gsi_p, new_stmt, true);
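/* Illustrative sketch (hypothetical user code): in

       #pragma omp parallel for shared (n)
       for (i = 0; i < n; i++)
         work (i);

   the bound 'n' may have been rewritten to a reference into the
   .omp_data_s record of the child function and so is no longer a gimple
   invariant; the get_formal_tmp_var calls above pull such initial, final
   and increment expressions into temporaries in BODY ahead of the
   GIMPLE_OMP_FOR.  */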
6134 /* Callback for walk_stmts.  Check if the current statement only contains
6135 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  */
6137 static tree
6138 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6139 bool *handled_ops_p,
6140 struct walk_stmt_info *wi)
6142 int *info = (int *) wi->info;
6143 gimple stmt = gsi_stmt (*gsi_p);
6145 *handled_ops_p = true;
6146 switch (gimple_code (stmt))
6148 WALK_SUBSTMTS;
6150 case GIMPLE_OMP_FOR:
6151 case GIMPLE_OMP_SECTIONS:
6152 *info = *info == 0 ? 1 : -1;
6153 break;
6154 default:
6155 *info = -1;
6156 break;
6158 return NULL;
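/* Illustrative note (hypothetical user code): for a body such as

       #pragma omp parallel
       #pragma omp for
       for (i = 0; i < n; i++)
         work (i);

   the walk sees exactly one worksharing construct and nothing else, so
   *info ends up as 1 and lower_omp_taskreg marks the parallel as
   combined; any other top-level statement forces *info to -1.  */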
6161 struct omp_taskcopy_context
6163 /* This field must be at the beginning, as we do "inheritance":  Some
6164 callback functions for tree-inline.c (e.g., task_copyfn_copy_decl)
6165 receive a copy_body_data pointer that is up-casted to an
6166 omp_taskcopy_context pointer.  */
6167 copy_body_data cb;
6168 omp_context *ctx;
6171 static tree
6172 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6174 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6176 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6177 return create_tmp_var (TREE_TYPE (var), NULL);
6179 return var;
6182 static tree
6183 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6185 tree name, new_fields = NULL, type, f;
6187 type = lang_hooks.types.make_type (RECORD_TYPE);
6188 name = DECL_NAME (TYPE_NAME (orig_type));
6189 name = build_decl (gimple_location (tcctx->ctx->stmt),
6190 TYPE_DECL, name, type);
6191 TYPE_NAME (type) = name;
6193 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6195 tree new_f = copy_node (f);
6196 DECL_CONTEXT (new_f) = type;
6197 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6198 TREE_CHAIN (new_f) = new_fields;
6199 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6200 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6201 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6202 &tcctx->cb, NULL);
6203 new_fields = new_f;
6204 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6206 TYPE_FIELDS (type) = nreverse (new_fields);
6207 layout_type (type);
6208 return type;
6211 /* Create task copyfn. */
6213 static void
6214 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6216 struct function *child_cfun;
6217 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6218 tree record_type, srecord_type, bind, list;
6219 bool record_needs_remap = false, srecord_needs_remap = false;
6220 splay_tree_node n;
6221 struct omp_taskcopy_context tcctx;
6222 struct gimplify_ctx gctx;
6223 location_t loc = gimple_location (task_stmt);
6225 child_fn = gimple_omp_task_copy_fn (task_stmt);
6226 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6227 gcc_assert (child_cfun->cfg == NULL);
6228 child_cfun->dont_save_pending_sizes_p = 1;
6229 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6231 /* Reset DECL_CONTEXT on function arguments. */
6232 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
6233 DECL_CONTEXT (t) = child_fn;
6235 /* Populate the function. */
6236 push_gimplify_context (&gctx);
6237 current_function_decl = child_fn;
6239 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6240 TREE_SIDE_EFFECTS (bind) = 1;
6241 list = NULL;
6242 DECL_SAVED_TREE (child_fn) = bind;
6243 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6245 /* Remap src and dst argument types if needed. */
6246 record_type = ctx->record_type;
6247 srecord_type = ctx->srecord_type;
6248 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
6249 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6251 record_needs_remap = true;
6252 break;
6254 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
6255 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6257 srecord_needs_remap = true;
6258 break;
6261 if (record_needs_remap || srecord_needs_remap)
6263 memset (&tcctx, '\0', sizeof (tcctx));
6264 tcctx.cb.src_fn = ctx->cb.src_fn;
6265 tcctx.cb.dst_fn = child_fn;
6266 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6267 tcctx.cb.dst_node = tcctx.cb.src_node;
6268 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6269 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6270 tcctx.cb.eh_lp_nr = 0;
6271 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6272 tcctx.cb.decl_map = pointer_map_create ();
6273 tcctx.ctx = ctx;
6275 if (record_needs_remap)
6276 record_type = task_copyfn_remap_type (&tcctx, record_type);
6277 if (srecord_needs_remap)
6278 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6280 else
6281 tcctx.cb.decl_map = NULL;
6283 push_cfun (child_cfun);
6285 arg = DECL_ARGUMENTS (child_fn);
6286 TREE_TYPE (arg) = build_pointer_type (record_type);
6287 sarg = DECL_CHAIN (arg);
6288 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6290 /* First pass: initialize temporaries used in record_type and srecord_type
6291 sizes and field offsets. */
6292 if (tcctx.cb.decl_map)
6293 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6294 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6296 tree *p;
6298 decl = OMP_CLAUSE_DECL (c);
6299 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6300 if (p == NULL)
6301 continue;
6302 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6303 sf = (tree) n->value;
6304 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6305 src = build_simple_mem_ref_loc (loc, sarg);
6306 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6307 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6308 append_to_statement_list (t, &list);
6311 /* Second pass: copy shared var pointers and copy-construct non-VLA
6312 firstprivate vars.  */
6313 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6314 switch (OMP_CLAUSE_CODE (c))
6316 case OMP_CLAUSE_SHARED:
6317 decl = OMP_CLAUSE_DECL (c);
6318 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6319 if (n == NULL)
6320 break;
6321 f = (tree) n->value;
6322 if (tcctx.cb.decl_map)
6323 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6324 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6325 sf = (tree) n->value;
6326 if (tcctx.cb.decl_map)
6327 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6328 src = build_simple_mem_ref_loc (loc, sarg);
6329 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6330 dst = build_simple_mem_ref_loc (loc, arg);
6331 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6332 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6333 append_to_statement_list (t, &list);
6334 break;
6335 case OMP_CLAUSE_FIRSTPRIVATE:
6336 decl = OMP_CLAUSE_DECL (c);
6337 if (is_variable_sized (decl))
6338 break;
6339 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6340 if (n == NULL)
6341 break;
6342 f = (tree) n->value;
6343 if (tcctx.cb.decl_map)
6344 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6345 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6346 if (n != NULL)
6348 sf = (tree) n->value;
6349 if (tcctx.cb.decl_map)
6350 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6351 src = build_simple_mem_ref_loc (loc, sarg);
6352 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6353 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6354 src = build_simple_mem_ref_loc (loc, src);
6356 else
6357 src = decl;
6358 dst = build_simple_mem_ref_loc (loc, arg);
6359 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6360 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6361 append_to_statement_list (t, &list);
6362 break;
6363 case OMP_CLAUSE_PRIVATE:
6364 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6365 break;
6366 decl = OMP_CLAUSE_DECL (c);
6367 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6368 f = (tree) n->value;
6369 if (tcctx.cb.decl_map)
6370 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6371 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6372 if (n != NULL)
6374 sf = (tree) n->value;
6375 if (tcctx.cb.decl_map)
6376 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6377 src = build_simple_mem_ref_loc (loc, sarg);
6378 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6379 if (use_pointer_for_field (decl, NULL))
6380 src = build_simple_mem_ref_loc (loc, src);
6382 else
6383 src = decl;
6384 dst = build_simple_mem_ref_loc (loc, arg);
6385 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6386 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6387 append_to_statement_list (t, &list);
6388 break;
6389 default:
6390 break;
6393 /* Last pass: handle VLA firstprivates. */
6394 if (tcctx.cb.decl_map)
6395 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6396 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6398 tree ind, ptr, df;
6400 decl = OMP_CLAUSE_DECL (c);
6401 if (!is_variable_sized (decl))
6402 continue;
6403 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6404 if (n == NULL)
6405 continue;
6406 f = (tree) n->value;
6407 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6408 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6409 ind = DECL_VALUE_EXPR (decl);
6410 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6411 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6412 n = splay_tree_lookup (ctx->sfield_map,
6413 (splay_tree_key) TREE_OPERAND (ind, 0));
6414 sf = (tree) n->value;
6415 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6416 src = build_simple_mem_ref_loc (loc, sarg);
6417 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6418 src = build_simple_mem_ref_loc (loc, src);
6419 dst = build_simple_mem_ref_loc (loc, arg);
6420 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6421 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6422 append_to_statement_list (t, &list);
6423 n = splay_tree_lookup (ctx->field_map,
6424 (splay_tree_key) TREE_OPERAND (ind, 0));
6425 df = (tree) n->value;
6426 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6427 ptr = build_simple_mem_ref_loc (loc, arg);
6428 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6429 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6430 build_fold_addr_expr_loc (loc, dst));
6431 append_to_statement_list (t, &list);
6434 t = build1 (RETURN_EXPR, void_type_node, NULL);
6435 append_to_statement_list (t, &list);
6437 if (tcctx.cb.decl_map)
6438 pointer_map_destroy (tcctx.cb.decl_map);
6439 pop_gimplify_context (NULL);
6440 BIND_EXPR_BODY (bind) = list;
6441 pop_cfun ();
6442 current_function_decl = ctx->cb.src_fn;
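/* Illustrative sketch (hypothetical user code): for

       #pragma omp task firstprivate (buf)
         consume (buf);

   where 'buf' needs copy construction (e.g. a C++ object or a VLA), the
   copy function built above takes the destination record allocated by
   GOMP_task and the source record filled in by the encountering thread,
   copy-constructs the firstprivate fields, and in the last pass fixes up
   the pointer fields that refer to variable-sized payloads.  */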
6445 /* Lower the OpenMP parallel or task directive in the current statement
6446 in GSI_P. CTX holds context information for the directive. */
6448 static void
6449 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6451 tree clauses;
6452 tree child_fn, t;
6453 gimple stmt = gsi_stmt (*gsi_p);
6454 gimple par_bind, bind;
6455 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6456 struct gimplify_ctx gctx;
6457 location_t loc = gimple_location (stmt);
6459 clauses = gimple_omp_taskreg_clauses (stmt);
6460 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6461 par_body = gimple_bind_body (par_bind);
6462 child_fn = ctx->cb.dst_fn;
6463 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6464 && !gimple_omp_parallel_combined_p (stmt))
6466 struct walk_stmt_info wi;
6467 int ws_num = 0;
6469 memset (&wi, 0, sizeof (wi));
6470 wi.info = &ws_num;
6471 wi.val_only = true;
6472 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6473 if (ws_num == 1)
6474 gimple_omp_parallel_set_combined_p (stmt, true);
6476 if (ctx->srecord_type)
6477 create_task_copyfn (stmt, ctx);
6479 push_gimplify_context (&gctx);
6481 par_olist = NULL;
6482 par_ilist = NULL;
6483 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6484 lower_omp (par_body, ctx);
6485 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6486 lower_reduction_clauses (clauses, &par_olist, ctx);
6488 /* Declare all the variables created by mapping and the variables
6489 declared in the scope of the parallel body. */
6490 record_vars_into (ctx->block_vars, child_fn);
6491 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6493 if (ctx->record_type)
6495 ctx->sender_decl
6496 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6497 : ctx->record_type, ".omp_data_o");
6498 DECL_NAMELESS (ctx->sender_decl) = 1;
6499 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6500 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6503 olist = NULL;
6504 ilist = NULL;
6505 lower_send_clauses (clauses, &ilist, &olist, ctx);
6506 lower_send_shared_vars (&ilist, &olist, ctx);
6508 /* Once all the expansions are done, sequence all the different
6509 fragments inside gimple_omp_body. */
6511 new_body = NULL;
6513 if (ctx->record_type)
6515 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6516 /* fixup_child_record_type might have changed receiver_decl's type. */
6517 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
6518 gimple_seq_add_stmt (&new_body,
6519 gimple_build_assign (ctx->receiver_decl, t));
6522 gimple_seq_add_seq (&new_body, par_ilist);
6523 gimple_seq_add_seq (&new_body, par_body);
6524 gimple_seq_add_seq (&new_body, par_olist);
6525 new_body = maybe_catch_exception (new_body);
6526 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6527 gimple_omp_set_body (stmt, new_body);
6529 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6530 gimple_bind_add_stmt (bind, stmt);
6531 if (ilist || olist)
6533 gimple_seq_add_stmt (&ilist, bind);
6534 gimple_seq_add_seq (&ilist, olist);
6535 bind = gimple_build_bind (NULL, ilist, NULL);
6538 gsi_replace (gsi_p, bind, true);
6540 pop_gimplify_context (NULL);
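/* Illustrative sketch (hypothetical user code): for

       #pragma omp parallel shared (a) firstprivate (b)
         use (a, b);

   the send clauses store 'a' (by value or by address, as decided by
   use_pointer_for_field) and 'b' into the .omp_data_o record emitted
   around the GIMPLE_OMP_PARALLEL, and the first statement of the new
   body assigns that record's address to the child's .omp_data_i
   receiver, as built just above.  */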
6543 /* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
6544 regimplified.  If DATA is non-NULL, lower_omp_1 is outside
6545 of an OpenMP context, but with task_shared_vars set.  */
6547 static tree
6548 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6549 void *data)
6551 tree t = *tp;
6553 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6554 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6555 return t;
6557 if (task_shared_vars
6558 && DECL_P (t)
6559 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6560 return t;
6562 /* If a global variable has been privatized, TREE_CONSTANT on
6563 ADDR_EXPR might be wrong. */
6564 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6565 recompute_tree_invariant_for_addr_expr (t);
6567 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6568 return NULL_TREE;
6571 static void
6572 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6574 gimple stmt = gsi_stmt (*gsi_p);
6575 struct walk_stmt_info wi;
6577 if (gimple_has_location (stmt))
6578 input_location = gimple_location (stmt);
6580 if (task_shared_vars)
6581 memset (&wi, '\0', sizeof (wi));
6583 /* If we have issued syntax errors, avoid doing any heavy lifting.
6584 Just replace the OpenMP directives with a NOP to avoid
6585 confusing RTL expansion. */
6586 if (seen_error () && is_gimple_omp (stmt))
6588 gsi_replace (gsi_p, gimple_build_nop (), true);
6589 return;
6592 switch (gimple_code (stmt))
6594 case GIMPLE_COND:
6595 if ((ctx || task_shared_vars)
6596 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6597 ctx ? NULL : &wi, NULL)
6598 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6599 ctx ? NULL : &wi, NULL)))
6600 gimple_regimplify_operands (stmt, gsi_p);
6601 break;
6602 case GIMPLE_CATCH:
6603 lower_omp (gimple_catch_handler (stmt), ctx);
6604 break;
6605 case GIMPLE_EH_FILTER:
6606 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6607 break;
6608 case GIMPLE_TRY:
6609 lower_omp (gimple_try_eval (stmt), ctx);
6610 lower_omp (gimple_try_cleanup (stmt), ctx);
6611 break;
6612 case GIMPLE_BIND:
6613 lower_omp (gimple_bind_body (stmt), ctx);
6614 break;
6615 case GIMPLE_OMP_PARALLEL:
6616 case GIMPLE_OMP_TASK:
6617 ctx = maybe_lookup_ctx (stmt);
6618 lower_omp_taskreg (gsi_p, ctx);
6619 break;
6620 case GIMPLE_OMP_FOR:
6621 ctx = maybe_lookup_ctx (stmt);
6622 gcc_assert (ctx);
6623 lower_omp_for (gsi_p, ctx);
6624 break;
6625 case GIMPLE_OMP_SECTIONS:
6626 ctx = maybe_lookup_ctx (stmt);
6627 gcc_assert (ctx);
6628 lower_omp_sections (gsi_p, ctx);
6629 break;
6630 case GIMPLE_OMP_SINGLE:
6631 ctx = maybe_lookup_ctx (stmt);
6632 gcc_assert (ctx);
6633 lower_omp_single (gsi_p, ctx);
6634 break;
6635 case GIMPLE_OMP_MASTER:
6636 ctx = maybe_lookup_ctx (stmt);
6637 gcc_assert (ctx);
6638 lower_omp_master (gsi_p, ctx);
6639 break;
6640 case GIMPLE_OMP_ORDERED:
6641 ctx = maybe_lookup_ctx (stmt);
6642 gcc_assert (ctx);
6643 lower_omp_ordered (gsi_p, ctx);
6644 break;
6645 case GIMPLE_OMP_CRITICAL:
6646 ctx = maybe_lookup_ctx (stmt);
6647 gcc_assert (ctx);
6648 lower_omp_critical (gsi_p, ctx);
6649 break;
6650 case GIMPLE_OMP_ATOMIC_LOAD:
6651 if ((ctx || task_shared_vars)
6652 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6653 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6654 gimple_regimplify_operands (stmt, gsi_p);
6655 break;
6656 default:
6657 if ((ctx || task_shared_vars)
6658 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6659 ctx ? NULL : &wi))
6660 gimple_regimplify_operands (stmt, gsi_p);
6661 break;
6665 static void
6666 lower_omp (gimple_seq body, omp_context *ctx)
6668 location_t saved_location = input_location;
6669 gimple_stmt_iterator gsi;
6670 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6671 lower_omp_1 (&gsi, ctx);
6672 input_location = saved_location;
6675 /* Main entry point. */
6677 static unsigned int
6678 execute_lower_omp (void)
6680 gimple_seq body;
6682 /* This pass always runs, to provide PROP_gimple_lomp.
6683 But there is nothing to do unless -fopenmp is given. */
6684 if (flag_openmp == 0)
6685 return 0;
6687 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6688 delete_omp_context);
6690 body = gimple_body (current_function_decl);
6691 scan_omp (body, NULL);
6692 gcc_assert (taskreg_nesting_level == 0);
6694 if (all_contexts->root)
6696 struct gimplify_ctx gctx;
6698 if (task_shared_vars)
6699 push_gimplify_context (&gctx);
6700 lower_omp (body, NULL);
6701 if (task_shared_vars)
6702 pop_gimplify_context (NULL);
6705 if (all_contexts)
6707 splay_tree_delete (all_contexts);
6708 all_contexts = NULL;
6710 BITMAP_FREE (task_shared_vars);
6711 return 0;
6714 struct gimple_opt_pass pass_lower_omp =
6717 GIMPLE_PASS,
6718 "omplower", /* name */
6719 NULL, /* gate */
6720 execute_lower_omp, /* execute */
6721 NULL, /* sub */
6722 NULL, /* next */
6723 0, /* static_pass_number */
6724 TV_NONE, /* tv_id */
6725 PROP_gimple_any, /* properties_required */
6726 PROP_gimple_lomp, /* properties_provided */
6727 0, /* properties_destroyed */
6728 0, /* todo_flags_start */
6729 TODO_dump_func /* todo_flags_finish */
6733 /* The following is a utility to diagnose OpenMP structured block violations.
6734 It is not part of the "omplower" pass, as that's invoked too late. It
6735 should be invoked by the respective front ends after gimplification. */
6737 static splay_tree all_labels;
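/* An example of what this utility diagnoses (hypothetical user code):

       #pragma omp parallel
       {
         if (err)
           goto out;
         work ();
       }
     out:
       cleanup ();

   The branch's OMP context (the parallel) differs from the label's
   context (none), so diagnose_sb_0 below reports the violation and
   replaces the offending branch with a nop.  */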
6739 /* Check for mismatched contexts and generate an error if needed. Return
6740 true if an error is detected. */
6742 static bool
6743 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6744 gimple branch_ctx, gimple label_ctx)
6746 if (label_ctx == branch_ctx)
6747 return false;
6750 /*
6751 Previously we kept track of the label's entire context in diagnose_sb_[12]
6752 so we could traverse it and issue a correct "exit" or "enter" error
6753 message upon a structured block violation.
6755 We built the context by building a list with tree_cons'ing, but there is
6756 no easy counterpart in gimple tuples.  It seems like far too much work
6757 for issuing exit/enter error messages.  If someone really misses the
6758 distinct error message... patches welcome.
6759 */
6761 #if 0
6762 /* Try to avoid confusing the user by producing an error message
6763 with correct "exit" or "enter" verbiage. We prefer "exit"
6764 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6765 if (branch_ctx == NULL)
6766 exit_p = false;
6767 else
6769 while (label_ctx)
6771 if (TREE_VALUE (label_ctx) == branch_ctx)
6773 exit_p = false;
6774 break;
6776 label_ctx = TREE_CHAIN (label_ctx);
6780 if (exit_p)
6781 error ("invalid exit from OpenMP structured block");
6782 else
6783 error ("invalid entry to OpenMP structured block");
6784 #endif
6786 /* If it's obvious we have an invalid entry, be specific about the error. */
6787 if (branch_ctx == NULL)
6788 error ("invalid entry to OpenMP structured block");
6789 else
6790 /* Otherwise, be vague and lazy, but efficient. */
6791 error ("invalid branch to/from an OpenMP structured block");
6793 gsi_replace (gsi_p, gimple_build_nop (), false);
6794 return true;
6797 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
6798 where each label is found. */
6800 static tree
6801 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6802 struct walk_stmt_info *wi)
6804 gimple context = (gimple) wi->info;
6805 gimple inner_context;
6806 gimple stmt = gsi_stmt (*gsi_p);
6808 *handled_ops_p = true;
6810 switch (gimple_code (stmt))
6812 WALK_SUBSTMTS;
6814 case GIMPLE_OMP_PARALLEL:
6815 case GIMPLE_OMP_TASK:
6816 case GIMPLE_OMP_SECTIONS:
6817 case GIMPLE_OMP_SINGLE:
6818 case GIMPLE_OMP_SECTION:
6819 case GIMPLE_OMP_MASTER:
6820 case GIMPLE_OMP_ORDERED:
6821 case GIMPLE_OMP_CRITICAL:
6822 /* The minimal context here is just the current OMP construct. */
6823 inner_context = stmt;
6824 wi->info = inner_context;
6825 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6826 wi->info = context;
6827 break;
6829 case GIMPLE_OMP_FOR:
6830 inner_context = stmt;
6831 wi->info = inner_context;
6832 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6833 walk them. */
6834 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6835 diagnose_sb_1, NULL, wi);
6836 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6837 wi->info = context;
6838 break;
6840 case GIMPLE_LABEL:
6841 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
6842 (splay_tree_value) context);
6843 break;
6845 default:
6846 break;
6849 return NULL_TREE;
6852 /* Pass 2: Check each branch and see if its context differs from that of
6853 the destination label's context. */
6855 static tree
6856 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6857 struct walk_stmt_info *wi)
6859 gimple context = (gimple) wi->info;
6860 splay_tree_node n;
6861 gimple stmt = gsi_stmt (*gsi_p);
6863 *handled_ops_p = true;
6865 switch (gimple_code (stmt))
6867 WALK_SUBSTMTS;
6869 case GIMPLE_OMP_PARALLEL:
6870 case GIMPLE_OMP_TASK:
6871 case GIMPLE_OMP_SECTIONS:
6872 case GIMPLE_OMP_SINGLE:
6873 case GIMPLE_OMP_SECTION:
6874 case GIMPLE_OMP_MASTER:
6875 case GIMPLE_OMP_ORDERED:
6876 case GIMPLE_OMP_CRITICAL:
6877 wi->info = stmt;
6878 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6879 wi->info = context;
6880 break;
6882 case GIMPLE_OMP_FOR:
6883 wi->info = stmt;
6884 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6885 walk them. */
6886 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6887 diagnose_sb_2, NULL, wi);
6888 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6889 wi->info = context;
6890 break;
6892 case GIMPLE_COND:
6894 tree lab = gimple_cond_true_label (stmt);
6895 if (lab)
6897 n = splay_tree_lookup (all_labels,
6898 (splay_tree_key) lab);
6899 diagnose_sb_0 (gsi_p, context,
6900 n ? (gimple) n->value : NULL);
6902 lab = gimple_cond_false_label (stmt);
6903 if (lab)
6905 n = splay_tree_lookup (all_labels,
6906 (splay_tree_key) lab);
6907 diagnose_sb_0 (gsi_p, context,
6908 n ? (gimple) n->value : NULL);
6911 break;
6913 case GIMPLE_GOTO:
6915 tree lab = gimple_goto_dest (stmt);
6916 if (TREE_CODE (lab) != LABEL_DECL)
6917 break;
6919 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6920 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
6922 break;
6924 case GIMPLE_SWITCH:
6926 unsigned int i;
6927 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
6929 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
6930 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6931 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
6932 break;
6935 break;
6937 case GIMPLE_RETURN:
6938 diagnose_sb_0 (gsi_p, context, NULL);
6939 break;
6941 default:
6942 break;
6945 return NULL_TREE;
6948 static unsigned int
6949 diagnose_omp_structured_block_errors (void)
6951 struct walk_stmt_info wi;
6952 gimple_seq body = gimple_body (current_function_decl);
6954 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
6956 memset (&wi, 0, sizeof (wi));
6957 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
6959 memset (&wi, 0, sizeof (wi));
6960 wi.want_locations = true;
6961 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
6963 splay_tree_delete (all_labels);
6964 all_labels = NULL;
6966 return 0;
6969 static bool
6970 gate_diagnose_omp_blocks (void)
6972 return flag_openmp != 0;
6975 struct gimple_opt_pass pass_diagnose_omp_blocks =
6978 GIMPLE_PASS,
6979 "*diagnose_omp_blocks", /* name */
6980 gate_diagnose_omp_blocks, /* gate */
6981 diagnose_omp_structured_block_errors, /* execute */
6982 NULL, /* sub */
6983 NULL, /* next */
6984 0, /* static_pass_number */
6985 TV_NONE, /* tv_id */
6986 PROP_gimple_any, /* properties_required */
6987 0, /* properties_provided */
6988 0, /* properties_destroyed */
6989 0, /* todo_flags_start */
6990 0, /* todo_flags_finish */
6994 #include "gt-omp-low.h"