gcc/omp-low.c
1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
6 Copyright (C) 2005-2013 Free Software Foundation, Inc.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "gimple.h"
31 #include "tree-iterator.h"
32 #include "tree-inline.h"
33 #include "langhooks.h"
34 #include "diagnostic-core.h"
35 #include "tree-flow.h"
36 #include "flags.h"
37 #include "function.h"
38 #include "expr.h"
39 #include "tree-pass.h"
40 #include "ggc.h"
41 #include "except.h"
42 #include "splay-tree.h"
43 #include "optabs.h"
44 #include "cfgloop.h"
47 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
48 phases. The first phase scans the function looking for OMP statements
49 and then for variables that must be replaced to satisfy data sharing
50 clauses. The second phase expands code for the constructs, as well as
51 re-gimplifying things when variables have been replaced with complex
52 expressions.
54 Final code generation is done by pass_expand_omp. The flowgraph is
55 scanned for parallel regions which are then moved to a new
56 function, to be invoked by the thread library. */
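/* As a concrete sketch of the end result (identifiers and the exact call
   sequence here are illustrative, not a verbatim dump), a simple parallel
   region inside foo ()

       #pragma omp parallel shared (x)
         x = bar ();

   is outlined into a child function plus a call into libgomp roughly like

       void foo._omp_fn.0 (struct .omp_data_s * .omp_data_i)
       {
         .omp_data_i->x = bar ();
       }

       .omp_data_o.x = x;
       __builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
       foo._omp_fn.0 (&.omp_data_o);
       __builtin_GOMP_parallel_end ();
       x = .omp_data_o.x;

   where the trailing 0 requests the default number of threads and the
   copy-in/copy-out of x follows the rules in use_pointer_for_field
   below.  */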
58 /* Context structure. Used to store information about each parallel
59 directive in the code. */
61 typedef struct omp_context
63 /* This field must be at the beginning, as we do "inheritance": Some
64 callback functions for tree-inline.c (e.g., omp_copy_decl)
65 receive a copy_body_data pointer that is up-casted to an
66 omp_context pointer. */
67 copy_body_data cb;
69 /* The tree of contexts corresponding to the encountered constructs. */
70 struct omp_context *outer;
71 gimple stmt;
73 /* Map variables to fields in a structure that allows communication
74 between sending and receiving threads. */
75 splay_tree field_map;
76 tree record_type;
77 tree sender_decl;
78 tree receiver_decl;
80 /* These are used just by task contexts, if task firstprivate fn is
81 needed. srecord_type is used to communicate from the thread
82 that encountered the task construct to task firstprivate fn,
83 record_type is allocated by GOMP_task, initialized by task firstprivate
84 fn and passed to the task body fn. */
85 splay_tree sfield_map;
86 tree srecord_type;
88 /* A chain of variables to add to the top-level block surrounding the
89 construct. In the case of a parallel, this is in the child function. */
90 tree block_vars;
92 /* What to do with variables with implicitly determined sharing
93 attributes. */
94 enum omp_clause_default_kind default_kind;
96 /* Nesting depth of this context. Used to beautify error messages re
97 invalid gotos. The outermost ctx is depth 1, with depth 0 being
98 reserved for the main body of the function. */
99 int depth;
101 /* True if this parallel directive is nested within another. */
102 bool is_nested;
103 } omp_context;
106 struct omp_for_data_loop
108 tree v, n1, n2, step;
109 enum tree_code cond_code;
112 /* A structure describing the main elements of a parallel loop. */
114 struct omp_for_data
116 struct omp_for_data_loop loop;
117 tree chunk_size;
118 gimple for_stmt;
119 tree pre, iter_type;
120 int collapse;
121 bool have_nowait, have_ordered;
122 enum omp_clause_schedule_kind sched_kind;
123 struct omp_for_data_loop *loops;
127 static splay_tree all_contexts;
128 static int taskreg_nesting_level;
129 struct omp_region *root_omp_region;
130 static bitmap task_shared_vars;
132 static void scan_omp (gimple_seq *, omp_context *);
133 static tree scan_omp_1_op (tree *, int *, void *);
135 #define WALK_SUBSTMTS \
136 case GIMPLE_BIND: \
137 case GIMPLE_TRY: \
138 case GIMPLE_CATCH: \
139 case GIMPLE_EH_FILTER: \
140 case GIMPLE_TRANSACTION: \
141 /* The sub-statements for these should be walked. */ \
142 *handled_ops_p = false; \
143 break;
145 /* Convenience function for calling scan_omp_1_op on tree operands. */
147 static inline tree
148 scan_omp_op (tree *tp, omp_context *ctx)
150 struct walk_stmt_info wi;
152 memset (&wi, 0, sizeof (wi));
153 wi.info = ctx;
154 wi.want_locations = true;
156 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
159 static void lower_omp (gimple_seq *, omp_context *);
160 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
161 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
163 /* Find an OpenMP clause of type KIND within CLAUSES. */
165 tree
166 find_omp_clause (tree clauses, enum omp_clause_code kind)
168 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
169 if (OMP_CLAUSE_CODE (clauses) == kind)
170 return clauses;
172 return NULL_TREE;
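/* For example, determine_parallel_type below uses this to inspect a loop's
   schedule clause:

       tree clauses = gimple_omp_for_clauses (ws_stmt);
       tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
       if (c == NULL
           || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC)
         ...  */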
175 /* Return true if CTX is for an omp parallel. */
177 static inline bool
178 is_parallel_ctx (omp_context *ctx)
180 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
184 /* Return true if CTX is for an omp task. */
186 static inline bool
187 is_task_ctx (omp_context *ctx)
189 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
193 /* Return true if CTX is for an omp parallel or omp task. */
195 static inline bool
196 is_taskreg_ctx (omp_context *ctx)
198 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
199 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
203 /* Return true if REGION is a combined parallel+workshare region. */
205 static inline bool
206 is_combined_parallel (struct omp_region *region)
208 return region->is_combined_parallel;
212 /* Extract the header elements of parallel loop FOR_STMT and store
213 them into *FD. */
215 static void
216 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
217 struct omp_for_data_loop *loops)
219 tree t, var, *collapse_iter, *collapse_count;
220 tree count = NULL_TREE, iter_type = long_integer_type_node;
221 struct omp_for_data_loop *loop;
222 int i;
223 struct omp_for_data_loop dummy_loop;
224 location_t loc = gimple_location (for_stmt);
226 fd->for_stmt = for_stmt;
227 fd->pre = NULL;
228 fd->collapse = gimple_omp_for_collapse (for_stmt);
229 if (fd->collapse > 1)
230 fd->loops = loops;
231 else
232 fd->loops = &fd->loop;
234 fd->have_nowait = fd->have_ordered = false;
235 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
236 fd->chunk_size = NULL_TREE;
237 collapse_iter = NULL;
238 collapse_count = NULL;
240 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
241 switch (OMP_CLAUSE_CODE (t))
243 case OMP_CLAUSE_NOWAIT:
244 fd->have_nowait = true;
245 break;
246 case OMP_CLAUSE_ORDERED:
247 fd->have_ordered = true;
248 break;
249 case OMP_CLAUSE_SCHEDULE:
250 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
251 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
252 break;
253 case OMP_CLAUSE_COLLAPSE:
254 if (fd->collapse > 1)
256 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
257 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
259 default:
260 break;
263 /* FIXME: for now map schedule(auto) to schedule(static).
264 There should be analysis to determine whether all iterations
265 are approximately the same amount of work (then schedule(static)
266 is best) or if it varies (then schedule(dynamic,N) is better). */
267 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
269 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
270 gcc_assert (fd->chunk_size == NULL);
272 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
273 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
274 gcc_assert (fd->chunk_size == NULL);
275 else if (fd->chunk_size == NULL)
277 /* We only need to compute a default chunk size for ordered
278 static loops and dynamic loops. */
279 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
280 || fd->have_ordered
281 || fd->collapse > 1)
282 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
283 ? integer_zero_node : integer_one_node;
286 for (i = 0; i < fd->collapse; i++)
288 if (fd->collapse == 1)
289 loop = &fd->loop;
290 else if (loops != NULL)
291 loop = loops + i;
292 else
293 loop = &dummy_loop;
296 loop->v = gimple_omp_for_index (for_stmt, i);
297 gcc_assert (SSA_VAR_P (loop->v));
298 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
299 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
300 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
301 loop->n1 = gimple_omp_for_initial (for_stmt, i);
303 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
304 loop->n2 = gimple_omp_for_final (for_stmt, i);
305 switch (loop->cond_code)
307 case LT_EXPR:
308 case GT_EXPR:
309 break;
310 case LE_EXPR:
311 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
312 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
313 else
314 loop->n2 = fold_build2_loc (loc,
315 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
316 build_int_cst (TREE_TYPE (loop->n2), 1));
317 loop->cond_code = LT_EXPR;
318 break;
319 case GE_EXPR:
320 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
321 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
322 else
323 loop->n2 = fold_build2_loc (loc,
324 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
325 build_int_cst (TREE_TYPE (loop->n2), 1));
326 loop->cond_code = GT_EXPR;
327 break;
328 default:
329 gcc_unreachable ();
332 t = gimple_omp_for_incr (for_stmt, i);
333 gcc_assert (TREE_OPERAND (t, 0) == var);
334 switch (TREE_CODE (t))
336 case PLUS_EXPR:
337 loop->step = TREE_OPERAND (t, 1);
338 break;
339 case POINTER_PLUS_EXPR:
340 loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
341 break;
342 case MINUS_EXPR:
343 loop->step = TREE_OPERAND (t, 1);
344 loop->step = fold_build1_loc (loc,
345 NEGATE_EXPR, TREE_TYPE (loop->step),
346 loop->step);
347 break;
348 default:
349 gcc_unreachable ();
352 if (iter_type != long_long_unsigned_type_node)
354 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
355 iter_type = long_long_unsigned_type_node;
356 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
357 && TYPE_PRECISION (TREE_TYPE (loop->v))
358 >= TYPE_PRECISION (iter_type))
360 tree n;
362 if (loop->cond_code == LT_EXPR)
363 n = fold_build2_loc (loc,
364 PLUS_EXPR, TREE_TYPE (loop->v),
365 loop->n2, loop->step);
366 else
367 n = loop->n1;
368 if (TREE_CODE (n) != INTEGER_CST
369 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
370 iter_type = long_long_unsigned_type_node;
372 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
373 > TYPE_PRECISION (iter_type))
375 tree n1, n2;
377 if (loop->cond_code == LT_EXPR)
379 n1 = loop->n1;
380 n2 = fold_build2_loc (loc,
381 PLUS_EXPR, TREE_TYPE (loop->v),
382 loop->n2, loop->step);
384 else
386 n1 = fold_build2_loc (loc,
387 MINUS_EXPR, TREE_TYPE (loop->v),
388 loop->n2, loop->step);
389 n2 = loop->n1;
391 if (TREE_CODE (n1) != INTEGER_CST
392 || TREE_CODE (n2) != INTEGER_CST
393 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
394 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
395 iter_type = long_long_unsigned_type_node;
399 if (collapse_count && *collapse_count == NULL)
401 t = fold_binary (loop->cond_code, boolean_type_node,
402 fold_convert (TREE_TYPE (loop->v), loop->n1),
403 fold_convert (TREE_TYPE (loop->v), loop->n2));
404 if (t && integer_zerop (t))
405 count = build_zero_cst (long_long_unsigned_type_node);
406 else if ((i == 0 || count != NULL_TREE)
407 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
408 && TREE_CONSTANT (loop->n1)
409 && TREE_CONSTANT (loop->n2)
410 && TREE_CODE (loop->step) == INTEGER_CST)
412 tree itype = TREE_TYPE (loop->v);
414 if (POINTER_TYPE_P (itype))
415 itype = signed_type_for (itype);
416 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
417 t = fold_build2_loc (loc,
418 PLUS_EXPR, itype,
419 fold_convert_loc (loc, itype, loop->step), t);
420 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
421 fold_convert_loc (loc, itype, loop->n2));
422 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
423 fold_convert_loc (loc, itype, loop->n1));
424 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
425 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
426 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
427 fold_build1_loc (loc, NEGATE_EXPR, itype,
428 fold_convert_loc (loc, itype,
429 loop->step)));
430 else
431 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
432 fold_convert_loc (loc, itype, loop->step));
433 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
434 if (count != NULL_TREE)
435 count = fold_build2_loc (loc,
436 MULT_EXPR, long_long_unsigned_type_node,
437 count, t);
438 else
439 count = t;
440 if (TREE_CODE (count) != INTEGER_CST)
441 count = NULL_TREE;
443 else if (count && !integer_zerop (count))
444 count = NULL_TREE;
448 if (count)
450 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
451 iter_type = long_long_unsigned_type_node;
452 else
453 iter_type = long_integer_type_node;
455 else if (collapse_iter && *collapse_iter != NULL)
456 iter_type = TREE_TYPE (*collapse_iter);
457 fd->iter_type = iter_type;
458 if (collapse_iter && *collapse_iter == NULL)
459 *collapse_iter = create_tmp_var (iter_type, ".iter");
460 if (collapse_count && *collapse_count == NULL)
462 if (count)
463 *collapse_count = fold_convert_loc (loc, iter_type, count);
464 else
465 *collapse_count = create_tmp_var (iter_type, ".count");
468 if (fd->collapse > 1)
470 fd->loop.v = *collapse_iter;
471 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
472 fd->loop.n2 = *collapse_count;
473 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
474 fd->loop.cond_code = LT_EXPR;
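/* A worked instance of the iteration-count computation above (numbers are
   illustrative only): for "for (i = 0; i < 10; i += 3)" the code builds
   count = (n2 - n1 + step - 1) / step = (10 - 0 + 3 - 1) / 3 = 4, i.e. the
   iterations {0, 3, 6, 9}.  With collapse(2) the counts of the individual
   loops are multiplied, so a second loop with 5 iterations would give a
   combined count of 20.  */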
479 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
480 is the immediate dominator of PAR_ENTRY_BB, return true if there
481 are no data dependencies that would prevent expanding the parallel
482 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
484 When expanding a combined parallel+workshare region, the call to
485 the child function may need additional arguments in the case of
486 GIMPLE_OMP_FOR regions. In some cases, these arguments are
487 computed out of variables passed in from the parent to the child
488 via 'struct .omp_data_s'. For instance:
490 #pragma omp parallel for schedule (guided, i * 4)
491 for (j ...)
493 Is lowered into:
495 # BLOCK 2 (PAR_ENTRY_BB)
496 .omp_data_o.i = i;
497 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
499 # BLOCK 3 (WS_ENTRY_BB)
500 .omp_data_i = &.omp_data_o;
501 D.1667 = .omp_data_i->i;
502 D.1598 = D.1667 * 4;
503 #pragma omp for schedule (guided, D.1598)
505 When we outline the parallel region, the call to the child function
506 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
507 that value is computed *after* the call site. So, in principle we
508 cannot do the transformation.
510 To see whether the code in WS_ENTRY_BB blocks the combined
511 parallel+workshare call, we collect all the variables used in the
512 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
513 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
514 call.
516 FIXME. If we had the SSA form built at this point, we could merely
517 hoist the code in block 3 into block 2 and be done with it. But at
518 this point we don't have dataflow information and though we could
519 hack something up here, it is really not worth the aggravation. */
521 static bool
522 workshare_safe_to_combine_p (basic_block ws_entry_bb)
524 struct omp_for_data fd;
525 gimple ws_stmt = last_stmt (ws_entry_bb);
527 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
528 return true;
530 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
532 extract_omp_for_data (ws_stmt, &fd, NULL);
534 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
535 return false;
536 if (fd.iter_type != long_integer_type_node)
537 return false;
539 /* FIXME. We give up too easily here. If any of these arguments
540 are not constants, they will likely involve variables that have
541 been mapped into fields of .omp_data_s for sharing with the child
542 function. With appropriate data flow, it would be possible to
543 see through this. */
544 if (!is_gimple_min_invariant (fd.loop.n1)
545 || !is_gimple_min_invariant (fd.loop.n2)
546 || !is_gimple_min_invariant (fd.loop.step)
547 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
548 return false;
550 return true;
554 /* Collect additional arguments needed to emit a combined
555 parallel+workshare call. WS_STMT is the workshare directive being
556 expanded. */
558 static vec<tree, va_gc> *
559 get_ws_args_for (gimple ws_stmt)
561 tree t;
562 location_t loc = gimple_location (ws_stmt);
563 vec<tree, va_gc> *ws_args;
565 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
567 struct omp_for_data fd;
569 extract_omp_for_data (ws_stmt, &fd, NULL);
571 vec_alloc (ws_args, 3 + (fd.chunk_size != 0));
573 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
574 ws_args->quick_push (t);
576 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
577 ws_args->quick_push (t);
579 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
580 ws_args->quick_push (t);
582 if (fd.chunk_size)
584 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
585 ws_args->quick_push (t);
588 return ws_args;
590 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
592 /* Number of sections is equal to the number of edges from the
593 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
594 the exit of the sections region. */
595 basic_block bb = single_succ (gimple_bb (ws_stmt));
596 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
597 vec_alloc (ws_args, 1);
598 ws_args->quick_push (t);
599 return ws_args;
602 gcc_unreachable ();
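/* For reference (these are libgomp entry points declared elsewhere, not in
   this file): the values collected here become the trailing arguments of
   the combined calls emitted during expansion, e.g.

       GOMP_parallel_loop_guided_start (fn, data, num_threads,
                                        n1, n2, step, chunk_size);
       GOMP_parallel_sections_start (fn, data, num_threads, count);

   which is why the loop bounds are converted to long_integer_type_node and
   the section count is built as unsigned_type_node above.  */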
606 /* Discover whether REGION is a combined parallel+workshare region. */
608 static void
609 determine_parallel_type (struct omp_region *region)
611 basic_block par_entry_bb, par_exit_bb;
612 basic_block ws_entry_bb, ws_exit_bb;
614 if (region == NULL || region->inner == NULL
615 || region->exit == NULL || region->inner->exit == NULL
616 || region->inner->cont == NULL)
617 return;
619 /* We only support parallel+for and parallel+sections. */
620 if (region->type != GIMPLE_OMP_PARALLEL
621 || (region->inner->type != GIMPLE_OMP_FOR
622 && region->inner->type != GIMPLE_OMP_SECTIONS))
623 return;
625 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
626 WS_EXIT_BB -> PAR_EXIT_BB. */
627 par_entry_bb = region->entry;
628 par_exit_bb = region->exit;
629 ws_entry_bb = region->inner->entry;
630 ws_exit_bb = region->inner->exit;
632 if (single_succ (par_entry_bb) == ws_entry_bb
633 && single_succ (ws_exit_bb) == par_exit_bb
634 && workshare_safe_to_combine_p (ws_entry_bb)
635 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
636 || (last_and_only_stmt (ws_entry_bb)
637 && last_and_only_stmt (par_exit_bb))))
639 gimple ws_stmt = last_stmt (ws_entry_bb);
641 if (region->inner->type == GIMPLE_OMP_FOR)
643 /* If this is a combined parallel loop, we need to determine
644 whether or not to use the combined library calls. There
645 are two cases where we do not apply the transformation:
646 static loops and any kind of ordered loop. In the first
647 case, we already open code the loop so there is no need
648 to do anything else. In the latter case, the combined
649 parallel loop call would still need extra synchronization
650 to implement ordered semantics, so there would not be any
651 gain in using the combined call. */
652 tree clauses = gimple_omp_for_clauses (ws_stmt);
653 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
654 if (c == NULL
655 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
656 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
658 region->is_combined_parallel = false;
659 region->inner->is_combined_parallel = false;
660 return;
664 region->is_combined_parallel = true;
665 region->inner->is_combined_parallel = true;
666 region->ws_args = get_ws_args_for (ws_stmt);
671 /* Return true if EXPR is variable sized. */
673 static inline bool
674 is_variable_sized (const_tree expr)
676 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
679 /* Return true if DECL is a reference type. */
681 static inline bool
682 is_reference (tree decl)
684 return lang_hooks.decls.omp_privatize_by_reference (decl);
687 /* Lookup variables in the decl or field splay trees. The "maybe" form
688 allows for the variable form to not have been entered, otherwise we
689 assert that the variable must have been entered. */
691 static inline tree
692 lookup_decl (tree var, omp_context *ctx)
694 tree *n;
695 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
696 return *n;
699 static inline tree
700 maybe_lookup_decl (const_tree var, omp_context *ctx)
702 tree *n;
703 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
704 return n ? *n : NULL_TREE;
707 static inline tree
708 lookup_field (tree var, omp_context *ctx)
710 splay_tree_node n;
711 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
712 return (tree) n->value;
715 static inline tree
716 lookup_sfield (tree var, omp_context *ctx)
718 splay_tree_node n;
719 n = splay_tree_lookup (ctx->sfield_map
720 ? ctx->sfield_map : ctx->field_map,
721 (splay_tree_key) var);
722 return (tree) n->value;
725 static inline tree
726 maybe_lookup_field (tree var, omp_context *ctx)
728 splay_tree_node n;
729 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
730 return n ? (tree) n->value : NULL_TREE;
733 /* Return true if DECL should be copied by pointer. SHARED_CTX is
734 the parallel context if DECL is to be shared. */
736 static bool
737 use_pointer_for_field (tree decl, omp_context *shared_ctx)
739 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
740 return true;
742 /* We can only use copy-in/copy-out semantics for shared variables
743 when we know the value is not accessible from an outer scope. */
744 if (shared_ctx)
746 /* ??? Trivially accessible from anywhere. But why would we even
747 be passing an address in this case? Should we simply assert
748 this to be false, or should we have a cleanup pass that removes
749 these from the list of mappings? */
750 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
751 return true;
753 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
754 without analyzing the expression whether or not its location
755 is accessible to anyone else. In the case of nested parallel
756 regions it certainly may be. */
757 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
758 return true;
760 /* Do not use copy-in/copy-out for variables that have their
761 address taken. */
762 if (TREE_ADDRESSABLE (decl))
763 return true;
765 /* lower_send_shared_vars only uses copy-in, but not copy-out
766 for these. */
767 if (TREE_READONLY (decl)
768 || ((TREE_CODE (decl) == RESULT_DECL
769 || TREE_CODE (decl) == PARM_DECL)
770 && DECL_BY_REFERENCE (decl)))
771 return false;
773 /* Disallow copy-in/out in nested parallel if
774 decl is shared in outer parallel, otherwise
775 each thread could store the shared variable
776 in its own copy-in location, making the
777 variable no longer really shared. */
778 if (shared_ctx->is_nested)
780 omp_context *up;
782 for (up = shared_ctx->outer; up; up = up->outer)
783 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
784 break;
786 if (up)
788 tree c;
790 for (c = gimple_omp_taskreg_clauses (up->stmt);
791 c; c = OMP_CLAUSE_CHAIN (c))
792 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
793 && OMP_CLAUSE_DECL (c) == decl)
794 break;
796 if (c)
797 goto maybe_mark_addressable_and_ret;
801 /* For tasks avoid using copy-in/out. As tasks can be
802 deferred or executed in a different thread, when GOMP_task
803 returns, the task hasn't necessarily terminated. */
804 if (is_task_ctx (shared_ctx))
806 tree outer;
807 maybe_mark_addressable_and_ret:
808 outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
809 if (is_gimple_reg (outer))
811 /* Taking address of OUTER in lower_send_shared_vars
812 might need regimplification of everything that uses the
813 variable. */
814 if (!task_shared_vars)
815 task_shared_vars = BITMAP_ALLOC (NULL);
816 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
817 TREE_ADDRESSABLE (outer) = 1;
819 return true;
823 return false;
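/* Illustration (hand-written, not taken from this pass): given

       int x = 0;
       int *p = &x;
       #pragma omp parallel shared (x)
         x++;

   taking &x makes x TREE_ADDRESSABLE, so use_pointer_for_field returns
   true and the child function receives the address of x through the
   .omp_data_s record instead of a copy-in/copy-out pair.  */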
826 /* Create a new VAR_DECL and copy information from VAR to it. */
828 tree
829 copy_var_decl (tree var, tree name, tree type)
831 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
833 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
834 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
835 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
836 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
837 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
838 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
839 TREE_USED (copy) = 1;
840 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
842 return copy;
845 /* Construct a new automatic decl similar to VAR. */
847 static tree
848 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
850 tree copy = copy_var_decl (var, name, type);
852 DECL_CONTEXT (copy) = current_function_decl;
853 DECL_CHAIN (copy) = ctx->block_vars;
854 ctx->block_vars = copy;
856 return copy;
859 static tree
860 omp_copy_decl_1 (tree var, omp_context *ctx)
862 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
865 /* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
866 as appropriate. */
867 static tree
868 omp_build_component_ref (tree obj, tree field)
870 tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
871 if (TREE_THIS_VOLATILE (field))
872 TREE_THIS_VOLATILE (ret) |= 1;
873 if (TREE_READONLY (field))
874 TREE_READONLY (ret) |= 1;
875 return ret;
878 /* Build tree nodes to access the field for VAR on the receiver side. */
880 static tree
881 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
883 tree x, field = lookup_field (var, ctx);
885 /* If the receiver record type was remapped in the child function,
886 remap the field into the new record type. */
887 x = maybe_lookup_field (field, ctx);
888 if (x != NULL)
889 field = x;
891 x = build_simple_mem_ref (ctx->receiver_decl);
892 x = omp_build_component_ref (x, field);
893 if (by_ref)
894 x = build_simple_mem_ref (x);
896 return x;
899 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
900 of a parallel, this is a component reference; for workshare constructs
901 this is some variable. */
903 static tree
904 build_outer_var_ref (tree var, omp_context *ctx)
906 tree x;
908 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
909 x = var;
910 else if (is_variable_sized (var))
912 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
913 x = build_outer_var_ref (x, ctx);
914 x = build_simple_mem_ref (x);
916 else if (is_taskreg_ctx (ctx))
918 bool by_ref = use_pointer_for_field (var, NULL);
919 x = build_receiver_ref (var, by_ref, ctx);
921 else if (ctx->outer)
922 x = lookup_decl (var, ctx->outer);
923 else if (is_reference (var))
924 /* This can happen with orphaned constructs. If var is reference, it is
925 possible it is shared and as such valid. */
926 x = var;
927 else
928 gcc_unreachable ();
930 if (is_reference (var))
931 x = build_simple_mem_ref (x);
933 return x;
936 /* Build tree nodes to access the field for VAR on the sender side. */
938 static tree
939 build_sender_ref (tree var, omp_context *ctx)
941 tree field = lookup_sfield (var, ctx);
942 return omp_build_component_ref (ctx->sender_decl, field);
945 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
947 static void
948 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
950 tree field, type, sfield = NULL_TREE;
952 gcc_assert ((mask & 1) == 0
953 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
954 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
955 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
957 type = TREE_TYPE (var);
958 if (by_ref)
959 type = build_pointer_type (type);
960 else if ((mask & 3) == 1 && is_reference (var))
961 type = TREE_TYPE (type);
963 field = build_decl (DECL_SOURCE_LOCATION (var),
964 FIELD_DECL, DECL_NAME (var), type);
966 /* Remember what variable this field was created for. This does have a
967 side effect of making dwarf2out ignore this member, so for helpful
968 debugging we clear it later in delete_omp_context. */
969 DECL_ABSTRACT_ORIGIN (field) = var;
970 if (type == TREE_TYPE (var))
972 DECL_ALIGN (field) = DECL_ALIGN (var);
973 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
974 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
976 else
977 DECL_ALIGN (field) = TYPE_ALIGN (type);
979 if ((mask & 3) == 3)
981 insert_field_into_struct (ctx->record_type, field);
982 if (ctx->srecord_type)
984 sfield = build_decl (DECL_SOURCE_LOCATION (var),
985 FIELD_DECL, DECL_NAME (var), type);
986 DECL_ABSTRACT_ORIGIN (sfield) = var;
987 DECL_ALIGN (sfield) = DECL_ALIGN (field);
988 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
989 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
990 insert_field_into_struct (ctx->srecord_type, sfield);
993 else
995 if (ctx->srecord_type == NULL_TREE)
997 tree t;
999 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
1000 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1001 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1003 sfield = build_decl (DECL_SOURCE_LOCATION (var),
1004 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
1005 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
1006 insert_field_into_struct (ctx->srecord_type, sfield);
1007 splay_tree_insert (ctx->sfield_map,
1008 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
1009 (splay_tree_value) sfield);
1012 sfield = field;
1013 insert_field_into_struct ((mask & 1) ? ctx->record_type
1014 : ctx->srecord_type, field);
1017 if (mask & 1)
1018 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
1019 (splay_tree_value) field);
1020 if ((mask & 2) && ctx->sfield_map)
1021 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
1022 (splay_tree_value) sfield);
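/* Note (inferred from the code above and its callers; not part of the
   original comment): MASK is a bit mask.  Bit 0 asks for a field in the
   receiver record (record_type / field_map) and bit 1 for a field in the
   task sender-side record (srecord_type / sfield_map); scan_sharing_clauses
   passes 3 to get both for ordinary shared, copyin and copyprivate
   variables, and 1 or 2 when a task needs only one of the two sides.  */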
1025 static tree
1026 install_var_local (tree var, omp_context *ctx)
1028 tree new_var = omp_copy_decl_1 (var, ctx);
1029 insert_decl_map (&ctx->cb, var, new_var);
1030 return new_var;
1033 /* Adjust the replacement for DECL in CTX for the new context. This means
1034 copying the DECL_VALUE_EXPR, and fixing up the type. */
1036 static void
1037 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1039 tree new_decl, size;
1041 new_decl = lookup_decl (decl, ctx);
1043 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1045 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1046 && DECL_HAS_VALUE_EXPR_P (decl))
1048 tree ve = DECL_VALUE_EXPR (decl);
1049 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1050 SET_DECL_VALUE_EXPR (new_decl, ve);
1051 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1054 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1056 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1057 if (size == error_mark_node)
1058 size = TYPE_SIZE (TREE_TYPE (new_decl));
1059 DECL_SIZE (new_decl) = size;
1061 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1062 if (size == error_mark_node)
1063 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1064 DECL_SIZE_UNIT (new_decl) = size;
1068 /* The callback for remap_decl. Search all containing contexts for a
1069 mapping of the variable; this avoids having to duplicate the splay
1070 tree ahead of time. We know a mapping doesn't already exist in the
1071 given context. Create new mappings to implement default semantics. */
1073 static tree
1074 omp_copy_decl (tree var, copy_body_data *cb)
1076 omp_context *ctx = (omp_context *) cb;
1077 tree new_var;
1079 if (TREE_CODE (var) == LABEL_DECL)
1081 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1082 DECL_CONTEXT (new_var) = current_function_decl;
1083 insert_decl_map (&ctx->cb, var, new_var);
1084 return new_var;
1087 while (!is_taskreg_ctx (ctx))
1089 ctx = ctx->outer;
1090 if (ctx == NULL)
1091 return var;
1092 new_var = maybe_lookup_decl (var, ctx);
1093 if (new_var)
1094 return new_var;
1097 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1098 return var;
1100 return error_mark_node;
1104 /* Return the parallel region associated with STMT. */
1106 /* Debugging dumps for parallel regions. */
1107 void dump_omp_region (FILE *, struct omp_region *, int);
1108 void debug_omp_region (struct omp_region *);
1109 void debug_all_omp_regions (void);
1111 /* Dump the parallel region tree rooted at REGION. */
1113 void
1114 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1116 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1117 gimple_code_name[region->type]);
1119 if (region->inner)
1120 dump_omp_region (file, region->inner, indent + 4);
1122 if (region->cont)
1124 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1125 region->cont->index);
1128 if (region->exit)
1129 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1130 region->exit->index);
1131 else
1132 fprintf (file, "%*s[no exit marker]\n", indent, "");
1134 if (region->next)
1135 dump_omp_region (file, region->next, indent);
1138 DEBUG_FUNCTION void
1139 debug_omp_region (struct omp_region *region)
1141 dump_omp_region (stderr, region, 0);
1144 DEBUG_FUNCTION void
1145 debug_all_omp_regions (void)
1147 dump_omp_region (stderr, root_omp_region, 0);
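/* For instance, for a function containing a single parallel + for region
   the dump has roughly this shape (basic-block numbers are invented for
   illustration; the region names come from gimple_code_name):

       bb 2: gimple_omp_parallel
           bb 4: gimple_omp_for
           bb 6: GIMPLE_OMP_CONTINUE
           bb 7: GIMPLE_OMP_RETURN
       bb 8: GIMPLE_OMP_RETURN  */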
1151 /* Create a new parallel region starting at STMT inside region PARENT. */
1153 struct omp_region *
1154 new_omp_region (basic_block bb, enum gimple_code type,
1155 struct omp_region *parent)
1157 struct omp_region *region = XCNEW (struct omp_region);
1159 region->outer = parent;
1160 region->entry = bb;
1161 region->type = type;
1163 if (parent)
1165 /* This is a nested region. Add it to the list of inner
1166 regions in PARENT. */
1167 region->next = parent->inner;
1168 parent->inner = region;
1170 else
1172 /* This is a toplevel region. Add it to the list of toplevel
1173 regions in ROOT_OMP_REGION. */
1174 region->next = root_omp_region;
1175 root_omp_region = region;
1178 return region;
1181 /* Release the memory associated with the region tree rooted at REGION. */
1183 static void
1184 free_omp_region_1 (struct omp_region *region)
1186 struct omp_region *i, *n;
1188 for (i = region->inner; i ; i = n)
1190 n = i->next;
1191 free_omp_region_1 (i);
1194 free (region);
1197 /* Release the memory for the entire omp region tree. */
1199 void
1200 free_omp_regions (void)
1202 struct omp_region *r, *n;
1203 for (r = root_omp_region; r ; r = n)
1205 n = r->next;
1206 free_omp_region_1 (r);
1208 root_omp_region = NULL;
1212 /* Create a new context, with OUTER_CTX being the surrounding context. */
1214 static omp_context *
1215 new_omp_context (gimple stmt, omp_context *outer_ctx)
1217 omp_context *ctx = XCNEW (omp_context);
1219 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1220 (splay_tree_value) ctx);
1221 ctx->stmt = stmt;
1223 if (outer_ctx)
1225 ctx->outer = outer_ctx;
1226 ctx->cb = outer_ctx->cb;
1227 ctx->cb.block = NULL;
1228 ctx->depth = outer_ctx->depth + 1;
1230 else
1232 ctx->cb.src_fn = current_function_decl;
1233 ctx->cb.dst_fn = current_function_decl;
1234 ctx->cb.src_node = cgraph_get_node (current_function_decl);
1235 gcc_checking_assert (ctx->cb.src_node);
1236 ctx->cb.dst_node = ctx->cb.src_node;
1237 ctx->cb.src_cfun = cfun;
1238 ctx->cb.copy_decl = omp_copy_decl;
1239 ctx->cb.eh_lp_nr = 0;
1240 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1241 ctx->depth = 1;
1244 ctx->cb.decl_map = pointer_map_create ();
1246 return ctx;
1249 static gimple_seq maybe_catch_exception (gimple_seq);
1251 /* Finalize task copyfn. */
1253 static void
1254 finalize_task_copyfn (gimple task_stmt)
1256 struct function *child_cfun;
1257 tree child_fn;
1258 gimple_seq seq = NULL, new_seq;
1259 gimple bind;
1261 child_fn = gimple_omp_task_copy_fn (task_stmt);
1262 if (child_fn == NULL_TREE)
1263 return;
1265 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1266 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
1268 push_cfun (child_cfun);
1269 bind = gimplify_body (child_fn, false);
1270 gimple_seq_add_stmt (&seq, bind);
1271 new_seq = maybe_catch_exception (seq);
1272 if (new_seq != seq)
1274 bind = gimple_build_bind (NULL, new_seq, NULL);
1275 seq = NULL;
1276 gimple_seq_add_stmt (&seq, bind);
1278 gimple_set_body (child_fn, seq);
1279 pop_cfun ();
1281 /* Inform the callgraph about the new function. */
1282 cgraph_add_new_function (child_fn, false);
1285 /* Destroy an omp_context data structure. Called through the splay tree
1286 value delete callback. */
1288 static void
1289 delete_omp_context (splay_tree_value value)
1291 omp_context *ctx = (omp_context *) value;
1293 pointer_map_destroy (ctx->cb.decl_map);
1295 if (ctx->field_map)
1296 splay_tree_delete (ctx->field_map);
1297 if (ctx->sfield_map)
1298 splay_tree_delete (ctx->sfield_map);
1300 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1301 it produces corrupt debug information. */
1302 if (ctx->record_type)
1304 tree t;
1305 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1306 DECL_ABSTRACT_ORIGIN (t) = NULL;
1308 if (ctx->srecord_type)
1310 tree t;
1311 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1312 DECL_ABSTRACT_ORIGIN (t) = NULL;
1315 if (is_task_ctx (ctx))
1316 finalize_task_copyfn (ctx->stmt);
1318 XDELETE (ctx);
1321 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1322 context. */
1324 static void
1325 fixup_child_record_type (omp_context *ctx)
1327 tree f, type = ctx->record_type;
1329 /* ??? It isn't sufficient to just call remap_type here, because
1330 variably_modified_type_p doesn't work the way we expect for
1331 record types. Testing each field for whether it needs remapping
1332 and creating a new record by hand works, however. */
1333 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1334 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1335 break;
1336 if (f)
1338 tree name, new_fields = NULL;
1340 type = lang_hooks.types.make_type (RECORD_TYPE);
1341 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1342 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1343 TYPE_DECL, name, type);
1344 TYPE_NAME (type) = name;
1346 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1348 tree new_f = copy_node (f);
1349 DECL_CONTEXT (new_f) = type;
1350 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1351 DECL_CHAIN (new_f) = new_fields;
1352 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1353 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1354 &ctx->cb, NULL);
1355 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1356 &ctx->cb, NULL);
1357 new_fields = new_f;
1359 /* Arrange to be able to look up the receiver field
1360 given the sender field. */
1361 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1362 (splay_tree_value) new_f);
1364 TYPE_FIELDS (type) = nreverse (new_fields);
1365 layout_type (type);
1368 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1371 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1372 specified by CLAUSES. */
1374 static void
1375 scan_sharing_clauses (tree clauses, omp_context *ctx)
1377 tree c, decl;
1378 bool scan_array_reductions = false;
1380 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1382 bool by_ref;
1384 switch (OMP_CLAUSE_CODE (c))
1386 case OMP_CLAUSE_PRIVATE:
1387 decl = OMP_CLAUSE_DECL (c);
1388 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1389 goto do_private;
1390 else if (!is_variable_sized (decl))
1391 install_var_local (decl, ctx);
1392 break;
1394 case OMP_CLAUSE_SHARED:
1395 gcc_assert (is_taskreg_ctx (ctx));
1396 decl = OMP_CLAUSE_DECL (c);
1397 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1398 || !is_variable_sized (decl));
1399 /* Global variables don't need to be copied,
1400 the receiver side will use them directly. */
1401 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1402 break;
1403 by_ref = use_pointer_for_field (decl, ctx);
1404 if (! TREE_READONLY (decl)
1405 || TREE_ADDRESSABLE (decl)
1406 || by_ref
1407 || is_reference (decl))
1409 install_var_field (decl, by_ref, 3, ctx);
1410 install_var_local (decl, ctx);
1411 break;
1413 /* We don't need to copy const scalar vars back. */
1414 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1415 goto do_private;
1417 case OMP_CLAUSE_LASTPRIVATE:
1418 /* Let the corresponding firstprivate clause create
1419 the variable. */
1420 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1421 break;
1422 /* FALLTHRU */
1424 case OMP_CLAUSE_FIRSTPRIVATE:
1425 case OMP_CLAUSE_REDUCTION:
1426 decl = OMP_CLAUSE_DECL (c);
1427 do_private:
1428 if (is_variable_sized (decl))
1430 if (is_task_ctx (ctx))
1431 install_var_field (decl, false, 1, ctx);
1432 break;
1434 else if (is_taskreg_ctx (ctx))
1436 bool global
1437 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1438 by_ref = use_pointer_for_field (decl, NULL);
1440 if (is_task_ctx (ctx)
1441 && (global || by_ref || is_reference (decl)))
1443 install_var_field (decl, false, 1, ctx);
1444 if (!global)
1445 install_var_field (decl, by_ref, 2, ctx);
1447 else if (!global)
1448 install_var_field (decl, by_ref, 3, ctx);
1450 install_var_local (decl, ctx);
1451 break;
1453 case OMP_CLAUSE_COPYPRIVATE:
1454 case OMP_CLAUSE_COPYIN:
1455 decl = OMP_CLAUSE_DECL (c);
1456 by_ref = use_pointer_for_field (decl, NULL);
1457 install_var_field (decl, by_ref, 3, ctx);
1458 break;
1460 case OMP_CLAUSE_DEFAULT:
1461 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1462 break;
1464 case OMP_CLAUSE_FINAL:
1465 case OMP_CLAUSE_IF:
1466 case OMP_CLAUSE_NUM_THREADS:
1467 case OMP_CLAUSE_SCHEDULE:
1468 if (ctx->outer)
1469 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1470 break;
1472 case OMP_CLAUSE_NOWAIT:
1473 case OMP_CLAUSE_ORDERED:
1474 case OMP_CLAUSE_COLLAPSE:
1475 case OMP_CLAUSE_UNTIED:
1476 case OMP_CLAUSE_MERGEABLE:
1477 break;
1479 default:
1480 gcc_unreachable ();
1484 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1486 switch (OMP_CLAUSE_CODE (c))
1488 case OMP_CLAUSE_LASTPRIVATE:
1489 /* Let the corresponding firstprivate clause create
1490 the variable. */
1491 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1492 scan_array_reductions = true;
1493 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1494 break;
1495 /* FALLTHRU */
1497 case OMP_CLAUSE_PRIVATE:
1498 case OMP_CLAUSE_FIRSTPRIVATE:
1499 case OMP_CLAUSE_REDUCTION:
1500 decl = OMP_CLAUSE_DECL (c);
1501 if (is_variable_sized (decl))
1502 install_var_local (decl, ctx);
1503 fixup_remapped_decl (decl, ctx,
1504 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1505 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1506 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1507 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1508 scan_array_reductions = true;
1509 break;
1511 case OMP_CLAUSE_SHARED:
1512 decl = OMP_CLAUSE_DECL (c);
1513 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1514 fixup_remapped_decl (decl, ctx, false);
1515 break;
1517 case OMP_CLAUSE_COPYPRIVATE:
1518 case OMP_CLAUSE_COPYIN:
1519 case OMP_CLAUSE_DEFAULT:
1520 case OMP_CLAUSE_IF:
1521 case OMP_CLAUSE_NUM_THREADS:
1522 case OMP_CLAUSE_SCHEDULE:
1523 case OMP_CLAUSE_NOWAIT:
1524 case OMP_CLAUSE_ORDERED:
1525 case OMP_CLAUSE_COLLAPSE:
1526 case OMP_CLAUSE_UNTIED:
1527 case OMP_CLAUSE_FINAL:
1528 case OMP_CLAUSE_MERGEABLE:
1529 break;
1531 default:
1532 gcc_unreachable ();
1536 if (scan_array_reductions)
1537 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1538 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1539 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1541 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1542 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1544 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1545 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1546 scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1549 /* Create a new name for the omp child function. Returns an identifier. */
1551 static GTY(()) unsigned int tmp_ompfn_id_num;
1553 static tree
1554 create_omp_child_function_name (bool task_copy)
1556 return (clone_function_name (current_function_decl,
1557 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1560 /* Build a decl for the omp child function. It'll not contain a body
1561 yet, just the bare decl. */
1563 static void
1564 create_omp_child_function (omp_context *ctx, bool task_copy)
1566 tree decl, type, name, t;
1568 name = create_omp_child_function_name (task_copy);
1569 if (task_copy)
1570 type = build_function_type_list (void_type_node, ptr_type_node,
1571 ptr_type_node, NULL_TREE);
1572 else
1573 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1575 decl = build_decl (gimple_location (ctx->stmt),
1576 FUNCTION_DECL, name, type);
1578 if (!task_copy)
1579 ctx->cb.dst_fn = decl;
1580 else
1581 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1583 TREE_STATIC (decl) = 1;
1584 TREE_USED (decl) = 1;
1585 DECL_ARTIFICIAL (decl) = 1;
1586 DECL_NAMELESS (decl) = 1;
1587 DECL_IGNORED_P (decl) = 0;
1588 TREE_PUBLIC (decl) = 0;
1589 DECL_UNINLINABLE (decl) = 1;
1590 DECL_EXTERNAL (decl) = 0;
1591 DECL_CONTEXT (decl) = NULL_TREE;
1592 DECL_INITIAL (decl) = make_node (BLOCK);
1594 t = build_decl (DECL_SOURCE_LOCATION (decl),
1595 RESULT_DECL, NULL_TREE, void_type_node);
1596 DECL_ARTIFICIAL (t) = 1;
1597 DECL_IGNORED_P (t) = 1;
1598 DECL_CONTEXT (t) = decl;
1599 DECL_RESULT (decl) = t;
1601 t = build_decl (DECL_SOURCE_LOCATION (decl),
1602 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1603 DECL_ARTIFICIAL (t) = 1;
1604 DECL_NAMELESS (t) = 1;
1605 DECL_ARG_TYPE (t) = ptr_type_node;
1606 DECL_CONTEXT (t) = current_function_decl;
1607 TREE_USED (t) = 1;
1608 DECL_ARGUMENTS (decl) = t;
1609 if (!task_copy)
1610 ctx->receiver_decl = t;
1611 else
1613 t = build_decl (DECL_SOURCE_LOCATION (decl),
1614 PARM_DECL, get_identifier (".omp_data_o"),
1615 ptr_type_node);
1616 DECL_ARTIFICIAL (t) = 1;
1617 DECL_NAMELESS (t) = 1;
1618 DECL_ARG_TYPE (t) = ptr_type_node;
1619 DECL_CONTEXT (t) = current_function_decl;
1620 TREE_USED (t) = 1;
1621 TREE_ADDRESSABLE (t) = 1;
1622 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1623 DECL_ARGUMENTS (decl) = t;
1626 /* Allocate memory for the function structure. The call to
1627 allocate_struct_function clobbers CFUN, so we need to restore
1628 it afterward. */
1629 push_struct_function (decl);
1630 cfun->function_end_locus = gimple_location (ctx->stmt);
1631 pop_cfun ();
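/* The result, for a parallel region encountered in foo (), is a bare decl
   along the lines of (name and numbering are illustrative, produced by
   clone_function_name):

       static void foo._omp_fn.0 (void *.omp_data_i);

   or, for a task copy function,

       static void foo._omp_cpyfn.1 (void *.omp_data_o, void *.omp_data_i);

   with the body filled in later when the region is lowered and expanded.  */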
1635 /* Scan an OpenMP parallel directive. */
1637 static void
1638 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1640 omp_context *ctx;
1641 tree name;
1642 gimple stmt = gsi_stmt (*gsi);
1644 /* Ignore parallel directives with empty bodies, unless there
1645 are copyin clauses. */
1646 if (optimize > 0
1647 && empty_body_p (gimple_omp_body (stmt))
1648 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1649 OMP_CLAUSE_COPYIN) == NULL)
1651 gsi_replace (gsi, gimple_build_nop (), false);
1652 return;
1655 ctx = new_omp_context (stmt, outer_ctx);
1656 if (taskreg_nesting_level > 1)
1657 ctx->is_nested = true;
1658 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1659 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1660 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1661 name = create_tmp_var_name (".omp_data_s");
1662 name = build_decl (gimple_location (stmt),
1663 TYPE_DECL, name, ctx->record_type);
1664 DECL_ARTIFICIAL (name) = 1;
1665 DECL_NAMELESS (name) = 1;
1666 TYPE_NAME (ctx->record_type) = name;
1667 create_omp_child_function (ctx, false);
1668 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1670 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1671 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1673 if (TYPE_FIELDS (ctx->record_type) == NULL)
1674 ctx->record_type = ctx->receiver_decl = NULL;
1675 else
1677 layout_type (ctx->record_type);
1678 fixup_child_record_type (ctx);
1682 /* Scan an OpenMP task directive. */
1684 static void
1685 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1687 omp_context *ctx;
1688 tree name, t;
1689 gimple stmt = gsi_stmt (*gsi);
1690 location_t loc = gimple_location (stmt);
1692 /* Ignore task directives with empty bodies. */
1693 if (optimize > 0
1694 && empty_body_p (gimple_omp_body (stmt)))
1696 gsi_replace (gsi, gimple_build_nop (), false);
1697 return;
1700 ctx = new_omp_context (stmt, outer_ctx);
1701 if (taskreg_nesting_level > 1)
1702 ctx->is_nested = true;
1703 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1704 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1705 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1706 name = create_tmp_var_name (".omp_data_s");
1707 name = build_decl (gimple_location (stmt),
1708 TYPE_DECL, name, ctx->record_type);
1709 DECL_ARTIFICIAL (name) = 1;
1710 DECL_NAMELESS (name) = 1;
1711 TYPE_NAME (ctx->record_type) = name;
1712 create_omp_child_function (ctx, false);
1713 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1715 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1717 if (ctx->srecord_type)
1719 name = create_tmp_var_name (".omp_data_a");
1720 name = build_decl (gimple_location (stmt),
1721 TYPE_DECL, name, ctx->srecord_type);
1722 DECL_ARTIFICIAL (name) = 1;
1723 DECL_NAMELESS (name) = 1;
1724 TYPE_NAME (ctx->srecord_type) = name;
1725 create_omp_child_function (ctx, true);
1728 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1730 if (TYPE_FIELDS (ctx->record_type) == NULL)
1732 ctx->record_type = ctx->receiver_decl = NULL;
1733 t = build_int_cst (long_integer_type_node, 0);
1734 gimple_omp_task_set_arg_size (stmt, t);
1735 t = build_int_cst (long_integer_type_node, 1);
1736 gimple_omp_task_set_arg_align (stmt, t);
1738 else
1740 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1741 /* Move VLA fields to the end. */
1742 p = &TYPE_FIELDS (ctx->record_type);
1743 while (*p)
1744 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1745 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1747 *q = *p;
1748 *p = TREE_CHAIN (*p);
1749 TREE_CHAIN (*q) = NULL_TREE;
1750 q = &TREE_CHAIN (*q);
1752 else
1753 p = &DECL_CHAIN (*p);
1754 *p = vla_fields;
1755 layout_type (ctx->record_type);
1756 fixup_child_record_type (ctx);
1757 if (ctx->srecord_type)
1758 layout_type (ctx->srecord_type);
1759 t = fold_convert_loc (loc, long_integer_type_node,
1760 TYPE_SIZE_UNIT (ctx->record_type));
1761 gimple_omp_task_set_arg_size (stmt, t);
1762 t = build_int_cst (long_integer_type_node,
1763 TYPE_ALIGN_UNIT (ctx->record_type));
1764 gimple_omp_task_set_arg_align (stmt, t);
1769 /* Scan an OpenMP loop directive. */
1771 static void
1772 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1774 omp_context *ctx;
1775 size_t i;
1777 ctx = new_omp_context (stmt, outer_ctx);
1779 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1781 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
1782 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1784 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1785 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1786 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1787 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1789 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1792 /* Scan an OpenMP sections directive. */
1794 static void
1795 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1797 omp_context *ctx;
1799 ctx = new_omp_context (stmt, outer_ctx);
1800 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1801 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1804 /* Scan an OpenMP single directive. */
1806 static void
1807 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1809 omp_context *ctx;
1810 tree name;
1812 ctx = new_omp_context (stmt, outer_ctx);
1813 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1814 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1815 name = create_tmp_var_name (".omp_copy_s");
1816 name = build_decl (gimple_location (stmt),
1817 TYPE_DECL, name, ctx->record_type);
1818 TYPE_NAME (ctx->record_type) = name;
1820 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1821 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1823 if (TYPE_FIELDS (ctx->record_type) == NULL)
1824 ctx->record_type = NULL;
1825 else
1826 layout_type (ctx->record_type);
1830 /* Check OpenMP nesting restrictions. */
1831 static bool
1832 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1834 switch (gimple_code (stmt))
1836 case GIMPLE_OMP_FOR:
1837 case GIMPLE_OMP_SECTIONS:
1838 case GIMPLE_OMP_SINGLE:
1839 case GIMPLE_CALL:
1840 for (; ctx != NULL; ctx = ctx->outer)
1841 switch (gimple_code (ctx->stmt))
1843 case GIMPLE_OMP_FOR:
1844 case GIMPLE_OMP_SECTIONS:
1845 case GIMPLE_OMP_SINGLE:
1846 case GIMPLE_OMP_ORDERED:
1847 case GIMPLE_OMP_MASTER:
1848 case GIMPLE_OMP_TASK:
1849 if (is_gimple_call (stmt))
1851 error_at (gimple_location (stmt),
1852 "barrier region may not be closely nested inside "
1853 "of work-sharing, critical, ordered, master or "
1854 "explicit task region");
1855 return false;
1857 error_at (gimple_location (stmt),
1858 "work-sharing region may not be closely nested inside "
1859 "of work-sharing, critical, ordered, master or explicit "
1860 "task region");
1861 return false;
1862 case GIMPLE_OMP_PARALLEL:
1863 return true;
1864 default:
1865 break;
1867 break;
1868 case GIMPLE_OMP_MASTER:
1869 for (; ctx != NULL; ctx = ctx->outer)
1870 switch (gimple_code (ctx->stmt))
1872 case GIMPLE_OMP_FOR:
1873 case GIMPLE_OMP_SECTIONS:
1874 case GIMPLE_OMP_SINGLE:
1875 case GIMPLE_OMP_TASK:
1876 error_at (gimple_location (stmt),
1877 "master region may not be closely nested inside "
1878 "of work-sharing or explicit task region");
1879 return false;
1880 case GIMPLE_OMP_PARALLEL:
1881 return true;
1882 default:
1883 break;
1885 break;
1886 case GIMPLE_OMP_ORDERED:
1887 for (; ctx != NULL; ctx = ctx->outer)
1888 switch (gimple_code (ctx->stmt))
1890 case GIMPLE_OMP_CRITICAL:
1891 case GIMPLE_OMP_TASK:
1892 error_at (gimple_location (stmt),
1893 "ordered region may not be closely nested inside "
1894 "of critical or explicit task region");
1895 return false;
1896 case GIMPLE_OMP_FOR:
1897 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1898 OMP_CLAUSE_ORDERED) == NULL)
1900 error_at (gimple_location (stmt),
1901 "ordered region must be closely nested inside "
1902 "a loop region with an ordered clause");
1903 return false;
1905 return true;
1906 case GIMPLE_OMP_PARALLEL:
1907 return true;
1908 default:
1909 break;
1911 break;
1912 case GIMPLE_OMP_CRITICAL:
1913 for (; ctx != NULL; ctx = ctx->outer)
1914 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1915 && (gimple_omp_critical_name (stmt)
1916 == gimple_omp_critical_name (ctx->stmt)))
1918 error_at (gimple_location (stmt),
1919 "critical region may not be nested inside a critical "
1920 "region with the same name");
1921 return false;
1923 break;
1924 default:
1925 break;
1927 return true;
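/* Example of a violation this catches (illustrative source, not taken from
   this file).  An ordered region inside a loop whose directive lacks the
   ordered clause is rejected with the "ordered region must be closely
   nested inside a loop region with an ordered clause" diagnostic above:

       #pragma omp for
       for (i = 0; i < n; i++)
         {
           #pragma omp ordered
           use (i);
         }
*/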
1931 /* Helper function for scan_omp.
1933 Callback for walk_tree or operators in walk_gimple_stmt used to
1934 scan for OpenMP directives in TP. */
1936 static tree
1937 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1939 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1940 omp_context *ctx = (omp_context *) wi->info;
1941 tree t = *tp;
1943 switch (TREE_CODE (t))
1945 case VAR_DECL:
1946 case PARM_DECL:
1947 case LABEL_DECL:
1948 case RESULT_DECL:
1949 if (ctx)
1950 *tp = remap_decl (t, &ctx->cb);
1951 break;
1953 default:
1954 if (ctx && TYPE_P (t))
1955 *tp = remap_type (t, &ctx->cb);
1956 else if (!DECL_P (t))
1958 *walk_subtrees = 1;
1959 if (ctx)
1961 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1962 if (tem != TREE_TYPE (t))
1964 if (TREE_CODE (t) == INTEGER_CST)
1965 *tp = build_int_cst_wide (tem,
1966 TREE_INT_CST_LOW (t),
1967 TREE_INT_CST_HIGH (t));
1968 else
1969 TREE_TYPE (t) = tem;
1973 break;
1976 return NULL_TREE;
1980 /* Helper function for scan_omp.
1982 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1983 the current statement in GSI. */
1985 static tree
1986 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1987 struct walk_stmt_info *wi)
1989 gimple stmt = gsi_stmt (*gsi);
1990 omp_context *ctx = (omp_context *) wi->info;
1992 if (gimple_has_location (stmt))
1993 input_location = gimple_location (stmt);
1995 /* Check the OpenMP nesting restrictions. */
1996 if (ctx != NULL)
1998 bool remove = false;
1999 if (is_gimple_omp (stmt))
2000 remove = !check_omp_nesting_restrictions (stmt, ctx);
2001 else if (is_gimple_call (stmt))
2003 tree fndecl = gimple_call_fndecl (stmt);
2004 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
2005 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
2006 remove = !check_omp_nesting_restrictions (stmt, ctx);
2008 if (remove)
2010 stmt = gimple_build_nop ();
2011 gsi_replace (gsi, stmt, false);
2015 *handled_ops_p = true;
2017 switch (gimple_code (stmt))
2019 case GIMPLE_OMP_PARALLEL:
2020 taskreg_nesting_level++;
2021 scan_omp_parallel (gsi, ctx);
2022 taskreg_nesting_level--;
2023 break;
2025 case GIMPLE_OMP_TASK:
2026 taskreg_nesting_level++;
2027 scan_omp_task (gsi, ctx);
2028 taskreg_nesting_level--;
2029 break;
2031 case GIMPLE_OMP_FOR:
2032 scan_omp_for (stmt, ctx);
2033 break;
2035 case GIMPLE_OMP_SECTIONS:
2036 scan_omp_sections (stmt, ctx);
2037 break;
2039 case GIMPLE_OMP_SINGLE:
2040 scan_omp_single (stmt, ctx);
2041 break;
2043 case GIMPLE_OMP_SECTION:
2044 case GIMPLE_OMP_MASTER:
2045 case GIMPLE_OMP_ORDERED:
2046 case GIMPLE_OMP_CRITICAL:
2047 ctx = new_omp_context (stmt, ctx);
2048 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2049 break;
2051 case GIMPLE_BIND:
2053 tree var;
2055 *handled_ops_p = false;
2056 if (ctx)
2057 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2058 insert_decl_map (&ctx->cb, var, var);
2060 break;
2061 default:
2062 *handled_ops_p = false;
2063 break;
2066 return NULL_TREE;
2070 /* Scan all the statements starting at the current statement. CTX
2071 contains context information about the OpenMP directives and
2072 clauses found during the scan. */
2074 static void
2075 scan_omp (gimple_seq *body_p, omp_context *ctx)
2077 location_t saved_location;
2078 struct walk_stmt_info wi;
2080 memset (&wi, 0, sizeof (wi));
2081 wi.info = ctx;
2082 wi.want_locations = true;
2084 saved_location = input_location;
2085 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
2086 input_location = saved_location;
2089 /* Re-gimplification and code generation routines. */
2091 /* Build a call to GOMP_barrier. */
2093 static tree
2094 build_omp_barrier (void)
2096 return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
2099 /* If a context was created for STMT when it was scanned, return it. */
2101 static omp_context *
2102 maybe_lookup_ctx (gimple stmt)
2104 splay_tree_node n;
2105 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2106 return n ? (omp_context *) n->value : NULL;
2110 /* Find the mapping for DECL in CTX or the immediately enclosing
2111 context that has a mapping for DECL.
2113 If CTX is a nested parallel directive, we may have to use the decl
2114 mappings created in CTX's parent context. Suppose that we have the
2115 following parallel nesting (variable UIDs shown for clarity):
2117 iD.1562 = 0;
2118 #omp parallel shared(iD.1562) -> outer parallel
2119 iD.1562 = iD.1562 + 1;
2121 #omp parallel shared (iD.1562) -> inner parallel
2122 iD.1562 = iD.1562 - 1;
2124 Each parallel structure will create a distinct .omp_data_s structure
2125 for copying iD.1562 in/out of the directive:
2127 outer parallel .omp_data_s.1.i -> iD.1562
2128 inner parallel .omp_data_s.2.i -> iD.1562
2130 A shared variable mapping will produce a copy-out operation before
2131 the parallel directive and a copy-in operation after it. So, in
2132 this case we would have:
2134 iD.1562 = 0;
2135 .omp_data_o.1.i = iD.1562;
2136 #omp parallel shared(iD.1562) -> outer parallel
2137 .omp_data_i.1 = &.omp_data_o.1
2138 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2140 .omp_data_o.2.i = iD.1562; -> **
2141 #omp parallel shared(iD.1562) -> inner parallel
2142 .omp_data_i.2 = &.omp_data_o.2
2143 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2146 ** This is a problem. The symbol iD.1562 cannot be referenced
2147 inside the body of the outer parallel region. But since we are
2148 emitting this copy operation while expanding the inner parallel
2149 directive, we need to access the CTX structure of the outer
2150 parallel directive to get the correct mapping:
2152 .omp_data_o.2.i = .omp_data_i.1->i
2154 Since there may be other workshare or parallel directives enclosing
2155 the parallel directive, it may be necessary to walk up the context
2156 parent chain. This is not a problem in general because nested
2157 parallelism happens only rarely. */
2159 static tree
2160 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2162 tree t;
2163 omp_context *up;
2165 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2166 t = maybe_lookup_decl (decl, up);
2168 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2170 return t ? t : decl;
2174 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2175 in outer contexts. */
2177 static tree
2178 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2180 tree t = NULL;
2181 omp_context *up;
2183 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2184 t = maybe_lookup_decl (decl, up);
2186 return t ? t : decl;
2190 /* Construct the initialization value for reduction CLAUSE. */
2192 tree
2193 omp_reduction_init (tree clause, tree type)
2195 location_t loc = OMP_CLAUSE_LOCATION (clause);
2196 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2198 case PLUS_EXPR:
2199 case MINUS_EXPR:
2200 case BIT_IOR_EXPR:
2201 case BIT_XOR_EXPR:
2202 case TRUTH_OR_EXPR:
2203 case TRUTH_ORIF_EXPR:
2204 case TRUTH_XOR_EXPR:
2205 case NE_EXPR:
2206 return build_zero_cst (type);
2208 case MULT_EXPR:
2209 case TRUTH_AND_EXPR:
2210 case TRUTH_ANDIF_EXPR:
2211 case EQ_EXPR:
2212 return fold_convert_loc (loc, type, integer_one_node);
2214 case BIT_AND_EXPR:
2215 return fold_convert_loc (loc, type, integer_minus_one_node);
2217 case MAX_EXPR:
2218 if (SCALAR_FLOAT_TYPE_P (type))
2220 REAL_VALUE_TYPE max, min;
2221 if (HONOR_INFINITIES (TYPE_MODE (type)))
2223 real_inf (&max);
2224 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2226 else
2227 real_maxval (&min, 1, TYPE_MODE (type));
2228 return build_real (type, min);
2230 else
2232 gcc_assert (INTEGRAL_TYPE_P (type));
2233 return TYPE_MIN_VALUE (type);
2236 case MIN_EXPR:
2237 if (SCALAR_FLOAT_TYPE_P (type))
2239 REAL_VALUE_TYPE max;
2240 if (HONOR_INFINITIES (TYPE_MODE (type)))
2241 real_inf (&max);
2242 else
2243 real_maxval (&max, 0, TYPE_MODE (type));
2244 return build_real (type, max);
2246 else
2248 gcc_assert (INTEGRAL_TYPE_P (type));
2249 return TYPE_MAX_VALUE (type);
2252 default:
2253 gcc_unreachable ();
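/* For example, the private copy for reduction(+:x) starts at 0, for
   reduction(*:x) at 1, for reduction(&:x) at ~0, and for
   reduction(max:x) at the smallest value of x's type (-Inf for
   floating point types that honor infinities).  */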
2257 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2258 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2259 private variables. Initialization statements go in ILIST, while calls
2260 to destructors go in DLIST. */
2262 static void
2263 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2264 omp_context *ctx)
2266 tree c, dtor, copyin_seq, x, ptr;
2267 bool copyin_by_ref = false;
2268 bool lastprivate_firstprivate = false;
2269 int pass;
2271 copyin_seq = NULL;
2273 /* Do all the fixed sized types in the first pass, and the variable sized
2274 types in the second pass. This makes sure that the scalar arguments to
2275 the variable sized types are processed before we use them in the
2276 variable sized operations. */
2277 for (pass = 0; pass < 2; ++pass)
2279 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2281 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2282 tree var, new_var;
2283 bool by_ref;
2284 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2286 switch (c_kind)
2288 case OMP_CLAUSE_PRIVATE:
2289 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2290 continue;
2291 break;
2292 case OMP_CLAUSE_SHARED:
2293 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2295 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2296 continue;
2298 case OMP_CLAUSE_FIRSTPRIVATE:
2299 case OMP_CLAUSE_COPYIN:
2300 case OMP_CLAUSE_REDUCTION:
2301 break;
2302 case OMP_CLAUSE_LASTPRIVATE:
2303 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2305 lastprivate_firstprivate = true;
2306 if (pass != 0)
2307 continue;
2309 break;
2310 default:
2311 continue;
2314 new_var = var = OMP_CLAUSE_DECL (c);
2315 if (c_kind != OMP_CLAUSE_COPYIN)
2316 new_var = lookup_decl (var, ctx);
2318 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2320 if (pass != 0)
2321 continue;
2323 else if (is_variable_sized (var))
2325 /* For variable sized types, we need to allocate the
2326 actual storage here. Call alloca and store the
2327 result in the pointer decl that we created elsewhere. */
2328 if (pass == 0)
2329 continue;
2331 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2333 gimple stmt;
2334 tree tmp, atmp;
2336 ptr = DECL_VALUE_EXPR (new_var);
2337 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2338 ptr = TREE_OPERAND (ptr, 0);
2339 gcc_assert (DECL_P (ptr));
2340 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2342 /* void *tmp = __builtin_alloca */
2343 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2344 stmt = gimple_build_call (atmp, 1, x);
2345 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2346 gimple_add_tmp_var (tmp);
2347 gimple_call_set_lhs (stmt, tmp);
2349 gimple_seq_add_stmt (ilist, stmt);
2351 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2352 gimplify_assign (ptr, x, ilist);
2355 else if (is_reference (var))
2357 /* For references that are being privatized for Fortran,
2358 allocate new backing storage for the new pointer
2359 variable. This allows us to avoid changing all the
2360 code that expects a pointer to something that expects
2361 a direct variable. Note that this doesn't apply to
2362 C++, since reference types are disallowed in data
2363 sharing clauses there, except for NRV optimized
2364 return values. */
2365 if (pass == 0)
2366 continue;
2368 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2369 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2371 x = build_receiver_ref (var, false, ctx);
2372 x = build_fold_addr_expr_loc (clause_loc, x);
2374 else if (TREE_CONSTANT (x))
2376 const char *name = NULL;
2377 if (DECL_NAME (var))
2378 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2380 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2381 name);
2382 gimple_add_tmp_var (x);
2383 TREE_ADDRESSABLE (x) = 1;
2384 x = build_fold_addr_expr_loc (clause_loc, x);
2386 else
2388 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2389 x = build_call_expr_loc (clause_loc, atmp, 1, x);
2392 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2393 gimplify_assign (new_var, x, ilist);
2395 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2397 else if (c_kind == OMP_CLAUSE_REDUCTION
2398 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2400 if (pass == 0)
2401 continue;
2403 else if (pass != 0)
2404 continue;
2406 switch (OMP_CLAUSE_CODE (c))
2408 case OMP_CLAUSE_SHARED:
2409 /* Shared global vars are just accessed directly. */
2410 if (is_global_var (new_var))
2411 break;
2412 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2413 needs to be delayed until after fixup_child_record_type so
2414 that we get the correct type during the dereference. */
2415 by_ref = use_pointer_for_field (var, ctx);
2416 x = build_receiver_ref (var, by_ref, ctx);
2417 SET_DECL_VALUE_EXPR (new_var, x);
2418 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2420 /* ??? If VAR is not passed by reference, and the variable
2421 hasn't been initialized yet, then we'll get a warning for
2422 the store into the omp_data_s structure. Ideally, we'd be
2423 able to notice this and not store anything at all, but
2424 we're generating code too early. Suppress the warning. */
2425 if (!by_ref)
2426 TREE_NO_WARNING (var) = 1;
2427 break;
2429 case OMP_CLAUSE_LASTPRIVATE:
2430 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2431 break;
2432 /* FALLTHRU */
2434 case OMP_CLAUSE_PRIVATE:
2435 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2436 x = build_outer_var_ref (var, ctx);
2437 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2439 if (is_task_ctx (ctx))
2440 x = build_receiver_ref (var, false, ctx);
2441 else
2442 x = build_outer_var_ref (var, ctx);
2444 else
2445 x = NULL;
2446 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2447 if (x)
2448 gimplify_and_add (x, ilist);
2449 /* FALLTHRU */
2451 do_dtor:
2452 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2453 if (x)
2455 gimple_seq tseq = NULL;
2457 dtor = x;
2458 gimplify_stmt (&dtor, &tseq);
2459 gimple_seq_add_seq (dlist, tseq);
2461 break;
2463 case OMP_CLAUSE_FIRSTPRIVATE:
2464 if (is_task_ctx (ctx))
2466 if (is_reference (var) || is_variable_sized (var))
2467 goto do_dtor;
2468 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2469 ctx))
2470 || use_pointer_for_field (var, NULL))
2472 x = build_receiver_ref (var, false, ctx);
2473 SET_DECL_VALUE_EXPR (new_var, x);
2474 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2475 goto do_dtor;
2478 x = build_outer_var_ref (var, ctx);
2479 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2480 gimplify_and_add (x, ilist);
2481 goto do_dtor;
2482 break;
2484 case OMP_CLAUSE_COPYIN:
2485 by_ref = use_pointer_for_field (var, NULL);
2486 x = build_receiver_ref (var, by_ref, ctx);
2487 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2488 append_to_statement_list (x, &copyin_seq);
2489 copyin_by_ref |= by_ref;
2490 break;
2492 case OMP_CLAUSE_REDUCTION:
2493 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2495 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2496 x = build_outer_var_ref (var, ctx);
2498 if (is_reference (var))
2499 x = build_fold_addr_expr_loc (clause_loc, x);
2500 SET_DECL_VALUE_EXPR (placeholder, x);
2501 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2502 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2503 gimple_seq_add_seq (ilist,
2504 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2505 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2506 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2508 else
2510 x = omp_reduction_init (c, TREE_TYPE (new_var));
2511 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2512 gimplify_assign (new_var, x, ilist);
2514 break;
2516 default:
2517 gcc_unreachable ();
2522 /* The copyin sequence is not to be executed by the main thread, since
2523 that would result in self-copies. Perhaps not noticeable for scalars,
2524 but it certainly is for C++ operator=. */
2525 if (copyin_seq)
2527 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
2529 x = build2 (NE_EXPR, boolean_type_node, x,
2530 build_int_cst (TREE_TYPE (x), 0));
2531 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2532 gimplify_and_add (x, ilist);
2535 /* If any copyin variable is passed by reference, we must ensure the
2536 master thread doesn't modify it before it is copied over in all
2537 threads. Similarly for variables in both firstprivate and
2538 lastprivate clauses we need to ensure the lastprivate copying
2539 happens after firstprivate copying in all threads. */
2540 if (copyin_by_ref || lastprivate_firstprivate)
2541 gimplify_and_add (build_omp_barrier (), ilist);
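/* The net effect of the copyin handling above is roughly

     if (__builtin_omp_get_thread_num () != 0)
       x = <the master thread's x, read through .omp_data_i>;
     GOMP_barrier ();

   for each copyin(x) clause, with the trailing barrier emitted only
   when some copyin variable is passed by reference or when
   firstprivate/lastprivate ordering requires it.  */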
2545 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2546 both parallel and workshare constructs. PREDICATE may be NULL if it's
2547 always true. */
2549 static void
2550 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2551 omp_context *ctx)
2553 tree x, c, label = NULL;
2554 bool par_clauses = false;
2556 /* Early exit if there are no lastprivate clauses. */
2557 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2558 if (clauses == NULL)
2560 /* If this was a workshare directive, see if it had been combined
2561 with its parallel. In that case, look for the clauses on the
2562 parallel statement itself. */
2563 if (is_parallel_ctx (ctx))
2564 return;
2566 ctx = ctx->outer;
2567 if (ctx == NULL || !is_parallel_ctx (ctx))
2568 return;
2570 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2571 OMP_CLAUSE_LASTPRIVATE);
2572 if (clauses == NULL)
2573 return;
2574 par_clauses = true;
2577 if (predicate)
2579 gimple stmt;
2580 tree label_true, arm1, arm2;
2582 label = create_artificial_label (UNKNOWN_LOCATION);
2583 label_true = create_artificial_label (UNKNOWN_LOCATION);
2584 arm1 = TREE_OPERAND (predicate, 0);
2585 arm2 = TREE_OPERAND (predicate, 1);
2586 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2587 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2588 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2589 label_true, label);
2590 gimple_seq_add_stmt (stmt_list, stmt);
2591 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2594 for (c = clauses; c ;)
2596 tree var, new_var;
2597 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2599 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2601 var = OMP_CLAUSE_DECL (c);
2602 new_var = lookup_decl (var, ctx);
2604 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2606 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2607 gimple_seq_add_seq (stmt_list,
2608 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2610 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2612 x = build_outer_var_ref (var, ctx);
2613 if (is_reference (var))
2614 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2615 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2616 gimplify_and_add (x, stmt_list);
2618 c = OMP_CLAUSE_CHAIN (c);
2619 if (c == NULL && !par_clauses)
2621 /* If this was a workshare directive, see if it had been combined
2622 with its parallel. In that case, continue looking for the
2623 clauses also on the parallel statement itself. */
2624 if (is_parallel_ctx (ctx))
2625 break;
2627 ctx = ctx->outer;
2628 if (ctx == NULL || !is_parallel_ctx (ctx))
2629 break;
2631 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2632 OMP_CLAUSE_LASTPRIVATE);
2633 par_clauses = true;
2637 if (label)
2638 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
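/* For lastprivate(x) on a loop this boils down to roughly

     if (<this is the last iteration>)
       {
         <OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ, if any>
         outer x = private x;
       }

   where the condition comes from PREDICATE and is omitted entirely
   when PREDICATE is NULL.  */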
2642 /* Generate code to implement the REDUCTION clauses. */
2644 static void
2645 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2647 gimple_seq sub_seq = NULL;
2648 gimple stmt;
2649 tree x, c;
2650 int count = 0;
2652 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2653 update in that case, otherwise use a lock. */
2654 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2655 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2657 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2659 /* Never use OMP_ATOMIC for array reductions. */
2660 count = -1;
2661 break;
2663 count++;
2666 if (count == 0)
2667 return;
2669 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2671 tree var, ref, new_var;
2672 enum tree_code code;
2673 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2675 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2676 continue;
2678 var = OMP_CLAUSE_DECL (c);
2679 new_var = lookup_decl (var, ctx);
2680 if (is_reference (var))
2681 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2682 ref = build_outer_var_ref (var, ctx);
2683 code = OMP_CLAUSE_REDUCTION_CODE (c);
2685 /* reduction(-:var) sums up the partial results, so it acts
2686 identically to reduction(+:var). */
2687 if (code == MINUS_EXPR)
2688 code = PLUS_EXPR;
2690 if (count == 1)
2692 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2694 addr = save_expr (addr);
2695 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2696 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2697 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2698 gimplify_and_add (x, stmt_seqp);
2699 return;
2702 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2704 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2706 if (is_reference (var))
2707 ref = build_fold_addr_expr_loc (clause_loc, ref);
2708 SET_DECL_VALUE_EXPR (placeholder, ref);
2709 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2710 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2711 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2712 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2713 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2715 else
2717 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2718 ref = build_outer_var_ref (var, ctx);
2719 gimplify_assign (ref, x, &sub_seq);
2723 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
2725 gimple_seq_add_stmt (stmt_seqp, stmt);
2727 gimple_seq_add_seq (stmt_seqp, sub_seq);
2729 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
2731 gimple_seq_add_stmt (stmt_seqp, stmt);
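/* With exactly one scalar reduction clause, say reduction(+:x), the
   merge is emitted as a single atomic update,

     outer x = outer x + private x;   <- wrapped in OMP_ATOMIC

   while with several clauses, or with placeholder (array) reductions,
   the whole merge sequence is bracketed by calls to
   GOMP_atomic_start () and GOMP_atomic_end ().  */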
2735 /* Generate code to implement the COPYPRIVATE clauses. */
2737 static void
2738 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2739 omp_context *ctx)
2741 tree c;
2743 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2745 tree var, new_var, ref, x;
2746 bool by_ref;
2747 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2749 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2750 continue;
2752 var = OMP_CLAUSE_DECL (c);
2753 by_ref = use_pointer_for_field (var, NULL);
2755 ref = build_sender_ref (var, ctx);
2756 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2757 if (by_ref)
2759 x = build_fold_addr_expr_loc (clause_loc, new_var);
2760 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2762 gimplify_assign (ref, x, slist);
2764 ref = build_receiver_ref (var, false, ctx);
2765 if (by_ref)
2767 ref = fold_convert_loc (clause_loc,
2768 build_pointer_type (TREE_TYPE (new_var)),
2769 ref);
2770 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2772 if (is_reference (var))
2774 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2775 ref = build_simple_mem_ref_loc (clause_loc, ref);
2776 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2778 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2779 gimplify_and_add (x, rlist);
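/* Roughly, for copyprivate(x) the thread that executed the single
   stores x (or &x, when passed by reference) into the broadcast
   record via SLIST,

     .omp_data_o.x = x;

   and every other thread later copies it back out via RLIST,

     x = .omp_data_i->x;

   using the assignment operator provided by the front end.  */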
2784 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2785 and REDUCTION from the sender (aka parent) side. */
2787 static void
2788 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2789 omp_context *ctx)
2791 tree c;
2793 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2795 tree val, ref, x, var;
2796 bool by_ref, do_in = false, do_out = false;
2797 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2799 switch (OMP_CLAUSE_CODE (c))
2801 case OMP_CLAUSE_PRIVATE:
2802 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2803 break;
2804 continue;
2805 case OMP_CLAUSE_FIRSTPRIVATE:
2806 case OMP_CLAUSE_COPYIN:
2807 case OMP_CLAUSE_LASTPRIVATE:
2808 case OMP_CLAUSE_REDUCTION:
2809 break;
2810 default:
2811 continue;
2814 val = OMP_CLAUSE_DECL (c);
2815 var = lookup_decl_in_outer_ctx (val, ctx);
2817 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2818 && is_global_var (var))
2819 continue;
2820 if (is_variable_sized (val))
2821 continue;
2822 by_ref = use_pointer_for_field (val, NULL);
2824 switch (OMP_CLAUSE_CODE (c))
2826 case OMP_CLAUSE_PRIVATE:
2827 case OMP_CLAUSE_FIRSTPRIVATE:
2828 case OMP_CLAUSE_COPYIN:
2829 do_in = true;
2830 break;
2832 case OMP_CLAUSE_LASTPRIVATE:
2833 if (by_ref || is_reference (val))
2835 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2836 continue;
2837 do_in = true;
2839 else
2841 do_out = true;
2842 if (lang_hooks.decls.omp_private_outer_ref (val))
2843 do_in = true;
2845 break;
2847 case OMP_CLAUSE_REDUCTION:
2848 do_in = true;
2849 do_out = !(by_ref || is_reference (val));
2850 break;
2852 default:
2853 gcc_unreachable ();
2856 if (do_in)
2858 ref = build_sender_ref (val, ctx);
2859 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2860 gimplify_assign (ref, x, ilist);
2861 if (is_task_ctx (ctx))
2862 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2865 if (do_out)
2867 ref = build_sender_ref (val, ctx);
2868 gimplify_assign (var, ref, olist);
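/* In short, clauses whose data must reach the children (firstprivate,
   copyin, reduction, by-reference lastprivate) add sender-side stores
   of the form

     .omp_data_o.x = x;    (or = &x, when the field is a pointer)

   to ILIST, while clauses copied back to the parent (lastprivate,
   scalar reductions) add the matching

     x = .omp_data_o.x;

   to OLIST.  */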
2873 /* Generate code to implement SHARED from the sender (aka parent)
2874 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2875 list things that got automatically shared. */
2877 static void
2878 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2880 tree var, ovar, nvar, f, x, record_type;
2882 if (ctx->record_type == NULL)
2883 return;
2885 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2886 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
2888 ovar = DECL_ABSTRACT_ORIGIN (f);
2889 nvar = maybe_lookup_decl (ovar, ctx);
2890 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2891 continue;
2893 /* If CTX is a nested parallel directive, find the immediately
2894 enclosing parallel or workshare construct that contains a
2895 mapping for OVAR. */
2896 var = lookup_decl_in_outer_ctx (ovar, ctx);
2898 if (use_pointer_for_field (ovar, ctx))
2900 x = build_sender_ref (ovar, ctx);
2901 var = build_fold_addr_expr (var);
2902 gimplify_assign (x, var, ilist);
2904 else
2906 x = build_sender_ref (ovar, ctx);
2907 gimplify_assign (x, var, ilist);
2909 if (!TREE_READONLY (var)
2910 /* We don't need to receive a new reference to a result
2911 or parm decl. In fact we may not store to it as we will
2912 invalidate any pending RSO and generate wrong gimple
2913 during inlining. */
2914 && !((TREE_CODE (var) == RESULT_DECL
2915 || TREE_CODE (var) == PARM_DECL)
2916 && DECL_BY_REFERENCE (var)))
2918 x = build_sender_ref (ovar, ctx);
2919 gimplify_assign (var, x, olist);
2926 /* A convenience function to build an empty GIMPLE_COND with just the
2927 condition. */
2929 static gimple
2930 gimple_build_cond_empty (tree cond)
2932 enum tree_code pred_code;
2933 tree lhs, rhs;
2935 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2936 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2940 /* Build the function calls to GOMP_parallel_start etc to actually
2941 generate the parallel operation. REGION is the parallel region
2942 being expanded. BB is the block into which to insert the code. WS_ARGS
2943 will be set if this is a call to a combined parallel+workshare
2944 construct, it contains the list of additional arguments needed by
2945 the workshare construct. */
2947 static void
2948 expand_parallel_call (struct omp_region *region, basic_block bb,
2949 gimple entry_stmt, vec<tree, va_gc> *ws_args)
2951 tree t, t1, t2, val, cond, c, clauses;
2952 gimple_stmt_iterator gsi;
2953 gimple stmt;
2954 enum built_in_function start_ix;
2955 int start_ix2;
2956 location_t clause_loc;
2957 vec<tree, va_gc> *args;
2959 clauses = gimple_omp_parallel_clauses (entry_stmt);
2961 /* Determine what flavor of GOMP_parallel_start we will be
2962 emitting. */
2963 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2964 if (is_combined_parallel (region))
2966 switch (region->inner->type)
2968 case GIMPLE_OMP_FOR:
2969 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2970 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2971 + (region->inner->sched_kind
2972 == OMP_CLAUSE_SCHEDULE_RUNTIME
2973 ? 3 : region->inner->sched_kind));
2974 start_ix = (enum built_in_function)start_ix2;
2975 break;
2976 case GIMPLE_OMP_SECTIONS:
2977 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2978 break;
2979 default:
2980 gcc_unreachable ();
2984 /* By default, the value of NUM_THREADS is zero (selected at run time)
2985 and there is no conditional. */
2986 cond = NULL_TREE;
2987 val = build_int_cst (unsigned_type_node, 0);
2989 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2990 if (c)
2991 cond = OMP_CLAUSE_IF_EXPR (c);
2993 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2994 if (c)
2996 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2997 clause_loc = OMP_CLAUSE_LOCATION (c);
2999 else
3000 clause_loc = gimple_location (entry_stmt);
3002 /* Ensure 'val' is of the correct type. */
3003 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
3005 /* If we found the clause 'if (cond)', build either
3006 (cond != 0) or (cond ? val : 1u). */
3007 if (cond)
3009 gimple_stmt_iterator gsi;
3011 cond = gimple_boolify (cond);
3013 if (integer_zerop (val))
3014 val = fold_build2_loc (clause_loc,
3015 EQ_EXPR, unsigned_type_node, cond,
3016 build_int_cst (TREE_TYPE (cond), 0));
3017 else
3019 basic_block cond_bb, then_bb, else_bb;
3020 edge e, e_then, e_else;
3021 tree tmp_then, tmp_else, tmp_join, tmp_var;
3023 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
3024 if (gimple_in_ssa_p (cfun))
3026 tmp_then = make_ssa_name (tmp_var, NULL);
3027 tmp_else = make_ssa_name (tmp_var, NULL);
3028 tmp_join = make_ssa_name (tmp_var, NULL);
3030 else
3032 tmp_then = tmp_var;
3033 tmp_else = tmp_var;
3034 tmp_join = tmp_var;
3037 e = split_block (bb, NULL);
3038 cond_bb = e->src;
3039 bb = e->dest;
3040 remove_edge (e);
3042 then_bb = create_empty_bb (cond_bb);
3043 else_bb = create_empty_bb (then_bb);
3044 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3045 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3047 stmt = gimple_build_cond_empty (cond);
3048 gsi = gsi_start_bb (cond_bb);
3049 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3051 gsi = gsi_start_bb (then_bb);
3052 stmt = gimple_build_assign (tmp_then, val);
3053 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3055 gsi = gsi_start_bb (else_bb);
3056 stmt = gimple_build_assign
3057 (tmp_else, build_int_cst (unsigned_type_node, 1));
3058 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3060 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3061 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3062 if (current_loops)
3064 add_bb_to_loop (then_bb, cond_bb->loop_father);
3065 add_bb_to_loop (else_bb, cond_bb->loop_father);
3067 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3068 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3070 if (gimple_in_ssa_p (cfun))
3072 gimple phi = create_phi_node (tmp_join, bb);
3073 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3074 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3077 val = tmp_join;
3080 gsi = gsi_start_bb (bb);
3081 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3082 false, GSI_CONTINUE_LINKING);
3085 gsi = gsi_last_bb (bb);
3086 t = gimple_omp_parallel_data_arg (entry_stmt);
3087 if (t == NULL)
3088 t1 = null_pointer_node;
3089 else
3090 t1 = build_fold_addr_expr (t);
3091 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3093 vec_alloc (args, 3 + vec_safe_length (ws_args));
3094 args->quick_push (t2);
3095 args->quick_push (t1);
3096 args->quick_push (val);
3097 if (ws_args)
3098 args->splice (*ws_args);
3100 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3101 builtin_decl_explicit (start_ix), args);
3103 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3104 false, GSI_CONTINUE_LINKING);
3106 t = gimple_omp_parallel_data_arg (entry_stmt);
3107 if (t == NULL)
3108 t = null_pointer_node;
3109 else
3110 t = build_fold_addr_expr (t);
3111 t = build_call_expr_loc (gimple_location (entry_stmt),
3112 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3113 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3114 false, GSI_CONTINUE_LINKING);
3116 t = build_call_expr_loc (gimple_location (entry_stmt),
3117 builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
3119 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3120 false, GSI_CONTINUE_LINKING);
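/* Putting the pieces together, for a plain #pragma omp parallel the
   code inserted at BB looks roughly like

     GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
     child_fn (&.omp_data_o);
     GOMP_parallel_end ();

   where num_threads is 0 (let the runtime choose), the NUM_THREADS
   expression, or the (cond ? val : 1u) value computed above, and
   combined parallel+workshare regions call one of the
   GOMP_parallel_loop_*_start or GOMP_parallel_sections_start
   entry points with WS_ARGS appended.  */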
3124 /* Build the function call to GOMP_task to actually
3125 generate the task operation. BB is the block into which to insert the code. */
3127 static void
3128 expand_task_call (basic_block bb, gimple entry_stmt)
3130 tree t, t1, t2, t3, flags, cond, c, c2, clauses;
3131 gimple_stmt_iterator gsi;
3132 location_t loc = gimple_location (entry_stmt);
3134 clauses = gimple_omp_task_clauses (entry_stmt);
3136 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3137 if (c)
3138 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3139 else
3140 cond = boolean_true_node;
3142 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3143 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
3144 flags = build_int_cst (unsigned_type_node,
3145 (c ? 1 : 0) + (c2 ? 4 : 0));
3147 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
3148 if (c)
3150 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
3151 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
3152 build_int_cst (unsigned_type_node, 2),
3153 build_int_cst (unsigned_type_node, 0));
3154 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
3157 gsi = gsi_last_bb (bb);
3158 t = gimple_omp_task_data_arg (entry_stmt);
3159 if (t == NULL)
3160 t2 = null_pointer_node;
3161 else
3162 t2 = build_fold_addr_expr_loc (loc, t);
3163 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3164 t = gimple_omp_task_copy_fn (entry_stmt);
3165 if (t == NULL)
3166 t3 = null_pointer_node;
3167 else
3168 t3 = build_fold_addr_expr_loc (loc, t);
3170 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
3171 7, t1, t2, t3,
3172 gimple_omp_task_arg_size (entry_stmt),
3173 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3175 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3176 false, GSI_CONTINUE_LINKING);
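/* The FLAGS argument built above encodes the clauses as bits: 1 for
   untied, 4 for mergeable, and 2 added at run time when the final
   clause expression is true.  E.g.

     #pragma omp task untied final (n > 30)

   ends up passing flags = 1 + (n > 30 ? 2 : 0) to GOMP_task.  */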
3180 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3181 catch handler and return it. This prevents programs from violating the
3182 structured block semantics with throws. */
3184 static gimple_seq
3185 maybe_catch_exception (gimple_seq body)
3187 gimple g;
3188 tree decl;
3190 if (!flag_exceptions)
3191 return body;
3193 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3194 decl = lang_hooks.eh_protect_cleanup_actions ();
3195 else
3196 decl = builtin_decl_explicit (BUILT_IN_TRAP);
3198 g = gimple_build_eh_must_not_throw (decl);
3199 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3200 GIMPLE_TRY_CATCH);
3202 return gimple_seq_alloc_with_stmt (g);
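/* In other words, the body of the region is wrapped as

     try
       {
         <BODY>
       }
     catch
       {
         <MUST_NOT_THROW: typically terminate () for C++,
          otherwise __builtin_trap ()>
       }

   so that an exception cannot escape the structured block.  */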
3205 /* Chain all the DECLs in V by their DECL_CHAIN fields. */
3207 static tree
3208 vec2chain (vec<tree, va_gc> *v)
3210 tree chain = NULL_TREE, t;
3211 unsigned ix;
3213 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
3215 DECL_CHAIN (t) = chain;
3216 chain = t;
3219 return chain;
3223 /* Remove barriers in REGION->EXIT's block. Note that this is only
3224 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3225 is an implicit barrier, a barrier that a workshare inside the
3226 GIMPLE_OMP_PARALLEL left at the end of the GIMPLE_OMP_PARALLEL region
3227 can now be removed. */
3229 static void
3230 remove_exit_barrier (struct omp_region *region)
3232 gimple_stmt_iterator gsi;
3233 basic_block exit_bb;
3234 edge_iterator ei;
3235 edge e;
3236 gimple stmt;
3237 int any_addressable_vars = -1;
3239 exit_bb = region->exit;
3241 /* If the parallel region doesn't return, we don't have REGION->EXIT
3242 block at all. */
3243 if (! exit_bb)
3244 return;
3246 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3247 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3248 statements that can appear in between are extremely limited -- no
3249 memory operations at all. Here, we allow nothing at all, so the
3250 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3251 gsi = gsi_last_bb (exit_bb);
3252 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3253 gsi_prev (&gsi);
3254 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3255 return;
3257 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3259 gsi = gsi_last_bb (e->src);
3260 if (gsi_end_p (gsi))
3261 continue;
3262 stmt = gsi_stmt (gsi);
3263 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3264 && !gimple_omp_return_nowait_p (stmt))
3266 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3267 in many cases. If there could be tasks queued, the barrier
3268 might be needed to let the tasks run before some local
3269 variable of the parallel that the task uses as shared
3270 runs out of scope. The task can be spawned either
3271 from within current function (this would be easy to check)
3272 or from some function it calls and gets passed an address
3273 of such a variable. */
3274 if (any_addressable_vars < 0)
3276 gimple parallel_stmt = last_stmt (region->entry);
3277 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3278 tree local_decls, block, decl;
3279 unsigned ix;
3281 any_addressable_vars = 0;
3282 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3283 if (TREE_ADDRESSABLE (decl))
3285 any_addressable_vars = 1;
3286 break;
3288 for (block = gimple_block (stmt);
3289 !any_addressable_vars
3290 && block
3291 && TREE_CODE (block) == BLOCK;
3292 block = BLOCK_SUPERCONTEXT (block))
3294 for (local_decls = BLOCK_VARS (block);
3295 local_decls;
3296 local_decls = DECL_CHAIN (local_decls))
3297 if (TREE_ADDRESSABLE (local_decls))
3299 any_addressable_vars = 1;
3300 break;
3302 if (block == gimple_block (parallel_stmt))
3303 break;
3306 if (!any_addressable_vars)
3307 gimple_omp_return_set_nowait (stmt);
3312 static void
3313 remove_exit_barriers (struct omp_region *region)
3315 if (region->type == GIMPLE_OMP_PARALLEL)
3316 remove_exit_barrier (region);
3318 if (region->inner)
3320 region = region->inner;
3321 remove_exit_barriers (region);
3322 while (region->next)
3324 region = region->next;
3325 remove_exit_barriers (region);
3330 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3331 calls. These can't be declared as const functions, but
3332 within one parallel body they are constant, so they can be
3333 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3334 which are declared const. Similarly for task body, except
3335 that in untied task omp_get_thread_num () can change at any task
3336 scheduling point. */
3338 static void
3339 optimize_omp_library_calls (gimple entry_stmt)
3341 basic_block bb;
3342 gimple_stmt_iterator gsi;
3343 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3344 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
3345 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3346 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
3347 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3348 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3349 OMP_CLAUSE_UNTIED) != NULL);
3351 FOR_EACH_BB (bb)
3352 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3354 gimple call = gsi_stmt (gsi);
3355 tree decl;
3357 if (is_gimple_call (call)
3358 && (decl = gimple_call_fndecl (call))
3359 && DECL_EXTERNAL (decl)
3360 && TREE_PUBLIC (decl)
3361 && DECL_INITIAL (decl) == NULL)
3363 tree built_in;
3365 if (DECL_NAME (decl) == thr_num_id)
3367 /* In #pragma omp task untied omp_get_thread_num () can change
3368 during the execution of the task region. */
3369 if (untied_task)
3370 continue;
3371 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3373 else if (DECL_NAME (decl) == num_thr_id)
3374 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3375 else
3376 continue;
3378 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3379 || gimple_call_num_args (call) != 0)
3380 continue;
3382 if (flag_exceptions && !TREE_NOTHROW (decl))
3383 continue;
3385 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3386 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3387 TREE_TYPE (TREE_TYPE (built_in))))
3388 continue;
3390 gimple_call_set_fndecl (call, built_in);
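/* So within an outlined parallel body a call such as

     n = omp_get_num_threads ();

   is redirected to the const builtin

     n = __builtin_omp_get_num_threads ();

   which lets later passes treat repeated queries as equivalent and
   clean them up.  */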
3395 /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
3396 regimplified. */
3398 static tree
3399 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
3401 tree t = *tp;
3403 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
3404 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
3405 return t;
3407 if (TREE_CODE (t) == ADDR_EXPR)
3408 recompute_tree_invariant_for_addr_expr (t);
3410 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
3411 return NULL_TREE;
3414 /* Expand the OpenMP parallel or task directive starting at REGION. */
3416 static void
3417 expand_omp_taskreg (struct omp_region *region)
3419 basic_block entry_bb, exit_bb, new_bb;
3420 struct function *child_cfun;
3421 tree child_fn, block, t;
3422 gimple_stmt_iterator gsi;
3423 gimple entry_stmt, stmt;
3424 edge e;
3425 vec<tree, va_gc> *ws_args;
3427 entry_stmt = last_stmt (region->entry);
3428 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3429 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3431 entry_bb = region->entry;
3432 exit_bb = region->exit;
3434 if (is_combined_parallel (region))
3435 ws_args = region->ws_args;
3436 else
3437 ws_args = NULL;
3439 if (child_cfun->cfg)
3441 /* Due to inlining, it may happen that we have already outlined
3442 the region, in which case all we need to do is make the
3443 sub-graph unreachable and emit the parallel call. */
3444 edge entry_succ_e, exit_succ_e;
3445 gimple_stmt_iterator gsi;
3447 entry_succ_e = single_succ_edge (entry_bb);
3449 gsi = gsi_last_bb (entry_bb);
3450 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3451 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3452 gsi_remove (&gsi, true);
3454 new_bb = entry_bb;
3455 if (exit_bb)
3457 exit_succ_e = single_succ_edge (exit_bb);
3458 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3460 remove_edge_and_dominated_blocks (entry_succ_e);
3462 else
3464 unsigned srcidx, dstidx, num;
3466 /* If the parallel region needs data sent from the parent
3467 function, then the very first statement (except possible
3468 tree profile counter updates) of the parallel body
3469 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3470 &.OMP_DATA_O is passed as an argument to the child function,
3471 we need to replace it with the argument as seen by the child
3472 function.
3474 In most cases, this will end up being the identity assignment
3475 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3476 a function call that has been inlined, the original PARM_DECL
3477 .OMP_DATA_I may have been converted into a different local
3478 variable, in which case we need to keep the assignment. */
3479 if (gimple_omp_taskreg_data_arg (entry_stmt))
3481 basic_block entry_succ_bb = single_succ (entry_bb);
3482 gimple_stmt_iterator gsi;
3483 tree arg, narg;
3484 gimple parcopy_stmt = NULL;
3486 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3488 gimple stmt;
3490 gcc_assert (!gsi_end_p (gsi));
3491 stmt = gsi_stmt (gsi);
3492 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3493 continue;
3495 if (gimple_num_ops (stmt) == 2)
3497 tree arg = gimple_assign_rhs1 (stmt);
3499 /* We're ignoring the subcode because we're
3500 effectively doing a STRIP_NOPS. */
3502 if (TREE_CODE (arg) == ADDR_EXPR
3503 && TREE_OPERAND (arg, 0)
3504 == gimple_omp_taskreg_data_arg (entry_stmt))
3506 parcopy_stmt = stmt;
3507 break;
3512 gcc_assert (parcopy_stmt != NULL);
3513 arg = DECL_ARGUMENTS (child_fn);
3515 if (!gimple_in_ssa_p (cfun))
3517 if (gimple_assign_lhs (parcopy_stmt) == arg)
3518 gsi_remove (&gsi, true);
3519 else
3521 /* ?? Is setting the subcode really necessary ?? */
3522 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3523 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3526 else
3528 /* If we are in ssa form, we must load the value from the default
3529 definition of the argument. That should not be defined now,
3530 since the argument is not used uninitialized. */
3531 gcc_assert (ssa_default_def (cfun, arg) == NULL);
3532 narg = make_ssa_name (arg, gimple_build_nop ());
3533 set_ssa_default_def (cfun, arg, narg);
3534 /* ?? Is setting the subcode really necessary ?? */
3535 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3536 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3537 update_stmt (parcopy_stmt);
3541 /* Declare local variables needed in CHILD_CFUN. */
3542 block = DECL_INITIAL (child_fn);
3543 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3544 /* The gimplifier could record temporaries in the parallel/task block
3545 rather than in the containing function's local_decls chain,
3546 which would mean cgraph missed finalizing them. Do it now. */
3547 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3548 if (TREE_CODE (t) == VAR_DECL
3549 && TREE_STATIC (t)
3550 && !DECL_EXTERNAL (t))
3551 varpool_finalize_decl (t);
3552 DECL_SAVED_TREE (child_fn) = NULL;
3553 /* We'll create a CFG for child_fn, so no gimple body is needed. */
3554 gimple_set_body (child_fn, NULL);
3555 TREE_USED (block) = 1;
3557 /* Reset DECL_CONTEXT on function arguments. */
3558 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3559 DECL_CONTEXT (t) = child_fn;
3561 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3562 so that it can be moved to the child function. */
3563 gsi = gsi_last_bb (entry_bb);
3564 stmt = gsi_stmt (gsi);
3565 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3566 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3567 gsi_remove (&gsi, true);
3568 e = split_block (entry_bb, stmt);
3569 entry_bb = e->dest;
3570 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3572 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3573 if (exit_bb)
3575 gsi = gsi_last_bb (exit_bb);
3576 gcc_assert (!gsi_end_p (gsi)
3577 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3578 stmt = gimple_build_return (NULL);
3579 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3580 gsi_remove (&gsi, true);
3583 /* Move the parallel region into CHILD_CFUN. */
3585 if (gimple_in_ssa_p (cfun))
3587 init_tree_ssa (child_cfun);
3588 init_ssa_operands (child_cfun);
3589 child_cfun->gimple_df->in_ssa_p = true;
3590 block = NULL_TREE;
3592 else
3593 block = gimple_block (entry_stmt);
3595 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3596 if (exit_bb)
3597 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3598 /* When the OMP expansion process cannot guarantee an up-to-date
3599 loop tree, arrange for the child function to fix up loops. */
3600 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
3601 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
3603 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3604 num = vec_safe_length (child_cfun->local_decls);
3605 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3607 t = (*child_cfun->local_decls)[srcidx];
3608 if (DECL_CONTEXT (t) == cfun->decl)
3609 continue;
3610 if (srcidx != dstidx)
3611 (*child_cfun->local_decls)[dstidx] = t;
3612 dstidx++;
3614 if (dstidx != num)
3615 vec_safe_truncate (child_cfun->local_decls, dstidx);
3617 /* Inform the callgraph about the new function. */
3618 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
3619 cgraph_add_new_function (child_fn, true);
3621 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3622 fixed in a following pass. */
3623 push_cfun (child_cfun);
3624 if (optimize)
3625 optimize_omp_library_calls (entry_stmt);
3626 rebuild_cgraph_edges ();
3628 /* Some EH regions might become dead, see PR34608. If
3629 pass_cleanup_cfg isn't the first pass to happen with the
3630 new child, these dead EH edges might cause problems.
3631 Clean them up now. */
3632 if (flag_exceptions)
3634 basic_block bb;
3635 bool changed = false;
3637 FOR_EACH_BB (bb)
3638 changed |= gimple_purge_dead_eh_edges (bb);
3639 if (changed)
3640 cleanup_tree_cfg ();
3642 if (gimple_in_ssa_p (cfun))
3643 update_ssa (TODO_update_ssa);
3644 pop_cfun ();
3647 /* Emit a library call to launch the children threads. */
3648 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3649 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3650 else
3651 expand_task_call (new_bb, entry_stmt);
3652 if (gimple_in_ssa_p (cfun))
3653 update_ssa (TODO_update_ssa_only_virtuals);
3657 /* A subroutine of expand_omp_for. Generate code for a parallel
3658 loop with any schedule. Given parameters:
3660 for (V = N1; V cond N2; V += STEP) BODY;
3662 where COND is "<" or ">", we generate pseudocode
3664 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3665 if (more) goto L0; else goto L3;
3667 V = istart0;
3668 iend = iend0;
3670 BODY;
3671 V += STEP;
3672 if (V cond iend) goto L1; else goto L2;
3674 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3677 If this is a combined omp parallel loop, instead of the call to
3678 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3680 For collapsed loops, given parameters:
3681 collapse(3)
3682 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3683 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3684 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3685 BODY;
3687 we generate pseudocode
3689 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
3690 if (cond3 is <)
3691 adj = STEP3 - 1;
3692 else
3693 adj = STEP3 + 1;
3694 count3 = (adj + N32 - N31) / STEP3;
3695 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
3696 if (cond2 is <)
3697 adj = STEP2 - 1;
3698 else
3699 adj = STEP2 + 1;
3700 count2 = (adj + N22 - N21) / STEP2;
3701 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
3702 if (cond1 is <)
3703 adj = STEP1 - 1;
3704 else
3705 adj = STEP1 + 1;
3706 count1 = (adj + N12 - N11) / STEP1;
3707 count = count1 * count2 * count3;
3708 goto Z1;
3710 count = 0;
3712 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3713 if (more) goto L0; else goto L3;
3715 V = istart0;
3716 T = V;
3717 V3 = N31 + (T % count3) * STEP3;
3718 T = T / count3;
3719 V2 = N21 + (T % count2) * STEP2;
3720 T = T / count2;
3721 V1 = N11 + T * STEP1;
3722 iend = iend0;
3724 BODY;
3725 V += 1;
3726 if (V < iend) goto L10; else goto L2;
3727 L10:
3728 V3 += STEP3;
3729 if (V3 cond3 N32) goto L1; else goto L11;
3730 L11:
3731 V3 = N31;
3732 V2 += STEP2;
3733 if (V2 cond2 N22) goto L1; else goto L12;
3734 L12:
3735 V2 = N21;
3736 V1 += STEP1;
3737 goto L1;
3739 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3744 static void
3745 expand_omp_for_generic (struct omp_region *region,
3746 struct omp_for_data *fd,
3747 enum built_in_function start_fn,
3748 enum built_in_function next_fn)
3750 tree type, istart0, iend0, iend;
3751 tree t, vmain, vback, bias = NULL_TREE;
3752 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3753 basic_block l2_bb = NULL, l3_bb = NULL;
3754 gimple_stmt_iterator gsi;
3755 gimple stmt;
3756 bool in_combined_parallel = is_combined_parallel (region);
3757 bool broken_loop = region->cont == NULL;
3758 edge e, ne;
3759 tree *counts = NULL;
3760 int i;
3762 gcc_assert (!broken_loop || !in_combined_parallel);
3763 gcc_assert (fd->iter_type == long_integer_type_node
3764 || !in_combined_parallel);
3766 type = TREE_TYPE (fd->loop.v);
3767 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3768 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3769 TREE_ADDRESSABLE (istart0) = 1;
3770 TREE_ADDRESSABLE (iend0) = 1;
3772 /* See if we need to bias by LLONG_MIN. */
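/* (The GOMP_loop_ull_* runtime entry points operate on unsigned long
   long iteration values; when the original induction variable is a
   signed type whose bounds may be negative, adding LLONG_MIN first
   maps the signed range monotonically onto the unsigned range so the
   runtime's comparisons still come out right.  BIAS is subtracted
   again below when istart0/iend0 are copied back into V.)  */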
3773 if (fd->iter_type == long_long_unsigned_type_node
3774 && TREE_CODE (type) == INTEGER_TYPE
3775 && !TYPE_UNSIGNED (type))
3777 tree n1, n2;
3779 if (fd->loop.cond_code == LT_EXPR)
3781 n1 = fd->loop.n1;
3782 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3784 else
3786 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3787 n2 = fd->loop.n1;
3789 if (TREE_CODE (n1) != INTEGER_CST
3790 || TREE_CODE (n2) != INTEGER_CST
3791 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3792 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3795 entry_bb = region->entry;
3796 cont_bb = region->cont;
3797 collapse_bb = NULL;
3798 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3799 gcc_assert (broken_loop
3800 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3801 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3802 l1_bb = single_succ (l0_bb);
3803 if (!broken_loop)
3805 l2_bb = create_empty_bb (cont_bb);
3806 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3807 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3809 else
3810 l2_bb = NULL;
3811 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3812 exit_bb = region->exit;
3814 gsi = gsi_last_bb (entry_bb);
3816 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3817 if (fd->collapse > 1)
3819 basic_block zero_iter_bb = NULL;
3820 int first_zero_iter = -1;
3822 /* collapsed loops need work for expansion in SSA form. */
3823 gcc_assert (!gimple_in_ssa_p (cfun));
3824 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3825 for (i = 0; i < fd->collapse; i++)
3827 tree itype = TREE_TYPE (fd->loops[i].v);
3829 if (SSA_VAR_P (fd->loop.n2)
3830 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
3831 fold_convert (itype, fd->loops[i].n1),
3832 fold_convert (itype, fd->loops[i].n2)))
3833 == NULL_TREE || !integer_onep (t)))
3835 tree n1, n2;
3836 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
3837 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
3838 true, GSI_SAME_STMT);
3839 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
3840 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
3841 true, GSI_SAME_STMT);
3842 stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
3843 NULL_TREE, NULL_TREE);
3844 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3845 if (walk_tree (gimple_cond_lhs_ptr (stmt),
3846 expand_omp_regimplify_p, NULL, NULL)
3847 || walk_tree (gimple_cond_rhs_ptr (stmt),
3848 expand_omp_regimplify_p, NULL, NULL))
3850 gsi = gsi_for_stmt (stmt);
3851 gimple_regimplify_operands (stmt, &gsi);
3853 e = split_block (entry_bb, stmt);
3854 if (zero_iter_bb == NULL)
3856 first_zero_iter = i;
3857 zero_iter_bb = create_empty_bb (entry_bb);
3858 if (current_loops)
3859 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
3860 gsi = gsi_after_labels (zero_iter_bb);
3861 stmt = gimple_build_assign (fd->loop.n2,
3862 build_zero_cst (type));
3863 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3864 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
3865 entry_bb);
3867 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
3868 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
3869 e->flags = EDGE_TRUE_VALUE;
3870 e->probability = REG_BR_PROB_BASE - ne->probability;
3871 entry_bb = e->dest;
3872 gsi = gsi_last_bb (entry_bb);
3874 if (POINTER_TYPE_P (itype))
3875 itype = signed_type_for (itype);
3876 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3877 ? -1 : 1));
3878 t = fold_build2 (PLUS_EXPR, itype,
3879 fold_convert (itype, fd->loops[i].step), t);
3880 t = fold_build2 (PLUS_EXPR, itype, t,
3881 fold_convert (itype, fd->loops[i].n2));
3882 t = fold_build2 (MINUS_EXPR, itype, t,
3883 fold_convert (itype, fd->loops[i].n1));
3884 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3885 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3886 fold_build1 (NEGATE_EXPR, itype, t),
3887 fold_build1 (NEGATE_EXPR, itype,
3888 fold_convert (itype,
3889 fd->loops[i].step)));
3890 else
3891 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3892 fold_convert (itype, fd->loops[i].step));
3893 t = fold_convert (type, t);
3894 if (TREE_CODE (t) == INTEGER_CST)
3895 counts[i] = t;
3896 else
3898 counts[i] = create_tmp_reg (type, ".count");
3899 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3900 true, GSI_SAME_STMT);
3901 stmt = gimple_build_assign (counts[i], t);
3902 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3904 if (SSA_VAR_P (fd->loop.n2))
3906 if (i == 0)
3907 t = counts[0];
3908 else
3910 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3911 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3912 true, GSI_SAME_STMT);
3914 stmt = gimple_build_assign (fd->loop.n2, t);
3915 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3918 if (zero_iter_bb)
3920 /* Some counts[i] vars might be uninitialized if
3921 some loop has zero iterations. But the body shouldn't
3922 be executed in that case, so just avoid uninit warnings. */
3923 for (i = first_zero_iter; i < fd->collapse; i++)
3924 if (SSA_VAR_P (counts[i]))
3925 TREE_NO_WARNING (counts[i]) = 1;
3926 gsi_prev (&gsi);
3927 e = split_block (entry_bb, gsi_stmt (gsi));
3928 entry_bb = e->dest;
3929 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
3930 gsi = gsi_last_bb (entry_bb);
3931 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
3932 get_immediate_dominator (CDI_DOMINATORS,
3933 zero_iter_bb));
3936 if (in_combined_parallel)
3938 /* In a combined parallel loop, emit a call to
3939 GOMP_loop_foo_next. */
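/* (When combined with the enclosing parallel, the GOMP_parallel_loop_*_start
   call that launched the parallel is expected to have done the scheduling
   setup already, so only the *_next call is needed here.)  */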
3940 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
3941 build_fold_addr_expr (istart0),
3942 build_fold_addr_expr (iend0));
3944 else
3946 tree t0, t1, t2, t3, t4;
3947 /* If this is not a combined parallel loop, emit a call to
3948 GOMP_loop_foo_start in ENTRY_BB. */
3949 t4 = build_fold_addr_expr (iend0);
3950 t3 = build_fold_addr_expr (istart0);
3951 t2 = fold_convert (fd->iter_type, fd->loop.step);
3952 if (POINTER_TYPE_P (type)
3953 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3955 /* Avoid casting pointers to integer of a different size. */
3956 tree itype = signed_type_for (type);
3957 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3958 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3960 else
3962 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3963 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3965 if (bias)
3967 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3968 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3970 if (fd->iter_type == long_integer_type_node)
3972 if (fd->chunk_size)
3974 t = fold_convert (fd->iter_type, fd->chunk_size);
3975 t = build_call_expr (builtin_decl_explicit (start_fn),
3976 6, t0, t1, t2, t, t3, t4);
3978 else
3979 t = build_call_expr (builtin_decl_explicit (start_fn),
3980 5, t0, t1, t2, t3, t4);
3982 else
3984 tree t5;
3985 tree c_bool_type;
3986 tree bfn_decl;
3988 /* The GOMP_loop_ull_*start functions have an additional boolean
3989 argument, true for < loops and false for > loops.
3990 In Fortran, the C bool type can be different from
3991 boolean_type_node. */
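/* As an illustration, for the dynamic schedule the call built below has
   the shape
     GOMP_loop_ull_dynamic_start (up, n1, n2, step, chunk, &istart0, &iend0)
   with the chunk argument dropped for the chunkless variants.  */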
3992 bfn_decl = builtin_decl_explicit (start_fn);
3993 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
3994 t5 = build_int_cst (c_bool_type,
3995 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3996 if (fd->chunk_size)
3998 tree bfn_decl = builtin_decl_explicit (start_fn);
3999 t = fold_convert (fd->iter_type, fd->chunk_size);
4000 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
4002 else
4003 t = build_call_expr (builtin_decl_explicit (start_fn),
4004 6, t5, t0, t1, t2, t3, t4);
4007 if (TREE_TYPE (t) != boolean_type_node)
4008 t = fold_build2 (NE_EXPR, boolean_type_node,
4009 t, build_int_cst (TREE_TYPE (t), 0));
4010 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4011 true, GSI_SAME_STMT);
4012 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4014 /* Remove the GIMPLE_OMP_FOR statement. */
4015 gsi_remove (&gsi, true);
4017 /* Iteration setup for sequential loop goes in L0_BB. */
4018 gsi = gsi_start_bb (l0_bb);
4019 t = istart0;
4020 if (bias)
4021 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
4022 if (POINTER_TYPE_P (type))
4023 t = fold_convert (signed_type_for (type), t);
4024 t = fold_convert (type, t);
4025 t = force_gimple_operand_gsi (&gsi, t,
4026 DECL_P (fd->loop.v)
4027 && TREE_ADDRESSABLE (fd->loop.v),
4028 NULL_TREE, false, GSI_CONTINUE_LINKING);
4029 stmt = gimple_build_assign (fd->loop.v, t);
4030 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4032 t = iend0;
4033 if (bias)
4034 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
4035 if (POINTER_TYPE_P (type))
4036 t = fold_convert (signed_type_for (type), t);
4037 t = fold_convert (type, t);
4038 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4039 false, GSI_CONTINUE_LINKING);
4040 if (fd->collapse > 1)
4042 tree tem = create_tmp_reg (type, ".tem");
4043 stmt = gimple_build_assign (tem, fd->loop.v);
4044 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4045 for (i = fd->collapse - 1; i >= 0; i--)
4047 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
4048 itype = vtype;
4049 if (POINTER_TYPE_P (vtype))
4050 itype = signed_type_for (vtype);
4051 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
4052 t = fold_convert (itype, t);
4053 t = fold_build2 (MULT_EXPR, itype, t,
4054 fold_convert (itype, fd->loops[i].step));
4055 if (POINTER_TYPE_P (vtype))
4056 t = fold_build_pointer_plus (fd->loops[i].n1, t);
4057 else
4058 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
4059 t = force_gimple_operand_gsi (&gsi, t,
4060 DECL_P (fd->loops[i].v)
4061 && TREE_ADDRESSABLE (fd->loops[i].v),
4062 NULL_TREE, false,
4063 GSI_CONTINUE_LINKING);
4064 stmt = gimple_build_assign (fd->loops[i].v, t);
4065 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4066 if (i != 0)
4068 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
4069 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4070 false, GSI_CONTINUE_LINKING);
4071 stmt = gimple_build_assign (tem, t);
4072 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4077 if (!broken_loop)
4079 /* Code to control the increment and predicate for the sequential
4080 loop goes in the CONT_BB. */
4081 gsi = gsi_last_bb (cont_bb);
4082 stmt = gsi_stmt (gsi);
4083 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4084 vmain = gimple_omp_continue_control_use (stmt);
4085 vback = gimple_omp_continue_control_def (stmt);
4087 if (POINTER_TYPE_P (type))
4088 t = fold_build_pointer_plus (vmain, fd->loop.step);
4089 else
4090 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4091 t = force_gimple_operand_gsi (&gsi, t,
4092 DECL_P (vback) && TREE_ADDRESSABLE (vback),
4093 NULL_TREE, true, GSI_SAME_STMT);
4094 stmt = gimple_build_assign (vback, t);
4095 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4097 t = build2 (fd->loop.cond_code, boolean_type_node,
4098 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
4099 iend);
4100 stmt = gimple_build_cond_empty (t);
4101 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4103 /* Remove GIMPLE_OMP_CONTINUE. */
4104 gsi_remove (&gsi, true);
4106 if (fd->collapse > 1)
4108 basic_block last_bb, bb;
4110 last_bb = cont_bb;
4111 for (i = fd->collapse - 1; i >= 0; i--)
4113 tree vtype = TREE_TYPE (fd->loops[i].v);
4115 bb = create_empty_bb (last_bb);
4116 if (current_loops)
4117 add_bb_to_loop (bb, last_bb->loop_father);
4118 gsi = gsi_start_bb (bb);
4120 if (i < fd->collapse - 1)
4122 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
4123 e->probability = REG_BR_PROB_BASE / 8;
4125 t = fd->loops[i + 1].n1;
4126 t = force_gimple_operand_gsi (&gsi, t,
4127 DECL_P (fd->loops[i + 1].v)
4128 && TREE_ADDRESSABLE
4129 (fd->loops[i + 1].v),
4130 NULL_TREE, false,
4131 GSI_CONTINUE_LINKING);
4132 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
4133 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4135 else
4136 collapse_bb = bb;
4138 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
4140 if (POINTER_TYPE_P (vtype))
4141 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
4142 else
4143 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
4144 fd->loops[i].step);
4145 t = force_gimple_operand_gsi (&gsi, t,
4146 DECL_P (fd->loops[i].v)
4147 && TREE_ADDRESSABLE (fd->loops[i].v),
4148 NULL_TREE, false,
4149 GSI_CONTINUE_LINKING);
4150 stmt = gimple_build_assign (fd->loops[i].v, t);
4151 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4153 if (i > 0)
4155 t = fd->loops[i].n2;
4156 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4157 false, GSI_CONTINUE_LINKING);
4158 tree v = fd->loops[i].v;
4159 if (DECL_P (v) && TREE_ADDRESSABLE (v))
4160 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
4161 false, GSI_CONTINUE_LINKING);
4162 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
4163 v, t);
4164 stmt = gimple_build_cond_empty (t);
4165 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4166 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4167 e->probability = REG_BR_PROB_BASE * 7 / 8;
4169 else
4170 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4171 last_bb = bb;
4175 /* Emit code to get the next parallel iteration in L2_BB. */
4176 gsi = gsi_start_bb (l2_bb);
4178 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
4179 build_fold_addr_expr (istart0),
4180 build_fold_addr_expr (iend0));
4181 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4182 false, GSI_CONTINUE_LINKING);
4183 if (TREE_TYPE (t) != boolean_type_node)
4184 t = fold_build2 (NE_EXPR, boolean_type_node,
4185 t, build_int_cst (TREE_TYPE (t), 0));
4186 stmt = gimple_build_cond_empty (t);
4187 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4190 /* Add the loop cleanup function. */
4191 gsi = gsi_last_bb (exit_bb);
4192 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4193 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
4194 else
4195 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
4196 stmt = gimple_build_call (t, 0);
4197 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4198 gsi_remove (&gsi, true);
4200 /* Connect the new blocks. */
4201 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4202 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4204 if (!broken_loop)
4206 gimple_seq phis;
4208 e = find_edge (cont_bb, l3_bb);
4209 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4211 phis = phi_nodes (l3_bb);
4212 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4214 gimple phi = gsi_stmt (gsi);
4215 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4216 PHI_ARG_DEF_FROM_EDGE (phi, e));
4218 remove_edge (e);
4220 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4221 if (current_loops)
4222 add_bb_to_loop (l2_bb, cont_bb->loop_father);
4223 if (fd->collapse > 1)
4225 e = find_edge (cont_bb, l1_bb);
4226 remove_edge (e);
4227 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4229 else
4231 e = find_edge (cont_bb, l1_bb);
4232 e->flags = EDGE_TRUE_VALUE;
4234 e->probability = REG_BR_PROB_BASE * 7 / 8;
4235 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4236 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4238 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4239 recompute_dominator (CDI_DOMINATORS, l2_bb));
4240 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4241 recompute_dominator (CDI_DOMINATORS, l3_bb));
4242 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4243 recompute_dominator (CDI_DOMINATORS, l0_bb));
4244 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4245 recompute_dominator (CDI_DOMINATORS, l1_bb));
4247 struct loop *outer_loop = alloc_loop ();
4248 outer_loop->header = l0_bb;
4249 outer_loop->latch = l2_bb;
4250 add_loop (outer_loop, l0_bb->loop_father);
4252 struct loop *loop = alloc_loop ();
4253 loop->header = l1_bb;
4254 /* The loop may have multiple latches. */
4255 add_loop (loop, outer_loop);
4260 /* A subroutine of expand_omp_for. Generate code for a parallel
4261 loop with static schedule and no specified chunk size. Given
4262 parameters:
4264 for (V = N1; V cond N2; V += STEP) BODY;
4266 where COND is "<" or ">", we generate pseudocode
4268 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
4269 if (cond is <)
4270 adj = STEP - 1;
4271 else
4272 adj = STEP + 1;
4273 if ((__typeof (V)) -1 > 0 && cond is >)
4274 n = -(adj + N2 - N1) / -STEP;
4275 else
4276 n = (adj + N2 - N1) / STEP;
4277 q = n / nthreads;
4278 tt = n % nthreads;
4279 if (threadid < tt) goto L3; else goto L4;
4280 L3:
4281 tt = 0;
4282 q = q + 1;
4283 L4:
4284 s0 = q * threadid + tt;
4285 e0 = s0 + q;
4286 V = s0 * STEP + N1;
4287 if (s0 >= e0) goto L2; else goto L0;
4288 L0:
4289 e = e0 * STEP + N1;
4290 L1:
4291 BODY;
4292 V += STEP;
4293 if (V cond e) goto L1;
4294 L2:
4295 */
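/* A worked example with illustrative numbers: for n = 10 iterations and
   nthreads = 4, q = 2 and tt = 2, so threads 0 and 1 each take q + 1 = 3
   iterations (s0 = 0 and 3) while threads 2 and 3 each take 2 (s0 = 6 and
   8), covering [0,3) [3,6) [6,8) [8,10).  */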
4297 static void
4298 expand_omp_for_static_nochunk (struct omp_region *region,
4299 struct omp_for_data *fd)
4301 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
4302 tree type, itype, vmain, vback;
4303 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4304 basic_block body_bb, cont_bb;
4305 basic_block fin_bb;
4306 gimple_stmt_iterator gsi;
4307 gimple stmt;
4308 edge ep;
4310 itype = type = TREE_TYPE (fd->loop.v);
4311 if (POINTER_TYPE_P (type))
4312 itype = signed_type_for (type);
4314 entry_bb = region->entry;
4315 cont_bb = region->cont;
4316 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4317 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4318 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4319 body_bb = single_succ (seq_start_bb);
4320 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4321 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4322 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4323 exit_bb = region->exit;
4325 /* Iteration space partitioning goes in ENTRY_BB. */
4326 gsi = gsi_last_bb (entry_bb);
4327 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4329 t = fold_binary (fd->loop.cond_code, boolean_type_node,
4330 fold_convert (type, fd->loop.n1),
4331 fold_convert (type, fd->loop.n2));
4332 if (TYPE_UNSIGNED (type)
4333 && (t == NULL_TREE || !integer_onep (t)))
4335 tree n1, n2;
4336 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
4337 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
4338 true, GSI_SAME_STMT);
4339 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
4340 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
4341 true, GSI_SAME_STMT);
4342 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
4343 NULL_TREE, NULL_TREE);
4344 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4345 if (walk_tree (gimple_cond_lhs_ptr (stmt),
4346 expand_omp_regimplify_p, NULL, NULL)
4347 || walk_tree (gimple_cond_rhs_ptr (stmt),
4348 expand_omp_regimplify_p, NULL, NULL))
4350 gsi = gsi_for_stmt (stmt);
4351 gimple_regimplify_operands (stmt, &gsi);
4353 ep = split_block (entry_bb, stmt);
4354 ep->flags = EDGE_TRUE_VALUE;
4355 entry_bb = ep->dest;
4356 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
4357 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
4358 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
4359 if (gimple_in_ssa_p (cfun))
4361 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
4362 for (gsi = gsi_start_phis (fin_bb);
4363 !gsi_end_p (gsi); gsi_next (&gsi))
4365 gimple phi = gsi_stmt (gsi);
4366 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
4367 ep, UNKNOWN_LOCATION);
4370 gsi = gsi_last_bb (entry_bb);
4373 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4374 t = fold_convert (itype, t);
4375 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4376 true, GSI_SAME_STMT);
4378 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4379 t = fold_convert (itype, t);
4380 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4381 true, GSI_SAME_STMT);
4383 fd->loop.n1
4384 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4385 true, NULL_TREE, true, GSI_SAME_STMT);
4386 fd->loop.n2
4387 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4388 true, NULL_TREE, true, GSI_SAME_STMT);
4389 fd->loop.step
4390 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4391 true, NULL_TREE, true, GSI_SAME_STMT);
4393 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4394 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4395 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4396 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4397 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4398 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4399 fold_build1 (NEGATE_EXPR, itype, t),
4400 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4401 else
4402 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4403 t = fold_convert (itype, t);
4404 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4406 q = create_tmp_reg (itype, "q");
4407 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4408 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4409 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4411 tt = create_tmp_reg (itype, "tt");
4412 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4413 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4414 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
4416 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4417 stmt = gimple_build_cond_empty (t);
4418 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4420 second_bb = split_block (entry_bb, stmt)->dest;
4421 gsi = gsi_last_bb (second_bb);
4422 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4424 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4425 GSI_SAME_STMT);
4426 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4427 build_int_cst (itype, 1));
4428 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4430 third_bb = split_block (second_bb, stmt)->dest;
4431 gsi = gsi_last_bb (third_bb);
4432 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4434 t = build2 (MULT_EXPR, itype, q, threadid);
4435 t = build2 (PLUS_EXPR, itype, t, tt);
4436 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4438 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4439 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4441 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4442 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4444 /* Remove the GIMPLE_OMP_FOR statement. */
4445 gsi_remove (&gsi, true);
4447 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4448 gsi = gsi_start_bb (seq_start_bb);
4450 t = fold_convert (itype, s0);
4451 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4452 if (POINTER_TYPE_P (type))
4453 t = fold_build_pointer_plus (fd->loop.n1, t);
4454 else
4455 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4456 t = force_gimple_operand_gsi (&gsi, t,
4457 DECL_P (fd->loop.v)
4458 && TREE_ADDRESSABLE (fd->loop.v),
4459 NULL_TREE, false, GSI_CONTINUE_LINKING);
4460 stmt = gimple_build_assign (fd->loop.v, t);
4461 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4463 t = fold_convert (itype, e0);
4464 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4465 if (POINTER_TYPE_P (type))
4466 t = fold_build_pointer_plus (fd->loop.n1, t);
4467 else
4468 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4469 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4470 false, GSI_CONTINUE_LINKING);
4472 /* The code controlling the sequential loop replaces the
4473 GIMPLE_OMP_CONTINUE. */
4474 gsi = gsi_last_bb (cont_bb);
4475 stmt = gsi_stmt (gsi);
4476 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4477 vmain = gimple_omp_continue_control_use (stmt);
4478 vback = gimple_omp_continue_control_def (stmt);
4480 if (POINTER_TYPE_P (type))
4481 t = fold_build_pointer_plus (vmain, fd->loop.step);
4482 else
4483 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4484 t = force_gimple_operand_gsi (&gsi, t,
4485 DECL_P (vback) && TREE_ADDRESSABLE (vback),
4486 NULL_TREE, true, GSI_SAME_STMT);
4487 stmt = gimple_build_assign (vback, t);
4488 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4490 t = build2 (fd->loop.cond_code, boolean_type_node,
4491 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e);
4492 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4494 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4495 gsi_remove (&gsi, true);
4497 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4498 gsi = gsi_last_bb (exit_bb);
4499 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4500 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4501 false, GSI_SAME_STMT);
4502 gsi_remove (&gsi, true);
4504 /* Connect all the blocks. */
4505 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
4506 ep->probability = REG_BR_PROB_BASE / 4 * 3;
4507 ep = find_edge (entry_bb, second_bb);
4508 ep->flags = EDGE_TRUE_VALUE;
4509 ep->probability = REG_BR_PROB_BASE / 4;
4510 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4511 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4513 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4514 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4516 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
4517 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
4518 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
4519 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4520 recompute_dominator (CDI_DOMINATORS, body_bb));
4521 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4522 recompute_dominator (CDI_DOMINATORS, fin_bb));
4524 struct loop *loop = alloc_loop ();
4525 loop->header = body_bb;
4526 loop->latch = cont_bb;
4527 add_loop (loop, body_bb->loop_father);
4531 /* A subroutine of expand_omp_for. Generate code for a parallel
4532 loop with static schedule and a specified chunk size. Given
4533 parameters:
4535 for (V = N1; V cond N2; V += STEP) BODY;
4537 where COND is "<" or ">", we generate pseudocode
4539 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
4540 if (cond is <)
4541 adj = STEP - 1;
4542 else
4543 adj = STEP + 1;
4544 if ((__typeof (V)) -1 > 0 && cond is >)
4545 n = -(adj + N2 - N1) / -STEP;
4546 else
4547 n = (adj + N2 - N1) / STEP;
4548 trip = 0;
4549 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4550 here so that V is defined
4551 if the loop is not entered
4552 L0:
4553 s0 = (trip * nthreads + threadid) * CHUNK;
4554 e0 = min(s0 + CHUNK, n);
4555 if (s0 < n) goto L1; else goto L4;
4556 L1:
4557 V = s0 * STEP + N1;
4558 e = e0 * STEP + N1;
4559 L2:
4560 BODY;
4561 V += STEP;
4562 if (V cond e) goto L2; else goto L3;
4563 L3:
4564 trip += 1;
4565 goto L0;
4566 L4:
4567 */
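/* A worked example with illustrative numbers: for n = 10, nthreads = 2 and
   CHUNK = 3, trip 0 hands thread 0 the range [0,3) and thread 1 the range
   [3,6); trip 1 hands out [6,9) and [9,10); on trip 2 both threads compute
   s0 >= n and exit.  */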
4569 static void
4570 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4572 tree n, s0, e0, e, t;
4573 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4574 tree type, itype, v_main, v_back, v_extra;
4575 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4576 basic_block trip_update_bb, cont_bb, fin_bb;
4577 gimple_stmt_iterator si;
4578 gimple stmt;
4579 edge se;
4581 itype = type = TREE_TYPE (fd->loop.v);
4582 if (POINTER_TYPE_P (type))
4583 itype = signed_type_for (type);
4585 entry_bb = region->entry;
4586 se = split_block (entry_bb, last_stmt (entry_bb));
4587 entry_bb = se->src;
4588 iter_part_bb = se->dest;
4589 cont_bb = region->cont;
4590 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4591 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4592 == FALLTHRU_EDGE (cont_bb)->dest);
4593 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4594 body_bb = single_succ (seq_start_bb);
4595 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4596 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4597 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4598 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4599 exit_bb = region->exit;
4601 /* Trip and adjustment setup goes in ENTRY_BB. */
4602 si = gsi_last_bb (entry_bb);
4603 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4605 t = fold_binary (fd->loop.cond_code, boolean_type_node,
4606 fold_convert (type, fd->loop.n1),
4607 fold_convert (type, fd->loop.n2));
4608 if (TYPE_UNSIGNED (type)
4609 && (t == NULL_TREE || !integer_onep (t)))
4611 tree n1, n2;
4612 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
4613 n1 = force_gimple_operand_gsi (&si, n1, true, NULL_TREE,
4614 true, GSI_SAME_STMT);
4615 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
4616 n2 = force_gimple_operand_gsi (&si, n2, true, NULL_TREE,
4617 true, GSI_SAME_STMT);
4618 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
4619 NULL_TREE, NULL_TREE);
4620 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4621 if (walk_tree (gimple_cond_lhs_ptr (stmt),
4622 expand_omp_regimplify_p, NULL, NULL)
4623 || walk_tree (gimple_cond_rhs_ptr (stmt),
4624 expand_omp_regimplify_p, NULL, NULL))
4626 si = gsi_for_stmt (stmt);
4627 gimple_regimplify_operands (stmt, &si);
4629 se = split_block (entry_bb, stmt);
4630 se->flags = EDGE_TRUE_VALUE;
4631 entry_bb = se->dest;
4632 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
4633 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
4634 se->probability = REG_BR_PROB_BASE / 2000 - 1;
4635 if (gimple_in_ssa_p (cfun))
4637 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
4638 for (si = gsi_start_phis (fin_bb);
4639 !gsi_end_p (si); gsi_next (&si))
4641 gimple phi = gsi_stmt (si);
4642 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
4643 se, UNKNOWN_LOCATION);
4646 si = gsi_last_bb (entry_bb);
4649 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4650 t = fold_convert (itype, t);
4651 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4652 true, GSI_SAME_STMT);
4654 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4655 t = fold_convert (itype, t);
4656 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4657 true, GSI_SAME_STMT);
4659 fd->loop.n1
4660 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4661 true, NULL_TREE, true, GSI_SAME_STMT);
4662 fd->loop.n2
4663 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4664 true, NULL_TREE, true, GSI_SAME_STMT);
4665 fd->loop.step
4666 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4667 true, NULL_TREE, true, GSI_SAME_STMT);
4668 fd->chunk_size
4669 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4670 true, NULL_TREE, true, GSI_SAME_STMT);
4672 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4673 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4674 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4675 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4676 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4677 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4678 fold_build1 (NEGATE_EXPR, itype, t),
4679 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4680 else
4681 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4682 t = fold_convert (itype, t);
4683 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4684 true, GSI_SAME_STMT);
4686 trip_var = create_tmp_reg (itype, ".trip");
4687 if (gimple_in_ssa_p (cfun))
4689 trip_init = make_ssa_name (trip_var, NULL);
4690 trip_main = make_ssa_name (trip_var, NULL);
4691 trip_back = make_ssa_name (trip_var, NULL);
4693 else
4695 trip_init = trip_var;
4696 trip_main = trip_var;
4697 trip_back = trip_var;
4700 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4701 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4703 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4704 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4705 if (POINTER_TYPE_P (type))
4706 t = fold_build_pointer_plus (fd->loop.n1, t);
4707 else
4708 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4709 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4710 true, GSI_SAME_STMT);
4712 /* Remove the GIMPLE_OMP_FOR. */
4713 gsi_remove (&si, true);
4715 /* Iteration space partitioning goes in ITER_PART_BB. */
4716 si = gsi_last_bb (iter_part_bb);
4718 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4719 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4720 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4721 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4722 false, GSI_CONTINUE_LINKING);
4724 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4725 t = fold_build2 (MIN_EXPR, itype, t, n);
4726 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4727 false, GSI_CONTINUE_LINKING);
4729 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4730 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4732 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4733 si = gsi_start_bb (seq_start_bb);
4735 t = fold_convert (itype, s0);
4736 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4737 if (POINTER_TYPE_P (type))
4738 t = fold_build_pointer_plus (fd->loop.n1, t);
4739 else
4740 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4741 t = force_gimple_operand_gsi (&si, t,
4742 DECL_P (fd->loop.v)
4743 && TREE_ADDRESSABLE (fd->loop.v),
4744 NULL_TREE, false, GSI_CONTINUE_LINKING);
4745 stmt = gimple_build_assign (fd->loop.v, t);
4746 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4748 t = fold_convert (itype, e0);
4749 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4750 if (POINTER_TYPE_P (type))
4751 t = fold_build_pointer_plus (fd->loop.n1, t);
4752 else
4753 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4754 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4755 false, GSI_CONTINUE_LINKING);
4757 /* The code controlling the sequential loop goes in CONT_BB,
4758 replacing the GIMPLE_OMP_CONTINUE. */
4759 si = gsi_last_bb (cont_bb);
4760 stmt = gsi_stmt (si);
4761 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4762 v_main = gimple_omp_continue_control_use (stmt);
4763 v_back = gimple_omp_continue_control_def (stmt);
4765 if (POINTER_TYPE_P (type))
4766 t = fold_build_pointer_plus (v_main, fd->loop.step);
4767 else
4768 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4769 if (DECL_P (v_back) && TREE_ADDRESSABLE (v_back))
4770 t = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4771 true, GSI_SAME_STMT);
4772 stmt = gimple_build_assign (v_back, t);
4773 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4775 t = build2 (fd->loop.cond_code, boolean_type_node,
4776 DECL_P (v_back) && TREE_ADDRESSABLE (v_back)
4777 ? t : v_back, e);
4778 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4780 /* Remove GIMPLE_OMP_CONTINUE. */
4781 gsi_remove (&si, true);
4783 /* Trip update code goes into TRIP_UPDATE_BB. */
4784 si = gsi_start_bb (trip_update_bb);
4786 t = build_int_cst (itype, 1);
4787 t = build2 (PLUS_EXPR, itype, trip_main, t);
4788 stmt = gimple_build_assign (trip_back, t);
4789 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4791 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4792 si = gsi_last_bb (exit_bb);
4793 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4794 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4795 false, GSI_SAME_STMT);
4796 gsi_remove (&si, true);
4798 /* Connect the new blocks. */
4799 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4800 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4802 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4803 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4805 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4807 if (gimple_in_ssa_p (cfun))
4809 gimple_stmt_iterator psi;
4810 gimple phi;
4811 edge re, ene;
4812 edge_var_map_vector *head;
4813 edge_var_map *vm;
4814 size_t i;
4816 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4817 remove arguments of the phi nodes in fin_bb. We need to create
4818 appropriate phi nodes in iter_part_bb instead. */
4819 se = single_pred_edge (fin_bb);
4820 re = single_succ_edge (trip_update_bb);
4821 head = redirect_edge_var_map_vector (re);
4822 ene = single_succ_edge (entry_bb);
4824 psi = gsi_start_phis (fin_bb);
4825 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
4826 gsi_next (&psi), ++i)
4828 gimple nphi;
4829 source_location locus;
4831 phi = gsi_stmt (psi);
4832 t = gimple_phi_result (phi);
4833 gcc_assert (t == redirect_edge_var_map_result (vm));
4834 nphi = create_phi_node (t, iter_part_bb);
4836 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4837 locus = gimple_phi_arg_location_from_edge (phi, se);
4839 /* A special case -- fd->loop.v is not yet computed in
4840 iter_part_bb, we need to use v_extra instead. */
4841 if (t == fd->loop.v)
4842 t = v_extra;
4843 add_phi_arg (nphi, t, ene, locus);
4844 locus = redirect_edge_var_map_location (vm);
4845 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4847 gcc_assert (!gsi_end_p (psi) && i == head->length ());
4848 redirect_edge_var_map_clear (re);
4849 while (1)
4851 psi = gsi_start_phis (fin_bb);
4852 if (gsi_end_p (psi))
4853 break;
4854 remove_phi_node (&psi, false);
4857 /* Make phi node for trip. */
4858 phi = create_phi_node (trip_main, iter_part_bb);
4859 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4860 UNKNOWN_LOCATION);
4861 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4862 UNKNOWN_LOCATION);
4865 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4866 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4867 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4868 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4869 recompute_dominator (CDI_DOMINATORS, fin_bb));
4870 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4871 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4872 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4873 recompute_dominator (CDI_DOMINATORS, body_bb));
4875 struct loop *trip_loop = alloc_loop ();
4876 trip_loop->header = iter_part_bb;
4877 trip_loop->latch = trip_update_bb;
4878 add_loop (trip_loop, iter_part_bb->loop_father);
4880 struct loop *loop = alloc_loop ();
4881 loop->header = body_bb;
4882 loop->latch = cont_bb;
4883 add_loop (loop, trip_loop);
4887 /* Expand the OpenMP loop defined by REGION. */
4889 static void
4890 expand_omp_for (struct omp_region *region)
4892 struct omp_for_data fd;
4893 struct omp_for_data_loop *loops;
4895 loops
4896 = (struct omp_for_data_loop *)
4897 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4898 * sizeof (struct omp_for_data_loop));
4899 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4900 region->sched_kind = fd.sched_kind;
4902 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4903 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4904 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4905 if (region->cont)
4907 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4908 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4909 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4911 else
4912 /* If there isn't a continue then this is a degenerate case where
4913 the introduction of abnormal edges during lowering will prevent
4914 original loops from being detected. Fix that up. */
4915 loops_state_set (LOOPS_NEED_FIXUP);
4917 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4918 && !fd.have_ordered
4919 && fd.collapse == 1
4920 && region->cont != NULL)
4922 if (fd.chunk_size == NULL)
4923 expand_omp_for_static_nochunk (region, &fd);
4924 else
4925 expand_omp_for_static_chunk (region, &fd);
4927 else
4929 int fn_index, start_ix, next_ix;
4931 if (fd.chunk_size == NULL
4932 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
4933 fd.chunk_size = integer_zero_node;
4934 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4935 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4936 ? 3 : fd.sched_kind;
4937 fn_index += fd.have_ordered * 4;
4938 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
4939 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
4940 if (fd.iter_type == long_long_unsigned_type_node)
4942 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4943 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
4944 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4945 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
4947 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4948 (enum built_in_function) next_ix);
4951 if (gimple_in_ssa_p (cfun))
4952 update_ssa (TODO_update_ssa_only_virtuals);
4956 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4958 v = GOMP_sections_start (n);
4959 L0:
4960 switch (v)
4961 {
4962 case 0:
4963 goto L2;
4964 case 1:
4965 section 1;
4966 goto L1;
4967 case 2:
4968 ...
4969 case n:
4970 ...
4971 default:
4972 abort ();
4973 }
4974 L1:
4975 v = GOMP_sections_next ();
4976 goto L0;
4977 L2:
4978 reduction;
4980 If this is a combined parallel sections, replace the call to
4981 GOMP_sections_start with a call to GOMP_sections_next. */
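/* For instance, a sections construct containing two section blocks would
   call GOMP_sections_start (2) and dispatch on the returned value: cases 1
   and 2 run the two section bodies, case 0 is taken when no work is left,
   and the default case aborts.  */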
4983 static void
4984 expand_omp_sections (struct omp_region *region)
4986 tree t, u, vin = NULL, vmain, vnext, l2;
4987 vec<tree> label_vec;
4988 unsigned len;
4989 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4990 gimple_stmt_iterator si, switch_si;
4991 gimple sections_stmt, stmt, cont;
4992 edge_iterator ei;
4993 edge e;
4994 struct omp_region *inner;
4995 unsigned i, casei;
4996 bool exit_reachable = region->cont != NULL;
4998 gcc_assert (region->exit != NULL);
4999 entry_bb = region->entry;
5000 l0_bb = single_succ (entry_bb);
5001 l1_bb = region->cont;
5002 l2_bb = region->exit;
5003 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
5004 l2 = gimple_block_label (l2_bb);
5005 else
5007 /* This can happen if there are reductions. */
5008 len = EDGE_COUNT (l0_bb->succs);
5009 gcc_assert (len > 0);
5010 e = EDGE_SUCC (l0_bb, len - 1);
5011 si = gsi_last_bb (e->dest);
5012 l2 = NULL_TREE;
5013 if (gsi_end_p (si)
5014 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
5015 l2 = gimple_block_label (e->dest);
5016 else
5017 FOR_EACH_EDGE (e, ei, l0_bb->succs)
5019 si = gsi_last_bb (e->dest);
5020 if (gsi_end_p (si)
5021 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
5023 l2 = gimple_block_label (e->dest);
5024 break;
5028 if (exit_reachable)
5029 default_bb = create_empty_bb (l1_bb->prev_bb);
5030 else
5031 default_bb = create_empty_bb (l0_bb);
5033 /* We will build a switch() with enough cases for all the
5034 GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
5035 and a default case to abort if something goes wrong. */
5036 len = EDGE_COUNT (l0_bb->succs);
5038 /* Use vec::quick_push on label_vec throughout, since we know the size
5039 in advance. */
5040 label_vec.create (len);
5042 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
5043 GIMPLE_OMP_SECTIONS statement. */
5044 si = gsi_last_bb (entry_bb);
5045 sections_stmt = gsi_stmt (si);
5046 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
5047 vin = gimple_omp_sections_control (sections_stmt);
5048 if (!is_combined_parallel (region))
5050 /* If we are not inside a combined parallel+sections region,
5051 call GOMP_sections_start. */
5052 t = build_int_cst (unsigned_type_node,
5053 exit_reachable ? len - 1 : len);
5054 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
5055 stmt = gimple_build_call (u, 1, t);
5057 else
5059 /* Otherwise, call GOMP_sections_next. */
5060 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
5061 stmt = gimple_build_call (u, 0);
5063 gimple_call_set_lhs (stmt, vin);
5064 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5065 gsi_remove (&si, true);
5067 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
5068 L0_BB. */
5069 switch_si = gsi_last_bb (l0_bb);
5070 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
5071 if (exit_reachable)
5073 cont = last_stmt (l1_bb);
5074 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
5075 vmain = gimple_omp_continue_control_use (cont);
5076 vnext = gimple_omp_continue_control_def (cont);
5078 else
5080 vmain = vin;
5081 vnext = NULL_TREE;
5084 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
5085 label_vec.quick_push (t);
5086 i = 1;
5088 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
5089 for (inner = region->inner, casei = 1;
5090 inner;
5091 inner = inner->next, i++, casei++)
5093 basic_block s_entry_bb, s_exit_bb;
5095 /* Skip optional reduction region. */
5096 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
5098 --i;
5099 --casei;
5100 continue;
5103 s_entry_bb = inner->entry;
5104 s_exit_bb = inner->exit;
5106 t = gimple_block_label (s_entry_bb);
5107 u = build_int_cst (unsigned_type_node, casei);
5108 u = build_case_label (u, NULL, t);
5109 label_vec.quick_push (u);
5111 si = gsi_last_bb (s_entry_bb);
5112 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
5113 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
5114 gsi_remove (&si, true);
5115 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
5117 if (s_exit_bb == NULL)
5118 continue;
5120 si = gsi_last_bb (s_exit_bb);
5121 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
5122 gsi_remove (&si, true);
5124 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
5127 /* Error handling code goes in DEFAULT_BB. */
5128 t = gimple_block_label (default_bb);
5129 u = build_case_label (NULL, NULL, t);
5130 make_edge (l0_bb, default_bb, 0);
5131 if (current_loops)
5132 add_bb_to_loop (default_bb, current_loops->tree_root);
5134 stmt = gimple_build_switch (vmain, u, label_vec);
5135 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
5136 gsi_remove (&switch_si, true);
5137 label_vec.release ();
5139 si = gsi_start_bb (default_bb);
5140 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
5141 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
5143 if (exit_reachable)
5145 tree bfn_decl;
5147 /* Code to get the next section goes in L1_BB. */
5148 si = gsi_last_bb (l1_bb);
5149 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
5151 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
5152 stmt = gimple_build_call (bfn_decl, 0);
5153 gimple_call_set_lhs (stmt, vnext);
5154 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5155 gsi_remove (&si, true);
5157 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
5160 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
5161 si = gsi_last_bb (l2_bb);
5162 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
5163 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
5164 else
5165 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
5166 stmt = gimple_build_call (t, 0);
5167 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5168 gsi_remove (&si, true);
5170 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
5174 /* Expand code for an OpenMP single directive. We've already expanded
5175 much of the code, here we simply place the GOMP_barrier call. */
5177 static void
5178 expand_omp_single (struct omp_region *region)
5180 basic_block entry_bb, exit_bb;
5181 gimple_stmt_iterator si;
5182 bool need_barrier = false;
5184 entry_bb = region->entry;
5185 exit_bb = region->exit;
5187 si = gsi_last_bb (entry_bb);
5188 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
5189 be removed. We need to ensure that the thread that entered the single
5190 does not exit before the data is copied out by the other threads. */
5191 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
5192 OMP_CLAUSE_COPYPRIVATE))
5193 need_barrier = true;
5194 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
5195 gsi_remove (&si, true);
5196 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
5198 si = gsi_last_bb (exit_bb);
5199 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
5200 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
5201 false, GSI_SAME_STMT);
5202 gsi_remove (&si, true);
5203 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
5207 /* Generic expansion for OpenMP synchronization directives: master,
5208 ordered and critical. All we need to do here is remove the entry
5209 and exit markers for REGION. */
5211 static void
5212 expand_omp_synch (struct omp_region *region)
5214 basic_block entry_bb, exit_bb;
5215 gimple_stmt_iterator si;
5217 entry_bb = region->entry;
5218 exit_bb = region->exit;
5220 si = gsi_last_bb (entry_bb);
5221 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
5222 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
5223 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
5224 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
5225 gsi_remove (&si, true);
5226 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
5228 if (exit_bb)
5230 si = gsi_last_bb (exit_bb);
5231 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
5232 gsi_remove (&si, true);
5233 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
5237 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5238 operation as a normal volatile load. */
5240 static bool
5241 expand_omp_atomic_load (basic_block load_bb, tree addr,
5242 tree loaded_val, int index)
5244 enum built_in_function tmpbase;
5245 gimple_stmt_iterator gsi;
5246 basic_block store_bb;
5247 location_t loc;
5248 gimple stmt;
5249 tree decl, call, type, itype;
5251 gsi = gsi_last_bb (load_bb);
5252 stmt = gsi_stmt (gsi);
5253 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
5254 loc = gimple_location (stmt);
5256 /* ??? If the target does not implement atomic_load_optab[mode], and mode
5257 is smaller than word size, then expand_atomic_load assumes that the load
5258 is atomic. We could avoid the builtin entirely in this case. */
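  /* For example, an atomic read of an 8-byte value (index == 3) is
     expected to end up as
       loaded_val = __atomic_load_8 (addr, MEMMODEL_RELAXED);  */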
5260 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
5261 decl = builtin_decl_explicit (tmpbase);
5262 if (decl == NULL_TREE)
5263 return false;
5265 type = TREE_TYPE (loaded_val);
5266 itype = TREE_TYPE (TREE_TYPE (decl));
5268 call = build_call_expr_loc (loc, decl, 2, addr,
5269 build_int_cst (NULL, MEMMODEL_RELAXED));
5270 if (!useless_type_conversion_p (type, itype))
5271 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5272 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5274 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5275 gsi_remove (&gsi, true);
5277 store_bb = single_succ (load_bb);
5278 gsi = gsi_last_bb (store_bb);
5279 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5280 gsi_remove (&gsi, true);
5282 if (gimple_in_ssa_p (cfun))
5283 update_ssa (TODO_update_ssa_no_phi);
5285 return true;
5288 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5289 operation as a normal volatile store. */
5291 static bool
5292 expand_omp_atomic_store (basic_block load_bb, tree addr,
5293 tree loaded_val, tree stored_val, int index)
5295 enum built_in_function tmpbase;
5296 gimple_stmt_iterator gsi;
5297 basic_block store_bb = single_succ (load_bb);
5298 location_t loc;
5299 gimple stmt;
5300 tree decl, call, type, itype;
5301 enum machine_mode imode;
5302 bool exchange;
5304 gsi = gsi_last_bb (load_bb);
5305 stmt = gsi_stmt (gsi);
5306 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
5308 /* If the load value is needed, then this isn't a store but an exchange. */
5309 exchange = gimple_omp_atomic_need_value_p (stmt);
5311 gsi = gsi_last_bb (store_bb);
5312 stmt = gsi_stmt (gsi);
5313 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
5314 loc = gimple_location (stmt);
5316 /* ??? If the target does not implement atomic_store_optab[mode], and mode
5317 is smaller than word size, then expand_atomic_store assumes that the store
5318 is atomic. We could avoid the builtin entirely in this case. */
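  /* For example, an atomic write of an 8-byte value (index == 3) is
     expected to become __atomic_store_8 (addr, stored_val, MEMMODEL_RELAXED),
     or __atomic_exchange_8 if the old value is needed as well.  */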
5320 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
5321 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
5322 decl = builtin_decl_explicit (tmpbase);
5323 if (decl == NULL_TREE)
5324 return false;
5326 type = TREE_TYPE (stored_val);
5328 /* Dig out the type of the function's second argument. */
5329 itype = TREE_TYPE (decl);
5330 itype = TYPE_ARG_TYPES (itype);
5331 itype = TREE_CHAIN (itype);
5332 itype = TREE_VALUE (itype);
5333 imode = TYPE_MODE (itype);
5335 if (exchange && !can_atomic_exchange_p (imode, true))
5336 return false;
5338 if (!useless_type_conversion_p (itype, type))
5339 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
5340 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
5341 build_int_cst (NULL, MEMMODEL_RELAXED));
5342 if (exchange)
5344 if (!useless_type_conversion_p (type, itype))
5345 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5346 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5349 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5350 gsi_remove (&gsi, true);
5352 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
5353 gsi = gsi_last_bb (load_bb);
5354 gsi_remove (&gsi, true);
5356 if (gimple_in_ssa_p (cfun))
5357 update_ssa (TODO_update_ssa_no_phi);
5359 return true;
5362 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5363 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
5364 size of the data type, and thus usable to find the index of the builtin
5365 decl. Returns false if the expression is not of the proper form. */
5367 static bool
5368 expand_omp_atomic_fetch_op (basic_block load_bb,
5369 tree addr, tree loaded_val,
5370 tree stored_val, int index)
5372 enum built_in_function oldbase, newbase, tmpbase;
5373 tree decl, itype, call;
5374 tree lhs, rhs;
5375 basic_block store_bb = single_succ (load_bb);
5376 gimple_stmt_iterator gsi;
5377 gimple stmt;
5378 location_t loc;
5379 enum tree_code code;
5380 bool need_old, need_new;
5381 enum machine_mode imode;
5383 /* We expect to find the following sequences:
5385 load_bb:
5386 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
5388 store_bb:
5389 val = tmp OP something; (or: something OP tmp)
5390 GIMPLE_OMP_STORE (val)
5392 ???FIXME: Allow a more flexible sequence.
5393 Perhaps use data flow to pick the statements.
5395 */
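  /* For example, "#pragma omp atomic" applied to "x += val" is expected to
     arrive here as
       load_bb:  GIMPLE_OMP_ATOMIC_LOAD (tmp, &x)
       store_bb: tmp2 = tmp + val; GIMPLE_OMP_ATOMIC_STORE (tmp2)
     and, when neither the old nor the new value is needed, collapses into
     a single __atomic_fetch_add call with MEMMODEL_RELAXED.  */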
5397 gsi = gsi_after_labels (store_bb);
5398 stmt = gsi_stmt (gsi);
5399 loc = gimple_location (stmt);
5400 if (!is_gimple_assign (stmt))
5401 return false;
5402 gsi_next (&gsi);
5403 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
5404 return false;
5405 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
5406 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
5407 gcc_checking_assert (!need_old || !need_new);
5409 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
5410 return false;
5412 /* Check for one of the supported fetch-op operations. */
5413 code = gimple_assign_rhs_code (stmt);
5414 switch (code)
5416 case PLUS_EXPR:
5417 case POINTER_PLUS_EXPR:
5418 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
5419 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
5420 break;
5421 case MINUS_EXPR:
5422 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
5423 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
5424 break;
5425 case BIT_AND_EXPR:
5426 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
5427 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
5428 break;
5429 case BIT_IOR_EXPR:
5430 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
5431 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
5432 break;
5433 case BIT_XOR_EXPR:
5434 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
5435 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
5436 break;
5437 default:
5438 return false;
5441 /* Make sure the expression is of the proper form. */
5442 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5443 rhs = gimple_assign_rhs2 (stmt);
5444 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5445 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5446 rhs = gimple_assign_rhs1 (stmt);
5447 else
5448 return false;
5450 tmpbase = ((enum built_in_function)
5451 ((need_new ? newbase : oldbase) + index + 1));
5452 decl = builtin_decl_explicit (tmpbase);
5453 if (decl == NULL_TREE)
5454 return false;
5455 itype = TREE_TYPE (TREE_TYPE (decl));
5456 imode = TYPE_MODE (itype);
5458 /* We could test all of the various optabs involved, but the fact of the
5459 matter is that (with the exception of i486 vs i586 and xadd) all targets
5460 that support any atomic operation optab also implement compare-and-swap.
5461 Let optabs.c take care of expanding any compare-and-swap loop. */
5462 if (!can_compare_and_swap_p (imode, true))
5463 return false;
5465 gsi = gsi_last_bb (load_bb);
5466 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
5468 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
5469 It only requires that the operation happen atomically. Thus we can
5470 use the RELAXED memory model. */
5471 call = build_call_expr_loc (loc, decl, 3, addr,
5472 fold_convert_loc (loc, itype, rhs),
5473 build_int_cst (NULL, MEMMODEL_RELAXED));
5475 if (need_old || need_new)
5477 lhs = need_old ? loaded_val : stored_val;
5478 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
5479 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
5481 else
5482 call = fold_convert_loc (loc, void_type_node, call);
5483 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5484 gsi_remove (&gsi, true);
5486 gsi = gsi_last_bb (store_bb);
5487 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5488 gsi_remove (&gsi, true);
5489 gsi = gsi_last_bb (store_bb);
5490 gsi_remove (&gsi, true);
5492 if (gimple_in_ssa_p (cfun))
5493 update_ssa (TODO_update_ssa_no_phi);
5495 return true;
5498 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5500 oldval = *addr;
5501 repeat:
5502 newval = rhs; // with oldval replacing *addr in rhs
5503 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
5504 if (oldval != newval)
5505 goto repeat;
5507 INDEX is log2 of the size of the data type, and thus usable to find the
5508 index of the builtin decl. */
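/* For example, "#pragma omp atomic" applied to "x = x * 2.0f" on a float
   has no matching fetch-op builtin and so falls back to this loop; the
   float is view-converted to a same-sized integer so that
   __sync_val_compare_and_swap can do the comparison bitwise.  */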
5510 static bool
5511 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5512 tree addr, tree loaded_val, tree stored_val,
5513 int index)
5515 tree loadedi, storedi, initial, new_storedi, old_vali;
5516 tree type, itype, cmpxchg, iaddr;
5517 gimple_stmt_iterator si;
5518 basic_block loop_header = single_succ (load_bb);
5519 gimple phi, stmt;
5520 edge e;
5521 enum built_in_function fncode;
5523 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
5524 order to use the RELAXED memory model effectively. */
5525 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
5526 + index + 1);
5527 cmpxchg = builtin_decl_explicit (fncode);
5528 if (cmpxchg == NULL_TREE)
5529 return false;
5530 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5531 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5533 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
5534 return false;
5536 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5537 si = gsi_last_bb (load_bb);
5538 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5540 /* For floating-point values, we'll need to view-convert them to integers
5541 so that we can perform the atomic compare and swap. Simplify the
5542 following code by always setting up the "i"ntegral variables. */
5543 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5545 tree iaddr_val;
5547 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
5548 true), NULL);
5549 iaddr_val
5550 = force_gimple_operand_gsi (&si,
5551 fold_convert (TREE_TYPE (iaddr), addr),
5552 false, NULL_TREE, true, GSI_SAME_STMT);
5553 stmt = gimple_build_assign (iaddr, iaddr_val);
5554 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5555 loadedi = create_tmp_var (itype, NULL);
5556 if (gimple_in_ssa_p (cfun))
5557 loadedi = make_ssa_name (loadedi, NULL);
5559 else
5561 iaddr = addr;
5562 loadedi = loaded_val;
5565 initial
5566 = force_gimple_operand_gsi (&si,
5567 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5568 iaddr,
5569 build_int_cst (TREE_TYPE (iaddr), 0)),
5570 true, NULL_TREE, true, GSI_SAME_STMT);
5572 /* Move the value to the LOADEDI temporary. */
5573 if (gimple_in_ssa_p (cfun))
5575 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5576 phi = create_phi_node (loadedi, loop_header);
5577 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5578 initial);
5580 else
5581 gsi_insert_before (&si,
5582 gimple_build_assign (loadedi, initial),
5583 GSI_SAME_STMT);
5584 if (loadedi != loaded_val)
5586 gimple_stmt_iterator gsi2;
5587 tree x;
5589 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5590 gsi2 = gsi_start_bb (loop_header);
5591 if (gimple_in_ssa_p (cfun))
5593 gimple stmt;
5594 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5595 true, GSI_SAME_STMT);
5596 stmt = gimple_build_assign (loaded_val, x);
5597 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5599 else
5601 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5602 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5603 true, GSI_SAME_STMT);
5606 gsi_remove (&si, true);
5608 si = gsi_last_bb (store_bb);
5609 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5611 if (iaddr == addr)
5612 storedi = stored_val;
5613 else
5614 storedi =
5615 force_gimple_operand_gsi (&si,
5616 build1 (VIEW_CONVERT_EXPR, itype,
5617 stored_val), true, NULL_TREE, true,
5618 GSI_SAME_STMT);
5620 /* Build the compare&swap statement. */
5621 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5622 new_storedi = force_gimple_operand_gsi (&si,
5623 fold_convert (TREE_TYPE (loadedi),
5624 new_storedi),
5625 true, NULL_TREE,
5626 true, GSI_SAME_STMT);
5628 if (gimple_in_ssa_p (cfun))
5629 old_vali = loadedi;
5630 else
5632 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5633 stmt = gimple_build_assign (old_vali, loadedi);
5634 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5636 stmt = gimple_build_assign (loadedi, new_storedi);
5637 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5640 /* Note that we always perform the comparison as an integer, even for
5641 floating point. This allows the atomic operation to properly
5642 succeed even with NaNs and -0.0. */
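/* (Illustrative note: with a floating-point ==, a NaN would never compare
   equal to itself and -0.0 would compare equal to +0.0, so the retry loop
   below could either spin forever or exit without having stored anything;
   comparing the integer images of the old and new values avoids both
   problems.)  */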
5643 stmt = gimple_build_cond_empty
5644 (build2 (NE_EXPR, boolean_type_node,
5645 new_storedi, old_vali));
5646 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5648 /* Update cfg. */
5649 e = single_succ_edge (store_bb);
5650 e->flags &= ~EDGE_FALLTHRU;
5651 e->flags |= EDGE_FALSE_VALUE;
5653 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5655 /* Copy the new value to loadedi (we already did that before the condition
5656 if we are not in SSA). */
5657 if (gimple_in_ssa_p (cfun))
5659 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5660 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5663 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5664 gsi_remove (&si, true);
5666 struct loop *loop = alloc_loop ();
5667 loop->header = loop_header;
5668 loop->latch = store_bb;
5669 add_loop (loop, loop_header->loop_father);
5671 if (gimple_in_ssa_p (cfun))
5672 update_ssa (TODO_update_ssa_no_phi);
5674 return true;
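/* For reference, a rough sketch of the shape built above (illustrative
   pseudo-GIMPLE only; in SSA form the PHI is created explicitly):

       loop_header:
         loadedi = PHI <initial (from load_bb), new_storedi (from store_bb)>
         ... statements computing stored_val from loaded_val ...
       store_bb:
         new_storedi = __sync_val_compare_and_swap_N (iaddr, loadedi, storedi);
         if (new_storedi != loadedi)
           goto loop_header;   /* value changed under us, retry  */

   i.e. a classic compare-and-swap retry loop around the user's update.  */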
5677 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5679 GOMP_atomic_start ();
5680 *addr = rhs;
5681 GOMP_atomic_end ();
5683 The result is not globally atomic, but works so long as all parallel
5684 references are within #pragma omp atomic directives. According to
5685 responses received from omp@openmp.org, this appears to be within spec,
5686 which makes sense, since that's how several other compilers handle
5687 this situation as well.
5688 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5689 expanding. STORED_VAL is the operand of the matching
5690 GIMPLE_OMP_ATOMIC_STORE.
5692 We replace
5693 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5694 loaded_val = *addr;
5696 and replace
5697 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5698 *addr = stored_val;  */
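/* For illustration (a sketch of the effect, not the exact GIMPLE emitted),
   a user-level update such as

       #pragma omp atomic
       x += 1;

   ends up roughly as

       GOMP_atomic_start ();
       loaded_val = x;
       ... statements computing stored_val from loaded_val ...
       x = stored_val;
       GOMP_atomic_end ();

   where the update statements are the ones placed between the
   GIMPLE_OMP_ATOMIC_LOAD and the GIMPLE_OMP_ATOMIC_STORE.  */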
5701 static bool
5702 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5703 tree addr, tree loaded_val, tree stored_val)
5705 gimple_stmt_iterator si;
5706 gimple stmt;
5707 tree t;
5709 si = gsi_last_bb (load_bb);
5710 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5712 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
5713 t = build_call_expr (t, 0);
5714 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5716 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
5717 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5718 gsi_remove (&si, true);
5720 si = gsi_last_bb (store_bb);
5721 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5723 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5724 stored_val);
5725 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5727 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
5728 t = build_call_expr (t, 0);
5729 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5730 gsi_remove (&si, true);
5732 if (gimple_in_ssa_p (cfun))
5733 update_ssa (TODO_update_ssa_no_phi);
5734 return true;
5737 /* Expand a GIMPLE_OMP_ATOMIC statement. We first try to expand it
5738 using expand_omp_atomic_fetch_op. If that fails, we try
5739 expand_omp_atomic_pipeline, and if that fails too, the
5740 ultimate fallback is wrapping the operation in a mutex
5741 (expand_omp_atomic_mutex). REGION is the atomic region built
5742 by build_omp_regions_1(). */
5744 static void
5745 expand_omp_atomic (struct omp_region *region)
5747 basic_block load_bb = region->entry, store_bb = region->exit;
5748 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5749 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5750 tree addr = gimple_omp_atomic_load_rhs (load);
5751 tree stored_val = gimple_omp_atomic_store_val (store);
5752 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5753 HOST_WIDE_INT index;
5755 /* Make sure the type is one of the supported sizes. */
5756 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5757 index = exact_log2 (index);
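/* INDEX is now 0..4 for operand sizes of 1, 2, 4, 8 and 16 bytes,
   matching the _1/_2/_4/_8/_16 variants of the __sync builtins used by
   the expansion routines below.  */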
5758 if (index >= 0 && index <= 4)
5760 unsigned int align = TYPE_ALIGN_UNIT (type);
5762 /* __sync builtins require strict data alignment. */
5763 if (exact_log2 (align) >= index)
5765 /* Atomic load. */
5766 if (loaded_val == stored_val
5767 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5768 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5769 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5770 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
5771 return;
5773 /* Atomic store. */
5774 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5775 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5776 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5777 && store_bb == single_succ (load_bb)
5778 && first_stmt (store_bb) == store
5779 && expand_omp_atomic_store (load_bb, addr, loaded_val,
5780 stored_val, index))
5781 return;
5783 /* When possible, use specialized atomic update functions. */
5784 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5785 && store_bb == single_succ (load_bb)
5786 && expand_omp_atomic_fetch_op (load_bb, addr,
5787 loaded_val, stored_val, index))
5788 return;
5790 /* If we don't have specialized __sync builtins, try to implement it
5791 as a compare-and-swap loop. */
5792 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5793 loaded_val, stored_val, index))
5794 return;
5798 /* The ultimate fallback is wrapping the operation in a mutex. */
5799 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5803 /* Expand the parallel region tree rooted at REGION. Expansion
5804 proceeds in depth-first order. Innermost regions are expanded
5805 first. This way, parallel regions that require a new function to
5806 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5807 internal dependencies in their body. */
5809 static void
5810 expand_omp (struct omp_region *region)
5812 while (region)
5814 location_t saved_location;
5816 /* First, determine whether this is a combined parallel+workshare
5817 region. */
5818 if (region->type == GIMPLE_OMP_PARALLEL)
5819 determine_parallel_type (region);
5821 if (region->inner)
5822 expand_omp (region->inner);
5824 saved_location = input_location;
5825 if (gimple_has_location (last_stmt (region->entry)))
5826 input_location = gimple_location (last_stmt (region->entry));
5828 switch (region->type)
5830 case GIMPLE_OMP_PARALLEL:
5831 case GIMPLE_OMP_TASK:
5832 expand_omp_taskreg (region);
5833 break;
5835 case GIMPLE_OMP_FOR:
5836 expand_omp_for (region);
5837 break;
5839 case GIMPLE_OMP_SECTIONS:
5840 expand_omp_sections (region);
5841 break;
5843 case GIMPLE_OMP_SECTION:
5844 /* Individual omp sections are handled together with their
5845 parent GIMPLE_OMP_SECTIONS region. */
5846 break;
5848 case GIMPLE_OMP_SINGLE:
5849 expand_omp_single (region);
5850 break;
5852 case GIMPLE_OMP_MASTER:
5853 case GIMPLE_OMP_ORDERED:
5854 case GIMPLE_OMP_CRITICAL:
5855 expand_omp_synch (region);
5856 break;
5858 case GIMPLE_OMP_ATOMIC_LOAD:
5859 expand_omp_atomic (region);
5860 break;
5862 default:
5863 gcc_unreachable ();
5866 input_location = saved_location;
5867 region = region->next;
5872 /* Helper for build_omp_regions. Scan the dominator tree starting at
5873 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5874 true, the function ends once a single tree is built (otherwise, the
5875 whole forest of OMP constructs may be built). */
5877 static void
5878 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5879 bool single_tree)
5881 gimple_stmt_iterator gsi;
5882 gimple stmt;
5883 basic_block son;
5885 gsi = gsi_last_bb (bb);
5886 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5888 struct omp_region *region;
5889 enum gimple_code code;
5891 stmt = gsi_stmt (gsi);
5892 code = gimple_code (stmt);
5893 if (code == GIMPLE_OMP_RETURN)
5895 /* STMT is the return point out of region PARENT. Mark it
5896 as the exit point and make PARENT the immediately
5897 enclosing region. */
5898 gcc_assert (parent);
5899 region = parent;
5900 region->exit = bb;
5901 parent = parent->outer;
5903 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5905 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5906 GIMPLE_OMP_RETURN, but matches with
5907 GIMPLE_OMP_ATOMIC_LOAD. */
5908 gcc_assert (parent);
5909 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5910 region = parent;
5911 region->exit = bb;
5912 parent = parent->outer;
5915 else if (code == GIMPLE_OMP_CONTINUE)
5917 gcc_assert (parent);
5918 parent->cont = bb;
5920 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5922 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5923 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5926 else
5928 /* Otherwise, this directive becomes the parent for a new
5929 region. */
5930 region = new_omp_region (bb, code, parent);
5931 parent = region;
5935 if (single_tree && !parent)
5936 return;
5938 for (son = first_dom_son (CDI_DOMINATORS, bb);
5939 son;
5940 son = next_dom_son (CDI_DOMINATORS, son))
5941 build_omp_regions_1 (son, parent, single_tree);
5944 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5945 root_omp_region. */
5947 static void
5948 build_omp_regions_root (basic_block root)
5950 gcc_assert (root_omp_region == NULL);
5951 build_omp_regions_1 (root, NULL, true);
5952 gcc_assert (root_omp_region != NULL);
5955 /* Expand the OMP construct (and its subconstructs) starting in HEAD. */
5957 void
5958 omp_expand_local (basic_block head)
5960 build_omp_regions_root (head);
5961 if (dump_file && (dump_flags & TDF_DETAILS))
5963 fprintf (dump_file, "\nOMP region tree\n\n");
5964 dump_omp_region (dump_file, root_omp_region, 0);
5965 fprintf (dump_file, "\n");
5968 remove_exit_barriers (root_omp_region);
5969 expand_omp (root_omp_region);
5971 free_omp_regions ();
5974 /* Scan the CFG and build a tree of OMP regions, storing the result
5975 in root_omp_region. */
5977 static void
5978 build_omp_regions (void)
5980 gcc_assert (root_omp_region == NULL);
5981 calculate_dominance_info (CDI_DOMINATORS);
5982 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5985 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5987 static unsigned int
5988 execute_expand_omp (void)
5990 build_omp_regions ();
5992 if (!root_omp_region)
5993 return 0;
5995 if (dump_file)
5997 fprintf (dump_file, "\nOMP region tree\n\n");
5998 dump_omp_region (dump_file, root_omp_region, 0);
5999 fprintf (dump_file, "\n");
6002 remove_exit_barriers (root_omp_region);
6004 expand_omp (root_omp_region);
6006 cleanup_tree_cfg ();
6008 free_omp_regions ();
6010 return 0;
6013 /* OMP expansion -- the default pass, run before creation of SSA form. */
6015 static bool
6016 gate_expand_omp (void)
6018 return (flag_openmp != 0 && !seen_error ());
6021 namespace {
6023 const pass_data pass_data_expand_omp =
6025 GIMPLE_PASS, /* type */
6026 "ompexp", /* name */
6027 OPTGROUP_NONE, /* optinfo_flags */
6028 true, /* has_gate */
6029 true, /* has_execute */
6030 TV_NONE, /* tv_id */
6031 PROP_gimple_any, /* properties_required */
6032 0, /* properties_provided */
6033 0, /* properties_destroyed */
6034 0, /* todo_flags_start */
6035 0, /* todo_flags_finish */
6038 class pass_expand_omp : public gimple_opt_pass
6040 public:
6041 pass_expand_omp(gcc::context *ctxt)
6042 : gimple_opt_pass(pass_data_expand_omp, ctxt)
6045 /* opt_pass methods: */
6046 bool gate () { return gate_expand_omp (); }
6047 unsigned int execute () { return execute_expand_omp (); }
6049 }; // class pass_expand_omp
6051 } // anon namespace
6053 gimple_opt_pass *
6054 make_pass_expand_omp (gcc::context *ctxt)
6056 return new pass_expand_omp (ctxt);
6059 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
6061 /* Lower the OpenMP sections directive in the current statement in GSI_P.
6062 CTX is the enclosing OMP context for the current statement. */
6064 static void
6065 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6067 tree block, control;
6068 gimple_stmt_iterator tgsi;
6069 gimple stmt, new_stmt, bind, t;
6070 gimple_seq ilist, dlist, olist, new_body;
6071 struct gimplify_ctx gctx;
6073 stmt = gsi_stmt (*gsi_p);
6075 push_gimplify_context (&gctx);
6077 dlist = NULL;
6078 ilist = NULL;
6079 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
6080 &ilist, &dlist, ctx);
6082 new_body = gimple_omp_body (stmt);
6083 gimple_omp_set_body (stmt, NULL);
6084 tgsi = gsi_start (new_body);
6085 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
6087 omp_context *sctx;
6088 gimple sec_start;
6090 sec_start = gsi_stmt (tgsi);
6091 sctx = maybe_lookup_ctx (sec_start);
6092 gcc_assert (sctx);
6094 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
6095 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
6096 GSI_CONTINUE_LINKING);
6097 gimple_omp_set_body (sec_start, NULL);
6099 if (gsi_one_before_end_p (tgsi))
6101 gimple_seq l = NULL;
6102 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
6103 &l, ctx);
6104 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
6105 gimple_omp_section_set_last (sec_start);
6108 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
6109 GSI_CONTINUE_LINKING);
6112 block = make_node (BLOCK);
6113 bind = gimple_build_bind (NULL, new_body, block);
6115 olist = NULL;
6116 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
6118 block = make_node (BLOCK);
6119 new_stmt = gimple_build_bind (NULL, NULL, block);
6120 gsi_replace (gsi_p, new_stmt, true);
6122 pop_gimplify_context (new_stmt);
6123 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6124 BLOCK_VARS (block) = gimple_bind_vars (bind);
6125 if (BLOCK_VARS (block))
6126 TREE_USED (block) = 1;
6128 new_body = NULL;
6129 gimple_seq_add_seq (&new_body, ilist);
6130 gimple_seq_add_stmt (&new_body, stmt);
6131 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
6132 gimple_seq_add_stmt (&new_body, bind);
6134 control = create_tmp_var (unsigned_type_node, ".section");
6135 t = gimple_build_omp_continue (control, control);
6136 gimple_omp_sections_set_control (stmt, control);
6137 gimple_seq_add_stmt (&new_body, t);
6139 gimple_seq_add_seq (&new_body, olist);
6140 gimple_seq_add_seq (&new_body, dlist);
6142 new_body = maybe_catch_exception (new_body);
6144 t = gimple_build_omp_return
6145 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
6146 OMP_CLAUSE_NOWAIT));
6147 gimple_seq_add_stmt (&new_body, t);
6149 gimple_bind_set_body (new_stmt, new_body);
6153 /* A subroutine of lower_omp_single. Expand the simple form of
6154 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
6156 if (GOMP_single_start ())
6157 BODY;
6158 [ GOMP_barrier (); ] -> unless 'nowait' is present.
6160 FIXME. It may be better to delay expanding the logic of this until
6161 pass_expand_omp. The expanded logic may make the job more difficult
6162 for a synchronization analysis pass. */
6164 static void
6165 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
6167 location_t loc = gimple_location (single_stmt);
6168 tree tlabel = create_artificial_label (loc);
6169 tree flabel = create_artificial_label (loc);
6170 gimple call, cond;
6171 tree lhs, decl;
6173 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
6174 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
6175 call = gimple_build_call (decl, 0);
6176 gimple_call_set_lhs (call, lhs);
6177 gimple_seq_add_stmt (pre_p, call);
6179 cond = gimple_build_cond (EQ_EXPR, lhs,
6180 fold_convert_loc (loc, TREE_TYPE (lhs),
6181 boolean_true_node),
6182 tlabel, flabel);
6183 gimple_seq_add_stmt (pre_p, cond);
6184 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
6185 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
6186 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
6190 /* A subroutine of lower_omp_single. Expand the simple form of
6191 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
6193 #pragma omp single copyprivate (a, b, c)
6195 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
6198 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
6200 BODY;
6201 copyout.a = a;
6202 copyout.b = b;
6203 copyout.c = c;
6204 GOMP_single_copy_end (&copyout);
6206 else
6208 a = copyout_p->a;
6209 b = copyout_p->b;
6210 c = copyout_p->c;
6212 GOMP_barrier ();
6215 FIXME. It may be better to delay expanding the logic of this until
6216 pass_expand_omp. The expanded logic may make the job more difficult
6217 for a synchronization analysis pass. */
6219 static void
6220 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
6222 tree ptr_type, t, l0, l1, l2, bfn_decl;
6223 gimple_seq copyin_seq;
6224 location_t loc = gimple_location (single_stmt);
6226 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
6228 ptr_type = build_pointer_type (ctx->record_type);
6229 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
6231 l0 = create_artificial_label (loc);
6232 l1 = create_artificial_label (loc);
6233 l2 = create_artificial_label (loc);
6235 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
6236 t = build_call_expr_loc (loc, bfn_decl, 0);
6237 t = fold_convert_loc (loc, ptr_type, t);
6238 gimplify_assign (ctx->receiver_decl, t, pre_p);
6240 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
6241 build_int_cst (ptr_type, 0));
6242 t = build3 (COND_EXPR, void_type_node, t,
6243 build_and_jump (&l0), build_and_jump (&l1));
6244 gimplify_and_add (t, pre_p);
6246 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
6248 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
6250 copyin_seq = NULL;
6251 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
6252 &copyin_seq, ctx);
6254 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6255 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
6256 t = build_call_expr_loc (loc, bfn_decl, 1, t);
6257 gimplify_and_add (t, pre_p);
6259 t = build_and_jump (&l2);
6260 gimplify_and_add (t, pre_p);
6262 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
6264 gimple_seq_add_seq (pre_p, copyin_seq);
6266 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
6270 /* Expand code for an OpenMP single directive. */
6272 static void
6273 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6275 tree block;
6276 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
6277 gimple_seq bind_body, dlist;
6278 struct gimplify_ctx gctx;
6280 push_gimplify_context (&gctx);
6282 block = make_node (BLOCK);
6283 bind = gimple_build_bind (NULL, NULL, block);
6284 gsi_replace (gsi_p, bind, true);
6285 bind_body = NULL;
6286 dlist = NULL;
6287 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
6288 &bind_body, &dlist, ctx);
6289 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
6291 gimple_seq_add_stmt (&bind_body, single_stmt);
6293 if (ctx->record_type)
6294 lower_omp_single_copy (single_stmt, &bind_body, ctx);
6295 else
6296 lower_omp_single_simple (single_stmt, &bind_body);
6298 gimple_omp_set_body (single_stmt, NULL);
6300 gimple_seq_add_seq (&bind_body, dlist);
6302 bind_body = maybe_catch_exception (bind_body);
6304 t = gimple_build_omp_return
6305 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
6306 OMP_CLAUSE_NOWAIT));
6307 gimple_seq_add_stmt (&bind_body, t);
6308 gimple_bind_set_body (bind, bind_body);
6310 pop_gimplify_context (bind);
6312 gimple_bind_append_vars (bind, ctx->block_vars);
6313 BLOCK_VARS (block) = ctx->block_vars;
6314 if (BLOCK_VARS (block))
6315 TREE_USED (block) = 1;
6319 /* Expand code for an OpenMP master directive. */
6321 static void
6322 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6324 tree block, lab = NULL, x, bfn_decl;
6325 gimple stmt = gsi_stmt (*gsi_p), bind;
6326 location_t loc = gimple_location (stmt);
6327 gimple_seq tseq;
6328 struct gimplify_ctx gctx;
6330 push_gimplify_context (&gctx);
6332 block = make_node (BLOCK);
6333 bind = gimple_build_bind (NULL, NULL, block);
6334 gsi_replace (gsi_p, bind, true);
6335 gimple_bind_add_stmt (bind, stmt);
6337 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
6338 x = build_call_expr_loc (loc, bfn_decl, 0);
6339 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
6340 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
6341 tseq = NULL;
6342 gimplify_and_add (x, &tseq);
6343 gimple_bind_add_seq (bind, tseq);
6345 lower_omp (gimple_omp_body_ptr (stmt), ctx);
6346 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6347 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6348 gimple_omp_set_body (stmt, NULL);
6350 gimple_bind_add_stmt (bind, gimple_build_label (lab));
6352 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6354 pop_gimplify_context (bind);
6356 gimple_bind_append_vars (bind, ctx->block_vars);
6357 BLOCK_VARS (block) = ctx->block_vars;
6361 /* Expand code for an OpenMP ordered directive. */
6363 static void
6364 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6366 tree block;
6367 gimple stmt = gsi_stmt (*gsi_p), bind, x;
6368 struct gimplify_ctx gctx;
6370 push_gimplify_context (&gctx);
6372 block = make_node (BLOCK);
6373 bind = gimple_build_bind (NULL, NULL, block);
6374 gsi_replace (gsi_p, bind, true);
6375 gimple_bind_add_stmt (bind, stmt);
6377 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START), 0);
6379 gimple_bind_add_stmt (bind, x);
6381 lower_omp (gimple_omp_body_ptr (stmt), ctx);
6382 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6383 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6384 gimple_omp_set_body (stmt, NULL);
6386 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
6387 gimple_bind_add_stmt (bind, x);
6389 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6391 pop_gimplify_context (bind);
6393 gimple_bind_append_vars (bind, ctx->block_vars);
6394 BLOCK_VARS (block) = gimple_bind_vars (bind);
6398 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
6399 substitution of a couple of function calls. But in the NAMED case,
6400 it requires that languages coordinate on a symbol name. It is therefore
6401 best put here in common code. */
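/* For example (illustrative only), a directive such as

       #pragma omp critical (foo)

   is bracketed with

       GOMP_critical_name_start (&.gomp_critical_user_foo);
       ... body ...
       GOMP_critical_name_end (&.gomp_critical_user_foo);

   while an unnamed critical uses GOMP_critical_start () and
   GOMP_critical_end () instead.  */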
6403 static GTY((param1_is (tree), param2_is (tree)))
6404 splay_tree critical_name_mutexes;
6406 static void
6407 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6409 tree block;
6410 tree name, lock, unlock;
6411 gimple stmt = gsi_stmt (*gsi_p), bind;
6412 location_t loc = gimple_location (stmt);
6413 gimple_seq tbody;
6414 struct gimplify_ctx gctx;
6416 name = gimple_omp_critical_name (stmt);
6417 if (name)
6419 tree decl;
6420 splay_tree_node n;
6422 if (!critical_name_mutexes)
6423 critical_name_mutexes
6424 = splay_tree_new_ggc (splay_tree_compare_pointers,
6425 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
6426 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
6428 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
6429 if (n == NULL)
6431 char *new_str;
6433 decl = create_tmp_var_raw (ptr_type_node, NULL);
6435 new_str = ACONCAT ((".gomp_critical_user_",
6436 IDENTIFIER_POINTER (name), NULL));
6437 DECL_NAME (decl) = get_identifier (new_str);
6438 TREE_PUBLIC (decl) = 1;
6439 TREE_STATIC (decl) = 1;
6440 DECL_COMMON (decl) = 1;
6441 DECL_ARTIFICIAL (decl) = 1;
6442 DECL_IGNORED_P (decl) = 1;
6443 varpool_finalize_decl (decl);
6445 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
6446 (splay_tree_value) decl);
6448 else
6449 decl = (tree) n->value;
6451 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
6452 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
6454 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
6455 unlock = build_call_expr_loc (loc, unlock, 1,
6456 build_fold_addr_expr_loc (loc, decl));
6458 else
6460 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
6461 lock = build_call_expr_loc (loc, lock, 0);
6463 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
6464 unlock = build_call_expr_loc (loc, unlock, 0);
6467 push_gimplify_context (&gctx);
6469 block = make_node (BLOCK);
6470 bind = gimple_build_bind (NULL, NULL, block);
6471 gsi_replace (gsi_p, bind, true);
6472 gimple_bind_add_stmt (bind, stmt);
6474 tbody = gimple_bind_body (bind);
6475 gimplify_and_add (lock, &tbody);
6476 gimple_bind_set_body (bind, tbody);
6478 lower_omp (gimple_omp_body_ptr (stmt), ctx);
6479 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6480 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6481 gimple_omp_set_body (stmt, NULL);
6483 tbody = gimple_bind_body (bind);
6484 gimplify_and_add (unlock, &tbody);
6485 gimple_bind_set_body (bind, tbody);
6487 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6489 pop_gimplify_context (bind);
6490 gimple_bind_append_vars (bind, ctx->block_vars);
6491 BLOCK_VARS (block) = gimple_bind_vars (bind);
6495 /* A subroutine of lower_omp_for. Generate code to emit the predicate
6496 for a lastprivate clause. Given a loop control predicate of (V
6497 cond N2), we gate the clause on (!(V cond N2)). The lowered form
6498 is appended to *DLIST, iterator initialization is appended to
6499 *BODY_P. */
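/* For instance (a sketch of the common case), for a loop with control
   predicate (V < N2) and step 1, the lastprivate assignments emitted
   below end up guarded by (V == N2); for other steps they are guarded
   by (V >= N2), i.e. the negation of the loop predicate.  */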
6501 static void
6502 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
6503 gimple_seq *dlist, struct omp_context *ctx)
6505 tree clauses, cond, vinit;
6506 enum tree_code cond_code;
6507 gimple_seq stmts;
6509 cond_code = fd->loop.cond_code;
6510 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6512 /* When possible, use a strict equality expression. This can let VRP
6513 type optimizations deduce the value and remove a copy. */
6514 if (host_integerp (fd->loop.step, 0))
6516 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
6517 if (step == 1 || step == -1)
6518 cond_code = EQ_EXPR;
6521 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
6523 clauses = gimple_omp_for_clauses (fd->for_stmt);
6524 stmts = NULL;
6525 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
6526 if (!gimple_seq_empty_p (stmts))
6528 gimple_seq_add_seq (&stmts, *dlist);
6529 *dlist = stmts;
6531 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
6532 vinit = fd->loop.n1;
6533 if (cond_code == EQ_EXPR
6534 && host_integerp (fd->loop.n2, 0)
6535 && ! integer_zerop (fd->loop.n2))
6536 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
6538 /* Initialize the iterator variable, so that threads that don't execute
6539 any iterations don't execute the lastprivate clauses by accident. */
6540 gimplify_assign (fd->loop.v, vinit, body_p);
6545 /* Lower code for an OpenMP loop directive. */
6547 static void
6548 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6550 tree *rhs_p, block;
6551 struct omp_for_data fd;
6552 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6553 gimple_seq omp_for_body, body, dlist;
6554 size_t i;
6555 struct gimplify_ctx gctx;
6557 push_gimplify_context (&gctx);
6559 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
6560 lower_omp (gimple_omp_body_ptr (stmt), ctx);
6562 block = make_node (BLOCK);
6563 new_stmt = gimple_build_bind (NULL, NULL, block);
6564 /* Replace it at gsi right away, so that 'stmt' is no longer a member
6565 of a sequence, as we're going to add it to a different
6566 one below. */
6567 gsi_replace (gsi_p, new_stmt, true);
6569 /* Move the declarations of temporaries in the loop body before we make
6570 the body go away. */
6571 omp_for_body = gimple_omp_body (stmt);
6572 if (!gimple_seq_empty_p (omp_for_body)
6573 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6575 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6576 gimple_bind_append_vars (new_stmt, vars);
6579 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
6580 dlist = NULL;
6581 body = NULL;
6582 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6583 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6585 /* Lower the header expressions. At this point, we can assume that
6586 the header is of the form:
6588 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6590 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6591 using the .omp_data_s mapping, if needed. */
6592 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6594 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6595 if (!is_gimple_min_invariant (*rhs_p))
6596 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6598 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6599 if (!is_gimple_min_invariant (*rhs_p))
6600 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6602 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6603 if (!is_gimple_min_invariant (*rhs_p))
6604 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6607 /* Once lowered, extract the bounds and clauses. */
6608 extract_omp_for_data (stmt, &fd, NULL);
6610 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6612 gimple_seq_add_stmt (&body, stmt);
6613 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6615 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6616 fd.loop.v));
6618 /* After the loop, add exit clauses. */
6619 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6620 gimple_seq_add_seq (&body, dlist);
6622 body = maybe_catch_exception (body);
6624 /* Region exit marker goes at the end of the loop body. */
6625 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6627 pop_gimplify_context (new_stmt);
6629 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6630 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6631 if (BLOCK_VARS (block))
6632 TREE_USED (block) = 1;
6634 gimple_bind_set_body (new_stmt, body);
6635 gimple_omp_set_body (stmt, NULL);
6636 gimple_omp_for_set_pre_body (stmt, NULL);
6639 /* Callback for walk_stmts. Check if the current statement only contains
6640 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
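/* WI->INFO points to an int flag: it becomes 1 when exactly one such
   worksharing construct has been seen and -1 as soon as anything else
   appears; lower_omp_taskreg marks the enclosing parallel as combined
   only when the flag ends up being 1.  */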
6642 static tree
6643 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6644 bool *handled_ops_p,
6645 struct walk_stmt_info *wi)
6647 int *info = (int *) wi->info;
6648 gimple stmt = gsi_stmt (*gsi_p);
6650 *handled_ops_p = true;
6651 switch (gimple_code (stmt))
6653 WALK_SUBSTMTS;
6655 case GIMPLE_OMP_FOR:
6656 case GIMPLE_OMP_SECTIONS:
6657 *info = *info == 0 ? 1 : -1;
6658 break;
6659 default:
6660 *info = -1;
6661 break;
6663 return NULL;
6666 struct omp_taskcopy_context
6668 /* This field must be at the beginning, as we do "inheritance": Some
6669 callback functions for tree-inline.c (e.g., omp_copy_decl)
6670 receive a copy_body_data pointer that is up-casted to an
6671 omp_context pointer. */
6672 copy_body_data cb;
6673 omp_context *ctx;
6676 static tree
6677 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6679 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6681 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6682 return create_tmp_var (TREE_TYPE (var), NULL);
6684 return var;
6687 static tree
6688 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6690 tree name, new_fields = NULL, type, f;
6692 type = lang_hooks.types.make_type (RECORD_TYPE);
6693 name = DECL_NAME (TYPE_NAME (orig_type));
6694 name = build_decl (gimple_location (tcctx->ctx->stmt),
6695 TYPE_DECL, name, type);
6696 TYPE_NAME (type) = name;
6698 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6700 tree new_f = copy_node (f);
6701 DECL_CONTEXT (new_f) = type;
6702 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6703 TREE_CHAIN (new_f) = new_fields;
6704 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6705 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6706 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6707 &tcctx->cb, NULL);
6708 new_fields = new_f;
6709 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6711 TYPE_FIELDS (type) = nreverse (new_fields);
6712 layout_type (type);
6713 return type;
6716 /* Create task copyfn. */
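/* The copyfn receives two pointer arguments: ARG, the record passed on to
   the task body, and SARG, the sender record filled in by the encountering
   thread.  It copies the shared-variable pointers, copy-constructs the
   firstprivate variables, and handles variable-sized firstprivate
   variables in a final pass.  */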
6718 static void
6719 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6721 struct function *child_cfun;
6722 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6723 tree record_type, srecord_type, bind, list;
6724 bool record_needs_remap = false, srecord_needs_remap = false;
6725 splay_tree_node n;
6726 struct omp_taskcopy_context tcctx;
6727 struct gimplify_ctx gctx;
6728 location_t loc = gimple_location (task_stmt);
6730 child_fn = gimple_omp_task_copy_fn (task_stmt);
6731 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6732 gcc_assert (child_cfun->cfg == NULL);
6733 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6735 /* Reset DECL_CONTEXT on function arguments. */
6736 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
6737 DECL_CONTEXT (t) = child_fn;
6739 /* Populate the function. */
6740 push_gimplify_context (&gctx);
6741 push_cfun (child_cfun);
6743 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6744 TREE_SIDE_EFFECTS (bind) = 1;
6745 list = NULL;
6746 DECL_SAVED_TREE (child_fn) = bind;
6747 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6749 /* Remap src and dst argument types if needed. */
6750 record_type = ctx->record_type;
6751 srecord_type = ctx->srecord_type;
6752 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
6753 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6755 record_needs_remap = true;
6756 break;
6758 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
6759 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6761 srecord_needs_remap = true;
6762 break;
6765 if (record_needs_remap || srecord_needs_remap)
6767 memset (&tcctx, '\0', sizeof (tcctx));
6768 tcctx.cb.src_fn = ctx->cb.src_fn;
6769 tcctx.cb.dst_fn = child_fn;
6770 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
6771 gcc_checking_assert (tcctx.cb.src_node);
6772 tcctx.cb.dst_node = tcctx.cb.src_node;
6773 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6774 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6775 tcctx.cb.eh_lp_nr = 0;
6776 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6777 tcctx.cb.decl_map = pointer_map_create ();
6778 tcctx.ctx = ctx;
6780 if (record_needs_remap)
6781 record_type = task_copyfn_remap_type (&tcctx, record_type);
6782 if (srecord_needs_remap)
6783 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6785 else
6786 tcctx.cb.decl_map = NULL;
6788 arg = DECL_ARGUMENTS (child_fn);
6789 TREE_TYPE (arg) = build_pointer_type (record_type);
6790 sarg = DECL_CHAIN (arg);
6791 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6793 /* First pass: initialize temporaries used in record_type and srecord_type
6794 sizes and field offsets. */
6795 if (tcctx.cb.decl_map)
6796 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6797 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6799 tree *p;
6801 decl = OMP_CLAUSE_DECL (c);
6802 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6803 if (p == NULL)
6804 continue;
6805 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6806 sf = (tree) n->value;
6807 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6808 src = build_simple_mem_ref_loc (loc, sarg);
6809 src = omp_build_component_ref (src, sf);
6810 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6811 append_to_statement_list (t, &list);
6814 /* Second pass: copy shared var pointers and copy construct non-VLA
6815 firstprivate vars. */
6816 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6817 switch (OMP_CLAUSE_CODE (c))
6819 case OMP_CLAUSE_SHARED:
6820 decl = OMP_CLAUSE_DECL (c);
6821 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6822 if (n == NULL)
6823 break;
6824 f = (tree) n->value;
6825 if (tcctx.cb.decl_map)
6826 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6827 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6828 sf = (tree) n->value;
6829 if (tcctx.cb.decl_map)
6830 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6831 src = build_simple_mem_ref_loc (loc, sarg);
6832 src = omp_build_component_ref (src, sf);
6833 dst = build_simple_mem_ref_loc (loc, arg);
6834 dst = omp_build_component_ref (dst, f);
6835 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6836 append_to_statement_list (t, &list);
6837 break;
6838 case OMP_CLAUSE_FIRSTPRIVATE:
6839 decl = OMP_CLAUSE_DECL (c);
6840 if (is_variable_sized (decl))
6841 break;
6842 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6843 if (n == NULL)
6844 break;
6845 f = (tree) n->value;
6846 if (tcctx.cb.decl_map)
6847 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6848 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6849 if (n != NULL)
6851 sf = (tree) n->value;
6852 if (tcctx.cb.decl_map)
6853 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6854 src = build_simple_mem_ref_loc (loc, sarg);
6855 src = omp_build_component_ref (src, sf);
6856 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6857 src = build_simple_mem_ref_loc (loc, src);
6859 else
6860 src = decl;
6861 dst = build_simple_mem_ref_loc (loc, arg);
6862 dst = omp_build_component_ref (dst, f);
6863 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6864 append_to_statement_list (t, &list);
6865 break;
6866 case OMP_CLAUSE_PRIVATE:
6867 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6868 break;
6869 decl = OMP_CLAUSE_DECL (c);
6870 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6871 f = (tree) n->value;
6872 if (tcctx.cb.decl_map)
6873 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6874 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6875 if (n != NULL)
6877 sf = (tree) n->value;
6878 if (tcctx.cb.decl_map)
6879 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6880 src = build_simple_mem_ref_loc (loc, sarg);
6881 src = omp_build_component_ref (src, sf);
6882 if (use_pointer_for_field (decl, NULL))
6883 src = build_simple_mem_ref_loc (loc, src);
6885 else
6886 src = decl;
6887 dst = build_simple_mem_ref_loc (loc, arg);
6888 dst = omp_build_component_ref (dst, f);
6889 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6890 append_to_statement_list (t, &list);
6891 break;
6892 default:
6893 break;
6896 /* Last pass: handle VLA firstprivates. */
6897 if (tcctx.cb.decl_map)
6898 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6899 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6901 tree ind, ptr, df;
6903 decl = OMP_CLAUSE_DECL (c);
6904 if (!is_variable_sized (decl))
6905 continue;
6906 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6907 if (n == NULL)
6908 continue;
6909 f = (tree) n->value;
6910 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6911 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6912 ind = DECL_VALUE_EXPR (decl);
6913 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6914 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6915 n = splay_tree_lookup (ctx->sfield_map,
6916 (splay_tree_key) TREE_OPERAND (ind, 0));
6917 sf = (tree) n->value;
6918 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6919 src = build_simple_mem_ref_loc (loc, sarg);
6920 src = omp_build_component_ref (src, sf);
6921 src = build_simple_mem_ref_loc (loc, src);
6922 dst = build_simple_mem_ref_loc (loc, arg);
6923 dst = omp_build_component_ref (dst, f);
6924 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6925 append_to_statement_list (t, &list);
6926 n = splay_tree_lookup (ctx->field_map,
6927 (splay_tree_key) TREE_OPERAND (ind, 0));
6928 df = (tree) n->value;
6929 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6930 ptr = build_simple_mem_ref_loc (loc, arg);
6931 ptr = omp_build_component_ref (ptr, df);
6932 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6933 build_fold_addr_expr_loc (loc, dst));
6934 append_to_statement_list (t, &list);
6937 t = build1 (RETURN_EXPR, void_type_node, NULL);
6938 append_to_statement_list (t, &list);
6940 if (tcctx.cb.decl_map)
6941 pointer_map_destroy (tcctx.cb.decl_map);
6942 pop_gimplify_context (NULL);
6943 BIND_EXPR_BODY (bind) = list;
6944 pop_cfun ();
6947 /* Lower the OpenMP parallel or task directive in the current statement
6948 in GSI_P. CTX holds context information for the directive. */
6950 static void
6951 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6953 tree clauses;
6954 tree child_fn, t;
6955 gimple stmt = gsi_stmt (*gsi_p);
6956 gimple par_bind, bind;
6957 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6958 struct gimplify_ctx gctx;
6959 location_t loc = gimple_location (stmt);
6961 clauses = gimple_omp_taskreg_clauses (stmt);
6962 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6963 par_body = gimple_bind_body (par_bind);
6964 child_fn = ctx->cb.dst_fn;
6965 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6966 && !gimple_omp_parallel_combined_p (stmt))
6968 struct walk_stmt_info wi;
6969 int ws_num = 0;
6971 memset (&wi, 0, sizeof (wi));
6972 wi.info = &ws_num;
6973 wi.val_only = true;
6974 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6975 if (ws_num == 1)
6976 gimple_omp_parallel_set_combined_p (stmt, true);
6978 if (ctx->srecord_type)
6979 create_task_copyfn (stmt, ctx);
6981 push_gimplify_context (&gctx);
6983 par_olist = NULL;
6984 par_ilist = NULL;
6985 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6986 lower_omp (&par_body, ctx);
6987 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6988 lower_reduction_clauses (clauses, &par_olist, ctx);
6990 /* Declare all the variables created by mapping and the variables
6991 declared in the scope of the parallel body. */
6992 record_vars_into (ctx->block_vars, child_fn);
6993 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6995 if (ctx->record_type)
6997 ctx->sender_decl
6998 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6999 : ctx->record_type, ".omp_data_o");
7000 DECL_NAMELESS (ctx->sender_decl) = 1;
7001 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
7002 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
7005 olist = NULL;
7006 ilist = NULL;
7007 lower_send_clauses (clauses, &ilist, &olist, ctx);
7008 lower_send_shared_vars (&ilist, &olist, ctx);
7010 /* Once all the expansions are done, sequence all the different
7011 fragments inside gimple_omp_body. */
7013 new_body = NULL;
7015 if (ctx->record_type)
7017 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
7018 /* fixup_child_record_type might have changed receiver_decl's type. */
7019 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
7020 gimple_seq_add_stmt (&new_body,
7021 gimple_build_assign (ctx->receiver_decl, t));
7024 gimple_seq_add_seq (&new_body, par_ilist);
7025 gimple_seq_add_seq (&new_body, par_body);
7026 gimple_seq_add_seq (&new_body, par_olist);
7027 new_body = maybe_catch_exception (new_body);
7028 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
7029 gimple_omp_set_body (stmt, new_body);
7031 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
7032 gsi_replace (gsi_p, bind, true);
7033 gimple_bind_add_seq (bind, ilist);
7034 gimple_bind_add_stmt (bind, stmt);
7035 gimple_bind_add_seq (bind, olist);
7037 pop_gimplify_context (NULL);
7040 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
7041 regimplified. If DATA is non-NULL, lower_omp_1 is outside
7042 of OpenMP context, but with task_shared_vars set. */
7044 static tree
7045 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
7046 void *data)
7048 tree t = *tp;
7050 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
7051 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
7052 return t;
7054 if (task_shared_vars
7055 && DECL_P (t)
7056 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
7057 return t;
7059 /* If a global variable has been privatized, TREE_CONSTANT on
7060 ADDR_EXPR might be wrong. */
7061 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
7062 recompute_tree_invariant_for_addr_expr (t);
7064 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
7065 return NULL_TREE;
7068 static void
7069 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7071 gimple stmt = gsi_stmt (*gsi_p);
7072 struct walk_stmt_info wi;
7074 if (gimple_has_location (stmt))
7075 input_location = gimple_location (stmt);
7077 if (task_shared_vars)
7078 memset (&wi, '\0', sizeof (wi));
7080 /* If we have issued syntax errors, avoid doing any heavy lifting.
7081 Just replace the OpenMP directives with a NOP to avoid
7082 confusing RTL expansion. */
7083 if (seen_error () && is_gimple_omp (stmt))
7085 gsi_replace (gsi_p, gimple_build_nop (), true);
7086 return;
7089 switch (gimple_code (stmt))
7091 case GIMPLE_COND:
7092 if ((ctx || task_shared_vars)
7093 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
7094 ctx ? NULL : &wi, NULL)
7095 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
7096 ctx ? NULL : &wi, NULL)))
7097 gimple_regimplify_operands (stmt, gsi_p);
7098 break;
7099 case GIMPLE_CATCH:
7100 lower_omp (gimple_catch_handler_ptr (stmt), ctx);
7101 break;
7102 case GIMPLE_EH_FILTER:
7103 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
7104 break;
7105 case GIMPLE_TRY:
7106 lower_omp (gimple_try_eval_ptr (stmt), ctx);
7107 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
7108 break;
7109 case GIMPLE_TRANSACTION:
7110 lower_omp (gimple_transaction_body_ptr (stmt), ctx);
7111 break;
7112 case GIMPLE_BIND:
7113 lower_omp (gimple_bind_body_ptr (stmt), ctx);
7114 break;
7115 case GIMPLE_OMP_PARALLEL:
7116 case GIMPLE_OMP_TASK:
7117 ctx = maybe_lookup_ctx (stmt);
7118 lower_omp_taskreg (gsi_p, ctx);
7119 break;
7120 case GIMPLE_OMP_FOR:
7121 ctx = maybe_lookup_ctx (stmt);
7122 gcc_assert (ctx);
7123 lower_omp_for (gsi_p, ctx);
7124 break;
7125 case GIMPLE_OMP_SECTIONS:
7126 ctx = maybe_lookup_ctx (stmt);
7127 gcc_assert (ctx);
7128 lower_omp_sections (gsi_p, ctx);
7129 break;
7130 case GIMPLE_OMP_SINGLE:
7131 ctx = maybe_lookup_ctx (stmt);
7132 gcc_assert (ctx);
7133 lower_omp_single (gsi_p, ctx);
7134 break;
7135 case GIMPLE_OMP_MASTER:
7136 ctx = maybe_lookup_ctx (stmt);
7137 gcc_assert (ctx);
7138 lower_omp_master (gsi_p, ctx);
7139 break;
7140 case GIMPLE_OMP_ORDERED:
7141 ctx = maybe_lookup_ctx (stmt);
7142 gcc_assert (ctx);
7143 lower_omp_ordered (gsi_p, ctx);
7144 break;
7145 case GIMPLE_OMP_CRITICAL:
7146 ctx = maybe_lookup_ctx (stmt);
7147 gcc_assert (ctx);
7148 lower_omp_critical (gsi_p, ctx);
7149 break;
7150 case GIMPLE_OMP_ATOMIC_LOAD:
7151 if ((ctx || task_shared_vars)
7152 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
7153 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
7154 gimple_regimplify_operands (stmt, gsi_p);
7155 break;
7156 default:
7157 if ((ctx || task_shared_vars)
7158 && walk_gimple_op (stmt, lower_omp_regimplify_p,
7159 ctx ? NULL : &wi))
7160 gimple_regimplify_operands (stmt, gsi_p);
7161 break;
7165 static void
7166 lower_omp (gimple_seq *body, omp_context *ctx)
7168 location_t saved_location = input_location;
7169 gimple_stmt_iterator gsi;
7170 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
7171 lower_omp_1 (&gsi, ctx);
7172 input_location = saved_location;
7175 /* Main entry point. */
7177 static unsigned int
7178 execute_lower_omp (void)
7180 gimple_seq body;
7182 /* This pass always runs, to provide PROP_gimple_lomp.
7183 But there is nothing to do unless -fopenmp is given. */
7184 if (flag_openmp == 0)
7185 return 0;
7187 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
7188 delete_omp_context);
7190 body = gimple_body (current_function_decl);
7191 scan_omp (&body, NULL);
7192 gcc_assert (taskreg_nesting_level == 0);
7194 if (all_contexts->root)
7196 struct gimplify_ctx gctx;
7198 if (task_shared_vars)
7199 push_gimplify_context (&gctx);
7200 lower_omp (&body, NULL);
7201 if (task_shared_vars)
7202 pop_gimplify_context (NULL);
7205 if (all_contexts)
7207 splay_tree_delete (all_contexts);
7208 all_contexts = NULL;
7210 BITMAP_FREE (task_shared_vars);
7211 return 0;
7214 namespace {
7216 const pass_data pass_data_lower_omp =
7218 GIMPLE_PASS, /* type */
7219 "omplower", /* name */
7220 OPTGROUP_NONE, /* optinfo_flags */
7221 false, /* has_gate */
7222 true, /* has_execute */
7223 TV_NONE, /* tv_id */
7224 PROP_gimple_any, /* properties_required */
7225 PROP_gimple_lomp, /* properties_provided */
7226 0, /* properties_destroyed */
7227 0, /* todo_flags_start */
7228 0, /* todo_flags_finish */
7231 class pass_lower_omp : public gimple_opt_pass
7233 public:
7234 pass_lower_omp(gcc::context *ctxt)
7235 : gimple_opt_pass(pass_data_lower_omp, ctxt)
7238 /* opt_pass methods: */
7239 unsigned int execute () { return execute_lower_omp (); }
7241 }; // class pass_lower_omp
7243 } // anon namespace
7245 gimple_opt_pass *
7246 make_pass_lower_omp (gcc::context *ctxt)
7248 return new pass_lower_omp (ctxt);
7251 /* The following is a utility to diagnose OpenMP structured block violations.
7252 It is not part of the "omplower" pass, as that's invoked too late. It
7253 should be invoked by the respective front ends after gimplification. */
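/* For example (illustrative only), a jump out of a structured block such as

       #pragma omp parallel
       {
         ...
         goto bad;
       }
     bad: ;

   is reported by diagnose_sb_0 as "invalid branch to/from an OpenMP
   structured block", and the offending branch is replaced with a nop.  */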
7255 static splay_tree all_labels;
7257 /* Check for mismatched contexts and generate an error if needed. Return
7258 true if an error is detected. */
7260 static bool
7261 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
7262 gimple branch_ctx, gimple label_ctx)
7264 if (label_ctx == branch_ctx)
7265 return false;
7269 /* Previously we kept track of the label's entire context in diagnose_sb_[12]
7270 so we could traverse it and issue a correct "exit" or "enter" error
7271 message upon a structured block violation.
7273 We built the context by building a list with tree_cons'ing, but there is
7274 no easy counterpart in gimple tuples. It seems like far too much work
7275 for issuing exit/enter error messages. If someone really misses the
7276 distinct error message... patches welcome.  */
7279 #if 0
7280 /* Try to avoid confusing the user by producing an error message
7281 with correct "exit" or "enter" verbiage. We prefer "exit"
7282 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
7283 if (branch_ctx == NULL)
7284 exit_p = false;
7285 else
7287 while (label_ctx)
7289 if (TREE_VALUE (label_ctx) == branch_ctx)
7291 exit_p = false;
7292 break;
7294 label_ctx = TREE_CHAIN (label_ctx);
7298 if (exit_p)
7299 error ("invalid exit from OpenMP structured block");
7300 else
7301 error ("invalid entry to OpenMP structured block");
7302 #endif
7304 /* If it's obvious we have an invalid entry, be specific about the error. */
7305 if (branch_ctx == NULL)
7306 error ("invalid entry to OpenMP structured block");
7307 else
7308 /* Otherwise, be vague and lazy, but efficient. */
7309 error ("invalid branch to/from an OpenMP structured block");
7311 gsi_replace (gsi_p, gimple_build_nop (), false);
7312 return true;
7315 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
7316 where each label is found. */
7318 static tree
7319 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7320 struct walk_stmt_info *wi)
7322 gimple context = (gimple) wi->info;
7323 gimple inner_context;
7324 gimple stmt = gsi_stmt (*gsi_p);
7326 *handled_ops_p = true;
7328 switch (gimple_code (stmt))
7330 WALK_SUBSTMTS;
7332 case GIMPLE_OMP_PARALLEL:
7333 case GIMPLE_OMP_TASK:
7334 case GIMPLE_OMP_SECTIONS:
7335 case GIMPLE_OMP_SINGLE:
7336 case GIMPLE_OMP_SECTION:
7337 case GIMPLE_OMP_MASTER:
7338 case GIMPLE_OMP_ORDERED:
7339 case GIMPLE_OMP_CRITICAL:
7340 /* The minimal context here is just the current OMP construct. */
7341 inner_context = stmt;
7342 wi->info = inner_context;
7343 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
7344 wi->info = context;
7345 break;
7347 case GIMPLE_OMP_FOR:
7348 inner_context = stmt;
7349 wi->info = inner_context;
7350 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7351 walk them. */
7352 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
7353 diagnose_sb_1, NULL, wi);
7354 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
7355 wi->info = context;
7356 break;
7358 case GIMPLE_LABEL:
7359 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
7360 (splay_tree_value) context);
7361 break;
7363 default:
7364 break;
7367 return NULL_TREE;
7370 /* Pass 2: Check each branch and see if its context differs from that of
7371 the destination label's context. */
7373 static tree
7374 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7375 struct walk_stmt_info *wi)
7377 gimple context = (gimple) wi->info;
7378 splay_tree_node n;
7379 gimple stmt = gsi_stmt (*gsi_p);
7381 *handled_ops_p = true;
7383 switch (gimple_code (stmt))
7385 WALK_SUBSTMTS;
7387 case GIMPLE_OMP_PARALLEL:
7388 case GIMPLE_OMP_TASK:
7389 case GIMPLE_OMP_SECTIONS:
7390 case GIMPLE_OMP_SINGLE:
7391 case GIMPLE_OMP_SECTION:
7392 case GIMPLE_OMP_MASTER:
7393 case GIMPLE_OMP_ORDERED:
7394 case GIMPLE_OMP_CRITICAL:
7395 wi->info = stmt;
7396 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
7397 wi->info = context;
7398 break;
7400 case GIMPLE_OMP_FOR:
7401 wi->info = stmt;
7402 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7403 walk them. */
7404 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
7405 diagnose_sb_2, NULL, wi);
7406 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
7407 wi->info = context;
7408 break;
7410 case GIMPLE_COND:
7412 tree lab = gimple_cond_true_label (stmt);
7413 if (lab)
7415 n = splay_tree_lookup (all_labels,
7416 (splay_tree_key) lab);
7417 diagnose_sb_0 (gsi_p, context,
7418 n ? (gimple) n->value : NULL);
7420 lab = gimple_cond_false_label (stmt);
7421 if (lab)
7423 n = splay_tree_lookup (all_labels,
7424 (splay_tree_key) lab);
7425 diagnose_sb_0 (gsi_p, context,
7426 n ? (gimple) n->value : NULL);
7429 break;
7431 case GIMPLE_GOTO:
7433 tree lab = gimple_goto_dest (stmt);
7434 if (TREE_CODE (lab) != LABEL_DECL)
7435 break;
7437 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
7438 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
7440 break;
7442 case GIMPLE_SWITCH:
7444 unsigned int i;
7445 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
7447 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
7448 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
7449 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
7450 break;
7453 break;
7455 case GIMPLE_RETURN:
7456 diagnose_sb_0 (gsi_p, context, NULL);
7457 break;
7459 default:
7460 break;
7463 return NULL_TREE;
7466 static unsigned int
7467 diagnose_omp_structured_block_errors (void)
7469 struct walk_stmt_info wi;
7470 gimple_seq body = gimple_body (current_function_decl);
7472 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
7474 memset (&wi, 0, sizeof (wi));
7475 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
7477 memset (&wi, 0, sizeof (wi));
7478 wi.want_locations = true;
7479 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
7481 gimple_set_body (current_function_decl, body);
7483 splay_tree_delete (all_labels);
7484 all_labels = NULL;
7486 return 0;
7489 static bool
7490 gate_diagnose_omp_blocks (void)
7492 return flag_openmp != 0;
7495 namespace {
7497 const pass_data pass_data_diagnose_omp_blocks =
7499 GIMPLE_PASS, /* type */
7500 "*diagnose_omp_blocks", /* name */
7501 OPTGROUP_NONE, /* optinfo_flags */
7502 true, /* has_gate */
7503 true, /* has_execute */
7504 TV_NONE, /* tv_id */
7505 PROP_gimple_any, /* properties_required */
7506 0, /* properties_provided */
7507 0, /* properties_destroyed */
7508 0, /* todo_flags_start */
7509 0, /* todo_flags_finish */
7512 class pass_diagnose_omp_blocks : public gimple_opt_pass
7514 public:
7515 pass_diagnose_omp_blocks(gcc::context *ctxt)
7516 : gimple_opt_pass(pass_data_diagnose_omp_blocks, ctxt)
7519 /* opt_pass methods: */
7520 bool gate () { return gate_diagnose_omp_blocks (); }
7521 unsigned int execute () {
7522 return diagnose_omp_structured_block_errors ();
7525 }; // class pass_diagnose_omp_blocks
7527 } // anon namespace
7529 gimple_opt_pass *
7530 make_pass_diagnose_omp_blocks (gcc::context *ctxt)
7532 return new pass_diagnose_omp_blocks (ctxt);
7535 #include "gt-omp-low.h"