gcc/omp-low.c
1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
6 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
7 Free Software Foundation, Inc.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "gimple.h"
32 #include "tree-iterator.h"
33 #include "tree-inline.h"
34 #include "langhooks.h"
35 #include "diagnostic-core.h"
36 #include "tree-flow.h"
37 #include "flags.h"
38 #include "function.h"
39 #include "expr.h"
40 #include "tree-pass.h"
41 #include "ggc.h"
42 #include "except.h"
43 #include "splay-tree.h"
44 #include "optabs.h"
45 #include "cfgloop.h"
48 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
49 phases. The first phase scans the function looking for OMP statements
50 and then for variables that must be replaced to satisfy data sharing
51 clauses. The second phase expands code for the constructs, as well as
52 re-gimplifying things when variables have been replaced with complex
53 expressions.
55 Final code generation is done by pass_expand_omp. The flowgraph is
56 scanned for parallel regions which are then moved to a new
57 function, to be invoked by the thread library. */
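/* As a rough illustration of the end result (a sketch only, not the
   exact output of this pass), a directive such as

       #pragma omp parallel shared(x)
         x++;

   is outlined into a child function plus a libgomp call sequence along
   the lines of

       static void main._omp_fn.0 (struct .omp_data_s *.omp_data_i)
       {
         .omp_data_i->x = .omp_data_i->x + 1;
       }

       .omp_data_o.x = x;
       GOMP_parallel_start (main._omp_fn.0, &.omp_data_o, 0);
       main._omp_fn.0 (&.omp_data_o);
       GOMP_parallel_end ();
       x = .omp_data_o.x;

   The names used here (.omp_data_s, .omp_data_o, main._omp_fn.0) match
   the conventions referred to in the comments below.  */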
59 /* Context structure. Used to store information about each parallel
60 directive in the code. */
62 typedef struct omp_context
64 /* This field must be at the beginning, as we do "inheritance": Some
65 callback functions for tree-inline.c (e.g., omp_copy_decl)
66 receive a copy_body_data pointer that is up-casted to an
67 omp_context pointer. */
68 copy_body_data cb;
70 /* The tree of contexts corresponding to the encountered constructs. */
71 struct omp_context *outer;
72 gimple stmt;
74 /* Map variables to fields in a structure that allows communication
75 between sending and receiving threads. */
76 splay_tree field_map;
77 tree record_type;
78 tree sender_decl;
79 tree receiver_decl;
81 /* These are used just by task contexts, if task firstprivate fn is
82 needed. srecord_type is used to communicate from the thread
83 that encountered the task construct to task firstprivate fn,
84 record_type is allocated by GOMP_task, initialized by task firstprivate
85 fn and passed to the task body fn. */
86 splay_tree sfield_map;
87 tree srecord_type;
89 /* A chain of variables to add to the top-level block surrounding the
90 construct. In the case of a parallel, this is in the child function. */
91 tree block_vars;
93 /* What to do with variables with implicitly determined sharing
94 attributes. */
95 enum omp_clause_default_kind default_kind;
97 /* Nesting depth of this context. Used to beautify error messages re
98 invalid gotos. The outermost ctx is depth 1, with depth 0 being
99 reserved for the main body of the function. */
100 int depth;
102 /* True if this parallel directive is nested within another. */
103 bool is_nested;
104 } omp_context;
107 struct omp_for_data_loop
109 tree v, n1, n2, step;
110 enum tree_code cond_code;
113 /* A structure describing the main elements of a parallel loop. */
115 struct omp_for_data
117 struct omp_for_data_loop loop;
118 tree chunk_size;
119 gimple for_stmt;
120 tree pre, iter_type;
121 int collapse;
122 bool have_nowait, have_ordered;
123 enum omp_clause_schedule_kind sched_kind;
124 struct omp_for_data_loop *loops;
128 static splay_tree all_contexts;
129 static int taskreg_nesting_level;
130 struct omp_region *root_omp_region;
131 static bitmap task_shared_vars;
133 static void scan_omp (gimple_seq *, omp_context *);
134 static tree scan_omp_1_op (tree *, int *, void *);
136 #define WALK_SUBSTMTS \
137 case GIMPLE_BIND: \
138 case GIMPLE_TRY: \
139 case GIMPLE_CATCH: \
140 case GIMPLE_EH_FILTER: \
141 case GIMPLE_TRANSACTION: \
142 /* The sub-statements for these should be walked. */ \
143 *handled_ops_p = false; \
144 break;
146 /* Convenience function for calling scan_omp_1_op on tree operands. */
148 static inline tree
149 scan_omp_op (tree *tp, omp_context *ctx)
151 struct walk_stmt_info wi;
153 memset (&wi, 0, sizeof (wi));
154 wi.info = ctx;
155 wi.want_locations = true;
157 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
160 static void lower_omp (gimple_seq *, omp_context *);
161 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
162 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
164 /* Find an OpenMP clause of type KIND within CLAUSES. */
166 tree
167 find_omp_clause (tree clauses, enum omp_clause_code kind)
169 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
170 if (OMP_CLAUSE_CODE (clauses) == kind)
171 return clauses;
173 return NULL_TREE;
176 /* Return true if CTX is for an omp parallel. */
178 static inline bool
179 is_parallel_ctx (omp_context *ctx)
181 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
185 /* Return true if CTX is for an omp task. */
187 static inline bool
188 is_task_ctx (omp_context *ctx)
190 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
194 /* Return true if CTX is for an omp parallel or omp task. */
196 static inline bool
197 is_taskreg_ctx (omp_context *ctx)
199 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
200 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
204 /* Return true if REGION is a combined parallel+workshare region. */
206 static inline bool
207 is_combined_parallel (struct omp_region *region)
209 return region->is_combined_parallel;
213 /* Extract the header elements of parallel loop FOR_STMT and store
214 them into *FD. */
216 static void
217 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
218 struct omp_for_data_loop *loops)
220 tree t, var, *collapse_iter, *collapse_count;
221 tree count = NULL_TREE, iter_type = long_integer_type_node;
222 struct omp_for_data_loop *loop;
223 int i;
224 struct omp_for_data_loop dummy_loop;
225 location_t loc = gimple_location (for_stmt);
227 fd->for_stmt = for_stmt;
228 fd->pre = NULL;
229 fd->collapse = gimple_omp_for_collapse (for_stmt);
230 if (fd->collapse > 1)
231 fd->loops = loops;
232 else
233 fd->loops = &fd->loop;
235 fd->have_nowait = fd->have_ordered = false;
236 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
237 fd->chunk_size = NULL_TREE;
238 collapse_iter = NULL;
239 collapse_count = NULL;
241 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
242 switch (OMP_CLAUSE_CODE (t))
244 case OMP_CLAUSE_NOWAIT:
245 fd->have_nowait = true;
246 break;
247 case OMP_CLAUSE_ORDERED:
248 fd->have_ordered = true;
249 break;
250 case OMP_CLAUSE_SCHEDULE:
251 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
252 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
253 break;
254 case OMP_CLAUSE_COLLAPSE:
255 if (fd->collapse > 1)
257 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
258 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
260 default:
261 break;
264 /* FIXME: for now map schedule(auto) to schedule(static).
265 There should be analysis to determine whether all iterations
266 are approximately the same amount of work (then schedule(static)
267 is best) or if it varies (then schedule(dynamic,N) is better). */
268 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
270 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
271 gcc_assert (fd->chunk_size == NULL);
273 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
274 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
275 gcc_assert (fd->chunk_size == NULL);
276 else if (fd->chunk_size == NULL)
278 /* We only need to compute a default chunk size for ordered
279 static loops and dynamic loops. */
280 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
281 || fd->have_ordered
282 || fd->collapse > 1)
283 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
284 ? integer_zero_node : integer_one_node;
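    /* Informal example (not part of the original sources): with
       schedule(dynamic) and no chunk argument, the default chunk size set
       here is 1, while an ordered schedule(static) loop gets a chunk size
       of 0, which the expanded code passes to libgomp to request its
       default static chunking.  */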
287 for (i = 0; i < fd->collapse; i++)
289 if (fd->collapse == 1)
290 loop = &fd->loop;
291 else if (loops != NULL)
292 loop = loops + i;
293 else
294 loop = &dummy_loop;
297 loop->v = gimple_omp_for_index (for_stmt, i);
298 gcc_assert (SSA_VAR_P (loop->v));
299 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
300 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
301 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
302 loop->n1 = gimple_omp_for_initial (for_stmt, i);
304 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
305 loop->n2 = gimple_omp_for_final (for_stmt, i);
306 switch (loop->cond_code)
308 case LT_EXPR:
309 case GT_EXPR:
310 break;
311 case LE_EXPR:
312 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
313 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
314 else
315 loop->n2 = fold_build2_loc (loc,
316 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
317 build_int_cst (TREE_TYPE (loop->n2), 1));
318 loop->cond_code = LT_EXPR;
319 break;
320 case GE_EXPR:
321 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
322 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
323 else
324 loop->n2 = fold_build2_loc (loc,
325 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
326 build_int_cst (TREE_TYPE (loop->n2), 1));
327 loop->cond_code = GT_EXPR;
328 break;
329 default:
330 gcc_unreachable ();
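      /* The normalization just above means that, for instance, an integer
         loop written with  i <= n  is treated as  i < n + 1, and one written
         with  i >= n  as  i > n - 1, so the rest of this function and the
         expansion pass only have to handle LT_EXPR and GT_EXPR.  */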
333 t = gimple_omp_for_incr (for_stmt, i);
334 gcc_assert (TREE_OPERAND (t, 0) == var);
335 switch (TREE_CODE (t))
337 case PLUS_EXPR:
338 loop->step = TREE_OPERAND (t, 1);
339 break;
340 case POINTER_PLUS_EXPR:
341 loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
342 break;
343 case MINUS_EXPR:
344 loop->step = TREE_OPERAND (t, 1);
345 loop->step = fold_build1_loc (loc,
346 NEGATE_EXPR, TREE_TYPE (loop->step),
347 loop->step);
348 break;
349 default:
350 gcc_unreachable ();
353 if (iter_type != long_long_unsigned_type_node)
355 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
356 iter_type = long_long_unsigned_type_node;
357 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
358 && TYPE_PRECISION (TREE_TYPE (loop->v))
359 >= TYPE_PRECISION (iter_type))
361 tree n;
363 if (loop->cond_code == LT_EXPR)
364 n = fold_build2_loc (loc,
365 PLUS_EXPR, TREE_TYPE (loop->v),
366 loop->n2, loop->step);
367 else
368 n = loop->n1;
369 if (TREE_CODE (n) != INTEGER_CST
370 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
371 iter_type = long_long_unsigned_type_node;
373 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
374 > TYPE_PRECISION (iter_type))
376 tree n1, n2;
378 if (loop->cond_code == LT_EXPR)
380 n1 = loop->n1;
381 n2 = fold_build2_loc (loc,
382 PLUS_EXPR, TREE_TYPE (loop->v),
383 loop->n2, loop->step);
385 else
387 n1 = fold_build2_loc (loc,
388 MINUS_EXPR, TREE_TYPE (loop->v),
389 loop->n2, loop->step);
390 n2 = loop->n1;
392 if (TREE_CODE (n1) != INTEGER_CST
393 || TREE_CODE (n2) != INTEGER_CST
394 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
395 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
396 iter_type = long_long_unsigned_type_node;
400 if (collapse_count && *collapse_count == NULL)
402 if ((i == 0 || count != NULL_TREE)
403 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
404 && TREE_CONSTANT (loop->n1)
405 && TREE_CONSTANT (loop->n2)
406 && TREE_CODE (loop->step) == INTEGER_CST)
408 tree itype = TREE_TYPE (loop->v);
410 if (POINTER_TYPE_P (itype))
411 itype = signed_type_for (itype);
412 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
413 t = fold_build2_loc (loc,
414 PLUS_EXPR, itype,
415 fold_convert_loc (loc, itype, loop->step), t);
416 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
417 fold_convert_loc (loc, itype, loop->n2));
418 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
419 fold_convert_loc (loc, itype, loop->n1));
420 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
421 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
422 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
423 fold_build1_loc (loc, NEGATE_EXPR, itype,
424 fold_convert_loc (loc, itype,
425 loop->step)));
426 else
427 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
428 fold_convert_loc (loc, itype, loop->step));
429 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
430 if (count != NULL_TREE)
431 count = fold_build2_loc (loc,
432 MULT_EXPR, long_long_unsigned_type_node,
433 count, t);
434 else
435 count = t;
436 if (TREE_CODE (count) != INTEGER_CST)
437 count = NULL_TREE;
439 else
440 count = NULL_TREE;
444 if (count)
446 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
447 iter_type = long_long_unsigned_type_node;
448 else
449 iter_type = long_integer_type_node;
451 else if (collapse_iter && *collapse_iter != NULL)
452 iter_type = TREE_TYPE (*collapse_iter);
453 fd->iter_type = iter_type;
454 if (collapse_iter && *collapse_iter == NULL)
455 *collapse_iter = create_tmp_var (iter_type, ".iter");
456 if (collapse_count && *collapse_count == NULL)
458 if (count)
459 *collapse_count = fold_convert_loc (loc, iter_type, count);
460 else
461 *collapse_count = create_tmp_var (iter_type, ".count");
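  /* Illustration of the collapse handling below (hypothetical collapse(2)
     nest with M and N iterations): the nest is treated as one logical loop
     running .iter from 0 to .count - 1 with step 1, where .count = M * N;
     the original induction variables are reconstructed from .iter later,
     when the loop is expanded.  */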
464 if (fd->collapse > 1)
466 fd->loop.v = *collapse_iter;
467 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
468 fd->loop.n2 = *collapse_count;
469 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
470 fd->loop.cond_code = LT_EXPR;
475 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
476 is the immediate dominator of PAR_ENTRY_BB, return true if there
477 are no data dependencies that would prevent expanding the parallel
478 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
480 When expanding a combined parallel+workshare region, the call to
481 the child function may need additional arguments in the case of
482 GIMPLE_OMP_FOR regions. In some cases, these arguments are
483 computed out of variables passed in from the parent to the child
484 via 'struct .omp_data_s'. For instance:
486 #pragma omp parallel for schedule (guided, i * 4)
487 for (j ...)
489 Is lowered into:
491 # BLOCK 2 (PAR_ENTRY_BB)
492 .omp_data_o.i = i;
493 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
495 # BLOCK 3 (WS_ENTRY_BB)
496 .omp_data_i = &.omp_data_o;
497 D.1667 = .omp_data_i->i;
498 D.1598 = D.1667 * 4;
499 #pragma omp for schedule (guided, D.1598)
501 When we outline the parallel region, the call to the child function
502 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
503 that value is computed *after* the call site. So, in principle we
504 cannot do the transformation.
506 To see whether the code in WS_ENTRY_BB blocks the combined
507 parallel+workshare call, we collect all the variables used in the
 508    GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
509 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
510 call.
512 FIXME. If we had the SSA form built at this point, we could merely
513 hoist the code in block 3 into block 2 and be done with it. But at
514 this point we don't have dataflow information and though we could
515 hack something up here, it is really not worth the aggravation. */
517 static bool
518 workshare_safe_to_combine_p (basic_block ws_entry_bb)
520 struct omp_for_data fd;
521 gimple ws_stmt = last_stmt (ws_entry_bb);
523 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
524 return true;
526 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
528 extract_omp_for_data (ws_stmt, &fd, NULL);
530 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
531 return false;
532 if (fd.iter_type != long_integer_type_node)
533 return false;
535 /* FIXME. We give up too easily here. If any of these arguments
536 are not constants, they will likely involve variables that have
537 been mapped into fields of .omp_data_s for sharing with the child
538 function. With appropriate data flow, it would be possible to
539 see through this. */
540 if (!is_gimple_min_invariant (fd.loop.n1)
541 || !is_gimple_min_invariant (fd.loop.n2)
542 || !is_gimple_min_invariant (fd.loop.step)
543 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
544 return false;
546 return true;
550 /* Collect additional arguments needed to emit a combined
551 parallel+workshare call. WS_STMT is the workshare directive being
552 expanded. */
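/* Informal example: for

       #pragma omp parallel for schedule (dynamic, 4)
       for (i = 0; i < n; i++)
         ...

   the vector built below is roughly { 0, n, 1, 4 } (lower bound, upper
   bound, step and chunk size converted to long), which the expansion
   later appends to the combined GOMP_parallel_loop_dynamic_start
   call.  */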
554 static vec<tree, va_gc> *
555 get_ws_args_for (gimple ws_stmt)
557 tree t;
558 location_t loc = gimple_location (ws_stmt);
559 vec<tree, va_gc> *ws_args;
561 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
563 struct omp_for_data fd;
565 extract_omp_for_data (ws_stmt, &fd, NULL);
567 vec_alloc (ws_args, 3 + (fd.chunk_size != 0));
569 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
570 ws_args->quick_push (t);
572 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
573 ws_args->quick_push (t);
575 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
576 ws_args->quick_push (t);
578 if (fd.chunk_size)
580 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
581 ws_args->quick_push (t);
584 return ws_args;
586 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
588 /* Number of sections is equal to the number of edges from the
589 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
590 the exit of the sections region. */
591 basic_block bb = single_succ (gimple_bb (ws_stmt));
592 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
593 vec_alloc (ws_args, 1);
594 ws_args->quick_push (t);
595 return ws_args;
598 gcc_unreachable ();
602 /* Discover whether REGION is a combined parallel+workshare region. */
604 static void
605 determine_parallel_type (struct omp_region *region)
607 basic_block par_entry_bb, par_exit_bb;
608 basic_block ws_entry_bb, ws_exit_bb;
610 if (region == NULL || region->inner == NULL
611 || region->exit == NULL || region->inner->exit == NULL
612 || region->inner->cont == NULL)
613 return;
615 /* We only support parallel+for and parallel+sections. */
616 if (region->type != GIMPLE_OMP_PARALLEL
617 || (region->inner->type != GIMPLE_OMP_FOR
618 && region->inner->type != GIMPLE_OMP_SECTIONS))
619 return;
621 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
622 WS_EXIT_BB -> PAR_EXIT_BB. */
623 par_entry_bb = region->entry;
624 par_exit_bb = region->exit;
625 ws_entry_bb = region->inner->entry;
626 ws_exit_bb = region->inner->exit;
628 if (single_succ (par_entry_bb) == ws_entry_bb
629 && single_succ (ws_exit_bb) == par_exit_bb
630 && workshare_safe_to_combine_p (ws_entry_bb)
631 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
632 || (last_and_only_stmt (ws_entry_bb)
633 && last_and_only_stmt (par_exit_bb))))
635 gimple ws_stmt = last_stmt (ws_entry_bb);
637 if (region->inner->type == GIMPLE_OMP_FOR)
639 /* If this is a combined parallel loop, we need to determine
640 whether or not to use the combined library calls. There
641 are two cases where we do not apply the transformation:
642 static loops and any kind of ordered loop. In the first
643 case, we already open code the loop so there is no need
644 to do anything else. In the latter case, the combined
645 parallel loop call would still need extra synchronization
646 to implement ordered semantics, so there would not be any
647 gain in using the combined call. */
648 tree clauses = gimple_omp_for_clauses (ws_stmt);
649 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
650 if (c == NULL
651 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
652 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
654 region->is_combined_parallel = false;
655 region->inner->is_combined_parallel = false;
656 return;
660 region->is_combined_parallel = true;
661 region->inner->is_combined_parallel = true;
662 region->ws_args = get_ws_args_for (ws_stmt);
667 /* Return true if EXPR is variable sized. */
669 static inline bool
670 is_variable_sized (const_tree expr)
672 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
675 /* Return true if DECL is a reference type. */
677 static inline bool
678 is_reference (tree decl)
680 return lang_hooks.decls.omp_privatize_by_reference (decl);
 683 /* Look up variables in the decl or field splay trees.  The "maybe" form
 684    allows the variable not to have been entered; otherwise we assert that
 685    it has been.  */
687 static inline tree
688 lookup_decl (tree var, omp_context *ctx)
690 tree *n;
691 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
692 return *n;
695 static inline tree
696 maybe_lookup_decl (const_tree var, omp_context *ctx)
698 tree *n;
699 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
700 return n ? *n : NULL_TREE;
703 static inline tree
704 lookup_field (tree var, omp_context *ctx)
706 splay_tree_node n;
707 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
708 return (tree) n->value;
711 static inline tree
712 lookup_sfield (tree var, omp_context *ctx)
714 splay_tree_node n;
715 n = splay_tree_lookup (ctx->sfield_map
716 ? ctx->sfield_map : ctx->field_map,
717 (splay_tree_key) var);
718 return (tree) n->value;
721 static inline tree
722 maybe_lookup_field (tree var, omp_context *ctx)
724 splay_tree_node n;
725 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
726 return n ? (tree) n->value : NULL_TREE;
729 /* Return true if DECL should be copied by pointer. SHARED_CTX is
730 the parallel context if DECL is to be shared. */
732 static bool
733 use_pointer_for_field (tree decl, omp_context *shared_ctx)
735 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
736 return true;
738 /* We can only use copy-in/copy-out semantics for shared variables
739 when we know the value is not accessible from an outer scope. */
740 if (shared_ctx)
742 /* ??? Trivially accessible from anywhere. But why would we even
743 be passing an address in this case? Should we simply assert
744 this to be false, or should we have a cleanup pass that removes
745 these from the list of mappings? */
746 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
747 return true;
749 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
750 without analyzing the expression whether or not its location
751 is accessible to anyone else. In the case of nested parallel
752 regions it certainly may be. */
753 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
754 return true;
756 /* Do not use copy-in/copy-out for variables that have their
757 address taken. */
758 if (TREE_ADDRESSABLE (decl))
759 return true;
761 /* Disallow copy-in/out in nested parallel if
762 decl is shared in outer parallel, otherwise
763 each thread could store the shared variable
764 in its own copy-in location, making the
765 variable no longer really shared. */
766 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
768 omp_context *up;
770 for (up = shared_ctx->outer; up; up = up->outer)
771 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
772 break;
774 if (up)
776 tree c;
778 for (c = gimple_omp_taskreg_clauses (up->stmt);
779 c; c = OMP_CLAUSE_CHAIN (c))
780 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
781 && OMP_CLAUSE_DECL (c) == decl)
782 break;
784 if (c)
785 goto maybe_mark_addressable_and_ret;
789 /* For tasks avoid using copy-in/out, unless they are readonly
790 (in which case just copy-in is used). As tasks can be
791 deferred or executed in different thread, when GOMP_task
792 returns, the task hasn't necessarily terminated. */
793 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
795 tree outer;
796 maybe_mark_addressable_and_ret:
797 outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
798 if (is_gimple_reg (outer))
800 /* Taking address of OUTER in lower_send_shared_vars
801 might need regimplification of everything that uses the
802 variable. */
803 if (!task_shared_vars)
804 task_shared_vars = BITMAP_ALLOC (NULL);
805 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
806 TREE_ADDRESSABLE (outer) = 1;
808 return true;
812 return false;
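/* Informal summary of the rules above: aggregates, static or external
   variables, and anything whose address is taken are passed to the child
   function by pointer; a plain local scalar shared with a parallel can
   instead use copy-in/copy-out through its .omp_data_s field.  */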
815 /* Create a new VAR_DECL and copy information from VAR to it. */
817 tree
818 copy_var_decl (tree var, tree name, tree type)
820 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
822 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
823 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
824 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
825 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
826 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
827 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
828 TREE_USED (copy) = 1;
829 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
831 return copy;
834 /* Construct a new automatic decl similar to VAR. */
836 static tree
837 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
839 tree copy = copy_var_decl (var, name, type);
841 DECL_CONTEXT (copy) = current_function_decl;
842 DECL_CHAIN (copy) = ctx->block_vars;
843 ctx->block_vars = copy;
845 return copy;
848 static tree
849 omp_copy_decl_1 (tree var, omp_context *ctx)
851 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
854 /* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
855 as appropriate. */
856 static tree
857 omp_build_component_ref (tree obj, tree field)
859 tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
860 if (TREE_THIS_VOLATILE (field))
861 TREE_THIS_VOLATILE (ret) |= 1;
862 if (TREE_READONLY (field))
863 TREE_READONLY (ret) |= 1;
864 return ret;
867 /* Build tree nodes to access the field for VAR on the receiver side. */
869 static tree
870 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
872 tree x, field = lookup_field (var, ctx);
874 /* If the receiver record type was remapped in the child function,
875 remap the field into the new record type. */
876 x = maybe_lookup_field (field, ctx);
877 if (x != NULL)
878 field = x;
880 x = build_simple_mem_ref (ctx->receiver_decl);
881 x = omp_build_component_ref (x, field);
882 if (by_ref)
883 x = build_simple_mem_ref (x);
885 return x;
888 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
889 of a parallel, this is a component reference; for workshare constructs
890 this is some variable. */
892 static tree
893 build_outer_var_ref (tree var, omp_context *ctx)
895 tree x;
897 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
898 x = var;
899 else if (is_variable_sized (var))
901 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
902 x = build_outer_var_ref (x, ctx);
903 x = build_simple_mem_ref (x);
905 else if (is_taskreg_ctx (ctx))
907 bool by_ref = use_pointer_for_field (var, NULL);
908 x = build_receiver_ref (var, by_ref, ctx);
910 else if (ctx->outer)
911 x = lookup_decl (var, ctx->outer);
912 else if (is_reference (var))
913 /* This can happen with orphaned constructs. If var is reference, it is
914 possible it is shared and as such valid. */
915 x = var;
916 else
917 gcc_unreachable ();
919 if (is_reference (var))
920 x = build_simple_mem_ref (x);
922 return x;
925 /* Build tree nodes to access the field for VAR on the sender side. */
927 static tree
928 build_sender_ref (tree var, omp_context *ctx)
930 tree field = lookup_sfield (var, ctx);
931 return omp_build_component_ref (ctx->sender_decl, field);
934 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
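/* A note on the MASK argument (this summarizes how the callers in
   scan_sharing_clauses use it): bit 0 requests a field in the receiver
   record (ctx->record_type / ctx->field_map), bit 1 a field in the sender
   record used for task constructs (ctx->srecord_type / ctx->sfield_map),
   and mask 3 installs the field in both.  */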
936 static void
937 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
939 tree field, type, sfield = NULL_TREE;
941 gcc_assert ((mask & 1) == 0
942 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
943 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
944 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
946 type = TREE_TYPE (var);
947 if (by_ref)
948 type = build_pointer_type (type);
949 else if ((mask & 3) == 1 && is_reference (var))
950 type = TREE_TYPE (type);
952 field = build_decl (DECL_SOURCE_LOCATION (var),
953 FIELD_DECL, DECL_NAME (var), type);
955 /* Remember what variable this field was created for. This does have a
956 side effect of making dwarf2out ignore this member, so for helpful
957 debugging we clear it later in delete_omp_context. */
958 DECL_ABSTRACT_ORIGIN (field) = var;
959 if (type == TREE_TYPE (var))
961 DECL_ALIGN (field) = DECL_ALIGN (var);
962 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
963 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
965 else
966 DECL_ALIGN (field) = TYPE_ALIGN (type);
968 if ((mask & 3) == 3)
970 insert_field_into_struct (ctx->record_type, field);
971 if (ctx->srecord_type)
973 sfield = build_decl (DECL_SOURCE_LOCATION (var),
974 FIELD_DECL, DECL_NAME (var), type);
975 DECL_ABSTRACT_ORIGIN (sfield) = var;
976 DECL_ALIGN (sfield) = DECL_ALIGN (field);
977 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
978 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
979 insert_field_into_struct (ctx->srecord_type, sfield);
982 else
984 if (ctx->srecord_type == NULL_TREE)
986 tree t;
988 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
989 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
990 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
992 sfield = build_decl (DECL_SOURCE_LOCATION (var),
993 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
994 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
995 insert_field_into_struct (ctx->srecord_type, sfield);
996 splay_tree_insert (ctx->sfield_map,
997 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
998 (splay_tree_value) sfield);
1001 sfield = field;
1002 insert_field_into_struct ((mask & 1) ? ctx->record_type
1003 : ctx->srecord_type, field);
1006 if (mask & 1)
1007 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
1008 (splay_tree_value) field);
1009 if ((mask & 2) && ctx->sfield_map)
1010 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
1011 (splay_tree_value) sfield);
1014 static tree
1015 install_var_local (tree var, omp_context *ctx)
1017 tree new_var = omp_copy_decl_1 (var, ctx);
1018 insert_decl_map (&ctx->cb, var, new_var);
1019 return new_var;
1022 /* Adjust the replacement for DECL in CTX for the new context. This means
1023 copying the DECL_VALUE_EXPR, and fixing up the type. */
1025 static void
1026 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1028 tree new_decl, size;
1030 new_decl = lookup_decl (decl, ctx);
1032 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1034 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1035 && DECL_HAS_VALUE_EXPR_P (decl))
1037 tree ve = DECL_VALUE_EXPR (decl);
1038 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1039 SET_DECL_VALUE_EXPR (new_decl, ve);
1040 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1043 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1045 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1046 if (size == error_mark_node)
1047 size = TYPE_SIZE (TREE_TYPE (new_decl));
1048 DECL_SIZE (new_decl) = size;
1050 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1051 if (size == error_mark_node)
1052 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1053 DECL_SIZE_UNIT (new_decl) = size;
1057 /* The callback for remap_decl. Search all containing contexts for a
1058 mapping of the variable; this avoids having to duplicate the splay
1059 tree ahead of time. We know a mapping doesn't already exist in the
1060 given context. Create new mappings to implement default semantics. */
1062 static tree
1063 omp_copy_decl (tree var, copy_body_data *cb)
1065 omp_context *ctx = (omp_context *) cb;
1066 tree new_var;
1068 if (TREE_CODE (var) == LABEL_DECL)
1070 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1071 DECL_CONTEXT (new_var) = current_function_decl;
1072 insert_decl_map (&ctx->cb, var, new_var);
1073 return new_var;
1076 while (!is_taskreg_ctx (ctx))
1078 ctx = ctx->outer;
1079 if (ctx == NULL)
1080 return var;
1081 new_var = maybe_lookup_decl (var, ctx);
1082 if (new_var)
1083 return new_var;
1086 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1087 return var;
1089 return error_mark_node;
1093 /* Return the parallel region associated with STMT. */
1095 /* Debugging dumps for parallel regions. */
1096 void dump_omp_region (FILE *, struct omp_region *, int);
1097 void debug_omp_region (struct omp_region *);
1098 void debug_all_omp_regions (void);
1100 /* Dump the parallel region tree rooted at REGION. */
1102 void
1103 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1105 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1106 gimple_code_name[region->type]);
1108 if (region->inner)
1109 dump_omp_region (file, region->inner, indent + 4);
1111 if (region->cont)
1113 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1114 region->cont->index);
1117 if (region->exit)
1118 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1119 region->exit->index);
1120 else
1121 fprintf (file, "%*s[no exit marker]\n", indent, "");
1123 if (region->next)
1124 dump_omp_region (file, region->next, indent);
1127 DEBUG_FUNCTION void
1128 debug_omp_region (struct omp_region *region)
1130 dump_omp_region (stderr, region, 0);
1133 DEBUG_FUNCTION void
1134 debug_all_omp_regions (void)
1136 dump_omp_region (stderr, root_omp_region, 0);
1140 /* Create a new parallel region starting at STMT inside region PARENT. */
1142 struct omp_region *
1143 new_omp_region (basic_block bb, enum gimple_code type,
1144 struct omp_region *parent)
1146 struct omp_region *region = XCNEW (struct omp_region);
1148 region->outer = parent;
1149 region->entry = bb;
1150 region->type = type;
1152 if (parent)
1154 /* This is a nested region. Add it to the list of inner
1155 regions in PARENT. */
1156 region->next = parent->inner;
1157 parent->inner = region;
1159 else
1161 /* This is a toplevel region. Add it to the list of toplevel
1162 regions in ROOT_OMP_REGION. */
1163 region->next = root_omp_region;
1164 root_omp_region = region;
1167 return region;
1170 /* Release the memory associated with the region tree rooted at REGION. */
1172 static void
1173 free_omp_region_1 (struct omp_region *region)
1175 struct omp_region *i, *n;
1177 for (i = region->inner; i ; i = n)
1179 n = i->next;
1180 free_omp_region_1 (i);
1183 free (region);
1186 /* Release the memory for the entire omp region tree. */
1188 void
1189 free_omp_regions (void)
1191 struct omp_region *r, *n;
1192 for (r = root_omp_region; r ; r = n)
1194 n = r->next;
1195 free_omp_region_1 (r);
1197 root_omp_region = NULL;
1201 /* Create a new context, with OUTER_CTX being the surrounding context. */
1203 static omp_context *
1204 new_omp_context (gimple stmt, omp_context *outer_ctx)
1206 omp_context *ctx = XCNEW (omp_context);
1208 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1209 (splay_tree_value) ctx);
1210 ctx->stmt = stmt;
1212 if (outer_ctx)
1214 ctx->outer = outer_ctx;
1215 ctx->cb = outer_ctx->cb;
1216 ctx->cb.block = NULL;
1217 ctx->depth = outer_ctx->depth + 1;
1219 else
1221 ctx->cb.src_fn = current_function_decl;
1222 ctx->cb.dst_fn = current_function_decl;
1223 ctx->cb.src_node = cgraph_get_node (current_function_decl);
1224 gcc_checking_assert (ctx->cb.src_node);
1225 ctx->cb.dst_node = ctx->cb.src_node;
1226 ctx->cb.src_cfun = cfun;
1227 ctx->cb.copy_decl = omp_copy_decl;
1228 ctx->cb.eh_lp_nr = 0;
1229 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1230 ctx->depth = 1;
1233 ctx->cb.decl_map = pointer_map_create ();
1235 return ctx;
1238 static gimple_seq maybe_catch_exception (gimple_seq);
1240 /* Finalize task copyfn. */
1242 static void
1243 finalize_task_copyfn (gimple task_stmt)
1245 struct function *child_cfun;
1246 tree child_fn;
1247 gimple_seq seq = NULL, new_seq;
1248 gimple bind;
1250 child_fn = gimple_omp_task_copy_fn (task_stmt);
1251 if (child_fn == NULL_TREE)
1252 return;
1254 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1256 /* Inform the callgraph about the new function. */
1257 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1258 = cfun->curr_properties & ~PROP_loops;
1260 push_cfun (child_cfun);
1261 bind = gimplify_body (child_fn, false);
1262 gimple_seq_add_stmt (&seq, bind);
1263 new_seq = maybe_catch_exception (seq);
1264 if (new_seq != seq)
1266 bind = gimple_build_bind (NULL, new_seq, NULL);
1267 seq = NULL;
1268 gimple_seq_add_stmt (&seq, bind);
1270 gimple_set_body (child_fn, seq);
1271 pop_cfun ();
1273 cgraph_add_new_function (child_fn, false);
1276 /* Destroy an omp_context data structure.  Called through the splay tree
1277    value delete callback.  */
1279 static void
1280 delete_omp_context (splay_tree_value value)
1282 omp_context *ctx = (omp_context *) value;
1284 pointer_map_destroy (ctx->cb.decl_map);
1286 if (ctx->field_map)
1287 splay_tree_delete (ctx->field_map);
1288 if (ctx->sfield_map)
1289 splay_tree_delete (ctx->sfield_map);
1291 /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it again,
1292    otherwise it would produce corrupt debug information.  */
1293 if (ctx->record_type)
1295 tree t;
1296 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1297 DECL_ABSTRACT_ORIGIN (t) = NULL;
1299 if (ctx->srecord_type)
1301 tree t;
1302 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1303 DECL_ABSTRACT_ORIGIN (t) = NULL;
1306 if (is_task_ctx (ctx))
1307 finalize_task_copyfn (ctx->stmt);
1309 XDELETE (ctx);
1312 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1313 context. */
1315 static void
1316 fixup_child_record_type (omp_context *ctx)
1318 tree f, type = ctx->record_type;
1320 /* ??? It isn't sufficient to just call remap_type here, because
1321 variably_modified_type_p doesn't work the way we expect for
1322 record types. Testing each field for whether it needs remapping
1323 and creating a new record by hand works, however. */
1324 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1325 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1326 break;
1327 if (f)
1329 tree name, new_fields = NULL;
1331 type = lang_hooks.types.make_type (RECORD_TYPE);
1332 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1333 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1334 TYPE_DECL, name, type);
1335 TYPE_NAME (type) = name;
1337 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1339 tree new_f = copy_node (f);
1340 DECL_CONTEXT (new_f) = type;
1341 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1342 DECL_CHAIN (new_f) = new_fields;
1343 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1344 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1345 &ctx->cb, NULL);
1346 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1347 &ctx->cb, NULL);
1348 new_fields = new_f;
1350 /* Arrange to be able to look up the receiver field
1351 given the sender field. */
1352 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1353 (splay_tree_value) new_f);
1355 TYPE_FIELDS (type) = nreverse (new_fields);
1356 layout_type (type);
1359 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1362 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1363 specified by CLAUSES. */
1365 static void
1366 scan_sharing_clauses (tree clauses, omp_context *ctx)
1368 tree c, decl;
1369 bool scan_array_reductions = false;
1371 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1373 bool by_ref;
1375 switch (OMP_CLAUSE_CODE (c))
1377 case OMP_CLAUSE_PRIVATE:
1378 decl = OMP_CLAUSE_DECL (c);
1379 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1380 goto do_private;
1381 else if (!is_variable_sized (decl))
1382 install_var_local (decl, ctx);
1383 break;
1385 case OMP_CLAUSE_SHARED:
1386 gcc_assert (is_taskreg_ctx (ctx));
1387 decl = OMP_CLAUSE_DECL (c);
1388 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1389 || !is_variable_sized (decl));
1390 /* Global variables don't need to be copied,
1391 the receiver side will use them directly. */
1392 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1393 break;
1394 by_ref = use_pointer_for_field (decl, ctx);
1395 if (! TREE_READONLY (decl)
1396 || TREE_ADDRESSABLE (decl)
1397 || by_ref
1398 || is_reference (decl))
1400 install_var_field (decl, by_ref, 3, ctx);
1401 install_var_local (decl, ctx);
1402 break;
1404 /* We don't need to copy const scalar vars back. */
1405 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1406 goto do_private;
1408 case OMP_CLAUSE_LASTPRIVATE:
1409 /* Let the corresponding firstprivate clause create
1410 the variable. */
1411 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1412 break;
1413 /* FALLTHRU */
1415 case OMP_CLAUSE_FIRSTPRIVATE:
1416 case OMP_CLAUSE_REDUCTION:
1417 decl = OMP_CLAUSE_DECL (c);
1418 do_private:
1419 if (is_variable_sized (decl))
1421 if (is_task_ctx (ctx))
1422 install_var_field (decl, false, 1, ctx);
1423 break;
1425 else if (is_taskreg_ctx (ctx))
1427 bool global
1428 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1429 by_ref = use_pointer_for_field (decl, NULL);
1431 if (is_task_ctx (ctx)
1432 && (global || by_ref || is_reference (decl)))
1434 install_var_field (decl, false, 1, ctx);
1435 if (!global)
1436 install_var_field (decl, by_ref, 2, ctx);
1438 else if (!global)
1439 install_var_field (decl, by_ref, 3, ctx);
1441 install_var_local (decl, ctx);
1442 break;
1444 case OMP_CLAUSE_COPYPRIVATE:
1445 case OMP_CLAUSE_COPYIN:
1446 decl = OMP_CLAUSE_DECL (c);
1447 by_ref = use_pointer_for_field (decl, NULL);
1448 install_var_field (decl, by_ref, 3, ctx);
1449 break;
1451 case OMP_CLAUSE_DEFAULT:
1452 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1453 break;
1455 case OMP_CLAUSE_FINAL:
1456 case OMP_CLAUSE_IF:
1457 case OMP_CLAUSE_NUM_THREADS:
1458 case OMP_CLAUSE_SCHEDULE:
1459 if (ctx->outer)
1460 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1461 break;
1463 case OMP_CLAUSE_NOWAIT:
1464 case OMP_CLAUSE_ORDERED:
1465 case OMP_CLAUSE_COLLAPSE:
1466 case OMP_CLAUSE_UNTIED:
1467 case OMP_CLAUSE_MERGEABLE:
1468 break;
1470 default:
1471 gcc_unreachable ();
1475 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1477 switch (OMP_CLAUSE_CODE (c))
1479 case OMP_CLAUSE_LASTPRIVATE:
1480 /* Let the corresponding firstprivate clause create
1481 the variable. */
1482 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1483 scan_array_reductions = true;
1484 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1485 break;
1486 /* FALLTHRU */
1488 case OMP_CLAUSE_PRIVATE:
1489 case OMP_CLAUSE_FIRSTPRIVATE:
1490 case OMP_CLAUSE_REDUCTION:
1491 decl = OMP_CLAUSE_DECL (c);
1492 if (is_variable_sized (decl))
1493 install_var_local (decl, ctx);
1494 fixup_remapped_decl (decl, ctx,
1495 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1496 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1497 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1498 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1499 scan_array_reductions = true;
1500 break;
1502 case OMP_CLAUSE_SHARED:
1503 decl = OMP_CLAUSE_DECL (c);
1504 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1505 fixup_remapped_decl (decl, ctx, false);
1506 break;
1508 case OMP_CLAUSE_COPYPRIVATE:
1509 case OMP_CLAUSE_COPYIN:
1510 case OMP_CLAUSE_DEFAULT:
1511 case OMP_CLAUSE_IF:
1512 case OMP_CLAUSE_NUM_THREADS:
1513 case OMP_CLAUSE_SCHEDULE:
1514 case OMP_CLAUSE_NOWAIT:
1515 case OMP_CLAUSE_ORDERED:
1516 case OMP_CLAUSE_COLLAPSE:
1517 case OMP_CLAUSE_UNTIED:
1518 case OMP_CLAUSE_FINAL:
1519 case OMP_CLAUSE_MERGEABLE:
1520 break;
1522 default:
1523 gcc_unreachable ();
1527 if (scan_array_reductions)
1528 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1529 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1530 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1532 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1533 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1535 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1536 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1537 scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1540 /* Create a new name for the omp child function.  Returns an identifier.  */
1542 static GTY(()) unsigned int tmp_ompfn_id_num;
1544 static tree
1545 create_omp_child_function_name (bool task_copy)
1547 return (clone_function_name (current_function_decl,
1548 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1551 /* Build a decl for the omp child function. It'll not contain a body
1552 yet, just the bare decl. */
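/* Illustration (informal): for a parallel region in function foo, the decl
   built here corresponds roughly to

       static void foo._omp_fn.0 (void *.omp_data_i);

   and, for a task copy function,

       static void foo._omp_cpyfn.1 (void *.omp_data_o, void *.omp_data_i);

   where the numeric suffix comes from clone_function_name.  */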
1554 static void
1555 create_omp_child_function (omp_context *ctx, bool task_copy)
1557 tree decl, type, name, t;
1559 name = create_omp_child_function_name (task_copy);
1560 if (task_copy)
1561 type = build_function_type_list (void_type_node, ptr_type_node,
1562 ptr_type_node, NULL_TREE);
1563 else
1564 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1566 decl = build_decl (gimple_location (ctx->stmt),
1567 FUNCTION_DECL, name, type);
1569 if (!task_copy)
1570 ctx->cb.dst_fn = decl;
1571 else
1572 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1574 TREE_STATIC (decl) = 1;
1575 TREE_USED (decl) = 1;
1576 DECL_ARTIFICIAL (decl) = 1;
1577 DECL_NAMELESS (decl) = 1;
1578 DECL_IGNORED_P (decl) = 0;
1579 TREE_PUBLIC (decl) = 0;
1580 DECL_UNINLINABLE (decl) = 1;
1581 DECL_EXTERNAL (decl) = 0;
1582 DECL_CONTEXT (decl) = NULL_TREE;
1583 DECL_INITIAL (decl) = make_node (BLOCK);
1585 t = build_decl (DECL_SOURCE_LOCATION (decl),
1586 RESULT_DECL, NULL_TREE, void_type_node);
1587 DECL_ARTIFICIAL (t) = 1;
1588 DECL_IGNORED_P (t) = 1;
1589 DECL_CONTEXT (t) = decl;
1590 DECL_RESULT (decl) = t;
1592 t = build_decl (DECL_SOURCE_LOCATION (decl),
1593 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1594 DECL_ARTIFICIAL (t) = 1;
1595 DECL_NAMELESS (t) = 1;
1596 DECL_ARG_TYPE (t) = ptr_type_node;
1597 DECL_CONTEXT (t) = current_function_decl;
1598 TREE_USED (t) = 1;
1599 DECL_ARGUMENTS (decl) = t;
1600 if (!task_copy)
1601 ctx->receiver_decl = t;
1602 else
1604 t = build_decl (DECL_SOURCE_LOCATION (decl),
1605 PARM_DECL, get_identifier (".omp_data_o"),
1606 ptr_type_node);
1607 DECL_ARTIFICIAL (t) = 1;
1608 DECL_NAMELESS (t) = 1;
1609 DECL_ARG_TYPE (t) = ptr_type_node;
1610 DECL_CONTEXT (t) = current_function_decl;
1611 TREE_USED (t) = 1;
1612 TREE_ADDRESSABLE (t) = 1;
1613 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1614 DECL_ARGUMENTS (decl) = t;
1617 /* Allocate memory for the function structure. The call to
1618 allocate_struct_function clobbers CFUN, so we need to restore
1619 it afterward. */
1620 push_struct_function (decl);
1621 cfun->function_end_locus = gimple_location (ctx->stmt);
1622 pop_cfun ();
1626 /* Scan an OpenMP parallel directive. */
1628 static void
1629 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1631 omp_context *ctx;
1632 tree name;
1633 gimple stmt = gsi_stmt (*gsi);
1635 /* Ignore parallel directives with empty bodies, unless there
1636 are copyin clauses. */
1637 if (optimize > 0
1638 && empty_body_p (gimple_omp_body (stmt))
1639 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1640 OMP_CLAUSE_COPYIN) == NULL)
1642 gsi_replace (gsi, gimple_build_nop (), false);
1643 return;
1646 ctx = new_omp_context (stmt, outer_ctx);
1647 if (taskreg_nesting_level > 1)
1648 ctx->is_nested = true;
1649 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1650 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1651 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1652 name = create_tmp_var_name (".omp_data_s");
1653 name = build_decl (gimple_location (stmt),
1654 TYPE_DECL, name, ctx->record_type);
1655 DECL_ARTIFICIAL (name) = 1;
1656 DECL_NAMELESS (name) = 1;
1657 TYPE_NAME (ctx->record_type) = name;
1658 create_omp_child_function (ctx, false);
1659 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1661 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1662 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1664 if (TYPE_FIELDS (ctx->record_type) == NULL)
1665 ctx->record_type = ctx->receiver_decl = NULL;
1666 else
1668 layout_type (ctx->record_type);
1669 fixup_child_record_type (ctx);
1673 /* Scan an OpenMP task directive. */
1675 static void
1676 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1678 omp_context *ctx;
1679 tree name, t;
1680 gimple stmt = gsi_stmt (*gsi);
1681 location_t loc = gimple_location (stmt);
1683 /* Ignore task directives with empty bodies. */
1684 if (optimize > 0
1685 && empty_body_p (gimple_omp_body (stmt)))
1687 gsi_replace (gsi, gimple_build_nop (), false);
1688 return;
1691 ctx = new_omp_context (stmt, outer_ctx);
1692 if (taskreg_nesting_level > 1)
1693 ctx->is_nested = true;
1694 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1695 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1696 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1697 name = create_tmp_var_name (".omp_data_s");
1698 name = build_decl (gimple_location (stmt),
1699 TYPE_DECL, name, ctx->record_type);
1700 DECL_ARTIFICIAL (name) = 1;
1701 DECL_NAMELESS (name) = 1;
1702 TYPE_NAME (ctx->record_type) = name;
1703 create_omp_child_function (ctx, false);
1704 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1706 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1708 if (ctx->srecord_type)
1710 name = create_tmp_var_name (".omp_data_a");
1711 name = build_decl (gimple_location (stmt),
1712 TYPE_DECL, name, ctx->srecord_type);
1713 DECL_ARTIFICIAL (name) = 1;
1714 DECL_NAMELESS (name) = 1;
1715 TYPE_NAME (ctx->srecord_type) = name;
1716 create_omp_child_function (ctx, true);
1719 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1721 if (TYPE_FIELDS (ctx->record_type) == NULL)
1723 ctx->record_type = ctx->receiver_decl = NULL;
1724 t = build_int_cst (long_integer_type_node, 0);
1725 gimple_omp_task_set_arg_size (stmt, t);
1726 t = build_int_cst (long_integer_type_node, 1);
1727 gimple_omp_task_set_arg_align (stmt, t);
1729 else
1731 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1732 /* Move VLA fields to the end. */
1733 p = &TYPE_FIELDS (ctx->record_type);
1734 while (*p)
1735 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1736 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1738 *q = *p;
1739 *p = TREE_CHAIN (*p);
1740 TREE_CHAIN (*q) = NULL_TREE;
1741 q = &TREE_CHAIN (*q);
1743 else
1744 p = &DECL_CHAIN (*p);
1745 *p = vla_fields;
1746 layout_type (ctx->record_type);
1747 fixup_child_record_type (ctx);
1748 if (ctx->srecord_type)
1749 layout_type (ctx->srecord_type);
1750 t = fold_convert_loc (loc, long_integer_type_node,
1751 TYPE_SIZE_UNIT (ctx->record_type));
1752 gimple_omp_task_set_arg_size (stmt, t);
1753 t = build_int_cst (long_integer_type_node,
1754 TYPE_ALIGN_UNIT (ctx->record_type));
1755 gimple_omp_task_set_arg_align (stmt, t);
1760 /* Scan an OpenMP loop directive. */
1762 static void
1763 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1765 omp_context *ctx;
1766 size_t i;
1768 ctx = new_omp_context (stmt, outer_ctx);
1770 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1772 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
1773 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1775 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1776 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1777 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1778 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1780 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1783 /* Scan an OpenMP sections directive. */
1785 static void
1786 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1788 omp_context *ctx;
1790 ctx = new_omp_context (stmt, outer_ctx);
1791 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1792 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1795 /* Scan an OpenMP single directive. */
1797 static void
1798 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1800 omp_context *ctx;
1801 tree name;
1803 ctx = new_omp_context (stmt, outer_ctx);
1804 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1805 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1806 name = create_tmp_var_name (".omp_copy_s");
1807 name = build_decl (gimple_location (stmt),
1808 TYPE_DECL, name, ctx->record_type);
1809 TYPE_NAME (ctx->record_type) = name;
1811 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1812 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1814 if (TYPE_FIELDS (ctx->record_type) == NULL)
1815 ctx->record_type = NULL;
1816 else
1817 layout_type (ctx->record_type);
1821 /* Check OpenMP nesting restrictions. */
1822 static bool
1823 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1825 switch (gimple_code (stmt))
1827 case GIMPLE_OMP_FOR:
1828 case GIMPLE_OMP_SECTIONS:
1829 case GIMPLE_OMP_SINGLE:
1830 case GIMPLE_CALL:
1831 for (; ctx != NULL; ctx = ctx->outer)
1832 switch (gimple_code (ctx->stmt))
1834 case GIMPLE_OMP_FOR:
1835 case GIMPLE_OMP_SECTIONS:
1836 case GIMPLE_OMP_SINGLE:
1837 case GIMPLE_OMP_ORDERED:
1838 case GIMPLE_OMP_MASTER:
1839 case GIMPLE_OMP_TASK:
1840 if (is_gimple_call (stmt))
1842 error_at (gimple_location (stmt),
1843 "barrier region may not be closely nested inside "
1844 "of work-sharing, critical, ordered, master or "
1845 "explicit task region");
1846 return false;
1848 error_at (gimple_location (stmt),
1849 "work-sharing region may not be closely nested inside "
1850 "of work-sharing, critical, ordered, master or explicit "
1851 "task region");
1852 return false;
1853 case GIMPLE_OMP_PARALLEL:
1854 return true;
1855 default:
1856 break;
1858 break;
1859 case GIMPLE_OMP_MASTER:
1860 for (; ctx != NULL; ctx = ctx->outer)
1861 switch (gimple_code (ctx->stmt))
1863 case GIMPLE_OMP_FOR:
1864 case GIMPLE_OMP_SECTIONS:
1865 case GIMPLE_OMP_SINGLE:
1866 case GIMPLE_OMP_TASK:
1867 error_at (gimple_location (stmt),
1868 "master region may not be closely nested inside "
1869 "of work-sharing or explicit task region");
1870 return false;
1871 case GIMPLE_OMP_PARALLEL:
1872 return true;
1873 default:
1874 break;
1876 break;
1877 case GIMPLE_OMP_ORDERED:
1878 for (; ctx != NULL; ctx = ctx->outer)
1879 switch (gimple_code (ctx->stmt))
1881 case GIMPLE_OMP_CRITICAL:
1882 case GIMPLE_OMP_TASK:
1883 error_at (gimple_location (stmt),
1884 "ordered region may not be closely nested inside "
1885 "of critical or explicit task region");
1886 return false;
1887 case GIMPLE_OMP_FOR:
1888 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1889 OMP_CLAUSE_ORDERED) == NULL)
1891 error_at (gimple_location (stmt),
1892 "ordered region must be closely nested inside "
1893 "a loop region with an ordered clause");
1894 return false;
1896 return true;
1897 case GIMPLE_OMP_PARALLEL:
1898 return true;
1899 default:
1900 break;
1902 break;
1903 case GIMPLE_OMP_CRITICAL:
1904 for (; ctx != NULL; ctx = ctx->outer)
1905 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1906 && (gimple_omp_critical_name (stmt)
1907 == gimple_omp_critical_name (ctx->stmt)))
1909 error_at (gimple_location (stmt),
1910 "critical region may not be nested inside a critical "
1911 "region with the same name");
1912 return false;
1914 break;
1915 default:
1916 break;
1918 return true;
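/* For instance (illustrative only), the checks above reject a work-sharing
   construct closely nested inside another work-sharing region, e.g. a
   "#pragma omp single" placed directly in the body of a "#pragma omp for"
   loop, while the same single inside an inner "#pragma omp parallel" is
   accepted because the walk stops at the enclosing GIMPLE_OMP_PARALLEL.  */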
1922 /* Helper function for scan_omp.
1924 Callback for walk_tree or operators in walk_gimple_stmt used to
1925 scan for OpenMP directives in TP. */
1927 static tree
1928 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1930 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1931 omp_context *ctx = (omp_context *) wi->info;
1932 tree t = *tp;
1934 switch (TREE_CODE (t))
1936 case VAR_DECL:
1937 case PARM_DECL:
1938 case LABEL_DECL:
1939 case RESULT_DECL:
1940 if (ctx)
1941 *tp = remap_decl (t, &ctx->cb);
1942 break;
1944 default:
1945 if (ctx && TYPE_P (t))
1946 *tp = remap_type (t, &ctx->cb);
1947 else if (!DECL_P (t))
1949 *walk_subtrees = 1;
1950 if (ctx)
1952 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1953 if (tem != TREE_TYPE (t))
1955 if (TREE_CODE (t) == INTEGER_CST)
1956 *tp = build_int_cst_wide (tem,
1957 TREE_INT_CST_LOW (t),
1958 TREE_INT_CST_HIGH (t));
1959 else
1960 TREE_TYPE (t) = tem;
1964 break;
1967 return NULL_TREE;
1971 /* Helper function for scan_omp.
1973 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1974 the current statement in GSI. */
1976 static tree
1977 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1978 struct walk_stmt_info *wi)
1980 gimple stmt = gsi_stmt (*gsi);
1981 omp_context *ctx = (omp_context *) wi->info;
1983 if (gimple_has_location (stmt))
1984 input_location = gimple_location (stmt);
1986 /* Check the OpenMP nesting restrictions. */
1987 if (ctx != NULL)
1989 bool remove = false;
1990 if (is_gimple_omp (stmt))
1991 remove = !check_omp_nesting_restrictions (stmt, ctx);
1992 else if (is_gimple_call (stmt))
1994 tree fndecl = gimple_call_fndecl (stmt);
1995 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1996 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1997 remove = !check_omp_nesting_restrictions (stmt, ctx);
1999 if (remove)
2001 stmt = gimple_build_nop ();
2002 gsi_replace (gsi, stmt, false);
2006 *handled_ops_p = true;
2008 switch (gimple_code (stmt))
2010 case GIMPLE_OMP_PARALLEL:
2011 taskreg_nesting_level++;
2012 scan_omp_parallel (gsi, ctx);
2013 taskreg_nesting_level--;
2014 break;
2016 case GIMPLE_OMP_TASK:
2017 taskreg_nesting_level++;
2018 scan_omp_task (gsi, ctx);
2019 taskreg_nesting_level--;
2020 break;
2022 case GIMPLE_OMP_FOR:
2023 scan_omp_for (stmt, ctx);
2024 break;
2026 case GIMPLE_OMP_SECTIONS:
2027 scan_omp_sections (stmt, ctx);
2028 break;
2030 case GIMPLE_OMP_SINGLE:
2031 scan_omp_single (stmt, ctx);
2032 break;
2034 case GIMPLE_OMP_SECTION:
2035 case GIMPLE_OMP_MASTER:
2036 case GIMPLE_OMP_ORDERED:
2037 case GIMPLE_OMP_CRITICAL:
2038 ctx = new_omp_context (stmt, ctx);
2039 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2040 break;
2042 case GIMPLE_BIND:
2044 tree var;
2046 *handled_ops_p = false;
2047 if (ctx)
2048 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2049 insert_decl_map (&ctx->cb, var, var);
2051 break;
2052 default:
2053 *handled_ops_p = false;
2054 break;
2057 return NULL_TREE;
2061 /* Scan all the statements starting at the current statement. CTX
2062 contains context information about the OpenMP directives and
2063 clauses found during the scan. */
2065 static void
2066 scan_omp (gimple_seq *body_p, omp_context *ctx)
2068 location_t saved_location;
2069 struct walk_stmt_info wi;
2071 memset (&wi, 0, sizeof (wi));
2072 wi.info = ctx;
2073 wi.want_locations = true;
2075 saved_location = input_location;
2076 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
2077 input_location = saved_location;
2080 /* Re-gimplification and code generation routines. */
2082 /* Build a call to GOMP_barrier. */
2084 static tree
2085 build_omp_barrier (void)
2087 return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
2090 /* If a context was created for STMT when it was scanned, return it. */
2092 static omp_context *
2093 maybe_lookup_ctx (gimple stmt)
2095 splay_tree_node n;
2096 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2097 return n ? (omp_context *) n->value : NULL;
2101 /* Find the mapping for DECL in CTX or the immediately enclosing
2102 context that has a mapping for DECL.
2104 If CTX is a nested parallel directive, we may have to use the decl
2105 mappings created in CTX's parent context. Suppose that we have the
2106 following parallel nesting (variable UIDs shown for clarity):
2108 iD.1562 = 0;
2109 #omp parallel shared(iD.1562) -> outer parallel
2110 iD.1562 = iD.1562 + 1;
2112 #omp parallel shared (iD.1562) -> inner parallel
2113 iD.1562 = iD.1562 - 1;
2115 Each parallel structure will create a distinct .omp_data_s structure
2116 for copying iD.1562 in/out of the directive:
2118 outer parallel .omp_data_s.1.i -> iD.1562
2119 inner parallel .omp_data_s.2.i -> iD.1562
2121 A shared variable mapping will produce a copy-out operation before
2122 the parallel directive and a copy-in operation after it. So, in
2123 this case we would have:
2125 iD.1562 = 0;
2126 .omp_data_o.1.i = iD.1562;
2127 #omp parallel shared(iD.1562) -> outer parallel
2128 .omp_data_i.1 = &.omp_data_o.1
2129 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2131 .omp_data_o.2.i = iD.1562; -> **
2132 #omp parallel shared(iD.1562) -> inner parallel
2133 .omp_data_i.2 = &.omp_data_o.2
2134 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2137 ** This is a problem. The symbol iD.1562 cannot be referenced
2138 inside the body of the outer parallel region. But since we are
2139 emitting this copy operation while expanding the inner parallel
2140 directive, we need to access the CTX structure of the outer
2141 parallel directive to get the correct mapping:
2143 .omp_data_o.2.i = .omp_data_i.1->i
2145 Since there may be other workshare or parallel directives enclosing
2146 the parallel directive, it may be necessary to walk up the context
2147 parent chain. This is not a problem in general because nested
2148 parallelism happens only rarely. */
2150 static tree
2151 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2153 tree t;
2154 omp_context *up;
2156 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2157 t = maybe_lookup_decl (decl, up);
2159 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2161 return t ? t : decl;
2165 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2166 in outer contexts. */
2168 static tree
2169 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2171 tree t = NULL;
2172 omp_context *up;
2174 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2175 t = maybe_lookup_decl (decl, up);
2177 return t ? t : decl;
2181 /* Construct the initialization value for reduction CLAUSE. */
2183 tree
2184 omp_reduction_init (tree clause, tree type)
2186 location_t loc = OMP_CLAUSE_LOCATION (clause);
2187 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2189 case PLUS_EXPR:
2190 case MINUS_EXPR:
2191 case BIT_IOR_EXPR:
2192 case BIT_XOR_EXPR:
2193 case TRUTH_OR_EXPR:
2194 case TRUTH_ORIF_EXPR:
2195 case TRUTH_XOR_EXPR:
2196 case NE_EXPR:
2197 return build_zero_cst (type);
2199 case MULT_EXPR:
2200 case TRUTH_AND_EXPR:
2201 case TRUTH_ANDIF_EXPR:
2202 case EQ_EXPR:
2203 return fold_convert_loc (loc, type, integer_one_node);
2205 case BIT_AND_EXPR:
2206 return fold_convert_loc (loc, type, integer_minus_one_node);
2208 case MAX_EXPR:
2209 if (SCALAR_FLOAT_TYPE_P (type))
2211 REAL_VALUE_TYPE max, min;
2212 if (HONOR_INFINITIES (TYPE_MODE (type)))
2214 real_inf (&max);
2215 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2217 else
2218 real_maxval (&min, 1, TYPE_MODE (type));
2219 return build_real (type, min);
2221 else
2223 gcc_assert (INTEGRAL_TYPE_P (type));
2224 return TYPE_MIN_VALUE (type);
2227 case MIN_EXPR:
2228 if (SCALAR_FLOAT_TYPE_P (type))
2230 REAL_VALUE_TYPE max;
2231 if (HONOR_INFINITIES (TYPE_MODE (type)))
2232 real_inf (&max);
2233 else
2234 real_maxval (&max, 0, TYPE_MODE (type));
2235 return build_real (type, max);
2237 else
2239 gcc_assert (INTEGRAL_TYPE_P (type));
2240 return TYPE_MAX_VALUE (type);
2243 default:
2244 gcc_unreachable ();
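/* For illustration (editor's sketch): given

       #pragma omp parallel for reduction (+:sum) reduction (max:m)

   each thread's private SUM is initialized to 0 and its private M to the
   minimum of the type (TYPE_MIN_VALUE for integers, -Inf or the most
   negative finite value for floats, depending on HONOR_INFINITIES), i.e.
   to the identity element of the reduction operator, so that merging the
   partial results later is not perturbed by the initial value.  */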
2248 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2249 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2250 private variables. Initialization statements go in ILIST, while calls
2251 to destructors go in DLIST. */
2253 static void
2254 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2255 omp_context *ctx)
2257 tree c, dtor, copyin_seq, x, ptr;
2258 bool copyin_by_ref = false;
2259 bool lastprivate_firstprivate = false;
2260 int pass;
2262 copyin_seq = NULL;
2264 /* Do all the fixed sized types in the first pass, and the variable sized
2265 types in the second pass. This makes sure that the scalar arguments to
2266 the variable sized types are processed before we use them in the
2267 variable sized operations. */
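/* A hedged example of why two passes are needed (editor's illustration):
   for a privatized variable-length array whose type is int[n], the saved
   length is an ordinary fixed-size scalar and is handled in pass 0, so it
   is already available when the alloca for the array itself is emitted in
   pass 1 below.  The exact temporaries involved depend on how the front
   end lowered the VLA type.  */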
2268 for (pass = 0; pass < 2; ++pass)
2270 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2272 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2273 tree var, new_var;
2274 bool by_ref;
2275 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2277 switch (c_kind)
2279 case OMP_CLAUSE_PRIVATE:
2280 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2281 continue;
2282 break;
2283 case OMP_CLAUSE_SHARED:
2284 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2286 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2287 continue;
2289 case OMP_CLAUSE_FIRSTPRIVATE:
2290 case OMP_CLAUSE_COPYIN:
2291 case OMP_CLAUSE_REDUCTION:
2292 break;
2293 case OMP_CLAUSE_LASTPRIVATE:
2294 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2296 lastprivate_firstprivate = true;
2297 if (pass != 0)
2298 continue;
2300 break;
2301 default:
2302 continue;
2305 new_var = var = OMP_CLAUSE_DECL (c);
2306 if (c_kind != OMP_CLAUSE_COPYIN)
2307 new_var = lookup_decl (var, ctx);
2309 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2311 if (pass != 0)
2312 continue;
2314 else if (is_variable_sized (var))
2316 /* For variable sized types, we need to allocate the
2317 actual storage here. Call alloca and store the
2318 result in the pointer decl that we created elsewhere. */
2319 if (pass == 0)
2320 continue;
2322 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2324 gimple stmt;
2325 tree tmp, atmp;
2327 ptr = DECL_VALUE_EXPR (new_var);
2328 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2329 ptr = TREE_OPERAND (ptr, 0);
2330 gcc_assert (DECL_P (ptr));
2331 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2333 /* void *tmp = __builtin_alloca */
2334 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2335 stmt = gimple_build_call (atmp, 1, x);
2336 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2337 gimple_add_tmp_var (tmp);
2338 gimple_call_set_lhs (stmt, tmp);
2340 gimple_seq_add_stmt (ilist, stmt);
2342 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2343 gimplify_assign (ptr, x, ilist);
2346 else if (is_reference (var))
2348 /* For references that are being privatized for Fortran,
2349 allocate new backing storage for the new pointer
2350 variable. This allows us to avoid changing all the
2351 code that expects a pointer to something that expects
2352 a direct variable. Note that this doesn't apply to
2353 C++, since reference types are disallowed in data
2354 sharing clauses there, except for NRV optimized
2355 return values. */
2356 if (pass == 0)
2357 continue;
2359 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2360 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2362 x = build_receiver_ref (var, false, ctx);
2363 x = build_fold_addr_expr_loc (clause_loc, x);
2365 else if (TREE_CONSTANT (x))
2367 const char *name = NULL;
2368 if (DECL_NAME (var))
2369 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2371 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2372 name);
2373 gimple_add_tmp_var (x);
2374 TREE_ADDRESSABLE (x) = 1;
2375 x = build_fold_addr_expr_loc (clause_loc, x);
2377 else
2379 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2380 x = build_call_expr_loc (clause_loc, atmp, 1, x);
2383 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2384 gimplify_assign (new_var, x, ilist);
2386 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2388 else if (c_kind == OMP_CLAUSE_REDUCTION
2389 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2391 if (pass == 0)
2392 continue;
2394 else if (pass != 0)
2395 continue;
2397 switch (OMP_CLAUSE_CODE (c))
2399 case OMP_CLAUSE_SHARED:
2400 /* Shared global vars are just accessed directly. */
2401 if (is_global_var (new_var))
2402 break;
2403 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2404 needs to be delayed until after fixup_child_record_type so
2405 that we get the correct type during the dereference. */
2406 by_ref = use_pointer_for_field (var, ctx);
2407 x = build_receiver_ref (var, by_ref, ctx);
2408 SET_DECL_VALUE_EXPR (new_var, x);
2409 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2411 /* ??? If VAR is not passed by reference, and the variable
2412 hasn't been initialized yet, then we'll get a warning for
2413 the store into the omp_data_s structure. Ideally, we'd be
2414 able to notice this and not store anything at all, but
2415 we're generating code too early. Suppress the warning. */
2416 if (!by_ref)
2417 TREE_NO_WARNING (var) = 1;
2418 break;
2420 case OMP_CLAUSE_LASTPRIVATE:
2421 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2422 break;
2423 /* FALLTHRU */
2425 case OMP_CLAUSE_PRIVATE:
2426 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2427 x = build_outer_var_ref (var, ctx);
2428 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2430 if (is_task_ctx (ctx))
2431 x = build_receiver_ref (var, false, ctx);
2432 else
2433 x = build_outer_var_ref (var, ctx);
2435 else
2436 x = NULL;
2437 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2438 if (x)
2439 gimplify_and_add (x, ilist);
2440 /* FALLTHRU */
2442 do_dtor:
2443 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2444 if (x)
2446 gimple_seq tseq = NULL;
2448 dtor = x;
2449 gimplify_stmt (&dtor, &tseq);
2450 gimple_seq_add_seq (dlist, tseq);
2452 break;
2454 case OMP_CLAUSE_FIRSTPRIVATE:
2455 if (is_task_ctx (ctx))
2457 if (is_reference (var) || is_variable_sized (var))
2458 goto do_dtor;
2459 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2460 ctx))
2461 || use_pointer_for_field (var, NULL))
2463 x = build_receiver_ref (var, false, ctx);
2464 SET_DECL_VALUE_EXPR (new_var, x);
2465 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2466 goto do_dtor;
2469 x = build_outer_var_ref (var, ctx);
2470 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2471 gimplify_and_add (x, ilist);
2472 goto do_dtor;
2473 break;
2475 case OMP_CLAUSE_COPYIN:
2476 by_ref = use_pointer_for_field (var, NULL);
2477 x = build_receiver_ref (var, by_ref, ctx);
2478 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2479 append_to_statement_list (x, &copyin_seq);
2480 copyin_by_ref |= by_ref;
2481 break;
2483 case OMP_CLAUSE_REDUCTION:
2484 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2486 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2487 x = build_outer_var_ref (var, ctx);
2489 if (is_reference (var))
2490 x = build_fold_addr_expr_loc (clause_loc, x);
2491 SET_DECL_VALUE_EXPR (placeholder, x);
2492 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2493 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2494 gimple_seq_add_seq (ilist,
2495 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2496 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2497 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2499 else
2501 x = omp_reduction_init (c, TREE_TYPE (new_var));
2502 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2503 gimplify_assign (new_var, x, ilist);
2505 break;
2507 default:
2508 gcc_unreachable ();
2513 /* The copyin sequence is not to be executed by the main thread, since
2514 that would result in self-copies. That is perhaps not visible for scalars,
2515 but it certainly is for C++ operator=.  */
2516 if (copyin_seq)
2518 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
2519 0);
2520 x = build2 (NE_EXPR, boolean_type_node, x,
2521 build_int_cst (TREE_TYPE (x), 0));
2522 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2523 gimplify_and_add (x, ilist);
2526 /* If any copyin variable is passed by reference, we must ensure the
2527 master thread doesn't modify it before it is copied over in all
2528 threads. Similarly for variables in both firstprivate and
2529 lastprivate clauses we need to ensure the lastprivate copying
2530 happens after firstprivate copying in all threads. */
2531 if (copyin_by_ref || lastprivate_firstprivate)
2532 gimplify_and_add (build_omp_barrier (), ilist);
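/* Editor's sketch of why the trailing barrier matters: with

       #pragma omp parallel copyin (tp)

   where the threadprivate TP is passed by reference, every thread copies
   the master's TP in the copyin sequence above; the barrier keeps the
   master from running ahead and modifying TP before the other threads
   have finished copying it.  The same reasoning applies to a variable
   that is both firstprivate and lastprivate on a worksharing construct.  */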
2536 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2537 both parallel and workshare constructs. PREDICATE may be NULL if it's
2538 always true. */
2540 static void
2541 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2542 omp_context *ctx)
2544 tree x, c, label = NULL;
2545 bool par_clauses = false;
2547 /* Early exit if there are no lastprivate clauses. */
2548 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2549 if (clauses == NULL)
2551 /* If this was a workshare clause, see if it had been combined
2552 with its parallel. In that case, look for the clauses on the
2553 parallel statement itself. */
2554 if (is_parallel_ctx (ctx))
2555 return;
2557 ctx = ctx->outer;
2558 if (ctx == NULL || !is_parallel_ctx (ctx))
2559 return;
2561 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2562 OMP_CLAUSE_LASTPRIVATE);
2563 if (clauses == NULL)
2564 return;
2565 par_clauses = true;
2568 if (predicate)
2570 gimple stmt;
2571 tree label_true, arm1, arm2;
2573 label = create_artificial_label (UNKNOWN_LOCATION);
2574 label_true = create_artificial_label (UNKNOWN_LOCATION);
2575 arm1 = TREE_OPERAND (predicate, 0);
2576 arm2 = TREE_OPERAND (predicate, 1);
2577 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2578 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2579 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2580 label_true, label);
2581 gimple_seq_add_stmt (stmt_list, stmt);
2582 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2585 for (c = clauses; c ;)
2587 tree var, new_var;
2588 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2590 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2592 var = OMP_CLAUSE_DECL (c);
2593 new_var = lookup_decl (var, ctx);
2595 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2597 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2598 gimple_seq_add_seq (stmt_list,
2599 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2601 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2603 x = build_outer_var_ref (var, ctx);
2604 if (is_reference (var))
2605 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2606 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2607 gimplify_and_add (x, stmt_list);
2609 c = OMP_CLAUSE_CHAIN (c);
2610 if (c == NULL && !par_clauses)
2612 /* If this was a workshare clause, see if it had been combined
2613 with its parallel. In that case, continue looking for the
2614 clauses also on the parallel statement itself. */
2615 if (is_parallel_ctx (ctx))
2616 break;
2618 ctx = ctx->outer;
2619 if (ctx == NULL || !is_parallel_ctx (ctx))
2620 break;
2622 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2623 OMP_CLAUSE_LASTPRIVATE);
2624 par_clauses = true;
2628 if (label)
2629 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
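/* Illustrative sketch of the result (editor's example, names invented):
   for

       #pragma omp for lastprivate (x)

   the sequence built above is roughly

       if (<this thread ran the sequentially last iteration>) goto T; else goto E;
     T:
       x.outer = x.private;
     E:

   i.e. PREDICATE guards the copy-back so that only the thread that owns
   the last iteration updates the original variable.  */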
2633 /* Generate code to implement the REDUCTION clauses. */
2635 static void
2636 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2638 gimple_seq sub_seq = NULL;
2639 gimple stmt;
2640 tree x, c;
2641 int count = 0;
2643 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2644 update in that case, otherwise use a lock. */
2645 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2646 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2648 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2650 /* Never use OMP_ATOMIC for array reductions. */
2651 count = -1;
2652 break;
2654 count++;
2657 if (count == 0)
2658 return;
2660 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2662 tree var, ref, new_var;
2663 enum tree_code code;
2664 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2666 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2667 continue;
2669 var = OMP_CLAUSE_DECL (c);
2670 new_var = lookup_decl (var, ctx);
2671 if (is_reference (var))
2672 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2673 ref = build_outer_var_ref (var, ctx);
2674 code = OMP_CLAUSE_REDUCTION_CODE (c);
2676 /* reduction(-:var) sums up the partial results, so it acts
2677 identically to reduction(+:var). */
2678 if (code == MINUS_EXPR)
2679 code = PLUS_EXPR;
2681 if (count == 1)
2683 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2685 addr = save_expr (addr);
2686 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2687 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2688 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2689 gimplify_and_add (x, stmt_seqp);
2690 return;
2693 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2695 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2697 if (is_reference (var))
2698 ref = build_fold_addr_expr_loc (clause_loc, ref);
2699 SET_DECL_VALUE_EXPR (placeholder, ref);
2700 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2701 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2702 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2703 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2704 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2706 else
2708 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2709 ref = build_outer_var_ref (var, ctx);
2710 gimplify_assign (ref, x, &sub_seq);
2714 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
2715 0);
2716 gimple_seq_add_stmt (stmt_seqp, stmt);
2718 gimple_seq_add_seq (stmt_seqp, sub_seq);
2720 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
2721 0);
2722 gimple_seq_add_stmt (stmt_seqp, stmt);
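/* Editor's sketch of the two shapes produced above: with exactly one
   scalar reduction clause the merge is a single atomic update, roughly

       #pragma omp atomic
       sum.outer = sum.outer + sum.private;

   whereas with several clauses (or a placeholder/array reduction) all
   merges are wrapped between the runtime lock calls

       GOMP_atomic_start ();
       sum.outer += sum.private;
       prod.outer *= prod.private;
       GOMP_atomic_end ();
*/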
2726 /* Generate code to implement the COPYPRIVATE clauses. */
2728 static void
2729 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2730 omp_context *ctx)
2732 tree c;
2734 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2736 tree var, new_var, ref, x;
2737 bool by_ref;
2738 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2740 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2741 continue;
2743 var = OMP_CLAUSE_DECL (c);
2744 by_ref = use_pointer_for_field (var, NULL);
2746 ref = build_sender_ref (var, ctx);
2747 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2748 if (by_ref)
2750 x = build_fold_addr_expr_loc (clause_loc, new_var);
2751 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2753 gimplify_assign (ref, x, slist);
2755 ref = build_receiver_ref (var, false, ctx);
2756 if (by_ref)
2758 ref = fold_convert_loc (clause_loc,
2759 build_pointer_type (TREE_TYPE (new_var)),
2760 ref);
2761 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2763 if (is_reference (var))
2765 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2766 ref = build_simple_mem_ref_loc (clause_loc, ref);
2767 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2769 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2770 gimplify_and_add (x, rlist);
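/* Editor's sketch: for

       #pragma omp single copyprivate (x)

   SLIST stores the executing thread's X (or its address, when passed by
   reference) into the copy structure, and RLIST reads it back into every
   other thread's X.  The single lowering later wires these two sequences
   around the GOMP_single_copy_start/GOMP_single_copy_end calls.  */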
2775 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2776 and REDUCTION from the sender (aka parent) side. */
2778 static void
2779 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2780 omp_context *ctx)
2782 tree c;
2784 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2786 tree val, ref, x, var;
2787 bool by_ref, do_in = false, do_out = false;
2788 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2790 switch (OMP_CLAUSE_CODE (c))
2792 case OMP_CLAUSE_PRIVATE:
2793 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2794 break;
2795 continue;
2796 case OMP_CLAUSE_FIRSTPRIVATE:
2797 case OMP_CLAUSE_COPYIN:
2798 case OMP_CLAUSE_LASTPRIVATE:
2799 case OMP_CLAUSE_REDUCTION:
2800 break;
2801 default:
2802 continue;
2805 val = OMP_CLAUSE_DECL (c);
2806 var = lookup_decl_in_outer_ctx (val, ctx);
2808 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2809 && is_global_var (var))
2810 continue;
2811 if (is_variable_sized (val))
2812 continue;
2813 by_ref = use_pointer_for_field (val, NULL);
2815 switch (OMP_CLAUSE_CODE (c))
2817 case OMP_CLAUSE_PRIVATE:
2818 case OMP_CLAUSE_FIRSTPRIVATE:
2819 case OMP_CLAUSE_COPYIN:
2820 do_in = true;
2821 break;
2823 case OMP_CLAUSE_LASTPRIVATE:
2824 if (by_ref || is_reference (val))
2826 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2827 continue;
2828 do_in = true;
2830 else
2832 do_out = true;
2833 if (lang_hooks.decls.omp_private_outer_ref (val))
2834 do_in = true;
2836 break;
2838 case OMP_CLAUSE_REDUCTION:
2839 do_in = true;
2840 do_out = !(by_ref || is_reference (val));
2841 break;
2843 default:
2844 gcc_unreachable ();
2847 if (do_in)
2849 ref = build_sender_ref (val, ctx);
2850 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2851 gimplify_assign (ref, x, ilist);
2852 if (is_task_ctx (ctx))
2853 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2856 if (do_out)
2858 ref = build_sender_ref (val, ctx);
2859 gimplify_assign (var, ref, olist);
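/* Editor's illustration: for firstprivate (x) the parent stores the value
   into the marshalling record before launching the region,

       .omp_data_o.x = x;            <-- goes to ILIST

   while for lastprivate (x) (not by reference) it reads the result back
   afterwards,

       x = .omp_data_o.x;            <-- goes to OLIST

   matching the DO_IN/DO_OUT decisions above.  */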
2864 /* Generate code to implement SHARED from the sender (aka parent)
2865 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2866 list things that got automatically shared. */
2868 static void
2869 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2871 tree var, ovar, nvar, f, x, record_type;
2873 if (ctx->record_type == NULL)
2874 return;
2876 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2877 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
2879 ovar = DECL_ABSTRACT_ORIGIN (f);
2880 nvar = maybe_lookup_decl (ovar, ctx);
2881 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2882 continue;
2884 /* If CTX is a nested parallel directive, find the immediately
2885 enclosing parallel or workshare construct that contains a
2886 mapping for OVAR. */
2887 var = lookup_decl_in_outer_ctx (ovar, ctx);
2889 if (use_pointer_for_field (ovar, ctx))
2891 x = build_sender_ref (ovar, ctx);
2892 var = build_fold_addr_expr (var);
2893 gimplify_assign (x, var, ilist);
2895 else
2897 x = build_sender_ref (ovar, ctx);
2898 gimplify_assign (x, var, ilist);
2900 if (!TREE_READONLY (var)
2901 /* We don't need to receive a new reference to a result
2902 or parm decl. In fact we must not store to it, as that would
2903 invalidate any pending return-slot optimization (RSO) and generate
2904 wrong gimple during inlining. */
2905 && !((TREE_CODE (var) == RESULT_DECL
2906 || TREE_CODE (var) == PARM_DECL)
2907 && DECL_BY_REFERENCE (var)))
2909 x = build_sender_ref (ovar, ctx);
2910 gimplify_assign (var, x, olist);
2917 /* A convenience function to build an empty GIMPLE_COND with just the
2918 condition. */
2920 static gimple
2921 gimple_build_cond_empty (tree cond)
2923 enum tree_code pred_code;
2924 tree lhs, rhs;
2926 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2927 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2931 /* Build the function calls to GOMP_parallel_start etc. to actually
2932 generate the parallel operation. REGION is the parallel region
2933 being expanded. BB is the block where the code is to be inserted.
2934 WS_ARGS will be set if this is a call to a combined parallel+workshare
2935 construct; it contains the list of additional arguments needed by
2936 the workshare construct. */
2938 static void
2939 expand_parallel_call (struct omp_region *region, basic_block bb,
2940 gimple entry_stmt, vec<tree, va_gc> *ws_args)
2942 tree t, t1, t2, val, cond, c, clauses;
2943 gimple_stmt_iterator gsi;
2944 gimple stmt;
2945 enum built_in_function start_ix;
2946 int start_ix2;
2947 location_t clause_loc;
2948 vec<tree, va_gc> *args;
2950 clauses = gimple_omp_parallel_clauses (entry_stmt);
2952 /* Determine what flavor of GOMP_parallel_start we will be
2953 emitting. */
2954 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2955 if (is_combined_parallel (region))
2957 switch (region->inner->type)
2959 case GIMPLE_OMP_FOR:
2960 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2961 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2962 + (region->inner->sched_kind
2963 == OMP_CLAUSE_SCHEDULE_RUNTIME
2964 ? 3 : region->inner->sched_kind));
2965 start_ix = (enum built_in_function)start_ix2;
2966 break;
2967 case GIMPLE_OMP_SECTIONS:
2968 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2969 break;
2970 default:
2971 gcc_unreachable ();
2975 /* By default, the value of NUM_THREADS is zero (selected at run time)
2976 and there is no conditional. */
2977 cond = NULL_TREE;
2978 val = build_int_cst (unsigned_type_node, 0);
2980 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2981 if (c)
2982 cond = OMP_CLAUSE_IF_EXPR (c);
2984 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2985 if (c)
2987 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2988 clause_loc = OMP_CLAUSE_LOCATION (c);
2990 else
2991 clause_loc = gimple_location (entry_stmt);
2993 /* Ensure 'val' is of the correct type. */
2994 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
2996 /* If we found the clause 'if (cond)', build either (cond == 0), so that a
2997 false condition forces a single thread while a true one leaves the choice
2997 to the runtime, or (cond ? val : 1u). */
2998 if (cond)
3000 gimple_stmt_iterator gsi;
3002 cond = gimple_boolify (cond);
3004 if (integer_zerop (val))
3005 val = fold_build2_loc (clause_loc,
3006 EQ_EXPR, unsigned_type_node, cond,
3007 build_int_cst (TREE_TYPE (cond), 0));
3008 else
3010 basic_block cond_bb, then_bb, else_bb;
3011 edge e, e_then, e_else;
3012 tree tmp_then, tmp_else, tmp_join, tmp_var;
3014 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
3015 if (gimple_in_ssa_p (cfun))
3017 tmp_then = make_ssa_name (tmp_var, NULL);
3018 tmp_else = make_ssa_name (tmp_var, NULL);
3019 tmp_join = make_ssa_name (tmp_var, NULL);
3021 else
3023 tmp_then = tmp_var;
3024 tmp_else = tmp_var;
3025 tmp_join = tmp_var;
3028 e = split_block (bb, NULL);
3029 cond_bb = e->src;
3030 bb = e->dest;
3031 remove_edge (e);
3033 then_bb = create_empty_bb (cond_bb);
3034 else_bb = create_empty_bb (then_bb);
3035 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3036 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3038 stmt = gimple_build_cond_empty (cond);
3039 gsi = gsi_start_bb (cond_bb);
3040 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3042 gsi = gsi_start_bb (then_bb);
3043 stmt = gimple_build_assign (tmp_then, val);
3044 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3046 gsi = gsi_start_bb (else_bb);
3047 stmt = gimple_build_assign
3048 (tmp_else, build_int_cst (unsigned_type_node, 1));
3049 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3051 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3052 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3053 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3054 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3056 if (gimple_in_ssa_p (cfun))
3058 gimple phi = create_phi_node (tmp_join, bb);
3059 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3060 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3063 val = tmp_join;
3066 gsi = gsi_start_bb (bb);
3067 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3068 false, GSI_CONTINUE_LINKING);
3071 gsi = gsi_last_bb (bb);
3072 t = gimple_omp_parallel_data_arg (entry_stmt);
3073 if (t == NULL)
3074 t1 = null_pointer_node;
3075 else
3076 t1 = build_fold_addr_expr (t);
3077 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3079 vec_alloc (args, 3 + vec_safe_length (ws_args));
3080 args->quick_push (t2);
3081 args->quick_push (t1);
3082 args->quick_push (val);
3083 if (ws_args)
3084 args->splice (*ws_args);
3086 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3087 builtin_decl_explicit (start_ix), args);
3089 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3090 false, GSI_CONTINUE_LINKING);
3092 t = gimple_omp_parallel_data_arg (entry_stmt);
3093 if (t == NULL)
3094 t = null_pointer_node;
3095 else
3096 t = build_fold_addr_expr (t);
3097 t = build_call_expr_loc (gimple_location (entry_stmt),
3098 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3099 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3100 false, GSI_CONTINUE_LINKING);
3102 t = build_call_expr_loc (gimple_location (entry_stmt),
3103 builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
3104 0);
3105 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3106 false, GSI_CONTINUE_LINKING);
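/* Editor's sketch of the calls emitted above (argument gimplification
   omitted): for

       #pragma omp parallel num_threads (n) if (cond)

   the encountering thread executes roughly

       GOMP_parallel_start (child_fn, &.omp_data_o, cond ? n : 1);
       child_fn (&.omp_data_o);
       GOMP_parallel_end ();

   and a combined parallel loop or sections region instead calls the
   matching GOMP_parallel_loop_*_start or GOMP_parallel_sections_start
   entry point with WS_ARGS appended.  */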
3110 /* Build the function call to GOMP_task to actually generate the task
3111 operation. BB is the block where the code is to be inserted. */
3113 static void
3114 expand_task_call (basic_block bb, gimple entry_stmt)
3116 tree t, t1, t2, t3, flags, cond, c, c2, clauses;
3117 gimple_stmt_iterator gsi;
3118 location_t loc = gimple_location (entry_stmt);
3120 clauses = gimple_omp_task_clauses (entry_stmt);
3122 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3123 if (c)
3124 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3125 else
3126 cond = boolean_true_node;
3128 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3129 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
3130 flags = build_int_cst (unsigned_type_node,
3131 (c ? 1 : 0) + (c2 ? 4 : 0));
3133 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
3134 if (c)
3136 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
3137 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
3138 build_int_cst (unsigned_type_node, 2),
3139 build_int_cst (unsigned_type_node, 0));
3140 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
3143 gsi = gsi_last_bb (bb);
3144 t = gimple_omp_task_data_arg (entry_stmt);
3145 if (t == NULL)
3146 t2 = null_pointer_node;
3147 else
3148 t2 = build_fold_addr_expr_loc (loc, t);
3149 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3150 t = gimple_omp_task_copy_fn (entry_stmt);
3151 if (t == NULL)
3152 t3 = null_pointer_node;
3153 else
3154 t3 = build_fold_addr_expr_loc (loc, t);
3156 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
3157 7, t1, t2, t3,
3158 gimple_omp_task_arg_size (entry_stmt),
3159 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3161 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3162 false, GSI_CONTINUE_LINKING);
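/* Editor's sketch: for

       #pragma omp task untied final (f)

   the call built above is roughly

       GOMP_task (task_fn, &.omp_data_o, copy_fn, arg_size, arg_align,
                  if_cond, 1 + (f ? 2 : 0));

   i.e. FLAGS packs untied into bit 0, final into bit 1 and mergeable
   into bit 2, exactly as computed from the clauses above.  */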
3166 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3167 catch handler and return it. This prevents programs from violating the
3168 structured block semantics with throws. */
3170 static gimple_seq
3171 maybe_catch_exception (gimple_seq body)
3173 gimple g;
3174 tree decl;
3176 if (!flag_exceptions)
3177 return body;
3179 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3180 decl = lang_hooks.eh_protect_cleanup_actions ();
3181 else
3182 decl = builtin_decl_explicit (BUILT_IN_TRAP);
3184 g = gimple_build_eh_must_not_throw (decl);
3185 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3186 GIMPLE_TRY_CATCH);
3188 return gimple_seq_alloc_with_stmt (g);
3191 /* Chain all the DECLs in the vector V by their DECL_CHAIN fields. */
3193 static tree
3194 vec2chain (vec<tree, va_gc> *v)
3196 tree chain = NULL_TREE, t;
3197 unsigned ix;
3199 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
3201 DECL_CHAIN (t) = chain;
3202 chain = t;
3205 return chain;
3209 /* Remove barriers in REGION->EXIT's block. Note that this is only
3210 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3211 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3212 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
3213 removed. */
3215 static void
3216 remove_exit_barrier (struct omp_region *region)
3218 gimple_stmt_iterator gsi;
3219 basic_block exit_bb;
3220 edge_iterator ei;
3221 edge e;
3222 gimple stmt;
3223 int any_addressable_vars = -1;
3225 exit_bb = region->exit;
3227 /* If the parallel region doesn't return, we don't have REGION->EXIT
3228 block at all. */
3229 if (! exit_bb)
3230 return;
3232 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3233 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3234 statements that can appear in between are extremely limited -- no
3235 memory operations at all. Here, we allow nothing at all, so the
3236 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3237 gsi = gsi_last_bb (exit_bb);
3238 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3239 gsi_prev (&gsi);
3240 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3241 return;
3243 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3245 gsi = gsi_last_bb (e->src);
3246 if (gsi_end_p (gsi))
3247 continue;
3248 stmt = gsi_stmt (gsi);
3249 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3250 && !gimple_omp_return_nowait_p (stmt))
3252 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3253 in many cases. If there could be tasks queued, the barrier
3254 might be needed to let the tasks run before some local
3255 variable of the parallel that the task uses as shared
3256 runs out of scope. The task can be spawned either
3257 from within current function (this would be easy to check)
3258 or from some function it calls and gets passed an address
3259 of such a variable. */
3260 if (any_addressable_vars < 0)
3262 gimple parallel_stmt = last_stmt (region->entry);
3263 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3264 tree local_decls, block, decl;
3265 unsigned ix;
3267 any_addressable_vars = 0;
3268 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3269 if (TREE_ADDRESSABLE (decl))
3271 any_addressable_vars = 1;
3272 break;
3274 for (block = gimple_block (stmt);
3275 !any_addressable_vars
3276 && block
3277 && TREE_CODE (block) == BLOCK;
3278 block = BLOCK_SUPERCONTEXT (block))
3280 for (local_decls = BLOCK_VARS (block);
3281 local_decls;
3282 local_decls = DECL_CHAIN (local_decls))
3283 if (TREE_ADDRESSABLE (local_decls))
3285 any_addressable_vars = 1;
3286 break;
3288 if (block == gimple_block (parallel_stmt))
3289 break;
3292 if (!any_addressable_vars)
3293 gimple_omp_return_set_nowait (stmt);
3298 static void
3299 remove_exit_barriers (struct omp_region *region)
3301 if (region->type == GIMPLE_OMP_PARALLEL)
3302 remove_exit_barrier (region);
3304 if (region->inner)
3306 region = region->inner;
3307 remove_exit_barriers (region);
3308 while (region->next)
3310 region = region->next;
3311 remove_exit_barriers (region);
3316 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3317 calls. These can't be declared as const functions, but
3318 within one parallel body they are constant, so they can be
3319 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3320 which are declared const. Similarly for a task body, except
3321 that in an untied task omp_get_thread_num () can change at any task
3322 scheduling point. */
3324 static void
3325 optimize_omp_library_calls (gimple entry_stmt)
3327 basic_block bb;
3328 gimple_stmt_iterator gsi;
3329 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3330 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
3331 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3332 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
3333 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3334 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3335 OMP_CLAUSE_UNTIED) != NULL);
3337 FOR_EACH_BB (bb)
3338 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3340 gimple call = gsi_stmt (gsi);
3341 tree decl;
3343 if (is_gimple_call (call)
3344 && (decl = gimple_call_fndecl (call))
3345 && DECL_EXTERNAL (decl)
3346 && TREE_PUBLIC (decl)
3347 && DECL_INITIAL (decl) == NULL)
3349 tree built_in;
3351 if (DECL_NAME (decl) == thr_num_id)
3353 /* In #pragma omp task untied omp_get_thread_num () can change
3354 during the execution of the task region. */
3355 if (untied_task)
3356 continue;
3357 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3359 else if (DECL_NAME (decl) == num_thr_id)
3360 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3361 else
3362 continue;
3364 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3365 || gimple_call_num_args (call) != 0)
3366 continue;
3368 if (flag_exceptions && !TREE_NOTHROW (decl))
3369 continue;
3371 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3372 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3373 TREE_TYPE (TREE_TYPE (built_in))))
3374 continue;
3376 gimple_call_set_fndecl (call, built_in);
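/* Editor's illustration: inside an outlined parallel body a call such as

       tid = omp_get_thread_num ();

   is redirected to the const builtin __builtin_omp_get_thread_num (), so
   repeated uses within the body can be CSEd; in an untied task the
   replacement of omp_get_thread_num () is skipped, as handled above.  */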
3381 /* Expand the OpenMP parallel or task directive starting at REGION. */
3383 static void
3384 expand_omp_taskreg (struct omp_region *region)
3386 basic_block entry_bb, exit_bb, new_bb;
3387 struct function *child_cfun;
3388 tree child_fn, block, t;
3389 gimple_stmt_iterator gsi;
3390 gimple entry_stmt, stmt;
3391 edge e;
3392 vec<tree, va_gc> *ws_args;
3394 entry_stmt = last_stmt (region->entry);
3395 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3396 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3398 entry_bb = region->entry;
3399 exit_bb = region->exit;
3401 if (is_combined_parallel (region))
3402 ws_args = region->ws_args;
3403 else
3404 ws_args = NULL;
3406 if (child_cfun->cfg)
3408 /* Due to inlining, it may happen that we have already outlined
3409 the region, in which case all we need to do is make the
3410 sub-graph unreachable and emit the parallel call. */
3411 edge entry_succ_e, exit_succ_e;
3412 gimple_stmt_iterator gsi;
3414 entry_succ_e = single_succ_edge (entry_bb);
3416 gsi = gsi_last_bb (entry_bb);
3417 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3418 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3419 gsi_remove (&gsi, true);
3421 new_bb = entry_bb;
3422 if (exit_bb)
3424 exit_succ_e = single_succ_edge (exit_bb);
3425 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3427 remove_edge_and_dominated_blocks (entry_succ_e);
3429 else
3431 unsigned srcidx, dstidx, num;
3433 /* If the parallel region needs data sent from the parent
3434 function, then the very first statement (except possible
3435 tree profile counter updates) of the parallel body
3436 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3437 &.OMP_DATA_O is passed as an argument to the child function,
3438 we need to replace it with the argument as seen by the child
3439 function.
3441 In most cases, this will end up being the identity assignment
3442 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3443 a function call that has been inlined, the original PARM_DECL
3444 .OMP_DATA_I may have been converted into a different local
3445 variable, in which case we need to keep the assignment. */
3446 if (gimple_omp_taskreg_data_arg (entry_stmt))
3448 basic_block entry_succ_bb = single_succ (entry_bb);
3449 gimple_stmt_iterator gsi;
3450 tree arg, narg;
3451 gimple parcopy_stmt = NULL;
3453 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3455 gimple stmt;
3457 gcc_assert (!gsi_end_p (gsi));
3458 stmt = gsi_stmt (gsi);
3459 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3460 continue;
3462 if (gimple_num_ops (stmt) == 2)
3464 tree arg = gimple_assign_rhs1 (stmt);
3466 /* We're ignoring the subcode because we're
3467 effectively doing a STRIP_NOPS. */
3469 if (TREE_CODE (arg) == ADDR_EXPR
3470 && TREE_OPERAND (arg, 0)
3471 == gimple_omp_taskreg_data_arg (entry_stmt))
3473 parcopy_stmt = stmt;
3474 break;
3479 gcc_assert (parcopy_stmt != NULL);
3480 arg = DECL_ARGUMENTS (child_fn);
3482 if (!gimple_in_ssa_p (cfun))
3484 if (gimple_assign_lhs (parcopy_stmt) == arg)
3485 gsi_remove (&gsi, true);
3486 else
3488 /* ?? Is setting the subcode really necessary ?? */
3489 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3490 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3493 else
3495 /* If we are in ssa form, we must load the value from the default
3496 definition of the argument. That default definition should not exist yet,
3497 since the argument is not used uninitialized.
3498 gcc_assert (ssa_default_def (cfun, arg) == NULL);
3499 narg = make_ssa_name (arg, gimple_build_nop ());
3500 set_ssa_default_def (cfun, arg, narg);
3501 /* ?? Is setting the subcode really necessary ?? */
3502 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3503 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3504 update_stmt (parcopy_stmt);
3508 /* Declare local variables needed in CHILD_CFUN. */
3509 block = DECL_INITIAL (child_fn);
3510 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3511 /* The gimplifier could record temporaries in parallel/task block
3512 rather than in containing function's local_decls chain,
3513 which would mean cgraph missed finalizing them. Do it now. */
3514 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3515 if (TREE_CODE (t) == VAR_DECL
3516 && TREE_STATIC (t)
3517 && !DECL_EXTERNAL (t))
3518 varpool_finalize_decl (t);
3519 DECL_SAVED_TREE (child_fn) = NULL;
3520 /* We'll create a CFG for child_fn, so no gimple body is needed. */
3521 gimple_set_body (child_fn, NULL);
3522 TREE_USED (block) = 1;
3524 /* Reset DECL_CONTEXT on function arguments. */
3525 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3526 DECL_CONTEXT (t) = child_fn;
3528 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3529 so that it can be moved to the child function. */
3530 gsi = gsi_last_bb (entry_bb);
3531 stmt = gsi_stmt (gsi);
3532 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3533 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3534 gsi_remove (&gsi, true);
3535 e = split_block (entry_bb, stmt);
3536 entry_bb = e->dest;
3537 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3539 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3540 if (exit_bb)
3542 gsi = gsi_last_bb (exit_bb);
3543 gcc_assert (!gsi_end_p (gsi)
3544 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3545 stmt = gimple_build_return (NULL);
3546 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3547 gsi_remove (&gsi, true);
3550 /* Move the parallel region into CHILD_CFUN. */
3552 if (gimple_in_ssa_p (cfun))
3554 init_tree_ssa (child_cfun);
3555 init_ssa_operands (child_cfun);
3556 child_cfun->gimple_df->in_ssa_p = true;
3557 block = NULL_TREE;
3559 else
3560 block = gimple_block (entry_stmt);
3562 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3563 if (exit_bb)
3564 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3566 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3567 num = vec_safe_length (child_cfun->local_decls);
3568 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3570 t = (*child_cfun->local_decls)[srcidx];
3571 if (DECL_CONTEXT (t) == cfun->decl)
3572 continue;
3573 if (srcidx != dstidx)
3574 (*child_cfun->local_decls)[dstidx] = t;
3575 dstidx++;
3577 if (dstidx != num)
3578 vec_safe_truncate (child_cfun->local_decls, dstidx);
3580 /* Inform the callgraph about the new function. */
3581 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3582 = cfun->curr_properties & ~PROP_loops;
3583 cgraph_add_new_function (child_fn, true);
3585 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3586 fixed in a following pass. */
3587 push_cfun (child_cfun);
3588 if (optimize)
3589 optimize_omp_library_calls (entry_stmt);
3590 rebuild_cgraph_edges ();
3592 /* Some EH regions might become dead, see PR34608. If
3593 pass_cleanup_cfg isn't the first pass to happen with the
3594 new child, these dead EH edges might cause problems.
3595 Clean them up now. */
3596 if (flag_exceptions)
3598 basic_block bb;
3599 bool changed = false;
3601 FOR_EACH_BB (bb)
3602 changed |= gimple_purge_dead_eh_edges (bb);
3603 if (changed)
3604 cleanup_tree_cfg ();
3606 if (gimple_in_ssa_p (cfun))
3607 update_ssa (TODO_update_ssa);
3608 pop_cfun ();
3611 /* Emit a library call to launch the children threads. */
3612 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3613 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3614 else
3615 expand_task_call (new_bb, entry_stmt);
3616 if (gimple_in_ssa_p (cfun))
3617 update_ssa (TODO_update_ssa_only_virtuals);
3621 /* A subroutine of expand_omp_for. Generate code for a parallel
3622 loop with any schedule. Given parameters:
3624 for (V = N1; V cond N2; V += STEP) BODY;
3626 where COND is "<" or ">", we generate pseudocode
3628 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3629 if (more) goto L0; else goto L3;
3630 L0:
3631 V = istart0;
3632 iend = iend0;
3633 L1:
3634 BODY;
3635 V += STEP;
3636 if (V cond iend) goto L1; else goto L2;
3637 L2:
3638 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3639 L3:
3641 If this is a combined omp parallel loop, instead of the call to
3642 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3644 For collapsed loops, given parameters:
3645 collapse(3)
3646 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3647 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3648 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3649 BODY;
3651 we generate pseudocode
3653 if (cond3 is <)
3654 adj = STEP3 - 1;
3655 else
3656 adj = STEP3 + 1;
3657 count3 = (adj + N32 - N31) / STEP3;
3658 if (cond2 is <)
3659 adj = STEP2 - 1;
3660 else
3661 adj = STEP2 + 1;
3662 count2 = (adj + N22 - N21) / STEP2;
3663 if (cond1 is <)
3664 adj = STEP1 - 1;
3665 else
3666 adj = STEP1 + 1;
3667 count1 = (adj + N12 - N11) / STEP1;
3668 count = count1 * count2 * count3;
3669 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3670 if (more) goto L0; else goto L3;
3671 L0:
3672 V = istart0;
3673 T = V;
3674 V3 = N31 + (T % count3) * STEP3;
3675 T = T / count3;
3676 V2 = N21 + (T % count2) * STEP2;
3677 T = T / count2;
3678 V1 = N11 + T * STEP1;
3679 iend = iend0;
3680 L1:
3681 BODY;
3682 V += 1;
3683 if (V < iend) goto L10; else goto L2;
3684 L10:
3685 V3 += STEP3;
3686 if (V3 cond3 N32) goto L1; else goto L11;
3687 L11:
3688 V3 = N31;
3689 V2 += STEP2;
3690 if (V2 cond2 N22) goto L1; else goto L12;
3691 L12:
3692 V2 = N21;
3693 V1 += STEP1;
3694 goto L1;
3695 L2:
3696 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3697 L3:
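/* A worked example of the index recovery above (editor's sketch): with
   count3 = 4, count2 = 3, zero lower bounds and unit steps, the
   flattened index V = 7 unwinds as

       V3 = 0 + (7 % 4) * 1 = 3;   T = 7 / 4 = 1;
       V2 = 0 + (1 % 3) * 1 = 1;   T = 1 / 3 = 0;
       V1 = 0 + 0 * 1 = 0;

   i.e. iteration (V1, V2, V3) = (0, 1, 3) of the collapsed nest, since
   7 = 0 * (3 * 4) + 1 * 4 + 3.  */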
3701 static void
3702 expand_omp_for_generic (struct omp_region *region,
3703 struct omp_for_data *fd,
3704 enum built_in_function start_fn,
3705 enum built_in_function next_fn)
3707 tree type, istart0, iend0, iend;
3708 tree t, vmain, vback, bias = NULL_TREE;
3709 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3710 basic_block l2_bb = NULL, l3_bb = NULL;
3711 gimple_stmt_iterator gsi;
3712 gimple stmt;
3713 bool in_combined_parallel = is_combined_parallel (region);
3714 bool broken_loop = region->cont == NULL;
3715 edge e, ne;
3716 tree *counts = NULL;
3717 int i;
3719 gcc_assert (!broken_loop || !in_combined_parallel);
3720 gcc_assert (fd->iter_type == long_integer_type_node
3721 || !in_combined_parallel);
3723 type = TREE_TYPE (fd->loop.v);
3724 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3725 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3726 TREE_ADDRESSABLE (istart0) = 1;
3727 TREE_ADDRESSABLE (iend0) = 1;
3729 /* See if we need to bias by LLONG_MIN. */
3730 if (fd->iter_type == long_long_unsigned_type_node
3731 && TREE_CODE (type) == INTEGER_TYPE
3732 && !TYPE_UNSIGNED (type))
3734 tree n1, n2;
3736 if (fd->loop.cond_code == LT_EXPR)
3738 n1 = fd->loop.n1;
3739 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3741 else
3743 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3744 n2 = fd->loop.n1;
3746 if (TREE_CODE (n1) != INTEGER_CST
3747 || TREE_CODE (n2) != INTEGER_CST
3748 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3749 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3752 entry_bb = region->entry;
3753 cont_bb = region->cont;
3754 collapse_bb = NULL;
3755 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3756 gcc_assert (broken_loop
3757 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3758 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3759 l1_bb = single_succ (l0_bb);
3760 if (!broken_loop)
3762 l2_bb = create_empty_bb (cont_bb);
3763 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3764 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3766 else
3767 l2_bb = NULL;
3768 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3769 exit_bb = region->exit;
3771 gsi = gsi_last_bb (entry_bb);
3773 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3774 if (fd->collapse > 1)
3776 /* collapsed loops need work for expansion in SSA form. */
3777 gcc_assert (!gimple_in_ssa_p (cfun));
3778 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3779 for (i = 0; i < fd->collapse; i++)
3781 tree itype = TREE_TYPE (fd->loops[i].v);
3783 if (POINTER_TYPE_P (itype))
3784 itype = signed_type_for (itype);
3785 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3786 ? -1 : 1));
3787 t = fold_build2 (PLUS_EXPR, itype,
3788 fold_convert (itype, fd->loops[i].step), t);
3789 t = fold_build2 (PLUS_EXPR, itype, t,
3790 fold_convert (itype, fd->loops[i].n2));
3791 t = fold_build2 (MINUS_EXPR, itype, t,
3792 fold_convert (itype, fd->loops[i].n1));
3793 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3794 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3795 fold_build1 (NEGATE_EXPR, itype, t),
3796 fold_build1 (NEGATE_EXPR, itype,
3797 fold_convert (itype,
3798 fd->loops[i].step)));
3799 else
3800 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3801 fold_convert (itype, fd->loops[i].step));
3802 t = fold_convert (type, t);
3803 if (TREE_CODE (t) == INTEGER_CST)
3804 counts[i] = t;
3805 else
3807 counts[i] = create_tmp_reg (type, ".count");
3808 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3809 true, GSI_SAME_STMT);
3810 stmt = gimple_build_assign (counts[i], t);
3811 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3813 if (SSA_VAR_P (fd->loop.n2))
3815 if (i == 0)
3816 t = counts[0];
3817 else
3819 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3820 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3821 true, GSI_SAME_STMT);
3823 stmt = gimple_build_assign (fd->loop.n2, t);
3824 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3828 if (in_combined_parallel)
3830 /* In a combined parallel loop, emit a call to
3831 GOMP_loop_foo_next. */
3832 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
3833 build_fold_addr_expr (istart0),
3834 build_fold_addr_expr (iend0));
3836 else
3838 tree t0, t1, t2, t3, t4;
3839 /* If this is not a combined parallel loop, emit a call to
3840 GOMP_loop_foo_start in ENTRY_BB. */
3841 t4 = build_fold_addr_expr (iend0);
3842 t3 = build_fold_addr_expr (istart0);
3843 t2 = fold_convert (fd->iter_type, fd->loop.step);
3844 if (POINTER_TYPE_P (type)
3845 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3847 /* Avoid casting pointers to integer of a different size. */
3848 tree itype = signed_type_for (type);
3849 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3850 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3852 else
3854 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3855 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3857 if (bias)
3859 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3860 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3862 if (fd->iter_type == long_integer_type_node)
3864 if (fd->chunk_size)
3866 t = fold_convert (fd->iter_type, fd->chunk_size);
3867 t = build_call_expr (builtin_decl_explicit (start_fn),
3868 6, t0, t1, t2, t, t3, t4);
3870 else
3871 t = build_call_expr (builtin_decl_explicit (start_fn),
3872 5, t0, t1, t2, t3, t4);
3874 else
3876 tree t5;
3877 tree c_bool_type;
3878 tree bfn_decl;
3880 /* The GOMP_loop_ull_*start functions have an additional boolean
3881 argument, true for < loops and false for > loops.
3882 In Fortran, the C bool type can be different from
3883 boolean_type_node. */
3884 bfn_decl = builtin_decl_explicit (start_fn);
3885 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
3886 t5 = build_int_cst (c_bool_type,
3887 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3888 if (fd->chunk_size)
3890 tree bfn_decl = builtin_decl_explicit (start_fn);
3891 t = fold_convert (fd->iter_type, fd->chunk_size);
3892 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
3894 else
3895 t = build_call_expr (builtin_decl_explicit (start_fn),
3896 6, t5, t0, t1, t2, t3, t4);
3899 if (TREE_TYPE (t) != boolean_type_node)
3900 t = fold_build2 (NE_EXPR, boolean_type_node,
3901 t, build_int_cst (TREE_TYPE (t), 0));
3902 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3903 true, GSI_SAME_STMT);
3904 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3906 /* Remove the GIMPLE_OMP_FOR statement. */
3907 gsi_remove (&gsi, true);
3909 /* Iteration setup for sequential loop goes in L0_BB. */
3910 gsi = gsi_start_bb (l0_bb);
3911 t = istart0;
3912 if (bias)
3913 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3914 if (POINTER_TYPE_P (type))
3915 t = fold_convert (signed_type_for (type), t);
3916 t = fold_convert (type, t);
3917 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3918 false, GSI_CONTINUE_LINKING);
3919 stmt = gimple_build_assign (fd->loop.v, t);
3920 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3922 t = iend0;
3923 if (bias)
3924 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3925 if (POINTER_TYPE_P (type))
3926 t = fold_convert (signed_type_for (type), t);
3927 t = fold_convert (type, t);
3928 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3929 false, GSI_CONTINUE_LINKING);
3930 if (fd->collapse > 1)
3932 tree tem = create_tmp_reg (type, ".tem");
3933 stmt = gimple_build_assign (tem, fd->loop.v);
3934 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3935 for (i = fd->collapse - 1; i >= 0; i--)
3937 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3938 itype = vtype;
3939 if (POINTER_TYPE_P (vtype))
3940 itype = signed_type_for (vtype);
3941 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3942 t = fold_convert (itype, t);
3943 t = fold_build2 (MULT_EXPR, itype, t,
3944 fold_convert (itype, fd->loops[i].step));
3945 if (POINTER_TYPE_P (vtype))
3946 t = fold_build_pointer_plus (fd->loops[i].n1, t);
3947 else
3948 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3949 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3950 false, GSI_CONTINUE_LINKING);
3951 stmt = gimple_build_assign (fd->loops[i].v, t);
3952 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3953 if (i != 0)
3955 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3956 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3957 false, GSI_CONTINUE_LINKING);
3958 stmt = gimple_build_assign (tem, t);
3959 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3964 if (!broken_loop)
3966 /* Code to control the increment and predicate for the sequential
3967 loop goes in the CONT_BB. */
3968 gsi = gsi_last_bb (cont_bb);
3969 stmt = gsi_stmt (gsi);
3970 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3971 vmain = gimple_omp_continue_control_use (stmt);
3972 vback = gimple_omp_continue_control_def (stmt);
3974 if (POINTER_TYPE_P (type))
3975 t = fold_build_pointer_plus (vmain, fd->loop.step);
3976 else
3977 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3978 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3979 true, GSI_SAME_STMT);
3980 stmt = gimple_build_assign (vback, t);
3981 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3983 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3984 stmt = gimple_build_cond_empty (t);
3985 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3987 /* Remove GIMPLE_OMP_CONTINUE. */
3988 gsi_remove (&gsi, true);
3990 if (fd->collapse > 1)
3992 basic_block last_bb, bb;
3994 last_bb = cont_bb;
3995 for (i = fd->collapse - 1; i >= 0; i--)
3997 tree vtype = TREE_TYPE (fd->loops[i].v);
3999 bb = create_empty_bb (last_bb);
4000 gsi = gsi_start_bb (bb);
4002 if (i < fd->collapse - 1)
4004 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
4005 e->probability = REG_BR_PROB_BASE / 8;
4007 t = fd->loops[i + 1].n1;
4008 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4009 false, GSI_CONTINUE_LINKING);
4010 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
4011 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4013 else
4014 collapse_bb = bb;
4016 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
4018 if (POINTER_TYPE_P (vtype))
4019 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
4020 else
4021 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
4022 fd->loops[i].step);
4023 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4024 false, GSI_CONTINUE_LINKING);
4025 stmt = gimple_build_assign (fd->loops[i].v, t);
4026 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4028 if (i > 0)
4030 t = fd->loops[i].n2;
4031 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4032 false, GSI_CONTINUE_LINKING);
4033 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
4034 fd->loops[i].v, t);
4035 stmt = gimple_build_cond_empty (t);
4036 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4037 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4038 e->probability = REG_BR_PROB_BASE * 7 / 8;
4040 else
4041 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4042 last_bb = bb;
4046 /* Emit code to get the next parallel iteration in L2_BB. */
4047 gsi = gsi_start_bb (l2_bb);
4049 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
4050 build_fold_addr_expr (istart0),
4051 build_fold_addr_expr (iend0));
4052 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4053 false, GSI_CONTINUE_LINKING);
4054 if (TREE_TYPE (t) != boolean_type_node)
4055 t = fold_build2 (NE_EXPR, boolean_type_node,
4056 t, build_int_cst (TREE_TYPE (t), 0));
4057 stmt = gimple_build_cond_empty (t);
4058 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4061 /* Add the loop cleanup function. */
4062 gsi = gsi_last_bb (exit_bb);
4063 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4064 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
4065 else
4066 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
4067 stmt = gimple_build_call (t, 0);
4068 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4069 gsi_remove (&gsi, true);
4071 /* Connect the new blocks. */
4072 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4073 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4075 if (!broken_loop)
4077 gimple_seq phis;
4079 e = find_edge (cont_bb, l3_bb);
4080 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4082 phis = phi_nodes (l3_bb);
4083 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4085 gimple phi = gsi_stmt (gsi);
4086 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4087 PHI_ARG_DEF_FROM_EDGE (phi, e));
4089 remove_edge (e);
4091 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4092 if (fd->collapse > 1)
4094 e = find_edge (cont_bb, l1_bb);
4095 remove_edge (e);
4096 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4098 else
4100 e = find_edge (cont_bb, l1_bb);
4101 e->flags = EDGE_TRUE_VALUE;
4103 e->probability = REG_BR_PROB_BASE * 7 / 8;
4104 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4105 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4107 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4108 recompute_dominator (CDI_DOMINATORS, l2_bb));
4109 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4110 recompute_dominator (CDI_DOMINATORS, l3_bb));
4111 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4112 recompute_dominator (CDI_DOMINATORS, l0_bb));
4113 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4114 recompute_dominator (CDI_DOMINATORS, l1_bb));
4119 /* A subroutine of expand_omp_for. Generate code for a parallel
4120 loop with static schedule and no specified chunk size. Given
4121 parameters:
4123 for (V = N1; V cond N2; V += STEP) BODY;
4125 where COND is "<" or ">", we generate pseudocode
4127 if (cond is <)
4128 adj = STEP - 1;
4129 else
4130 adj = STEP + 1;
4131 if ((__typeof (V)) -1 > 0 && cond is >)
4132 n = -(adj + N2 - N1) / -STEP;
4133 else
4134 n = (adj + N2 - N1) / STEP;
4135 q = n / nthreads;
4136 tt = n % nthreads;
4137 if (threadid < tt) goto L3; else goto L4;
4139 tt = 0;
4140 q = q + 1;
4142 s0 = q * threadid + tt;
4143 e0 = s0 + q;
4144 V = s0 * STEP + N1;
4145 if (s0 >= e0) goto L2; else goto L0;
4147 e = e0 * STEP + N1;
4149 BODY;
4150 V += STEP;
4151 if (V cond e) goto L1;
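/* A worked instance of the scheme above (values assumed for
   illustration, not taken from this file): with n = 10 iterations
   and nthreads = 4 we get q = 2 and tt = 2, so threads 0 and 1 each
   take q + 1 = 3 iterations and threads 2 and 3 take 2, i.e. the
   contiguous ranges [0,3), [3,6), [6,8) and [8,10).  */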
4155 static void
4156 expand_omp_for_static_nochunk (struct omp_region *region,
4157 struct omp_for_data *fd)
4159 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
4160 tree type, itype, vmain, vback;
4161 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4162 basic_block body_bb, cont_bb;
4163 basic_block fin_bb;
4164 gimple_stmt_iterator gsi;
4165 gimple stmt;
4166 edge ep;
4168 itype = type = TREE_TYPE (fd->loop.v);
4169 if (POINTER_TYPE_P (type))
4170 itype = signed_type_for (type);
4172 entry_bb = region->entry;
4173 cont_bb = region->cont;
4174 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4175 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4176 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4177 body_bb = single_succ (seq_start_bb);
4178 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4179 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4180 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4181 exit_bb = region->exit;
4183 /* Iteration space partitioning goes in ENTRY_BB. */
4184 gsi = gsi_last_bb (entry_bb);
4185 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4187 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4188 t = fold_convert (itype, t);
4189 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4190 true, GSI_SAME_STMT);
4192 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4193 t = fold_convert (itype, t);
4194 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4195 true, GSI_SAME_STMT);
4197 fd->loop.n1
4198 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4199 true, NULL_TREE, true, GSI_SAME_STMT);
4200 fd->loop.n2
4201 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4202 true, NULL_TREE, true, GSI_SAME_STMT);
4203 fd->loop.step
4204 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4205 true, NULL_TREE, true, GSI_SAME_STMT);
4207 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4208 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4209 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4210 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4211 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4212 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4213 fold_build1 (NEGATE_EXPR, itype, t),
4214 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4215 else
4216 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4217 t = fold_convert (itype, t);
4218 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4220 q = create_tmp_reg (itype, "q");
4221 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4222 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4223 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4225 tt = create_tmp_reg (itype, "tt");
4226 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4227 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4228 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
4230 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4231 stmt = gimple_build_cond_empty (t);
4232 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4234 second_bb = split_block (entry_bb, stmt)->dest;
4235 gsi = gsi_last_bb (second_bb);
4236 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4238 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4239 GSI_SAME_STMT);
4240 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4241 build_int_cst (itype, 1));
4242 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4244 third_bb = split_block (second_bb, stmt)->dest;
4245 gsi = gsi_last_bb (third_bb);
4246 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4248 t = build2 (MULT_EXPR, itype, q, threadid);
4249 t = build2 (PLUS_EXPR, itype, t, tt);
4250 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4252 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4253 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4255 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4256 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4258 /* Remove the GIMPLE_OMP_FOR statement. */
4259 gsi_remove (&gsi, true);
4261 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4262 gsi = gsi_start_bb (seq_start_bb);
4264 t = fold_convert (itype, s0);
4265 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4266 if (POINTER_TYPE_P (type))
4267 t = fold_build_pointer_plus (fd->loop.n1, t);
4268 else
4269 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4270 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4271 false, GSI_CONTINUE_LINKING);
4272 stmt = gimple_build_assign (fd->loop.v, t);
4273 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4275 t = fold_convert (itype, e0);
4276 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4277 if (POINTER_TYPE_P (type))
4278 t = fold_build_pointer_plus (fd->loop.n1, t);
4279 else
4280 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4281 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4282 false, GSI_CONTINUE_LINKING);
4284 /* The code controlling the sequential loop replaces the
4285 GIMPLE_OMP_CONTINUE. */
4286 gsi = gsi_last_bb (cont_bb);
4287 stmt = gsi_stmt (gsi);
4288 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4289 vmain = gimple_omp_continue_control_use (stmt);
4290 vback = gimple_omp_continue_control_def (stmt);
4292 if (POINTER_TYPE_P (type))
4293 t = fold_build_pointer_plus (vmain, fd->loop.step);
4294 else
4295 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4296 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4297 true, GSI_SAME_STMT);
4298 stmt = gimple_build_assign (vback, t);
4299 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4301 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4302 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4304 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4305 gsi_remove (&gsi, true);
4307 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4308 gsi = gsi_last_bb (exit_bb);
4309 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4310 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4311 false, GSI_SAME_STMT);
4312 gsi_remove (&gsi, true);
4314 /* Connect all the blocks. */
4315 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
4316 ep->probability = REG_BR_PROB_BASE / 4 * 3;
4317 ep = find_edge (entry_bb, second_bb);
4318 ep->flags = EDGE_TRUE_VALUE;
4319 ep->probability = REG_BR_PROB_BASE / 4;
4320 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4321 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4323 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4324 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4326 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
4327 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
4328 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
4329 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4330 recompute_dominator (CDI_DOMINATORS, body_bb));
4331 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4332 recompute_dominator (CDI_DOMINATORS, fin_bb));
4336 /* A subroutine of expand_omp_for. Generate code for a parallel
4337 loop with static schedule and a specified chunk size. Given
4338 parameters:
4340 for (V = N1; V cond N2; V += STEP) BODY;
4342 where COND is "<" or ">", we generate pseudocode
4344 if (cond is <)
4345 adj = STEP - 1;
4346 else
4347 adj = STEP + 1;
4348 if ((__typeof (V)) -1 > 0 && cond is >)
4349 n = -(adj + N2 - N1) / -STEP;
4350 else
4351 n = (adj + N2 - N1) / STEP;
4352 trip = 0;
4353 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4354 here so that V is defined
4355 if the loop is not entered
4357 s0 = (trip * nthreads + threadid) * CHUNK;
4358 e0 = min(s0 + CHUNK, n);
4359 if (s0 < n) goto L1; else goto L4;
4361 V = s0 * STEP + N1;
4362 e = e0 * STEP + N1;
4364 BODY;
4365 V += STEP;
4366 if (V cond e) goto L2; else goto L3;
4368 trip += 1;
4369 goto L0;
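/* A worked instance of the scheme above (values assumed for
   illustration, not taken from this file): with n = 10, nthreads = 2
   and CHUNK = 3, successive trips give thread 0 the ranges [0,3) and
   [6,9) and thread 1 the ranges [3,6) and [9,10); on the following
   trip s0 >= n for both threads and the loop is left.  */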
4373 static void
4374 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4376 tree n, s0, e0, e, t;
4377 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4378 tree type, itype, v_main, v_back, v_extra;
4379 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4380 basic_block trip_update_bb, cont_bb, fin_bb;
4381 gimple_stmt_iterator si;
4382 gimple stmt;
4383 edge se;
4385 itype = type = TREE_TYPE (fd->loop.v);
4386 if (POINTER_TYPE_P (type))
4387 itype = signed_type_for (type);
4389 entry_bb = region->entry;
4390 se = split_block (entry_bb, last_stmt (entry_bb));
4391 entry_bb = se->src;
4392 iter_part_bb = se->dest;
4393 cont_bb = region->cont;
4394 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4395 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4396 == FALLTHRU_EDGE (cont_bb)->dest);
4397 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4398 body_bb = single_succ (seq_start_bb);
4399 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4400 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4401 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4402 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4403 exit_bb = region->exit;
4405 /* Trip and adjustment setup goes in ENTRY_BB. */
4406 si = gsi_last_bb (entry_bb);
4407 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4409 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4410 t = fold_convert (itype, t);
4411 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4412 true, GSI_SAME_STMT);
4414 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4415 t = fold_convert (itype, t);
4416 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4417 true, GSI_SAME_STMT);
4419 fd->loop.n1
4420 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4421 true, NULL_TREE, true, GSI_SAME_STMT);
4422 fd->loop.n2
4423 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4424 true, NULL_TREE, true, GSI_SAME_STMT);
4425 fd->loop.step
4426 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4427 true, NULL_TREE, true, GSI_SAME_STMT);
4428 fd->chunk_size
4429 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4430 true, NULL_TREE, true, GSI_SAME_STMT);
4432 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4433 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4434 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4435 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4436 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4437 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4438 fold_build1 (NEGATE_EXPR, itype, t),
4439 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4440 else
4441 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4442 t = fold_convert (itype, t);
4443 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4444 true, GSI_SAME_STMT);
4446 trip_var = create_tmp_reg (itype, ".trip");
4447 if (gimple_in_ssa_p (cfun))
4449 trip_init = make_ssa_name (trip_var, NULL);
4450 trip_main = make_ssa_name (trip_var, NULL);
4451 trip_back = make_ssa_name (trip_var, NULL);
4453 else
4455 trip_init = trip_var;
4456 trip_main = trip_var;
4457 trip_back = trip_var;
4460 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4461 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4463 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4464 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4465 if (POINTER_TYPE_P (type))
4466 t = fold_build_pointer_plus (fd->loop.n1, t);
4467 else
4468 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4469 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4470 true, GSI_SAME_STMT);
4472 /* Remove the GIMPLE_OMP_FOR. */
4473 gsi_remove (&si, true);
4475 /* Iteration space partitioning goes in ITER_PART_BB. */
4476 si = gsi_last_bb (iter_part_bb);
4478 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4479 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4480 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4481 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4482 false, GSI_CONTINUE_LINKING);
4484 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4485 t = fold_build2 (MIN_EXPR, itype, t, n);
4486 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4487 false, GSI_CONTINUE_LINKING);
4489 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4490 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4492 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4493 si = gsi_start_bb (seq_start_bb);
4495 t = fold_convert (itype, s0);
4496 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4497 if (POINTER_TYPE_P (type))
4498 t = fold_build_pointer_plus (fd->loop.n1, t);
4499 else
4500 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4501 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4502 false, GSI_CONTINUE_LINKING);
4503 stmt = gimple_build_assign (fd->loop.v, t);
4504 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4506 t = fold_convert (itype, e0);
4507 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4508 if (POINTER_TYPE_P (type))
4509 t = fold_build_pointer_plus (fd->loop.n1, t);
4510 else
4511 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4512 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4513 false, GSI_CONTINUE_LINKING);
4515 /* The code controlling the sequential loop goes in CONT_BB,
4516 replacing the GIMPLE_OMP_CONTINUE. */
4517 si = gsi_last_bb (cont_bb);
4518 stmt = gsi_stmt (si);
4519 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4520 v_main = gimple_omp_continue_control_use (stmt);
4521 v_back = gimple_omp_continue_control_def (stmt);
4523 if (POINTER_TYPE_P (type))
4524 t = fold_build_pointer_plus (v_main, fd->loop.step);
4525 else
4526 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4527 stmt = gimple_build_assign (v_back, t);
4528 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4530 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4531 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4533 /* Remove GIMPLE_OMP_CONTINUE. */
4534 gsi_remove (&si, true);
4536 /* Trip update code goes into TRIP_UPDATE_BB. */
4537 si = gsi_start_bb (trip_update_bb);
4539 t = build_int_cst (itype, 1);
4540 t = build2 (PLUS_EXPR, itype, trip_main, t);
4541 stmt = gimple_build_assign (trip_back, t);
4542 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4544 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4545 si = gsi_last_bb (exit_bb);
4546 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4547 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4548 false, GSI_SAME_STMT);
4549 gsi_remove (&si, true);
4551 /* Connect the new blocks. */
4552 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4553 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4555 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4556 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4558 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4560 if (gimple_in_ssa_p (cfun))
4562 gimple_stmt_iterator psi;
4563 gimple phi;
4564 edge re, ene;
4565 edge_var_map_vector *head;
4566 edge_var_map *vm;
4567 size_t i;
4569 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4570 remove arguments of the phi nodes in fin_bb. We need to create
4571 appropriate phi nodes in iter_part_bb instead. */
4572 se = single_pred_edge (fin_bb);
4573 re = single_succ_edge (trip_update_bb);
4574 head = redirect_edge_var_map_vector (re);
4575 ene = single_succ_edge (entry_bb);
4577 psi = gsi_start_phis (fin_bb);
4578 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
4579 gsi_next (&psi), ++i)
4581 gimple nphi;
4582 source_location locus;
4584 phi = gsi_stmt (psi);
4585 t = gimple_phi_result (phi);
4586 gcc_assert (t == redirect_edge_var_map_result (vm));
4587 nphi = create_phi_node (t, iter_part_bb);
4589 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4590 locus = gimple_phi_arg_location_from_edge (phi, se);
4592 /* A special case -- fd->loop.v is not yet computed in
4593 iter_part_bb, we need to use v_extra instead. */
4594 if (t == fd->loop.v)
4595 t = v_extra;
4596 add_phi_arg (nphi, t, ene, locus);
4597 locus = redirect_edge_var_map_location (vm);
4598 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4600 gcc_assert (!gsi_end_p (psi) && i == head->length ());
4601 redirect_edge_var_map_clear (re);
4602 while (1)
4604 psi = gsi_start_phis (fin_bb);
4605 if (gsi_end_p (psi))
4606 break;
4607 remove_phi_node (&psi, false);
4610 /* Make phi node for trip. */
4611 phi = create_phi_node (trip_main, iter_part_bb);
4612 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4613 UNKNOWN_LOCATION);
4614 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4615 UNKNOWN_LOCATION);
4618 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4619 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4620 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4621 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4622 recompute_dominator (CDI_DOMINATORS, fin_bb));
4623 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4624 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4625 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4626 recompute_dominator (CDI_DOMINATORS, body_bb));
4630 /* Expand the OpenMP loop defined by REGION. */
4632 static void
4633 expand_omp_for (struct omp_region *region)
4635 struct omp_for_data fd;
4636 struct omp_for_data_loop *loops;
4638 loops
4639 = (struct omp_for_data_loop *)
4640 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4641 * sizeof (struct omp_for_data_loop));
4642 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4643 region->sched_kind = fd.sched_kind;
4645 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4646 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4647 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4648 if (region->cont)
4650 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4651 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4652 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4655 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4656 && !fd.have_ordered
4657 && fd.collapse == 1
4658 && region->cont != NULL)
4660 if (fd.chunk_size == NULL)
4661 expand_omp_for_static_nochunk (region, &fd);
4662 else
4663 expand_omp_for_static_chunk (region, &fd);
4665 else
4667 int fn_index, start_ix, next_ix;
4669 if (fd.chunk_size == NULL
4670 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
4671 fd.chunk_size = integer_zero_node;
4672 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4673 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4674 ? 3 : fd.sched_kind;
4675 fn_index += fd.have_ordered * 4;
4676 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
4677 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
4678 if (fd.iter_type == long_long_unsigned_type_node)
4680 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4681 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
4682 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4683 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
4685 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4686 (enum built_in_function) next_ix);
4689 if (gimple_in_ssa_p (cfun))
4690 update_ssa (TODO_update_ssa_only_virtuals);
4694 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4696 v = GOMP_sections_start (n);
4698 switch (v)
4700 case 0:
4701 goto L2;
4702 case 1:
4703 section 1;
4704 goto L1;
4705 case 2:
4707 case n:
4709 default:
4710 abort ();
4713 v = GOMP_sections_next ();
4714 goto L0;
4716 reduction;
4718 If this is a combined parallel sections, replace the call to
4719 GOMP_sections_start with a call to GOMP_sections_next. */
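/* As a sketch (the source construct here is assumed for illustration,
   not taken from this file), a two-section construct

     #pragma omp sections
     {
       #pragma omp section
	 foo ();
       #pragma omp section
	 bar ();
     }

   follows the pseudocode above roughly as

     v = GOMP_sections_start (2);
   L0:
     switch (v)
       {
       case 0: goto L2;
       case 1: foo (); goto L1;
       case 2: bar (); goto L1;
       default: abort ();
       }
   L1:
     v = GOMP_sections_next ();
     goto L0;
   L2:
     ;  */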
4721 static void
4722 expand_omp_sections (struct omp_region *region)
4724 tree t, u, vin = NULL, vmain, vnext, l2;
4725 vec<tree> label_vec;
4726 unsigned len;
4727 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4728 gimple_stmt_iterator si, switch_si;
4729 gimple sections_stmt, stmt, cont;
4730 edge_iterator ei;
4731 edge e;
4732 struct omp_region *inner;
4733 unsigned i, casei;
4734 bool exit_reachable = region->cont != NULL;
4736 gcc_assert (region->exit != NULL);
4737 entry_bb = region->entry;
4738 l0_bb = single_succ (entry_bb);
4739 l1_bb = region->cont;
4740 l2_bb = region->exit;
4741 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
4742 l2 = gimple_block_label (l2_bb);
4743 else
4745 /* This can happen if there are reductions. */
4746 len = EDGE_COUNT (l0_bb->succs);
4747 gcc_assert (len > 0);
4748 e = EDGE_SUCC (l0_bb, len - 1);
4749 si = gsi_last_bb (e->dest);
4750 l2 = NULL_TREE;
4751 if (gsi_end_p (si)
4752 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4753 l2 = gimple_block_label (e->dest);
4754 else
4755 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4757 si = gsi_last_bb (e->dest);
4758 if (gsi_end_p (si)
4759 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4761 l2 = gimple_block_label (e->dest);
4762 break;
4766 if (exit_reachable)
4767 default_bb = create_empty_bb (l1_bb->prev_bb);
4768 else
4769 default_bb = create_empty_bb (l0_bb);
4771 /* We will build a switch() with enough cases for all the
4772 GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
4773 and a default case to abort if something goes wrong. */
4774 len = EDGE_COUNT (l0_bb->succs);
4776 /* Use vec::quick_push on label_vec throughout, since we know the size
4777 in advance. */
4778 label_vec.create (len);
4780 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4781 GIMPLE_OMP_SECTIONS statement. */
4782 si = gsi_last_bb (entry_bb);
4783 sections_stmt = gsi_stmt (si);
4784 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4785 vin = gimple_omp_sections_control (sections_stmt);
4786 if (!is_combined_parallel (region))
4788 /* If we are not inside a combined parallel+sections region,
4789 call GOMP_sections_start. */
4790 t = build_int_cst (unsigned_type_node,
4791 exit_reachable ? len - 1 : len);
4792 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
4793 stmt = gimple_build_call (u, 1, t);
4795 else
4797 /* Otherwise, call GOMP_sections_next. */
4798 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4799 stmt = gimple_build_call (u, 0);
4801 gimple_call_set_lhs (stmt, vin);
4802 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4803 gsi_remove (&si, true);
4805 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4806 L0_BB. */
4807 switch_si = gsi_last_bb (l0_bb);
4808 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4809 if (exit_reachable)
4811 cont = last_stmt (l1_bb);
4812 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4813 vmain = gimple_omp_continue_control_use (cont);
4814 vnext = gimple_omp_continue_control_def (cont);
4816 else
4818 vmain = vin;
4819 vnext = NULL_TREE;
4822 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
4823 label_vec.quick_push (t);
4824 i = 1;
4826 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4827 for (inner = region->inner, casei = 1;
4828 inner;
4829 inner = inner->next, i++, casei++)
4831 basic_block s_entry_bb, s_exit_bb;
4833 /* Skip optional reduction region. */
4834 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4836 --i;
4837 --casei;
4838 continue;
4841 s_entry_bb = inner->entry;
4842 s_exit_bb = inner->exit;
4844 t = gimple_block_label (s_entry_bb);
4845 u = build_int_cst (unsigned_type_node, casei);
4846 u = build_case_label (u, NULL, t);
4847 label_vec.quick_push (u);
4849 si = gsi_last_bb (s_entry_bb);
4850 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4851 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4852 gsi_remove (&si, true);
4853 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4855 if (s_exit_bb == NULL)
4856 continue;
4858 si = gsi_last_bb (s_exit_bb);
4859 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4860 gsi_remove (&si, true);
4862 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4865 /* Error handling code goes in DEFAULT_BB. */
4866 t = gimple_block_label (default_bb);
4867 u = build_case_label (NULL, NULL, t);
4868 make_edge (l0_bb, default_bb, 0);
4870 stmt = gimple_build_switch (vmain, u, label_vec);
4871 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4872 gsi_remove (&switch_si, true);
4873 label_vec.release ();
4875 si = gsi_start_bb (default_bb);
4876 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
4877 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4879 if (exit_reachable)
4881 tree bfn_decl;
4883 /* Code to get the next section goes in L1_BB. */
4884 si = gsi_last_bb (l1_bb);
4885 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4887 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4888 stmt = gimple_build_call (bfn_decl, 0);
4889 gimple_call_set_lhs (stmt, vnext);
4890 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4891 gsi_remove (&si, true);
4893 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4896 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4897 si = gsi_last_bb (l2_bb);
4898 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4899 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
4900 else
4901 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
4902 stmt = gimple_build_call (t, 0);
4903 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4904 gsi_remove (&si, true);
4906 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4910 /* Expand code for an OpenMP single directive. We've already expanded
4911 much of the code; here we simply place the GOMP_barrier call. */
4913 static void
4914 expand_omp_single (struct omp_region *region)
4916 basic_block entry_bb, exit_bb;
4917 gimple_stmt_iterator si;
4918 bool need_barrier = false;
4920 entry_bb = region->entry;
4921 exit_bb = region->exit;
4923 si = gsi_last_bb (entry_bb);
4924 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4925 be removed. We need to ensure that the thread that entered the single
4926 does not exit before the data is copied out by the other threads. */
4927 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4928 OMP_CLAUSE_COPYPRIVATE))
4929 need_barrier = true;
4930 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4931 gsi_remove (&si, true);
4932 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4934 si = gsi_last_bb (exit_bb);
4935 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4936 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4937 false, GSI_SAME_STMT);
4938 gsi_remove (&si, true);
4939 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4943 /* Generic expansion for OpenMP synchronization directives: master,
4944 ordered and critical. All we need to do here is remove the entry
4945 and exit markers for REGION. */
4947 static void
4948 expand_omp_synch (struct omp_region *region)
4950 basic_block entry_bb, exit_bb;
4951 gimple_stmt_iterator si;
4953 entry_bb = region->entry;
4954 exit_bb = region->exit;
4956 si = gsi_last_bb (entry_bb);
4957 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4958 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4959 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4960 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4961 gsi_remove (&si, true);
4962 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4964 if (exit_bb)
4966 si = gsi_last_bb (exit_bb);
4967 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4968 gsi_remove (&si, true);
4969 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4973 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4974 operation as a normal volatile load. */
4976 static bool
4977 expand_omp_atomic_load (basic_block load_bb, tree addr,
4978 tree loaded_val, int index)
4980 enum built_in_function tmpbase;
4981 gimple_stmt_iterator gsi;
4982 basic_block store_bb;
4983 location_t loc;
4984 gimple stmt;
4985 tree decl, call, type, itype;
4987 gsi = gsi_last_bb (load_bb);
4988 stmt = gsi_stmt (gsi);
4989 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
4990 loc = gimple_location (stmt);
4992 /* ??? If the target does not implement atomic_load_optab[mode], and mode
4993 is smaller than word size, then expand_atomic_load assumes that the load
4994 is atomic. We could avoid the builtin entirely in this case. */
4996 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
4997 decl = builtin_decl_explicit (tmpbase);
4998 if (decl == NULL_TREE)
4999 return false;
5001 type = TREE_TYPE (loaded_val);
5002 itype = TREE_TYPE (TREE_TYPE (decl));
5004 call = build_call_expr_loc (loc, decl, 2, addr,
5005 build_int_cst (NULL, MEMMODEL_RELAXED));
5006 if (!useless_type_conversion_p (type, itype))
5007 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5008 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5010 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5011 gsi_remove (&gsi, true);
5013 store_bb = single_succ (load_bb);
5014 gsi = gsi_last_bb (store_bb);
5015 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5016 gsi_remove (&gsi, true);
5018 if (gimple_in_ssa_p (cfun))
5019 update_ssa (TODO_update_ssa_no_phi);
5021 return true;
5024 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5025 operation as a normal volatile store. */
5027 static bool
5028 expand_omp_atomic_store (basic_block load_bb, tree addr,
5029 tree loaded_val, tree stored_val, int index)
5031 enum built_in_function tmpbase;
5032 gimple_stmt_iterator gsi;
5033 basic_block store_bb = single_succ (load_bb);
5034 location_t loc;
5035 gimple stmt;
5036 tree decl, call, type, itype;
5037 enum machine_mode imode;
5038 bool exchange;
5040 gsi = gsi_last_bb (load_bb);
5041 stmt = gsi_stmt (gsi);
5042 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
5044 /* If the load value is needed, then this isn't a store but an exchange. */
5045 exchange = gimple_omp_atomic_need_value_p (stmt);
5047 gsi = gsi_last_bb (store_bb);
5048 stmt = gsi_stmt (gsi);
5049 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
5050 loc = gimple_location (stmt);
5052 /* ??? If the target does not implement atomic_store_optab[mode], and mode
5053 is smaller than word size, then expand_atomic_store assumes that the store
5054 is atomic. We could avoid the builtin entirely in this case. */
5056 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
5057 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
5058 decl = builtin_decl_explicit (tmpbase);
5059 if (decl == NULL_TREE)
5060 return false;
5062 type = TREE_TYPE (stored_val);
5064 /* Dig out the type of the function's second argument. */
5065 itype = TREE_TYPE (decl);
5066 itype = TYPE_ARG_TYPES (itype);
5067 itype = TREE_CHAIN (itype);
5068 itype = TREE_VALUE (itype);
5069 imode = TYPE_MODE (itype);
5071 if (exchange && !can_atomic_exchange_p (imode, true))
5072 return false;
5074 if (!useless_type_conversion_p (itype, type))
5075 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
5076 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
5077 build_int_cst (NULL, MEMMODEL_RELAXED));
5078 if (exchange)
5080 if (!useless_type_conversion_p (type, itype))
5081 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5082 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5085 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5086 gsi_remove (&gsi, true);
5088 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
5089 gsi = gsi_last_bb (load_bb);
5090 gsi_remove (&gsi, true);
5092 if (gimple_in_ssa_p (cfun))
5093 update_ssa (TODO_update_ssa_no_phi);
5095 return true;
5098 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5099 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
5100 size of the data type, and thus usable to find the index of the builtin
5101 decl. Returns false if the expression is not of the proper form. */
5103 static bool
5104 expand_omp_atomic_fetch_op (basic_block load_bb,
5105 tree addr, tree loaded_val,
5106 tree stored_val, int index)
5108 enum built_in_function oldbase, newbase, tmpbase;
5109 tree decl, itype, call;
5110 tree lhs, rhs;
5111 basic_block store_bb = single_succ (load_bb);
5112 gimple_stmt_iterator gsi;
5113 gimple stmt;
5114 location_t loc;
5115 enum tree_code code;
5116 bool need_old, need_new;
5117 enum machine_mode imode;
5119 /* We expect to find the following sequences:
5121 load_bb:
5122 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
5124 store_bb:
5125 val = tmp OP something; (or: something OP tmp)
5126 GIMPLE_OMP_STORE (val)
5128 ???FIXME: Allow a more flexible sequence.
5129 Perhaps use data flow to pick the statements.
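/* As a concrete sketch (the source statement is assumed for
   illustration, not taken from this file): for

	#pragma omp atomic
	x += 1;

   load_bb holds GIMPLE_OMP_ATOMIC_LOAD (tmp, &x), store_bb holds
   val = tmp + 1 followed by GIMPLE_OMP_ATOMIC_STORE (val), and the
   whole sequence becomes a single call to the size-specific
   __atomic_fetch_add builtin with the RELAXED memory model.  */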
5133 gsi = gsi_after_labels (store_bb);
5134 stmt = gsi_stmt (gsi);
5135 loc = gimple_location (stmt);
5136 if (!is_gimple_assign (stmt))
5137 return false;
5138 gsi_next (&gsi);
5139 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
5140 return false;
5141 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
5142 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
5143 gcc_checking_assert (!need_old || !need_new);
5145 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
5146 return false;
5148 /* Check for one of the supported fetch-op operations. */
5149 code = gimple_assign_rhs_code (stmt);
5150 switch (code)
5152 case PLUS_EXPR:
5153 case POINTER_PLUS_EXPR:
5154 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
5155 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
5156 break;
5157 case MINUS_EXPR:
5158 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
5159 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
5160 break;
5161 case BIT_AND_EXPR:
5162 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
5163 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
5164 break;
5165 case BIT_IOR_EXPR:
5166 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
5167 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
5168 break;
5169 case BIT_XOR_EXPR:
5170 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
5171 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
5172 break;
5173 default:
5174 return false;
5177 /* Make sure the expression is of the proper form. */
5178 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5179 rhs = gimple_assign_rhs2 (stmt);
5180 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5181 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5182 rhs = gimple_assign_rhs1 (stmt);
5183 else
5184 return false;
5186 tmpbase = ((enum built_in_function)
5187 ((need_new ? newbase : oldbase) + index + 1));
5188 decl = builtin_decl_explicit (tmpbase);
5189 if (decl == NULL_TREE)
5190 return false;
5191 itype = TREE_TYPE (TREE_TYPE (decl));
5192 imode = TYPE_MODE (itype);
5194 /* We could test all of the various optabs involved, but the fact of the
5195 matter is that (with the exception of i486 vs i586 and xadd) all targets
5196 that support any atomic operation optab also implement compare-and-swap.
5197 Let optabs.c take care of expanding any compare-and-swap loop. */
5198 if (!can_compare_and_swap_p (imode, true))
5199 return false;
5201 gsi = gsi_last_bb (load_bb);
5202 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
5204 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
5205 It only requires that the operation happen atomically. Thus we can
5206 use the RELAXED memory model. */
5207 call = build_call_expr_loc (loc, decl, 3, addr,
5208 fold_convert_loc (loc, itype, rhs),
5209 build_int_cst (NULL, MEMMODEL_RELAXED));
5211 if (need_old || need_new)
5213 lhs = need_old ? loaded_val : stored_val;
5214 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
5215 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
5217 else
5218 call = fold_convert_loc (loc, void_type_node, call);
5219 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5220 gsi_remove (&gsi, true);
5222 gsi = gsi_last_bb (store_bb);
5223 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5224 gsi_remove (&gsi, true);
5225 gsi = gsi_last_bb (store_bb);
5226 gsi_remove (&gsi, true);
5228 if (gimple_in_ssa_p (cfun))
5229 update_ssa (TODO_update_ssa_no_phi);
5231 return true;
5234 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5236 oldval = *addr;
5237 repeat:
5238 newval = rhs; // with oldval replacing *addr in rhs
5239 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
5240 if (oldval != newval)
5241 goto repeat;
5243 INDEX is log2 of the size of the data type, and thus usable to find the
5244 index of the builtin decl. */
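/* For example (the source statement is assumed for illustration,
   not taken from this file), for

     #pragma omp atomic
     d *= 2.0;

   with a double D, the value is accessed through a same-sized integer
   view and the loop above repeats the size-specific
   __sync_val_compare_and_swap call on that view until the swap
   succeeds.  */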
5246 static bool
5247 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5248 tree addr, tree loaded_val, tree stored_val,
5249 int index)
5251 tree loadedi, storedi, initial, new_storedi, old_vali;
5252 tree type, itype, cmpxchg, iaddr;
5253 gimple_stmt_iterator si;
5254 basic_block loop_header = single_succ (load_bb);
5255 gimple phi, stmt;
5256 edge e;
5257 enum built_in_function fncode;
5259 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
5260 order to use the RELAXED memory model effectively. */
5261 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
5262 + index + 1);
5263 cmpxchg = builtin_decl_explicit (fncode);
5264 if (cmpxchg == NULL_TREE)
5265 return false;
5266 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5267 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5269 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
5270 return false;
5272 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5273 si = gsi_last_bb (load_bb);
5274 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5276 /* For floating-point values, we'll need to view-convert them to integers
5277 so that we can perform the atomic compare and swap. Simplify the
5278 following code by always setting up the "i"ntegral variables. */
5279 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5281 tree iaddr_val;
5283 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
5284 true), NULL);
5285 iaddr_val
5286 = force_gimple_operand_gsi (&si,
5287 fold_convert (TREE_TYPE (iaddr), addr),
5288 false, NULL_TREE, true, GSI_SAME_STMT);
5289 stmt = gimple_build_assign (iaddr, iaddr_val);
5290 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5291 loadedi = create_tmp_var (itype, NULL);
5292 if (gimple_in_ssa_p (cfun))
5293 loadedi = make_ssa_name (loadedi, NULL);
5295 else
5297 iaddr = addr;
5298 loadedi = loaded_val;
5301 initial
5302 = force_gimple_operand_gsi (&si,
5303 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5304 iaddr,
5305 build_int_cst (TREE_TYPE (iaddr), 0)),
5306 true, NULL_TREE, true, GSI_SAME_STMT);
5308 /* Move the value to the LOADEDI temporary. */
5309 if (gimple_in_ssa_p (cfun))
5311 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5312 phi = create_phi_node (loadedi, loop_header);
5313 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5314 initial);
5316 else
5317 gsi_insert_before (&si,
5318 gimple_build_assign (loadedi, initial),
5319 GSI_SAME_STMT);
5320 if (loadedi != loaded_val)
5322 gimple_stmt_iterator gsi2;
5323 tree x;
5325 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5326 gsi2 = gsi_start_bb (loop_header);
5327 if (gimple_in_ssa_p (cfun))
5329 gimple stmt;
5330 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5331 true, GSI_SAME_STMT);
5332 stmt = gimple_build_assign (loaded_val, x);
5333 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5335 else
5337 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5338 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5339 true, GSI_SAME_STMT);
5342 gsi_remove (&si, true);
5344 si = gsi_last_bb (store_bb);
5345 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5347 if (iaddr == addr)
5348 storedi = stored_val;
5349 else
5350 storedi =
5351 force_gimple_operand_gsi (&si,
5352 build1 (VIEW_CONVERT_EXPR, itype,
5353 stored_val), true, NULL_TREE, true,
5354 GSI_SAME_STMT);
5356 /* Build the compare&swap statement. */
5357 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5358 new_storedi = force_gimple_operand_gsi (&si,
5359 fold_convert (TREE_TYPE (loadedi),
5360 new_storedi),
5361 true, NULL_TREE,
5362 true, GSI_SAME_STMT);
5364 if (gimple_in_ssa_p (cfun))
5365 old_vali = loadedi;
5366 else
5368 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5369 stmt = gimple_build_assign (old_vali, loadedi);
5370 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5372 stmt = gimple_build_assign (loadedi, new_storedi);
5373 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5376 /* Note that we always perform the comparison as an integer, even for
5377 floating point. This allows the atomic operation to properly
5378 succeed even with NaNs and -0.0. */
5379 stmt = gimple_build_cond_empty
5380 (build2 (NE_EXPR, boolean_type_node,
5381 new_storedi, old_vali));
5382 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5384 /* Update cfg. */
5385 e = single_succ_edge (store_bb);
5386 e->flags &= ~EDGE_FALLTHRU;
5387 e->flags |= EDGE_FALSE_VALUE;
5389 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5391 /* Copy the new value to loadedi (we already did that before the condition
5392 if we are not in SSA). */
5393 if (gimple_in_ssa_p (cfun))
5395 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5396 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5399 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5400 gsi_remove (&si, true);
5402 if (gimple_in_ssa_p (cfun))
5403 update_ssa (TODO_update_ssa_no_phi);
5405 return true;
5408 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5410 GOMP_atomic_start ();
5411 *addr = rhs;
5412 GOMP_atomic_end ();
5414 The result is not globally atomic, but works so long as all parallel
5415 references are within #pragma omp atomic directives. According to
5416 responses received from omp@openmp.org, this appears to be within spec,
5417 which makes sense, since that's how several other compilers handle
5418 this situation as well.
5419 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5420 expanding. STORED_VAL is the operand of the matching
5421 GIMPLE_OMP_ATOMIC_STORE.
5423 We replace
5424 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5425 loaded_val = *addr;
5427 and replace
5428 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5429 *addr = stored_val;
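/* For example (the source statement is assumed for illustration,
   not taken from this file), an update

     #pragma omp atomic
     x += 1;

   lowered through this fallback ends up as

     GOMP_atomic_start ();
     loaded_val = x;
     stored_val = loaded_val + 1;
     x = stored_val;
     GOMP_atomic_end ();  */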
5432 static bool
5433 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5434 tree addr, tree loaded_val, tree stored_val)
5436 gimple_stmt_iterator si;
5437 gimple stmt;
5438 tree t;
5440 si = gsi_last_bb (load_bb);
5441 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5443 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
5444 t = build_call_expr (t, 0);
5445 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5447 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
5448 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5449 gsi_remove (&si, true);
5451 si = gsi_last_bb (store_bb);
5452 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5454 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5455 stored_val);
5456 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5458 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
5459 t = build_call_expr (t, 0);
5460 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5461 gsi_remove (&si, true);
5463 if (gimple_in_ssa_p (cfun))
5464 update_ssa (TODO_update_ssa_no_phi);
5465 return true;
5468 /* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand it
5469 using expand_omp_atomic_fetch_op. If that fails, we try to
5470 call expand_omp_atomic_pipeline, and if that fails too, the
5471 ultimate fallback is wrapping the operation in a mutex
5472 (expand_omp_atomic_mutex). REGION is the atomic region built
5473 by build_omp_regions_1(). */
5475 static void
5476 expand_omp_atomic (struct omp_region *region)
5478 basic_block load_bb = region->entry, store_bb = region->exit;
5479 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5480 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5481 tree addr = gimple_omp_atomic_load_rhs (load);
5482 tree stored_val = gimple_omp_atomic_store_val (store);
5483 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5484 HOST_WIDE_INT index;
5486 /* Make sure the type is one of the supported sizes. */
5487 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5488 index = exact_log2 (index);
5489 if (index >= 0 && index <= 4)
5491 unsigned int align = TYPE_ALIGN_UNIT (type);
5493 /* __sync builtins require strict data alignment. */
5494 if (exact_log2 (align) >= index)
5496 /* Atomic load. */
5497 if (loaded_val == stored_val
5498 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5499 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5500 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5501 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
5502 return;
5504 /* Atomic store. */
5505 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5506 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5507 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5508 && store_bb == single_succ (load_bb)
5509 && first_stmt (store_bb) == store
5510 && expand_omp_atomic_store (load_bb, addr, loaded_val,
5511 stored_val, index))
5512 return;
5514 /* When possible, use specialized atomic update functions. */
5515 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5516 && store_bb == single_succ (load_bb)
5517 && expand_omp_atomic_fetch_op (load_bb, addr,
5518 loaded_val, stored_val, index))
5519 return;
5521 /* If we don't have specialized __sync builtins, try to implement
5522 it as a compare and swap loop. */
5523 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5524 loaded_val, stored_val, index))
5525 return;
5529 /* The ultimate fallback is wrapping the operation in a mutex. */
5530 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5534 /* Expand the parallel region tree rooted at REGION. Expansion
5535 proceeds in depth-first order. Innermost regions are expanded
5536 first. This way, parallel regions that require a new function to
5537 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5538 internal dependencies in their body. */
5540 static void
5541 expand_omp (struct omp_region *region)
5543 while (region)
5545 location_t saved_location;
5547 /* First, determine whether this is a combined parallel+workshare
5548 region. */
5549 if (region->type == GIMPLE_OMP_PARALLEL)
5550 determine_parallel_type (region);
5552 if (region->inner)
5553 expand_omp (region->inner);
5555 saved_location = input_location;
5556 if (gimple_has_location (last_stmt (region->entry)))
5557 input_location = gimple_location (last_stmt (region->entry));
5559 switch (region->type)
5561 case GIMPLE_OMP_PARALLEL:
5562 case GIMPLE_OMP_TASK:
5563 expand_omp_taskreg (region);
5564 break;
5566 case GIMPLE_OMP_FOR:
5567 expand_omp_for (region);
5568 break;
5570 case GIMPLE_OMP_SECTIONS:
5571 expand_omp_sections (region);
5572 break;
5574 case GIMPLE_OMP_SECTION:
5575 /* Individual omp sections are handled together with their
5576 parent GIMPLE_OMP_SECTIONS region. */
5577 break;
5579 case GIMPLE_OMP_SINGLE:
5580 expand_omp_single (region);
5581 break;
5583 case GIMPLE_OMP_MASTER:
5584 case GIMPLE_OMP_ORDERED:
5585 case GIMPLE_OMP_CRITICAL:
5586 expand_omp_synch (region);
5587 break;
5589 case GIMPLE_OMP_ATOMIC_LOAD:
5590 expand_omp_atomic (region);
5591 break;
5593 default:
5594 gcc_unreachable ();
5597 input_location = saved_location;
5598 region = region->next;
5603 /* Helper for build_omp_regions. Scan the dominator tree starting at
5604 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5605 true, the function ends once a single tree is built (otherwise, a whole
5606 forest of OMP constructs may be built). */
5608 static void
5609 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5610 bool single_tree)
5612 gimple_stmt_iterator gsi;
5613 gimple stmt;
5614 basic_block son;
5616 gsi = gsi_last_bb (bb);
5617 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5619 struct omp_region *region;
5620 enum gimple_code code;
5622 stmt = gsi_stmt (gsi);
5623 code = gimple_code (stmt);
5624 if (code == GIMPLE_OMP_RETURN)
5626 /* STMT is the return point out of region PARENT. Mark it
5627 as the exit point and make PARENT the immediately
5628 enclosing region. */
5629 gcc_assert (parent);
5630 region = parent;
5631 region->exit = bb;
5632 parent = parent->outer;
5634 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5636 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5637 GIMPLE_OMP_RETURN, but matches with
5638 GIMPLE_OMP_ATOMIC_LOAD. */
5639 gcc_assert (parent);
5640 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5641 region = parent;
5642 region->exit = bb;
5643 parent = parent->outer;
5646 else if (code == GIMPLE_OMP_CONTINUE)
5648 gcc_assert (parent);
5649 parent->cont = bb;
5651 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5653 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5654 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5657 else
5659 /* Otherwise, this directive becomes the parent for a new
5660 region. */
5661 region = new_omp_region (bb, code, parent);
5662 parent = region;
5666 if (single_tree && !parent)
5667 return;
5669 for (son = first_dom_son (CDI_DOMINATORS, bb);
5670 son;
5671 son = next_dom_son (CDI_DOMINATORS, son))
5672 build_omp_regions_1 (son, parent, single_tree);
5675 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5676 root_omp_region. */
5678 static void
5679 build_omp_regions_root (basic_block root)
5681 gcc_assert (root_omp_region == NULL);
5682 build_omp_regions_1 (root, NULL, true);
5683 gcc_assert (root_omp_region != NULL);
5686 /* Expands omp construct (and its subconstructs) starting in HEAD. */
5688 void
5689 omp_expand_local (basic_block head)
5691 build_omp_regions_root (head);
5692 if (dump_file && (dump_flags & TDF_DETAILS))
5694 fprintf (dump_file, "\nOMP region tree\n\n");
5695 dump_omp_region (dump_file, root_omp_region, 0);
5696 fprintf (dump_file, "\n");
5699 remove_exit_barriers (root_omp_region);
5700 expand_omp (root_omp_region);
5702 free_omp_regions ();
5705 /* Scan the CFG and build a tree of OMP regions, storing the root of
5706 the OMP region tree in root_omp_region. */
5708 static void
5709 build_omp_regions (void)
5711 gcc_assert (root_omp_region == NULL);
5712 calculate_dominance_info (CDI_DOMINATORS);
5713 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5716 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5718 static unsigned int
5719 execute_expand_omp (void)
5721 build_omp_regions ();
5723 if (!root_omp_region)
5724 return 0;
5726 if (dump_file)
5728 fprintf (dump_file, "\nOMP region tree\n\n");
5729 dump_omp_region (dump_file, root_omp_region, 0);
5730 fprintf (dump_file, "\n");
5733 remove_exit_barriers (root_omp_region);
5735 expand_omp (root_omp_region);
5737 cleanup_tree_cfg ();
5739 free_omp_regions ();
5741 return 0;
5744 /* OMP expansion -- the default pass, run before creation of SSA form. */
5746 static bool
5747 gate_expand_omp (void)
5749 return (flag_openmp != 0 && !seen_error ());
5752 struct gimple_opt_pass pass_expand_omp =
5755 GIMPLE_PASS,
5756 "ompexp", /* name */
5757 OPTGROUP_NONE, /* optinfo_flags */
5758 gate_expand_omp, /* gate */
5759 execute_expand_omp, /* execute */
5760 NULL, /* sub */
5761 NULL, /* next */
5762 0, /* static_pass_number */
5763 TV_NONE, /* tv_id */
5764 PROP_gimple_any, /* properties_required */
5765 0, /* properties_provided */
5766 0, /* properties_destroyed */
5767 0, /* todo_flags_start */
5768 0 /* todo_flags_finish */
5772 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5774 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5775 CTX is the enclosing OMP context for the current statement. */
5777 static void
5778 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5780 tree block, control;
5781 gimple_stmt_iterator tgsi;
5782 gimple stmt, new_stmt, bind, t;
5783 gimple_seq ilist, dlist, olist, new_body;
5784 struct gimplify_ctx gctx;
5786 stmt = gsi_stmt (*gsi_p);
5788 push_gimplify_context (&gctx);
5790 dlist = NULL;
5791 ilist = NULL;
5792 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5793 &ilist, &dlist, ctx);
5795 new_body = gimple_omp_body (stmt);
5796 gimple_omp_set_body (stmt, NULL);
5797 tgsi = gsi_start (new_body);
5798 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
5800 omp_context *sctx;
5801 gimple sec_start;
5803 sec_start = gsi_stmt (tgsi);
5804 sctx = maybe_lookup_ctx (sec_start);
5805 gcc_assert (sctx);
5807 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
5808 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
5809 GSI_CONTINUE_LINKING);
5810 gimple_omp_set_body (sec_start, NULL);
5812 if (gsi_one_before_end_p (tgsi))
5814 gimple_seq l = NULL;
5815 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5816 &l, ctx);
5817 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
5818 gimple_omp_section_set_last (sec_start);
5821 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
5822 GSI_CONTINUE_LINKING);
5825 block = make_node (BLOCK);
5826 bind = gimple_build_bind (NULL, new_body, block);
5828 olist = NULL;
5829 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5831 block = make_node (BLOCK);
5832 new_stmt = gimple_build_bind (NULL, NULL, block);
5833 gsi_replace (gsi_p, new_stmt, true);
5835 pop_gimplify_context (new_stmt);
5836 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5837 BLOCK_VARS (block) = gimple_bind_vars (bind);
5838 if (BLOCK_VARS (block))
5839 TREE_USED (block) = 1;
5841 new_body = NULL;
5842 gimple_seq_add_seq (&new_body, ilist);
5843 gimple_seq_add_stmt (&new_body, stmt);
5844 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5845 gimple_seq_add_stmt (&new_body, bind);
5847 control = create_tmp_var (unsigned_type_node, ".section");
5848 t = gimple_build_omp_continue (control, control);
5849 gimple_omp_sections_set_control (stmt, control);
5850 gimple_seq_add_stmt (&new_body, t);
5852 gimple_seq_add_seq (&new_body, olist);
5853 gimple_seq_add_seq (&new_body, dlist);
5855 new_body = maybe_catch_exception (new_body);
5857 t = gimple_build_omp_return
5858 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5859 OMP_CLAUSE_NOWAIT));
5860 gimple_seq_add_stmt (&new_body, t);
5862 gimple_bind_set_body (new_stmt, new_body);
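/* Editorial sketch (not part of omp-low.c): the user-level shape that
   lower_omp_sections handles.  Each "#pragma omp section" body becomes a
   GIMPLE_OMP_SECTION child, the last one is marked via
   gimple_omp_section_set_last, and the construct is driven by the
   ".section" control variable created above.  Function and variable
   names here are illustrative only.  */
void
sections_example (int *a, int *b)
{
#pragma omp sections nowait       /* nowait => gimple_build_omp_return (true) */
  {
#pragma omp section
    *a = 1;
#pragma omp section
    *b = 2;                       /* last section */
  }
}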
5866 /* A subroutine of lower_omp_single. Expand the simple form of
5867 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5869 if (GOMP_single_start ())
5870 BODY;
5871 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5873 FIXME. It may be better to delay expanding the logic of this until
5874 pass_expand_omp. The expanded logic may make the job more difficult
5875 for a synchronization analysis pass. */
5877 static void
5878 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5880 location_t loc = gimple_location (single_stmt);
5881 tree tlabel = create_artificial_label (loc);
5882 tree flabel = create_artificial_label (loc);
5883 gimple call, cond;
5884 tree lhs, decl;
5886 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
5887 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5888 call = gimple_build_call (decl, 0);
5889 gimple_call_set_lhs (call, lhs);
5890 gimple_seq_add_stmt (pre_p, call);
5892 cond = gimple_build_cond (EQ_EXPR, lhs,
5893 fold_convert_loc (loc, TREE_TYPE (lhs),
5894 boolean_true_node),
5895 tlabel, flabel);
5896 gimple_seq_add_stmt (pre_p, cond);
5897 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5898 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5899 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
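/* Editorial sketch (not part of omp-low.c): the expansion described in the
   comment above, written out as plain C.  GOMP_single_start and GOMP_barrier
   are the libgomp entry points behind BUILT_IN_GOMP_SINGLE_START and the
   barrier; the helper function itself is illustrative.  */
#include <stdbool.h>
extern bool GOMP_single_start (void);
extern void GOMP_barrier (void);

void
single_simple_example (int *x)
{
  if (GOMP_single_start ())
    *x = 42;                      /* BODY: executed by one thread only */
  GOMP_barrier ();                /* omitted when the nowait clause is present */
}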
5903 /* A subroutine of lower_omp_single. Expand the simple form of
5904 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
5906 #pragma omp single copyprivate (a, b, c)
5908 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5911 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5913 BODY;
5914 copyout.a = a;
5915 copyout.b = b;
5916 copyout.c = c;
5917 GOMP_single_copy_end (&copyout);
5919 else
5921 a = copyout_p->a;
5922 b = copyout_p->b;
5923 c = copyout_p->c;
5925 GOMP_barrier ();
5928 FIXME. It may be better to delay expanding the logic of this until
5929 pass_expand_omp. The expanded logic may make the job more difficult
5930 for a synchronization analysis pass. */
5932 static void
5933 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5935 tree ptr_type, t, l0, l1, l2, bfn_decl;
5936 gimple_seq copyin_seq;
5937 location_t loc = gimple_location (single_stmt);
5939 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5941 ptr_type = build_pointer_type (ctx->record_type);
5942 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5944 l0 = create_artificial_label (loc);
5945 l1 = create_artificial_label (loc);
5946 l2 = create_artificial_label (loc);
5948 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
5949 t = build_call_expr_loc (loc, bfn_decl, 0);
5950 t = fold_convert_loc (loc, ptr_type, t);
5951 gimplify_assign (ctx->receiver_decl, t, pre_p);
5953 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5954 build_int_cst (ptr_type, 0));
5955 t = build3 (COND_EXPR, void_type_node, t,
5956 build_and_jump (&l0), build_and_jump (&l1));
5957 gimplify_and_add (t, pre_p);
5959 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5961 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5963 copyin_seq = NULL;
5964 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5965 &copyin_seq, ctx);
5967 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
5968 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
5969 t = build_call_expr_loc (loc, bfn_decl, 1, t);
5970 gimplify_and_add (t, pre_p);
5972 t = build_and_jump (&l2);
5973 gimplify_and_add (t, pre_p);
5975 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5977 gimple_seq_add_seq (pre_p, copyin_seq);
5979 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
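/* Editorial sketch (not part of omp-low.c): the copyprivate expansion shown
   in the comment above, written out as plain C.  GOMP_single_copy_start and
   GOMP_single_copy_end are the libgomp entry points used here; the record
   layout and variable names are illustrative.  */
#include <stddef.h>
extern void *GOMP_single_copy_start (void);
extern void GOMP_single_copy_end (void *data);
extern void GOMP_barrier (void);

void
single_copy_example (int *a, int *b)
{
  struct { int a; int b; } copyout, *copyout_p;

  copyout_p = GOMP_single_copy_start ();
  if (copyout_p == NULL)
    {
      /* BODY runs in the single thread, then its values are broadcast.  */
      copyout.a = *a;
      copyout.b = *b;
      GOMP_single_copy_end (&copyout);
    }
  else
    {
      /* Every other thread copies the broadcast values in.  */
      *a = copyout_p->a;
      *b = copyout_p->b;
    }
  GOMP_barrier ();
}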
5983 /* Expand code for an OpenMP single directive. */
5985 static void
5986 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5988 tree block;
5989 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5990 gimple_seq bind_body, dlist;
5991 struct gimplify_ctx gctx;
5993 push_gimplify_context (&gctx);
5995 block = make_node (BLOCK);
5996 bind = gimple_build_bind (NULL, NULL, block);
5997 gsi_replace (gsi_p, bind, true);
5998 bind_body = NULL;
5999 dlist = NULL;
6000 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
6001 &bind_body, &dlist, ctx);
6002 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
6004 gimple_seq_add_stmt (&bind_body, single_stmt);
6006 if (ctx->record_type)
6007 lower_omp_single_copy (single_stmt, &bind_body, ctx);
6008 else
6009 lower_omp_single_simple (single_stmt, &bind_body);
6011 gimple_omp_set_body (single_stmt, NULL);
6013 gimple_seq_add_seq (&bind_body, dlist);
6015 bind_body = maybe_catch_exception (bind_body);
6017 t = gimple_build_omp_return
6018 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
6019 OMP_CLAUSE_NOWAIT));
6020 gimple_seq_add_stmt (&bind_body, t);
6021 gimple_bind_set_body (bind, bind_body);
6023 pop_gimplify_context (bind);
6025 gimple_bind_append_vars (bind, ctx->block_vars);
6026 BLOCK_VARS (block) = ctx->block_vars;
6027 if (BLOCK_VARS (block))
6028 TREE_USED (block) = 1;
6032 /* Expand code for an OpenMP master directive. */
6034 static void
6035 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6037 tree block, lab = NULL, x, bfn_decl;
6038 gimple stmt = gsi_stmt (*gsi_p), bind;
6039 location_t loc = gimple_location (stmt);
6040 gimple_seq tseq;
6041 struct gimplify_ctx gctx;
6043 push_gimplify_context (&gctx);
6045 block = make_node (BLOCK);
6046 bind = gimple_build_bind (NULL, NULL, block);
6047 gsi_replace (gsi_p, bind, true);
6048 gimple_bind_add_stmt (bind, stmt);
6050 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
6051 x = build_call_expr_loc (loc, bfn_decl, 0);
6052 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
6053 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
6054 tseq = NULL;
6055 gimplify_and_add (x, &tseq);
6056 gimple_bind_add_seq (bind, tseq);
6058 lower_omp (gimple_omp_body_ptr (stmt), ctx);
6059 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6060 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6061 gimple_omp_set_body (stmt, NULL);
6063 gimple_bind_add_stmt (bind, gimple_build_label (lab));
6065 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6067 pop_gimplify_context (bind);
6069 gimple_bind_append_vars (bind, ctx->block_vars);
6070 BLOCK_VARS (block) = ctx->block_vars;
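/* Editorial sketch (not part of omp-low.c): the test built by
   lower_omp_master, written out as plain C.  omp_get_thread_num is the
   function behind BUILT_IN_OMP_GET_THREAD_NUM; the label and function
   names are illustrative.  */
extern int omp_get_thread_num (void);

void
master_example (int *x)
{
  if (omp_get_thread_num () != 0)
    goto lab;                     /* non-master threads skip the body */
  *x = 42;                        /* BODY */
 lab:;
}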
6074 /* Expand code for an OpenMP ordered directive. */
6076 static void
6077 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6079 tree block;
6080 gimple stmt = gsi_stmt (*gsi_p), bind, x;
6081 struct gimplify_ctx gctx;
6083 push_gimplify_context (&gctx);
6085 block = make_node (BLOCK);
6086 bind = gimple_build_bind (NULL, NULL, block);
6087 gsi_replace (gsi_p, bind, true);
6088 gimple_bind_add_stmt (bind, stmt);
6090 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
6091 0);
6092 gimple_bind_add_stmt (bind, x);
6094 lower_omp (gimple_omp_body_ptr (stmt), ctx);
6095 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6096 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6097 gimple_omp_set_body (stmt, NULL);
6099 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
6100 gimple_bind_add_stmt (bind, x);
6102 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6104 pop_gimplify_context (bind);
6106 gimple_bind_append_vars (bind, ctx->block_vars);
6107 BLOCK_VARS (block) = gimple_bind_vars (bind);
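/* Editorial sketch (not part of omp-low.c): lower_omp_ordered wraps the
   directive body in the GOMP_ordered_start/GOMP_ordered_end pair built
   above; in plain C the result looks like this (names illustrative).  */
extern void GOMP_ordered_start (void);
extern void GOMP_ordered_end (void);

void
ordered_example (int *out, int i)
{
  GOMP_ordered_start ();
  *out = i;                       /* BODY: executed in loop-iteration order */
  GOMP_ordered_end ();
}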
6111 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
6112 substitution of a couple of function calls. But the NAMED case
6113 requires that languages coordinate a symbol name. It is therefore
6114 best put here in common code. */
6116 static GTY((param1_is (tree), param2_is (tree)))
6117 splay_tree critical_name_mutexes;
6119 static void
6120 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6122 tree block;
6123 tree name, lock, unlock;
6124 gimple stmt = gsi_stmt (*gsi_p), bind;
6125 location_t loc = gimple_location (stmt);
6126 gimple_seq tbody;
6127 struct gimplify_ctx gctx;
6129 name = gimple_omp_critical_name (stmt);
6130 if (name)
6132 tree decl;
6133 splay_tree_node n;
6135 if (!critical_name_mutexes)
6136 critical_name_mutexes
6137 = splay_tree_new_ggc (splay_tree_compare_pointers,
6138 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
6139 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
6141 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
6142 if (n == NULL)
6144 char *new_str;
6146 decl = create_tmp_var_raw (ptr_type_node, NULL);
6148 new_str = ACONCAT ((".gomp_critical_user_",
6149 IDENTIFIER_POINTER (name), NULL));
6150 DECL_NAME (decl) = get_identifier (new_str);
6151 TREE_PUBLIC (decl) = 1;
6152 TREE_STATIC (decl) = 1;
6153 DECL_COMMON (decl) = 1;
6154 DECL_ARTIFICIAL (decl) = 1;
6155 DECL_IGNORED_P (decl) = 1;
6156 varpool_finalize_decl (decl);
6158 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
6159 (splay_tree_value) decl);
6161 else
6162 decl = (tree) n->value;
6164 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
6165 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
6167 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
6168 unlock = build_call_expr_loc (loc, unlock, 1,
6169 build_fold_addr_expr_loc (loc, decl));
6171 else
6173 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
6174 lock = build_call_expr_loc (loc, lock, 0);
6176 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
6177 unlock = build_call_expr_loc (loc, unlock, 0);
6180 push_gimplify_context (&gctx);
6182 block = make_node (BLOCK);
6183 bind = gimple_build_bind (NULL, NULL, block);
6184 gsi_replace (gsi_p, bind, true);
6185 gimple_bind_add_stmt (bind, stmt);
6187 tbody = gimple_bind_body (bind);
6188 gimplify_and_add (lock, &tbody);
6189 gimple_bind_set_body (bind, tbody);
6191 lower_omp (gimple_omp_body_ptr (stmt), ctx);
6192 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6193 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6194 gimple_omp_set_body (stmt, NULL);
6196 tbody = gimple_bind_body (bind);
6197 gimplify_and_add (unlock, &tbody);
6198 gimple_bind_set_body (bind, tbody);
6200 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6202 pop_gimplify_context (bind);
6203 gimple_bind_append_vars (bind, ctx->block_vars);
6204 BLOCK_VARS (block) = gimple_bind_vars (bind);
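/* Editorial sketch (not part of omp-low.c): the two lowerings performed by
   lower_omp_critical, written out as plain C.  The unnamed form uses the
   global GOMP_critical_start/GOMP_critical_end pair; a named critical gets
   a common pointer-sized mutex named ".gomp_critical_user_<name>", spelled
   legally here as gomp_critical_user_foo.  All names are illustrative.  */
extern void GOMP_critical_start (void);
extern void GOMP_critical_end (void);
extern void GOMP_critical_name_start (void **pptr);
extern void GOMP_critical_name_end (void **pptr);

void *gomp_critical_user_foo;     /* stands in for .gomp_critical_user_foo */

void
critical_example (int *counter)
{
  GOMP_critical_start ();         /* #pragma omp critical */
  ++*counter;
  GOMP_critical_end ();

  GOMP_critical_name_start (&gomp_critical_user_foo);  /* #pragma omp critical (foo) */
  ++*counter;
  GOMP_critical_name_end (&gomp_critical_user_foo);
}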
6208 /* A subroutine of lower_omp_for. Generate code to emit the predicate
6209 for a lastprivate clause. Given a loop control predicate of (V
6210 cond N2), we gate the clause on (!(V cond N2)). The lowered form
6211 is appended to *DLIST, iterator initialization is appended to
6212 *BODY_P. */
6214 static void
6215 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
6216 gimple_seq *dlist, struct omp_context *ctx)
6218 tree clauses, cond, vinit;
6219 enum tree_code cond_code;
6220 gimple_seq stmts;
6222 cond_code = fd->loop.cond_code;
6223 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6225 /* When possible, use a strict equality expression. This can let VRP
6226 type optimizations deduce the value and remove a copy. */
6227 if (host_integerp (fd->loop.step, 0))
6229 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
6230 if (step == 1 || step == -1)
6231 cond_code = EQ_EXPR;
6234 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
6236 clauses = gimple_omp_for_clauses (fd->for_stmt);
6237 stmts = NULL;
6238 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
6239 if (!gimple_seq_empty_p (stmts))
6241 gimple_seq_add_seq (&stmts, *dlist);
6242 *dlist = stmts;
6244 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
6245 vinit = fd->loop.n1;
6246 if (cond_code == EQ_EXPR
6247 && host_integerp (fd->loop.n2, 0)
6248 && ! integer_zerop (fd->loop.n2))
6249 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
6251 /* Initialize the iterator variable, so that threads that don't execute
6252 any iterations don't execute the lastprivate clauses by accident. */
6253 gimplify_assign (fd->loop.v, vinit, body_p);
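/* Editorial sketch (not part of omp-low.c): the gate generated by
   lower_omp_for_lastprivate for a loop "for (i = 0; i < n; i++)" with a
   lastprivate variable.  The LT_EXPR condition flips to GE_EXPR, and since
   the step is 1 the cheaper strict form "i == n" is used; the iterator is
   pre-initialized so threads that run no iterations skip the copy-out.
   All names below are illustrative.  */
void
lastprivate_gate_example (int n, int *x, int x_priv)
{
  int i = 0;                      /* vinit appended to *BODY_P */

  /* ... this thread's chunk of iterations runs here, updating i and
     x_priv ... */

  if (i == n)                     /* predicate appended to *DLIST */
    *x = x_priv;                  /* lastprivate copy-out */
}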
6258 /* Lower code for an OpenMP loop directive. */
6260 static void
6261 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6263 tree *rhs_p, block;
6264 struct omp_for_data fd;
6265 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6266 gimple_seq omp_for_body, body, dlist;
6267 size_t i;
6268 struct gimplify_ctx gctx;
6270 push_gimplify_context (&gctx);
6272 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
6273 lower_omp (gimple_omp_body_ptr (stmt), ctx);
6275 block = make_node (BLOCK);
6276 new_stmt = gimple_build_bind (NULL, NULL, block);
6277 /* Replace at gsi right away, so that 'stmt' is no longer a member
6278 of a sequence, as we're going to add it to a different
6279 one below. */
6280 gsi_replace (gsi_p, new_stmt, true);
6282 /* Move declaration of temporaries in the loop body before we make
6283 it go away. */
6284 omp_for_body = gimple_omp_body (stmt);
6285 if (!gimple_seq_empty_p (omp_for_body)
6286 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6288 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6289 gimple_bind_append_vars (new_stmt, vars);
6292 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
6293 dlist = NULL;
6294 body = NULL;
6295 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6296 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6298 /* Lower the header expressions. At this point, we can assume that
6299 the header is of the form:
6301 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6303 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6304 using the .omp_data_s mapping, if needed. */
6305 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6307 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6308 if (!is_gimple_min_invariant (*rhs_p))
6309 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6311 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6312 if (!is_gimple_min_invariant (*rhs_p))
6313 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6315 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6316 if (!is_gimple_min_invariant (*rhs_p))
6317 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6320 /* Once lowered, extract the bounds and clauses. */
6321 extract_omp_for_data (stmt, &fd, NULL);
6323 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6325 gimple_seq_add_stmt (&body, stmt);
6326 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6328 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6329 fd.loop.v));
6331 /* After the loop, add exit clauses. */
6332 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6333 gimple_seq_add_seq (&body, dlist);
6335 body = maybe_catch_exception (body);
6337 /* Region exit marker goes at the end of the loop body. */
6338 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6340 pop_gimplify_context (new_stmt);
6342 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6343 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6344 if (BLOCK_VARS (block))
6345 TREE_USED (block) = 1;
6347 gimple_bind_set_body (new_stmt, body);
6348 gimple_omp_set_body (stmt, NULL);
6349 gimple_omp_for_set_pre_body (stmt, NULL);
6352 /* Callback for walk_stmts. Check if the current statement only contains
6353 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
6355 static tree
6356 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6357 bool *handled_ops_p,
6358 struct walk_stmt_info *wi)
6360 int *info = (int *) wi->info;
6361 gimple stmt = gsi_stmt (*gsi_p);
6363 *handled_ops_p = true;
6364 switch (gimple_code (stmt))
6366 WALK_SUBSTMTS;
6368 case GIMPLE_OMP_FOR:
6369 case GIMPLE_OMP_SECTIONS:
6370 *info = *info == 0 ? 1 : -1;
6371 break;
6372 default:
6373 *info = -1;
6374 break;
6376 return NULL;
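/* Editorial sketch (not part of omp-low.c): what check_combined_parallel
   accepts.  A parallel body whose only statement is a single workshare
   leaves ws_num at 1 and the parallel is marked combined; anything else
   drives ws_num to -1.  Names are illustrative.  */
void
combined_parallel_example (int n, int *a)
{
#pragma omp parallel
  {
#pragma omp for                   /* sole statement: combined parallel+for */
    for (int i = 0; i < n; i++)
      a[i] = i;
  }
}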
6379 struct omp_taskcopy_context
6381 /* This field must be at the beginning, as we do "inheritance": Some
6382 callback functions for tree-inline.c (e.g., omp_copy_decl)
6383 receive a copy_body_data pointer that is up-casted to an
6384 omp_context pointer. */
6385 copy_body_data cb;
6386 omp_context *ctx;
6389 static tree
6390 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6392 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6394 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6395 return create_tmp_var (TREE_TYPE (var), NULL);
6397 return var;
6400 static tree
6401 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6403 tree name, new_fields = NULL, type, f;
6405 type = lang_hooks.types.make_type (RECORD_TYPE);
6406 name = DECL_NAME (TYPE_NAME (orig_type));
6407 name = build_decl (gimple_location (tcctx->ctx->stmt),
6408 TYPE_DECL, name, type);
6409 TYPE_NAME (type) = name;
6411 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6413 tree new_f = copy_node (f);
6414 DECL_CONTEXT (new_f) = type;
6415 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6416 TREE_CHAIN (new_f) = new_fields;
6417 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6418 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6419 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6420 &tcctx->cb, NULL);
6421 new_fields = new_f;
6422 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6424 TYPE_FIELDS (type) = nreverse (new_fields);
6425 layout_type (type);
6426 return type;
6429 /* Create task copyfn. */
6431 static void
6432 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6434 struct function *child_cfun;
6435 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6436 tree record_type, srecord_type, bind, list;
6437 bool record_needs_remap = false, srecord_needs_remap = false;
6438 splay_tree_node n;
6439 struct omp_taskcopy_context tcctx;
6440 struct gimplify_ctx gctx;
6441 location_t loc = gimple_location (task_stmt);
6443 child_fn = gimple_omp_task_copy_fn (task_stmt);
6444 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6445 gcc_assert (child_cfun->cfg == NULL);
6446 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6448 /* Reset DECL_CONTEXT on function arguments. */
6449 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
6450 DECL_CONTEXT (t) = child_fn;
6452 /* Populate the function. */
6453 push_gimplify_context (&gctx);
6454 push_cfun (child_cfun);
6456 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6457 TREE_SIDE_EFFECTS (bind) = 1;
6458 list = NULL;
6459 DECL_SAVED_TREE (child_fn) = bind;
6460 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6462 /* Remap src and dst argument types if needed. */
6463 record_type = ctx->record_type;
6464 srecord_type = ctx->srecord_type;
6465 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
6466 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6468 record_needs_remap = true;
6469 break;
6471 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
6472 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6474 srecord_needs_remap = true;
6475 break;
6478 if (record_needs_remap || srecord_needs_remap)
6480 memset (&tcctx, '\0', sizeof (tcctx));
6481 tcctx.cb.src_fn = ctx->cb.src_fn;
6482 tcctx.cb.dst_fn = child_fn;
6483 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
6484 gcc_checking_assert (tcctx.cb.src_node);
6485 tcctx.cb.dst_node = tcctx.cb.src_node;
6486 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6487 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6488 tcctx.cb.eh_lp_nr = 0;
6489 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6490 tcctx.cb.decl_map = pointer_map_create ();
6491 tcctx.ctx = ctx;
6493 if (record_needs_remap)
6494 record_type = task_copyfn_remap_type (&tcctx, record_type);
6495 if (srecord_needs_remap)
6496 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6498 else
6499 tcctx.cb.decl_map = NULL;
6501 arg = DECL_ARGUMENTS (child_fn);
6502 TREE_TYPE (arg) = build_pointer_type (record_type);
6503 sarg = DECL_CHAIN (arg);
6504 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6506 /* First pass: initialize temporaries used in record_type and srecord_type
6507 sizes and field offsets. */
6508 if (tcctx.cb.decl_map)
6509 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6510 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6512 tree *p;
6514 decl = OMP_CLAUSE_DECL (c);
6515 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6516 if (p == NULL)
6517 continue;
6518 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6519 sf = (tree) n->value;
6520 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6521 src = build_simple_mem_ref_loc (loc, sarg);
6522 src = omp_build_component_ref (src, sf);
6523 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6524 append_to_statement_list (t, &list);
6527 /* Second pass: copy shared var pointers and copy construct non-VLA
6528 firstprivate vars. */
6529 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6530 switch (OMP_CLAUSE_CODE (c))
6532 case OMP_CLAUSE_SHARED:
6533 decl = OMP_CLAUSE_DECL (c);
6534 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6535 if (n == NULL)
6536 break;
6537 f = (tree) n->value;
6538 if (tcctx.cb.decl_map)
6539 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6540 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6541 sf = (tree) n->value;
6542 if (tcctx.cb.decl_map)
6543 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6544 src = build_simple_mem_ref_loc (loc, sarg);
6545 src = omp_build_component_ref (src, sf);
6546 dst = build_simple_mem_ref_loc (loc, arg);
6547 dst = omp_build_component_ref (dst, f);
6548 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6549 append_to_statement_list (t, &list);
6550 break;
6551 case OMP_CLAUSE_FIRSTPRIVATE:
6552 decl = OMP_CLAUSE_DECL (c);
6553 if (is_variable_sized (decl))
6554 break;
6555 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6556 if (n == NULL)
6557 break;
6558 f = (tree) n->value;
6559 if (tcctx.cb.decl_map)
6560 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6561 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6562 if (n != NULL)
6564 sf = (tree) n->value;
6565 if (tcctx.cb.decl_map)
6566 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6567 src = build_simple_mem_ref_loc (loc, sarg);
6568 src = omp_build_component_ref (src, sf);
6569 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6570 src = build_simple_mem_ref_loc (loc, src);
6572 else
6573 src = decl;
6574 dst = build_simple_mem_ref_loc (loc, arg);
6575 dst = omp_build_component_ref (dst, f);
6576 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6577 append_to_statement_list (t, &list);
6578 break;
6579 case OMP_CLAUSE_PRIVATE:
6580 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6581 break;
6582 decl = OMP_CLAUSE_DECL (c);
6583 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6584 f = (tree) n->value;
6585 if (tcctx.cb.decl_map)
6586 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6587 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6588 if (n != NULL)
6590 sf = (tree) n->value;
6591 if (tcctx.cb.decl_map)
6592 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6593 src = build_simple_mem_ref_loc (loc, sarg);
6594 src = omp_build_component_ref (src, sf);
6595 if (use_pointer_for_field (decl, NULL))
6596 src = build_simple_mem_ref_loc (loc, src);
6598 else
6599 src = decl;
6600 dst = build_simple_mem_ref_loc (loc, arg);
6601 dst = omp_build_component_ref (dst, f);
6602 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6603 append_to_statement_list (t, &list);
6604 break;
6605 default:
6606 break;
6609 /* Last pass: handle VLA firstprivates. */
6610 if (tcctx.cb.decl_map)
6611 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6612 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6614 tree ind, ptr, df;
6616 decl = OMP_CLAUSE_DECL (c);
6617 if (!is_variable_sized (decl))
6618 continue;
6619 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6620 if (n == NULL)
6621 continue;
6622 f = (tree) n->value;
6623 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6624 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6625 ind = DECL_VALUE_EXPR (decl);
6626 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6627 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6628 n = splay_tree_lookup (ctx->sfield_map,
6629 (splay_tree_key) TREE_OPERAND (ind, 0));
6630 sf = (tree) n->value;
6631 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6632 src = build_simple_mem_ref_loc (loc, sarg);
6633 src = omp_build_component_ref (src, sf);
6634 src = build_simple_mem_ref_loc (loc, src);
6635 dst = build_simple_mem_ref_loc (loc, arg);
6636 dst = omp_build_component_ref (dst, f);
6637 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6638 append_to_statement_list (t, &list);
6639 n = splay_tree_lookup (ctx->field_map,
6640 (splay_tree_key) TREE_OPERAND (ind, 0));
6641 df = (tree) n->value;
6642 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6643 ptr = build_simple_mem_ref_loc (loc, arg);
6644 ptr = omp_build_component_ref (ptr, df);
6645 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6646 build_fold_addr_expr_loc (loc, dst));
6647 append_to_statement_list (t, &list);
6650 t = build1 (RETURN_EXPR, void_type_node, NULL);
6651 append_to_statement_list (t, &list);
6653 if (tcctx.cb.decl_map)
6654 pointer_map_destroy (tcctx.cb.decl_map);
6655 pop_gimplify_context (NULL);
6656 BIND_EXPR_BODY (bind) = list;
6657 pop_cfun ();
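/* Editorial sketch (not part of omp-low.c): the job of the generated task
   copy function, with hypothetical record layouts.  Shared clauses copy
   the pointer from the sender record into the task record; non-VLA
   firstprivate clauses copy-construct the value itself.  */
struct task_record_example  { int *shared_p; int fp; };
struct task_srecord_example { int *shared_p; int fp; };

void
task_copyfn_example (struct task_record_example *arg,
                     struct task_srecord_example *sarg)
{
  arg->shared_p = sarg->shared_p;   /* OMP_CLAUSE_SHARED: copy the pointer */
  arg->fp = sarg->fp;               /* OMP_CLAUSE_FIRSTPRIVATE: copy the value */
}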
6660 /* Lower the OpenMP parallel or task directive in the current statement
6661 in GSI_P. CTX holds context information for the directive. */
6663 static void
6664 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6666 tree clauses;
6667 tree child_fn, t;
6668 gimple stmt = gsi_stmt (*gsi_p);
6669 gimple par_bind, bind;
6670 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6671 struct gimplify_ctx gctx;
6672 location_t loc = gimple_location (stmt);
6674 clauses = gimple_omp_taskreg_clauses (stmt);
6675 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6676 par_body = gimple_bind_body (par_bind);
6677 child_fn = ctx->cb.dst_fn;
6678 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6679 && !gimple_omp_parallel_combined_p (stmt))
6681 struct walk_stmt_info wi;
6682 int ws_num = 0;
6684 memset (&wi, 0, sizeof (wi));
6685 wi.info = &ws_num;
6686 wi.val_only = true;
6687 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6688 if (ws_num == 1)
6689 gimple_omp_parallel_set_combined_p (stmt, true);
6691 if (ctx->srecord_type)
6692 create_task_copyfn (stmt, ctx);
6694 push_gimplify_context (&gctx);
6696 par_olist = NULL;
6697 par_ilist = NULL;
6698 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6699 lower_omp (&par_body, ctx);
6700 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6701 lower_reduction_clauses (clauses, &par_olist, ctx);
6703 /* Declare all the variables created by mapping and the variables
6704 declared in the scope of the parallel body. */
6705 record_vars_into (ctx->block_vars, child_fn);
6706 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6708 if (ctx->record_type)
6710 ctx->sender_decl
6711 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6712 : ctx->record_type, ".omp_data_o");
6713 DECL_NAMELESS (ctx->sender_decl) = 1;
6714 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6715 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6718 olist = NULL;
6719 ilist = NULL;
6720 lower_send_clauses (clauses, &ilist, &olist, ctx);
6721 lower_send_shared_vars (&ilist, &olist, ctx);
6723 /* Once all the expansions are done, sequence all the different
6724 fragments inside gimple_omp_body. */
6726 new_body = NULL;
6728 if (ctx->record_type)
6730 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6731 /* fixup_child_record_type might have changed receiver_decl's type. */
6732 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
6733 gimple_seq_add_stmt (&new_body,
6734 gimple_build_assign (ctx->receiver_decl, t));
6737 gimple_seq_add_seq (&new_body, par_ilist);
6738 gimple_seq_add_seq (&new_body, par_body);
6739 gimple_seq_add_seq (&new_body, par_olist);
6740 new_body = maybe_catch_exception (new_body);
6741 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6742 gimple_omp_set_body (stmt, new_body);
6744 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6745 gsi_replace (gsi_p, bind, true);
6746 gimple_bind_add_seq (bind, ilist);
6747 gimple_bind_add_stmt (bind, stmt);
6748 gimple_bind_add_seq (bind, olist);
6750 pop_gimplify_context (NULL);
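/* Editorial sketch (not part of omp-low.c): the sender/receiver marshalling
   set up above, written out as plain C with hypothetical names.  The parent
   fills the ".omp_data_o" record and its address reaches the child function,
   whose receiver pointer (".omp_data_i") aliases it; the actual call through
   GOMP_parallel_start is emitted later, by pass_expand_omp.  */
struct omp_data_s_example { int *a; int n; };

extern void GOMP_parallel_start (void (*fn) (void *), void *data,
                                 unsigned num_threads);
extern void GOMP_parallel_end (void);

static void
child_fn_example (void *data)
{
  struct omp_data_s_example *omp_data_i = data;
  for (int i = 0; i < omp_data_i->n; i++)
    omp_data_i->a[i] = i;
}

void
parent_example (int *a, int n)
{
  struct omp_data_s_example omp_data_o = { a, n };
  GOMP_parallel_start (child_fn_example, &omp_data_o, 0);
  child_fn_example (&omp_data_o);   /* the spawning thread runs the body too */
  GOMP_parallel_end ();
}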
6753 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6754 regimplified. If DATA is non-NULL, lower_omp_1 is outside
6755 of OpenMP context, but with task_shared_vars set. */
6757 static tree
6758 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6759 void *data)
6761 tree t = *tp;
6763 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6764 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6765 return t;
6767 if (task_shared_vars
6768 && DECL_P (t)
6769 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6770 return t;
6772 /* If a global variable has been privatized, TREE_CONSTANT on
6773 ADDR_EXPR might be wrong. */
6774 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6775 recompute_tree_invariant_for_addr_expr (t);
6777 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6778 return NULL_TREE;
6781 static void
6782 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6784 gimple stmt = gsi_stmt (*gsi_p);
6785 struct walk_stmt_info wi;
6787 if (gimple_has_location (stmt))
6788 input_location = gimple_location (stmt);
6790 if (task_shared_vars)
6791 memset (&wi, '\0', sizeof (wi));
6793 /* If we have issued syntax errors, avoid doing any heavy lifting.
6794 Just replace the OpenMP directives with a NOP to avoid
6795 confusing RTL expansion. */
6796 if (seen_error () && is_gimple_omp (stmt))
6798 gsi_replace (gsi_p, gimple_build_nop (), true);
6799 return;
6802 switch (gimple_code (stmt))
6804 case GIMPLE_COND:
6805 if ((ctx || task_shared_vars)
6806 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6807 ctx ? NULL : &wi, NULL)
6808 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6809 ctx ? NULL : &wi, NULL)))
6810 gimple_regimplify_operands (stmt, gsi_p);
6811 break;
6812 case GIMPLE_CATCH:
6813 lower_omp (gimple_catch_handler_ptr (stmt), ctx);
6814 break;
6815 case GIMPLE_EH_FILTER:
6816 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
6817 break;
6818 case GIMPLE_TRY:
6819 lower_omp (gimple_try_eval_ptr (stmt), ctx);
6820 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
6821 break;
6822 case GIMPLE_TRANSACTION:
6823 lower_omp (gimple_transaction_body_ptr (stmt), ctx);
6824 break;
6825 case GIMPLE_BIND:
6826 lower_omp (gimple_bind_body_ptr (stmt), ctx);
6827 break;
6828 case GIMPLE_OMP_PARALLEL:
6829 case GIMPLE_OMP_TASK:
6830 ctx = maybe_lookup_ctx (stmt);
6831 lower_omp_taskreg (gsi_p, ctx);
6832 break;
6833 case GIMPLE_OMP_FOR:
6834 ctx = maybe_lookup_ctx (stmt);
6835 gcc_assert (ctx);
6836 lower_omp_for (gsi_p, ctx);
6837 break;
6838 case GIMPLE_OMP_SECTIONS:
6839 ctx = maybe_lookup_ctx (stmt);
6840 gcc_assert (ctx);
6841 lower_omp_sections (gsi_p, ctx);
6842 break;
6843 case GIMPLE_OMP_SINGLE:
6844 ctx = maybe_lookup_ctx (stmt);
6845 gcc_assert (ctx);
6846 lower_omp_single (gsi_p, ctx);
6847 break;
6848 case GIMPLE_OMP_MASTER:
6849 ctx = maybe_lookup_ctx (stmt);
6850 gcc_assert (ctx);
6851 lower_omp_master (gsi_p, ctx);
6852 break;
6853 case GIMPLE_OMP_ORDERED:
6854 ctx = maybe_lookup_ctx (stmt);
6855 gcc_assert (ctx);
6856 lower_omp_ordered (gsi_p, ctx);
6857 break;
6858 case GIMPLE_OMP_CRITICAL:
6859 ctx = maybe_lookup_ctx (stmt);
6860 gcc_assert (ctx);
6861 lower_omp_critical (gsi_p, ctx);
6862 break;
6863 case GIMPLE_OMP_ATOMIC_LOAD:
6864 if ((ctx || task_shared_vars)
6865 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6866 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6867 gimple_regimplify_operands (stmt, gsi_p);
6868 break;
6869 default:
6870 if ((ctx || task_shared_vars)
6871 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6872 ctx ? NULL : &wi))
6873 gimple_regimplify_operands (stmt, gsi_p);
6874 break;
6878 static void
6879 lower_omp (gimple_seq *body, omp_context *ctx)
6881 location_t saved_location = input_location;
6882 gimple_stmt_iterator gsi;
6883 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
6884 lower_omp_1 (&gsi, ctx);
6885 input_location = saved_location;
6888 /* Main entry point. */
6890 static unsigned int
6891 execute_lower_omp (void)
6893 gimple_seq body;
6895 /* This pass always runs, to provide PROP_gimple_lomp.
6896 But there is nothing to do unless -fopenmp is given. */
6897 if (flag_openmp == 0)
6898 return 0;
6900 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6901 delete_omp_context);
6903 body = gimple_body (current_function_decl);
6904 scan_omp (&body, NULL);
6905 gcc_assert (taskreg_nesting_level == 0);
6907 if (all_contexts->root)
6909 struct gimplify_ctx gctx;
6911 if (task_shared_vars)
6912 push_gimplify_context (&gctx);
6913 lower_omp (&body, NULL);
6914 if (task_shared_vars)
6915 pop_gimplify_context (NULL);
6918 if (all_contexts)
6920 splay_tree_delete (all_contexts);
6921 all_contexts = NULL;
6923 BITMAP_FREE (task_shared_vars);
6924 return 0;
6927 struct gimple_opt_pass pass_lower_omp =
6930 GIMPLE_PASS,
6931 "omplower", /* name */
6932 OPTGROUP_NONE, /* optinfo_flags */
6933 NULL, /* gate */
6934 execute_lower_omp, /* execute */
6935 NULL, /* sub */
6936 NULL, /* next */
6937 0, /* static_pass_number */
6938 TV_NONE, /* tv_id */
6939 PROP_gimple_any, /* properties_required */
6940 PROP_gimple_lomp, /* properties_provided */
6941 0, /* properties_destroyed */
6942 0, /* todo_flags_start */
6943 0 /* todo_flags_finish */
6947 /* The following is a utility to diagnose OpenMP structured block violations.
6948 It is not part of the "omplower" pass, as that's invoked too late. It
6949 should be invoked by the respective front ends after gimplification. */
6951 static splay_tree all_labels;
6953 /* Check for mismatched contexts and generate an error if needed. Return
6954 true if an error is detected. */
6956 static bool
6957 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6958 gimple branch_ctx, gimple label_ctx)
6960 if (label_ctx == branch_ctx)
6961 return false;
6965 Previously we kept track of the label's entire context in diagnose_sb_[12]
6966 so we could traverse it and issue a correct "exit" or "enter" error
6967 message upon a structured block violation.
6969 We built the context by building a list with tree_cons'ing, but there is
6970 no easy counterpart in gimple tuples. It seems like far too much work
6971 for issuing exit/enter error messages. If someone really misses the
6972 distinct error message... patches welcome.
6975 #if 0
6976 /* Try to avoid confusing the user by producing an error message
6977 with correct "exit" or "enter" verbiage. We prefer "exit"
6978 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6979 if (branch_ctx == NULL)
6980 exit_p = false;
6981 else
6983 while (label_ctx)
6985 if (TREE_VALUE (label_ctx) == branch_ctx)
6987 exit_p = false;
6988 break;
6990 label_ctx = TREE_CHAIN (label_ctx);
6994 if (exit_p)
6995 error ("invalid exit from OpenMP structured block");
6996 else
6997 error ("invalid entry to OpenMP structured block");
6998 #endif
7000 /* If it's obvious we have an invalid entry, be specific about the error. */
7001 if (branch_ctx == NULL)
7002 error ("invalid entry to OpenMP structured block");
7003 else
7004 /* Otherwise, be vague and lazy, but efficient. */
7005 error ("invalid branch to/from an OpenMP structured block");
7007 gsi_replace (gsi_p, gimple_build_nop (), false);
7008 return true;
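/* Editorial sketch (not part of omp-low.c): the kind of user code
   diagnose_sb_0 rejects.  The goto below enters an OpenMP structured block
   from outside it, so branch_ctx is NULL while the label's context is the
   parallel, and "invalid entry to OpenMP structured block" is reported.
   Names are illustrative.  */
void
invalid_entry_example (void)
{
  goto inside;
#pragma omp parallel
  {
  inside:;
  }
}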
7011 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
7012 where each label is found. */
7014 static tree
7015 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7016 struct walk_stmt_info *wi)
7018 gimple context = (gimple) wi->info;
7019 gimple inner_context;
7020 gimple stmt = gsi_stmt (*gsi_p);
7022 *handled_ops_p = true;
7024 switch (gimple_code (stmt))
7026 WALK_SUBSTMTS;
7028 case GIMPLE_OMP_PARALLEL:
7029 case GIMPLE_OMP_TASK:
7030 case GIMPLE_OMP_SECTIONS:
7031 case GIMPLE_OMP_SINGLE:
7032 case GIMPLE_OMP_SECTION:
7033 case GIMPLE_OMP_MASTER:
7034 case GIMPLE_OMP_ORDERED:
7035 case GIMPLE_OMP_CRITICAL:
7036 /* The minimal context here is just the current OMP construct. */
7037 inner_context = stmt;
7038 wi->info = inner_context;
7039 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
7040 wi->info = context;
7041 break;
7043 case GIMPLE_OMP_FOR:
7044 inner_context = stmt;
7045 wi->info = inner_context;
7046 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7047 walk them. */
7048 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
7049 diagnose_sb_1, NULL, wi);
7050 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
7051 wi->info = context;
7052 break;
7054 case GIMPLE_LABEL:
7055 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
7056 (splay_tree_value) context);
7057 break;
7059 default:
7060 break;
7063 return NULL_TREE;
7066 /* Pass 2: Check each branch and see if its context differs from that of
7067 the destination label's context. */
7069 static tree
7070 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7071 struct walk_stmt_info *wi)
7073 gimple context = (gimple) wi->info;
7074 splay_tree_node n;
7075 gimple stmt = gsi_stmt (*gsi_p);
7077 *handled_ops_p = true;
7079 switch (gimple_code (stmt))
7081 WALK_SUBSTMTS;
7083 case GIMPLE_OMP_PARALLEL:
7084 case GIMPLE_OMP_TASK:
7085 case GIMPLE_OMP_SECTIONS:
7086 case GIMPLE_OMP_SINGLE:
7087 case GIMPLE_OMP_SECTION:
7088 case GIMPLE_OMP_MASTER:
7089 case GIMPLE_OMP_ORDERED:
7090 case GIMPLE_OMP_CRITICAL:
7091 wi->info = stmt;
7092 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
7093 wi->info = context;
7094 break;
7096 case GIMPLE_OMP_FOR:
7097 wi->info = stmt;
7098 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7099 walk them. */
7100 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
7101 diagnose_sb_2, NULL, wi);
7102 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
7103 wi->info = context;
7104 break;
7106 case GIMPLE_COND:
7108 tree lab = gimple_cond_true_label (stmt);
7109 if (lab)
7111 n = splay_tree_lookup (all_labels,
7112 (splay_tree_key) lab);
7113 diagnose_sb_0 (gsi_p, context,
7114 n ? (gimple) n->value : NULL);
7116 lab = gimple_cond_false_label (stmt);
7117 if (lab)
7119 n = splay_tree_lookup (all_labels,
7120 (splay_tree_key) lab);
7121 diagnose_sb_0 (gsi_p, context,
7122 n ? (gimple) n->value : NULL);
7125 break;
7127 case GIMPLE_GOTO:
7129 tree lab = gimple_goto_dest (stmt);
7130 if (TREE_CODE (lab) != LABEL_DECL)
7131 break;
7133 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
7134 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
7136 break;
7138 case GIMPLE_SWITCH:
7140 unsigned int i;
7141 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
7143 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
7144 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
7145 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
7146 break;
7149 break;
7151 case GIMPLE_RETURN:
7152 diagnose_sb_0 (gsi_p, context, NULL);
7153 break;
7155 default:
7156 break;
7159 return NULL_TREE;
7162 static unsigned int
7163 diagnose_omp_structured_block_errors (void)
7165 struct walk_stmt_info wi;
7166 gimple_seq body = gimple_body (current_function_decl);
7168 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
7170 memset (&wi, 0, sizeof (wi));
7171 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
7173 memset (&wi, 0, sizeof (wi));
7174 wi.want_locations = true;
7175 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
7177 gimple_set_body (current_function_decl, body);
7179 splay_tree_delete (all_labels);
7180 all_labels = NULL;
7182 return 0;
7185 static bool
7186 gate_diagnose_omp_blocks (void)
7188 return flag_openmp != 0;
7191 struct gimple_opt_pass pass_diagnose_omp_blocks =
7194 GIMPLE_PASS,
7195 "*diagnose_omp_blocks", /* name */
7196 OPTGROUP_NONE, /* optinfo_flags */
7197 gate_diagnose_omp_blocks, /* gate */
7198 diagnose_omp_structured_block_errors, /* execute */
7199 NULL, /* sub */
7200 NULL, /* next */
7201 0, /* static_pass_number */
7202 TV_NONE, /* tv_id */
7203 PROP_gimple_any, /* properties_required */
7204 0, /* properties_provided */
7205 0, /* properties_destroyed */
7206 0, /* todo_flags_start */
7207 0, /* todo_flags_finish */
7211 #include "gt-omp-low.h"