1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
6 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
7 Free Software Foundation, Inc.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "gimple.h"
32 #include "tree-iterator.h"
33 #include "tree-inline.h"
34 #include "langhooks.h"
35 #include "diagnostic-core.h"
36 #include "tree-flow.h"
37 #include "flags.h"
38 #include "function.h"
39 #include "expr.h"
40 #include "tree-pass.h"
41 #include "ggc.h"
42 #include "except.h"
43 #include "splay-tree.h"
44 #include "optabs.h"
45 #include "cfgloop.h"
48 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
49 phases. The first phase scans the function looking for OMP statements
50 and then for variables that must be replaced to satisfy data sharing
51 clauses. The second phase expands code for the constructs, as well as
52 re-gimplifying things when variables have been replaced with complex
53 expressions.
55 Final code generation is done by pass_expand_omp. The flowgraph is
56 scanned for parallel regions which are then moved to a new
57 function, to be invoked by the thread library. */
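/* Editorial illustration (not part of the original sources): a rough
   sketch, assuming typical names, of what the two phases produce for

       #pragma omp parallel shared (i)
         i = i + 1;

   The body is outlined by pass_expand_omp into a child function along
   the lines of

       static void foo._omp_fn.0 (struct .omp_data_s *.omp_data_i)
       {
         .omp_data_i->i = .omp_data_i->i + 1;
       }

   while the directive itself becomes calls resembling
   GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0), a direct call
   to foo._omp_fn.0 (&.omp_data_o), and GOMP_parallel_end ().  Exact
   field accesses, copy-in/copy-out operations and runtime entry points
   vary with the construct and its clauses.  */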
59 /* Context structure. Used to store information about each parallel
60 directive in the code. */
62 typedef struct omp_context
64 /* This field must be at the beginning, as we do "inheritance": Some
65 callback functions for tree-inline.c (e.g., omp_copy_decl)
66 receive a copy_body_data pointer that is up-casted to an
67 omp_context pointer. */
68 copy_body_data cb;
70 /* The tree of contexts corresponding to the encountered constructs. */
71 struct omp_context *outer;
72 gimple stmt;
74 /* Map variables to fields in a structure that allows communication
75 between sending and receiving threads. */
76 splay_tree field_map;
77 tree record_type;
78 tree sender_decl;
79 tree receiver_decl;
81 /* These are used just by task contexts, if task firstprivate fn is
82 needed. srecord_type is used to communicate from the thread
  83      that encountered the task construct to the task firstprivate fn;
  84      record_type is allocated by GOMP_task, initialized by the task firstprivate
85 fn and passed to the task body fn. */
86 splay_tree sfield_map;
87 tree srecord_type;
89 /* A chain of variables to add to the top-level block surrounding the
90 construct. In the case of a parallel, this is in the child function. */
91 tree block_vars;
93 /* What to do with variables with implicitly determined sharing
94 attributes. */
95 enum omp_clause_default_kind default_kind;
97 /* Nesting depth of this context. Used to beautify error messages re
98 invalid gotos. The outermost ctx is depth 1, with depth 0 being
99 reserved for the main body of the function. */
100 int depth;
102 /* True if this parallel directive is nested within another. */
103 bool is_nested;
104 } omp_context;
107 struct omp_for_data_loop
109 tree v, n1, n2, step;
110 enum tree_code cond_code;
113 /* A structure describing the main elements of a parallel loop. */
115 struct omp_for_data
117 struct omp_for_data_loop loop;
118 tree chunk_size;
119 gimple for_stmt;
120 tree pre, iter_type;
121 int collapse;
122 bool have_nowait, have_ordered;
123 enum omp_clause_schedule_kind sched_kind;
124 struct omp_for_data_loop *loops;
128 static splay_tree all_contexts;
129 static int taskreg_nesting_level;
130 struct omp_region *root_omp_region;
131 static bitmap task_shared_vars;
133 static void scan_omp (gimple_seq *, omp_context *);
134 static tree scan_omp_1_op (tree *, int *, void *);
136 #define WALK_SUBSTMTS \
137 case GIMPLE_BIND: \
138 case GIMPLE_TRY: \
139 case GIMPLE_CATCH: \
140 case GIMPLE_EH_FILTER: \
141 case GIMPLE_TRANSACTION: \
142 /* The sub-statements for these should be walked. */ \
143 *handled_ops_p = false; \
144 break;
146 /* Convenience function for calling scan_omp_1_op on tree operands. */
148 static inline tree
149 scan_omp_op (tree *tp, omp_context *ctx)
151 struct walk_stmt_info wi;
153 memset (&wi, 0, sizeof (wi));
154 wi.info = ctx;
155 wi.want_locations = true;
157 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
160 static void lower_omp (gimple_seq *, omp_context *);
161 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
162 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
164 /* Find an OpenMP clause of type KIND within CLAUSES. */
166 tree
167 find_omp_clause (tree clauses, enum omp_clause_code kind)
169 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
170 if (OMP_CLAUSE_CODE (clauses) == kind)
171 return clauses;
173 return NULL_TREE;
176 /* Return true if CTX is for an omp parallel. */
178 static inline bool
179 is_parallel_ctx (omp_context *ctx)
181 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
185 /* Return true if CTX is for an omp task. */
187 static inline bool
188 is_task_ctx (omp_context *ctx)
190 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
194 /* Return true if CTX is for an omp parallel or omp task. */
196 static inline bool
197 is_taskreg_ctx (omp_context *ctx)
199 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
200 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
204 /* Return true if REGION is a combined parallel+workshare region. */
206 static inline bool
207 is_combined_parallel (struct omp_region *region)
209 return region->is_combined_parallel;
213 /* Extract the header elements of parallel loop FOR_STMT and store
214 them into *FD. */
216 static void
217 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
218 struct omp_for_data_loop *loops)
220 tree t, var, *collapse_iter, *collapse_count;
221 tree count = NULL_TREE, iter_type = long_integer_type_node;
222 struct omp_for_data_loop *loop;
223 int i;
224 struct omp_for_data_loop dummy_loop;
225 location_t loc = gimple_location (for_stmt);
227 fd->for_stmt = for_stmt;
228 fd->pre = NULL;
229 fd->collapse = gimple_omp_for_collapse (for_stmt);
230 if (fd->collapse > 1)
231 fd->loops = loops;
232 else
233 fd->loops = &fd->loop;
235 fd->have_nowait = fd->have_ordered = false;
236 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
237 fd->chunk_size = NULL_TREE;
238 collapse_iter = NULL;
239 collapse_count = NULL;
241 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
242 switch (OMP_CLAUSE_CODE (t))
244 case OMP_CLAUSE_NOWAIT:
245 fd->have_nowait = true;
246 break;
247 case OMP_CLAUSE_ORDERED:
248 fd->have_ordered = true;
249 break;
250 case OMP_CLAUSE_SCHEDULE:
251 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
252 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
253 break;
254 case OMP_CLAUSE_COLLAPSE:
255 if (fd->collapse > 1)
257 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
258 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
260 default:
261 break;
264 /* FIXME: for now map schedule(auto) to schedule(static).
265 There should be analysis to determine whether all iterations
266 are approximately the same amount of work (then schedule(static)
267 is best) or if it varies (then schedule(dynamic,N) is better). */
268 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
270 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
271 gcc_assert (fd->chunk_size == NULL);
273 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
274 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
275 gcc_assert (fd->chunk_size == NULL);
276 else if (fd->chunk_size == NULL)
278 /* We only need to compute a default chunk size for ordered
279 static loops and dynamic loops. */
280 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
281 || fd->have_ordered
282 || fd->collapse > 1)
283 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
284 ? integer_zero_node : integer_one_node;
287 for (i = 0; i < fd->collapse; i++)
289 if (fd->collapse == 1)
290 loop = &fd->loop;
291 else if (loops != NULL)
292 loop = loops + i;
293 else
294 loop = &dummy_loop;
297 loop->v = gimple_omp_for_index (for_stmt, i);
298 gcc_assert (SSA_VAR_P (loop->v));
299 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
300 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
301 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
302 loop->n1 = gimple_omp_for_initial (for_stmt, i);
304 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
305 loop->n2 = gimple_omp_for_final (for_stmt, i);
306 switch (loop->cond_code)
308 case LT_EXPR:
309 case GT_EXPR:
310 break;
311 case LE_EXPR:
312 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
313 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
314 else
315 loop->n2 = fold_build2_loc (loc,
316 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
317 build_int_cst (TREE_TYPE (loop->n2), 1));
318 loop->cond_code = LT_EXPR;
319 break;
320 case GE_EXPR:
321 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
322 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
323 else
324 loop->n2 = fold_build2_loc (loc,
325 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
326 build_int_cst (TREE_TYPE (loop->n2), 1));
327 loop->cond_code = GT_EXPR;
328 break;
329 default:
330 gcc_unreachable ();
333 t = gimple_omp_for_incr (for_stmt, i);
334 gcc_assert (TREE_OPERAND (t, 0) == var);
335 switch (TREE_CODE (t))
337 case PLUS_EXPR:
338 loop->step = TREE_OPERAND (t, 1);
339 break;
340 case POINTER_PLUS_EXPR:
341 loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
342 break;
343 case MINUS_EXPR:
344 loop->step = TREE_OPERAND (t, 1);
345 loop->step = fold_build1_loc (loc,
346 NEGATE_EXPR, TREE_TYPE (loop->step),
347 loop->step);
348 break;
349 default:
350 gcc_unreachable ();
353 if (iter_type != long_long_unsigned_type_node)
355 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
356 iter_type = long_long_unsigned_type_node;
357 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
358 && TYPE_PRECISION (TREE_TYPE (loop->v))
359 >= TYPE_PRECISION (iter_type))
361 tree n;
363 if (loop->cond_code == LT_EXPR)
364 n = fold_build2_loc (loc,
365 PLUS_EXPR, TREE_TYPE (loop->v),
366 loop->n2, loop->step);
367 else
368 n = loop->n1;
369 if (TREE_CODE (n) != INTEGER_CST
370 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
371 iter_type = long_long_unsigned_type_node;
373 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
374 > TYPE_PRECISION (iter_type))
376 tree n1, n2;
378 if (loop->cond_code == LT_EXPR)
380 n1 = loop->n1;
381 n2 = fold_build2_loc (loc,
382 PLUS_EXPR, TREE_TYPE (loop->v),
383 loop->n2, loop->step);
385 else
387 n1 = fold_build2_loc (loc,
388 MINUS_EXPR, TREE_TYPE (loop->v),
389 loop->n2, loop->step);
390 n2 = loop->n1;
392 if (TREE_CODE (n1) != INTEGER_CST
393 || TREE_CODE (n2) != INTEGER_CST
394 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
395 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
396 iter_type = long_long_unsigned_type_node;
400 if (collapse_count && *collapse_count == NULL)
402 if ((i == 0 || count != NULL_TREE)
403 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
404 && TREE_CONSTANT (loop->n1)
405 && TREE_CONSTANT (loop->n2)
406 && TREE_CODE (loop->step) == INTEGER_CST)
408 tree itype = TREE_TYPE (loop->v);
410 if (POINTER_TYPE_P (itype))
411 itype = signed_type_for (itype);
412 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
413 t = fold_build2_loc (loc,
414 PLUS_EXPR, itype,
415 fold_convert_loc (loc, itype, loop->step), t);
416 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
417 fold_convert_loc (loc, itype, loop->n2));
418 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
419 fold_convert_loc (loc, itype, loop->n1));
420 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
421 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
422 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
423 fold_build1_loc (loc, NEGATE_EXPR, itype,
424 fold_convert_loc (loc, itype,
425 loop->step)));
426 else
427 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
428 fold_convert_loc (loc, itype, loop->step));
429 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
430 if (count != NULL_TREE)
431 count = fold_build2_loc (loc,
432 MULT_EXPR, long_long_unsigned_type_node,
433 count, t);
434 else
435 count = t;
436 if (TREE_CODE (count) != INTEGER_CST)
437 count = NULL_TREE;
439 else
440 count = NULL_TREE;
444 if (count)
446 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
447 iter_type = long_long_unsigned_type_node;
448 else
449 iter_type = long_integer_type_node;
451 else if (collapse_iter && *collapse_iter != NULL)
452 iter_type = TREE_TYPE (*collapse_iter);
453 fd->iter_type = iter_type;
454 if (collapse_iter && *collapse_iter == NULL)
455 *collapse_iter = create_tmp_var (iter_type, ".iter");
456 if (collapse_count && *collapse_count == NULL)
458 if (count)
459 *collapse_count = fold_convert_loc (loc, iter_type, count);
460 else
461 *collapse_count = create_tmp_var (iter_type, ".count");
464 if (fd->collapse > 1)
466 fd->loop.v = *collapse_iter;
467 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
468 fd->loop.n2 = *collapse_count;
469 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
470 fd->loop.cond_code = LT_EXPR;
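/* Editorial worked example of the iteration-count folding above: once a
   loop has been normalized to cond_code LT_EXPR, the count computed for
   the collapse clause is effectively

       count = (step + -1 + n2 - n1) / step    (truncating division)

   so "for (i = 3; i < 20; i += 4)" yields (4 - 1 + 20 - 3) / 4 = 5,
   matching i = 3, 7, 11, 15, 19.  With collapse > 1 the per-loop counts
   are multiplied together and become fd->loop.n2.  */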
475 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
476 is the immediate dominator of PAR_ENTRY_BB, return true if there
477 are no data dependencies that would prevent expanding the parallel
478 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
480 When expanding a combined parallel+workshare region, the call to
481 the child function may need additional arguments in the case of
482 GIMPLE_OMP_FOR regions. In some cases, these arguments are
483 computed out of variables passed in from the parent to the child
484 via 'struct .omp_data_s'. For instance:
486 #pragma omp parallel for schedule (guided, i * 4)
487 for (j ...)
489 Is lowered into:
491 # BLOCK 2 (PAR_ENTRY_BB)
492 .omp_data_o.i = i;
493 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
495 # BLOCK 3 (WS_ENTRY_BB)
496 .omp_data_i = &.omp_data_o;
497 D.1667 = .omp_data_i->i;
498 D.1598 = D.1667 * 4;
499 #pragma omp for schedule (guided, D.1598)
501 When we outline the parallel region, the call to the child function
502 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
503 that value is computed *after* the call site. So, in principle we
504 cannot do the transformation.
506 To see whether the code in WS_ENTRY_BB blocks the combined
507 parallel+workshare call, we collect all the variables used in the
 508    GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
509 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
510 call.
512 FIXME. If we had the SSA form built at this point, we could merely
513 hoist the code in block 3 into block 2 and be done with it. But at
514 this point we don't have dataflow information and though we could
515 hack something up here, it is really not worth the aggravation. */
517 static bool
518 workshare_safe_to_combine_p (basic_block ws_entry_bb)
520 struct omp_for_data fd;
521 gimple ws_stmt = last_stmt (ws_entry_bb);
523 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
524 return true;
526 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
528 extract_omp_for_data (ws_stmt, &fd, NULL);
530 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
531 return false;
532 if (fd.iter_type != long_integer_type_node)
533 return false;
535 /* FIXME. We give up too easily here. If any of these arguments
536 are not constants, they will likely involve variables that have
537 been mapped into fields of .omp_data_s for sharing with the child
538 function. With appropriate data flow, it would be possible to
539 see through this. */
540 if (!is_gimple_min_invariant (fd.loop.n1)
541 || !is_gimple_min_invariant (fd.loop.n2)
542 || !is_gimple_min_invariant (fd.loop.step)
543 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
544 return false;
546 return true;
550 /* Collect additional arguments needed to emit a combined
551 parallel+workshare call. WS_STMT is the workshare directive being
552 expanded. */
554 static VEC(tree,gc) *
555 get_ws_args_for (gimple ws_stmt)
557 tree t;
558 location_t loc = gimple_location (ws_stmt);
559 VEC(tree,gc) *ws_args;
561 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
563 struct omp_for_data fd;
565 extract_omp_for_data (ws_stmt, &fd, NULL);
567 ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));
569 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
570 VEC_quick_push (tree, ws_args, t);
572 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
573 VEC_quick_push (tree, ws_args, t);
575 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
576 VEC_quick_push (tree, ws_args, t);
578 if (fd.chunk_size)
580 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
581 VEC_quick_push (tree, ws_args, t);
584 return ws_args;
586 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
588 /* Number of sections is equal to the number of edges from the
589 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
590 the exit of the sections region. */
591 basic_block bb = single_succ (gimple_bb (ws_stmt));
592 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
593 ws_args = VEC_alloc (tree, gc, 1);
594 VEC_quick_push (tree, ws_args, t);
595 return ws_args;
598 gcc_unreachable ();
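/* Editorial example, assuming a loop "for (i = 0; i < n; i++)" under
   "#pragma omp parallel for schedule (dynamic, 4)": the vector built
   above holds the bounds, step and chunk size converted to long,
   roughly { (long) 0, (long) n, (long) 1, (long) 4 }.  These become the
   extra trailing arguments of the combined GOMP_parallel_loop_* call
   emitted later by pass_expand_omp.  For a combined parallel sections
   region the single element is the section count.  */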
602 /* Discover whether REGION is a combined parallel+workshare region. */
604 static void
605 determine_parallel_type (struct omp_region *region)
607 basic_block par_entry_bb, par_exit_bb;
608 basic_block ws_entry_bb, ws_exit_bb;
610 if (region == NULL || region->inner == NULL
611 || region->exit == NULL || region->inner->exit == NULL
612 || region->inner->cont == NULL)
613 return;
615 /* We only support parallel+for and parallel+sections. */
616 if (region->type != GIMPLE_OMP_PARALLEL
617 || (region->inner->type != GIMPLE_OMP_FOR
618 && region->inner->type != GIMPLE_OMP_SECTIONS))
619 return;
621 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
622 WS_EXIT_BB -> PAR_EXIT_BB. */
623 par_entry_bb = region->entry;
624 par_exit_bb = region->exit;
625 ws_entry_bb = region->inner->entry;
626 ws_exit_bb = region->inner->exit;
628 if (single_succ (par_entry_bb) == ws_entry_bb
629 && single_succ (ws_exit_bb) == par_exit_bb
630 && workshare_safe_to_combine_p (ws_entry_bb)
631 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
632 || (last_and_only_stmt (ws_entry_bb)
633 && last_and_only_stmt (par_exit_bb))))
635 gimple ws_stmt = last_stmt (ws_entry_bb);
637 if (region->inner->type == GIMPLE_OMP_FOR)
639 /* If this is a combined parallel loop, we need to determine
640 whether or not to use the combined library calls. There
641 are two cases where we do not apply the transformation:
642 static loops and any kind of ordered loop. In the first
643 case, we already open code the loop so there is no need
644 to do anything else. In the latter case, the combined
645 parallel loop call would still need extra synchronization
646 to implement ordered semantics, so there would not be any
647 gain in using the combined call. */
648 tree clauses = gimple_omp_for_clauses (ws_stmt);
649 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
650 if (c == NULL
651 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
652 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
654 region->is_combined_parallel = false;
655 region->inner->is_combined_parallel = false;
656 return;
660 region->is_combined_parallel = true;
661 region->inner->is_combined_parallel = true;
662 region->ws_args = get_ws_args_for (ws_stmt);
667 /* Return true if EXPR is variable sized. */
669 static inline bool
670 is_variable_sized (const_tree expr)
672 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
 675 /* Return true if DECL is to be privatized by reference.  */
677 static inline bool
678 is_reference (tree decl)
680 return lang_hooks.decls.omp_privatize_by_reference (decl);
683 /* Lookup variables in the decl or field splay trees. The "maybe" form
 684    allows the variable not to have been entered; otherwise we assert
 685    that it must have been entered.  */
687 static inline tree
688 lookup_decl (tree var, omp_context *ctx)
690 tree *n;
691 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
692 return *n;
695 static inline tree
696 maybe_lookup_decl (const_tree var, omp_context *ctx)
698 tree *n;
699 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
700 return n ? *n : NULL_TREE;
703 static inline tree
704 lookup_field (tree var, omp_context *ctx)
706 splay_tree_node n;
707 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
708 return (tree) n->value;
711 static inline tree
712 lookup_sfield (tree var, omp_context *ctx)
714 splay_tree_node n;
715 n = splay_tree_lookup (ctx->sfield_map
716 ? ctx->sfield_map : ctx->field_map,
717 (splay_tree_key) var);
718 return (tree) n->value;
721 static inline tree
722 maybe_lookup_field (tree var, omp_context *ctx)
724 splay_tree_node n;
725 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
726 return n ? (tree) n->value : NULL_TREE;
729 /* Return true if DECL should be copied by pointer. SHARED_CTX is
730 the parallel context if DECL is to be shared. */
732 static bool
733 use_pointer_for_field (tree decl, omp_context *shared_ctx)
735 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
736 return true;
738 /* We can only use copy-in/copy-out semantics for shared variables
739 when we know the value is not accessible from an outer scope. */
740 if (shared_ctx)
742 /* ??? Trivially accessible from anywhere. But why would we even
743 be passing an address in this case? Should we simply assert
744 this to be false, or should we have a cleanup pass that removes
745 these from the list of mappings? */
746 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
747 return true;
749 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
750 without analyzing the expression whether or not its location
751 is accessible to anyone else. In the case of nested parallel
752 regions it certainly may be. */
753 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
754 return true;
756 /* Do not use copy-in/copy-out for variables that have their
757 address taken. */
758 if (TREE_ADDRESSABLE (decl))
759 return true;
761 /* Disallow copy-in/out in nested parallel if
762 decl is shared in outer parallel, otherwise
763 each thread could store the shared variable
764 in its own copy-in location, making the
765 variable no longer really shared. */
766 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
768 omp_context *up;
770 for (up = shared_ctx->outer; up; up = up->outer)
771 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
772 break;
774 if (up)
776 tree c;
778 for (c = gimple_omp_taskreg_clauses (up->stmt);
779 c; c = OMP_CLAUSE_CHAIN (c))
780 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
781 && OMP_CLAUSE_DECL (c) == decl)
782 break;
784 if (c)
785 goto maybe_mark_addressable_and_ret;
789 /* For tasks avoid using copy-in/out, unless they are readonly
790 (in which case just copy-in is used). As tasks can be
 791     deferred or executed in a different thread, when GOMP_task
792 returns, the task hasn't necessarily terminated. */
793 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
795 tree outer;
796 maybe_mark_addressable_and_ret:
797 outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
798 if (is_gimple_reg (outer))
800 /* Taking address of OUTER in lower_send_shared_vars
801 might need regimplification of everything that uses the
802 variable. */
803 if (!task_shared_vars)
804 task_shared_vars = BITMAP_ALLOC (NULL);
805 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
806 TREE_ADDRESSABLE (outer) = 1;
808 return true;
812 return false;
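/* Editorial examples of the decision above: a shared scalar such as
   "int n" whose address is never taken and which is not visible from an
   outer scope can use copy-in/copy-out, so its .omp_data_s field holds
   the value itself.  A shared aggregate such as "int a[10]", or a
   scalar that is addressable (or wrapped by a DECL_VALUE_EXPR), is
   instead passed by reference and the field holds a pointer to the
   original object.  */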
815 /* Create a new VAR_DECL and copy information from VAR to it. */
817 tree
818 copy_var_decl (tree var, tree name, tree type)
820 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
822 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
823 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
824 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
825 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
826 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
827 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
828 TREE_USED (copy) = 1;
829 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
831 return copy;
834 /* Construct a new automatic decl similar to VAR. */
836 static tree
837 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
839 tree copy = copy_var_decl (var, name, type);
841 DECL_CONTEXT (copy) = current_function_decl;
842 DECL_CHAIN (copy) = ctx->block_vars;
843 ctx->block_vars = copy;
845 return copy;
848 static tree
849 omp_copy_decl_1 (tree var, omp_context *ctx)
851 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
854 /* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
855 as appropriate. */
856 static tree
857 omp_build_component_ref (tree obj, tree field)
859 tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
860 if (TREE_THIS_VOLATILE (field))
861 TREE_THIS_VOLATILE (ret) |= 1;
862 if (TREE_READONLY (field))
863 TREE_READONLY (ret) |= 1;
864 return ret;
867 /* Build tree nodes to access the field for VAR on the receiver side. */
869 static tree
870 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
872 tree x, field = lookup_field (var, ctx);
874 /* If the receiver record type was remapped in the child function,
875 remap the field into the new record type. */
876 x = maybe_lookup_field (field, ctx);
877 if (x != NULL)
878 field = x;
880 x = build_simple_mem_ref (ctx->receiver_decl);
881 x = omp_build_component_ref (x, field);
882 if (by_ref)
883 x = build_simple_mem_ref (x);
885 return x;
888 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
889 of a parallel, this is a component reference; for workshare constructs
890 this is some variable. */
892 static tree
893 build_outer_var_ref (tree var, omp_context *ctx)
895 tree x;
897 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
898 x = var;
899 else if (is_variable_sized (var))
901 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
902 x = build_outer_var_ref (x, ctx);
903 x = build_simple_mem_ref (x);
905 else if (is_taskreg_ctx (ctx))
907 bool by_ref = use_pointer_for_field (var, NULL);
908 x = build_receiver_ref (var, by_ref, ctx);
910 else if (ctx->outer)
911 x = lookup_decl (var, ctx->outer);
912 else if (is_reference (var))
 913     /* This can happen with orphaned constructs.  If var is a reference, it is
914 possible it is shared and as such valid. */
915 x = var;
916 else
917 gcc_unreachable ();
919 if (is_reference (var))
920 x = build_simple_mem_ref (x);
922 return x;
925 /* Build tree nodes to access the field for VAR on the sender side. */
927 static tree
928 build_sender_ref (tree var, omp_context *ctx)
930 tree field = lookup_sfield (var, ctx);
931 return omp_build_component_ref (ctx->sender_decl, field);
934 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
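/* Editorial note on MASK as used by the function below: bit 0
   (mask & 1) requests a field in the receiver record
   (ctx->record_type, recorded in ctx->field_map), bit 1 (mask & 2)
   requests a field in the sender record used for task constructs
   (ctx->srecord_type, recorded in ctx->sfield_map), and MASK == 3
   installs the variable in both.  */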
936 static void
937 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
939 tree field, type, sfield = NULL_TREE;
941 gcc_assert ((mask & 1) == 0
942 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
943 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
944 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
946 type = TREE_TYPE (var);
947 if (by_ref)
948 type = build_pointer_type (type);
949 else if ((mask & 3) == 1 && is_reference (var))
950 type = TREE_TYPE (type);
952 field = build_decl (DECL_SOURCE_LOCATION (var),
953 FIELD_DECL, DECL_NAME (var), type);
955 /* Remember what variable this field was created for. This does have a
956 side effect of making dwarf2out ignore this member, so for helpful
957 debugging we clear it later in delete_omp_context. */
958 DECL_ABSTRACT_ORIGIN (field) = var;
959 if (type == TREE_TYPE (var))
961 DECL_ALIGN (field) = DECL_ALIGN (var);
962 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
963 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
965 else
966 DECL_ALIGN (field) = TYPE_ALIGN (type);
968 if ((mask & 3) == 3)
970 insert_field_into_struct (ctx->record_type, field);
971 if (ctx->srecord_type)
973 sfield = build_decl (DECL_SOURCE_LOCATION (var),
974 FIELD_DECL, DECL_NAME (var), type);
975 DECL_ABSTRACT_ORIGIN (sfield) = var;
976 DECL_ALIGN (sfield) = DECL_ALIGN (field);
977 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
978 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
979 insert_field_into_struct (ctx->srecord_type, sfield);
982 else
984 if (ctx->srecord_type == NULL_TREE)
986 tree t;
988 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
989 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
990 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
992 sfield = build_decl (DECL_SOURCE_LOCATION (var),
993 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
994 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
995 insert_field_into_struct (ctx->srecord_type, sfield);
996 splay_tree_insert (ctx->sfield_map,
997 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
998 (splay_tree_value) sfield);
1001 sfield = field;
1002 insert_field_into_struct ((mask & 1) ? ctx->record_type
1003 : ctx->srecord_type, field);
1006 if (mask & 1)
1007 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
1008 (splay_tree_value) field);
1009 if ((mask & 2) && ctx->sfield_map)
1010 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
1011 (splay_tree_value) sfield);
1014 static tree
1015 install_var_local (tree var, omp_context *ctx)
1017 tree new_var = omp_copy_decl_1 (var, ctx);
1018 insert_decl_map (&ctx->cb, var, new_var);
1019 return new_var;
1022 /* Adjust the replacement for DECL in CTX for the new context. This means
1023 copying the DECL_VALUE_EXPR, and fixing up the type. */
1025 static void
1026 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1028 tree new_decl, size;
1030 new_decl = lookup_decl (decl, ctx);
1032 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1034 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1035 && DECL_HAS_VALUE_EXPR_P (decl))
1037 tree ve = DECL_VALUE_EXPR (decl);
1038 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1039 SET_DECL_VALUE_EXPR (new_decl, ve);
1040 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1043 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1045 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1046 if (size == error_mark_node)
1047 size = TYPE_SIZE (TREE_TYPE (new_decl));
1048 DECL_SIZE (new_decl) = size;
1050 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1051 if (size == error_mark_node)
1052 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1053 DECL_SIZE_UNIT (new_decl) = size;
1057 /* The callback for remap_decl. Search all containing contexts for a
1058 mapping of the variable; this avoids having to duplicate the splay
1059 tree ahead of time. We know a mapping doesn't already exist in the
1060 given context. Create new mappings to implement default semantics. */
1062 static tree
1063 omp_copy_decl (tree var, copy_body_data *cb)
1065 omp_context *ctx = (omp_context *) cb;
1066 tree new_var;
1068 if (TREE_CODE (var) == LABEL_DECL)
1070 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1071 DECL_CONTEXT (new_var) = current_function_decl;
1072 insert_decl_map (&ctx->cb, var, new_var);
1073 return new_var;
1076 while (!is_taskreg_ctx (ctx))
1078 ctx = ctx->outer;
1079 if (ctx == NULL)
1080 return var;
1081 new_var = maybe_lookup_decl (var, ctx);
1082 if (new_var)
1083 return new_var;
1086 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1087 return var;
1089 return error_mark_node;
1093 /* Return the parallel region associated with STMT. */
1095 /* Debugging dumps for parallel regions. */
1096 void dump_omp_region (FILE *, struct omp_region *, int);
1097 void debug_omp_region (struct omp_region *);
1098 void debug_all_omp_regions (void);
1100 /* Dump the parallel region tree rooted at REGION. */
1102 void
1103 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1105 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1106 gimple_code_name[region->type]);
1108 if (region->inner)
1109 dump_omp_region (file, region->inner, indent + 4);
1111 if (region->cont)
1113 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1114 region->cont->index);
1117 if (region->exit)
1118 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1119 region->exit->index);
1120 else
1121 fprintf (file, "%*s[no exit marker]\n", indent, "");
1123 if (region->next)
1124 dump_omp_region (file, region->next, indent);
1127 DEBUG_FUNCTION void
1128 debug_omp_region (struct omp_region *region)
1130 dump_omp_region (stderr, region, 0);
1133 DEBUG_FUNCTION void
1134 debug_all_omp_regions (void)
1136 dump_omp_region (stderr, root_omp_region, 0);
1140 /* Create a new parallel region with entry block BB inside region PARENT.  */
1142 struct omp_region *
1143 new_omp_region (basic_block bb, enum gimple_code type,
1144 struct omp_region *parent)
1146 struct omp_region *region = XCNEW (struct omp_region);
1148 region->outer = parent;
1149 region->entry = bb;
1150 region->type = type;
1152 if (parent)
1154 /* This is a nested region. Add it to the list of inner
1155 regions in PARENT. */
1156 region->next = parent->inner;
1157 parent->inner = region;
1159 else
1161 /* This is a toplevel region. Add it to the list of toplevel
1162 regions in ROOT_OMP_REGION. */
1163 region->next = root_omp_region;
1164 root_omp_region = region;
1167 return region;
1170 /* Release the memory associated with the region tree rooted at REGION. */
1172 static void
1173 free_omp_region_1 (struct omp_region *region)
1175 struct omp_region *i, *n;
1177 for (i = region->inner; i ; i = n)
1179 n = i->next;
1180 free_omp_region_1 (i);
1183 free (region);
1186 /* Release the memory for the entire omp region tree. */
1188 void
1189 free_omp_regions (void)
1191 struct omp_region *r, *n;
1192 for (r = root_omp_region; r ; r = n)
1194 n = r->next;
1195 free_omp_region_1 (r);
1197 root_omp_region = NULL;
1201 /* Create a new context, with OUTER_CTX being the surrounding context. */
1203 static omp_context *
1204 new_omp_context (gimple stmt, omp_context *outer_ctx)
1206 omp_context *ctx = XCNEW (omp_context);
1208 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1209 (splay_tree_value) ctx);
1210 ctx->stmt = stmt;
1212 if (outer_ctx)
1214 ctx->outer = outer_ctx;
1215 ctx->cb = outer_ctx->cb;
1216 ctx->cb.block = NULL;
1217 ctx->depth = outer_ctx->depth + 1;
1219 else
1221 ctx->cb.src_fn = current_function_decl;
1222 ctx->cb.dst_fn = current_function_decl;
1223 ctx->cb.src_node = cgraph_get_node (current_function_decl);
1224 gcc_checking_assert (ctx->cb.src_node);
1225 ctx->cb.dst_node = ctx->cb.src_node;
1226 ctx->cb.src_cfun = cfun;
1227 ctx->cb.copy_decl = omp_copy_decl;
1228 ctx->cb.eh_lp_nr = 0;
1229 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1230 ctx->depth = 1;
1233 ctx->cb.decl_map = pointer_map_create ();
1235 return ctx;
1238 static gimple_seq maybe_catch_exception (gimple_seq);
1240 /* Finalize task copyfn. */
1242 static void
1243 finalize_task_copyfn (gimple task_stmt)
1245 struct function *child_cfun;
1246 tree child_fn;
1247 gimple_seq seq = NULL, new_seq;
1248 gimple bind;
1250 child_fn = gimple_omp_task_copy_fn (task_stmt);
1251 if (child_fn == NULL_TREE)
1252 return;
1254 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1256 /* Inform the callgraph about the new function. */
1257 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1258 = cfun->curr_properties & ~PROP_loops;
1260 push_cfun (child_cfun);
1261 bind = gimplify_body (child_fn, false);
1262 gimple_seq_add_stmt (&seq, bind);
1263 new_seq = maybe_catch_exception (seq);
1264 if (new_seq != seq)
1266 bind = gimple_build_bind (NULL, new_seq, NULL);
1267 seq = NULL;
1268 gimple_seq_add_stmt (&seq, bind);
1270 gimple_set_body (child_fn, seq);
1271 pop_cfun ();
1273 cgraph_add_new_function (child_fn, false);
1276 /* Destroy an omp_context data structure.  Called through the splay tree
1277 value delete callback. */
1279 static void
1280 delete_omp_context (splay_tree_value value)
1282 omp_context *ctx = (omp_context *) value;
1284 pointer_map_destroy (ctx->cb.decl_map);
1286 if (ctx->field_map)
1287 splay_tree_delete (ctx->field_map);
1288 if (ctx->sfield_map)
1289 splay_tree_delete (ctx->sfield_map);
1291 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1292 it produces corrupt debug information. */
1293 if (ctx->record_type)
1295 tree t;
1296 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1297 DECL_ABSTRACT_ORIGIN (t) = NULL;
1299 if (ctx->srecord_type)
1301 tree t;
1302 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1303 DECL_ABSTRACT_ORIGIN (t) = NULL;
1306 if (is_task_ctx (ctx))
1307 finalize_task_copyfn (ctx->stmt);
1309 XDELETE (ctx);
1312 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1313 context. */
1315 static void
1316 fixup_child_record_type (omp_context *ctx)
1318 tree f, type = ctx->record_type;
1320 /* ??? It isn't sufficient to just call remap_type here, because
1321 variably_modified_type_p doesn't work the way we expect for
1322 record types. Testing each field for whether it needs remapping
1323 and creating a new record by hand works, however. */
1324 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1325 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1326 break;
1327 if (f)
1329 tree name, new_fields = NULL;
1331 type = lang_hooks.types.make_type (RECORD_TYPE);
1332 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1333 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1334 TYPE_DECL, name, type);
1335 TYPE_NAME (type) = name;
1337 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1339 tree new_f = copy_node (f);
1340 DECL_CONTEXT (new_f) = type;
1341 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1342 DECL_CHAIN (new_f) = new_fields;
1343 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1344 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1345 &ctx->cb, NULL);
1346 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1347 &ctx->cb, NULL);
1348 new_fields = new_f;
1350 /* Arrange to be able to look up the receiver field
1351 given the sender field. */
1352 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1353 (splay_tree_value) new_f);
1355 TYPE_FIELDS (type) = nreverse (new_fields);
1356 layout_type (type);
1359 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1362 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1363 specified by CLAUSES. */
1365 static void
1366 scan_sharing_clauses (tree clauses, omp_context *ctx)
1368 tree c, decl;
1369 bool scan_array_reductions = false;
1371 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1373 bool by_ref;
1375 switch (OMP_CLAUSE_CODE (c))
1377 case OMP_CLAUSE_PRIVATE:
1378 decl = OMP_CLAUSE_DECL (c);
1379 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1380 goto do_private;
1381 else if (!is_variable_sized (decl))
1382 install_var_local (decl, ctx);
1383 break;
1385 case OMP_CLAUSE_SHARED:
1386 gcc_assert (is_taskreg_ctx (ctx));
1387 decl = OMP_CLAUSE_DECL (c);
1388 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1389 || !is_variable_sized (decl));
1390 /* Global variables don't need to be copied,
1391 the receiver side will use them directly. */
1392 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1393 break;
1394 by_ref = use_pointer_for_field (decl, ctx);
1395 if (! TREE_READONLY (decl)
1396 || TREE_ADDRESSABLE (decl)
1397 || by_ref
1398 || is_reference (decl))
1400 install_var_field (decl, by_ref, 3, ctx);
1401 install_var_local (decl, ctx);
1402 break;
1404 /* We don't need to copy const scalar vars back. */
1405 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1406 goto do_private;
1408 case OMP_CLAUSE_LASTPRIVATE:
1409 /* Let the corresponding firstprivate clause create
1410 the variable. */
1411 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1412 break;
1413 /* FALLTHRU */
1415 case OMP_CLAUSE_FIRSTPRIVATE:
1416 case OMP_CLAUSE_REDUCTION:
1417 decl = OMP_CLAUSE_DECL (c);
1418 do_private:
1419 if (is_variable_sized (decl))
1421 if (is_task_ctx (ctx))
1422 install_var_field (decl, false, 1, ctx);
1423 break;
1425 else if (is_taskreg_ctx (ctx))
1427 bool global
1428 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1429 by_ref = use_pointer_for_field (decl, NULL);
1431 if (is_task_ctx (ctx)
1432 && (global || by_ref || is_reference (decl)))
1434 install_var_field (decl, false, 1, ctx);
1435 if (!global)
1436 install_var_field (decl, by_ref, 2, ctx);
1438 else if (!global)
1439 install_var_field (decl, by_ref, 3, ctx);
1441 install_var_local (decl, ctx);
1442 break;
1444 case OMP_CLAUSE_COPYPRIVATE:
1445 case OMP_CLAUSE_COPYIN:
1446 decl = OMP_CLAUSE_DECL (c);
1447 by_ref = use_pointer_for_field (decl, NULL);
1448 install_var_field (decl, by_ref, 3, ctx);
1449 break;
1451 case OMP_CLAUSE_DEFAULT:
1452 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1453 break;
1455 case OMP_CLAUSE_FINAL:
1456 case OMP_CLAUSE_IF:
1457 case OMP_CLAUSE_NUM_THREADS:
1458 case OMP_CLAUSE_SCHEDULE:
1459 if (ctx->outer)
1460 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1461 break;
1463 case OMP_CLAUSE_NOWAIT:
1464 case OMP_CLAUSE_ORDERED:
1465 case OMP_CLAUSE_COLLAPSE:
1466 case OMP_CLAUSE_UNTIED:
1467 case OMP_CLAUSE_MERGEABLE:
1468 break;
1470 default:
1471 gcc_unreachable ();
1475 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1477 switch (OMP_CLAUSE_CODE (c))
1479 case OMP_CLAUSE_LASTPRIVATE:
1480 /* Let the corresponding firstprivate clause create
1481 the variable. */
1482 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1483 scan_array_reductions = true;
1484 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1485 break;
1486 /* FALLTHRU */
1488 case OMP_CLAUSE_PRIVATE:
1489 case OMP_CLAUSE_FIRSTPRIVATE:
1490 case OMP_CLAUSE_REDUCTION:
1491 decl = OMP_CLAUSE_DECL (c);
1492 if (is_variable_sized (decl))
1493 install_var_local (decl, ctx);
1494 fixup_remapped_decl (decl, ctx,
1495 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1496 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1497 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1498 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1499 scan_array_reductions = true;
1500 break;
1502 case OMP_CLAUSE_SHARED:
1503 decl = OMP_CLAUSE_DECL (c);
1504 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1505 fixup_remapped_decl (decl, ctx, false);
1506 break;
1508 case OMP_CLAUSE_COPYPRIVATE:
1509 case OMP_CLAUSE_COPYIN:
1510 case OMP_CLAUSE_DEFAULT:
1511 case OMP_CLAUSE_IF:
1512 case OMP_CLAUSE_NUM_THREADS:
1513 case OMP_CLAUSE_SCHEDULE:
1514 case OMP_CLAUSE_NOWAIT:
1515 case OMP_CLAUSE_ORDERED:
1516 case OMP_CLAUSE_COLLAPSE:
1517 case OMP_CLAUSE_UNTIED:
1518 case OMP_CLAUSE_FINAL:
1519 case OMP_CLAUSE_MERGEABLE:
1520 break;
1522 default:
1523 gcc_unreachable ();
1527 if (scan_array_reductions)
1528 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1529 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1530 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1532 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1533 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1535 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1536 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1537 scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1540 /* Create a new name for the omp child function.  Returns an identifier.  */
1542 static GTY(()) unsigned int tmp_ompfn_id_num;
1544 static tree
1545 create_omp_child_function_name (bool task_copy)
1547 return (clone_function_name (current_function_decl,
1548 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1551 /* Build a decl for the omp child function. It'll not contain a body
1552 yet, just the bare decl. */
1554 static void
1555 create_omp_child_function (omp_context *ctx, bool task_copy)
1557 tree decl, type, name, t;
1559 name = create_omp_child_function_name (task_copy);
1560 if (task_copy)
1561 type = build_function_type_list (void_type_node, ptr_type_node,
1562 ptr_type_node, NULL_TREE);
1563 else
1564 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1566 decl = build_decl (gimple_location (ctx->stmt),
1567 FUNCTION_DECL, name, type);
1569 if (!task_copy)
1570 ctx->cb.dst_fn = decl;
1571 else
1572 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1574 TREE_STATIC (decl) = 1;
1575 TREE_USED (decl) = 1;
1576 DECL_ARTIFICIAL (decl) = 1;
1577 DECL_NAMELESS (decl) = 1;
1578 DECL_IGNORED_P (decl) = 0;
1579 TREE_PUBLIC (decl) = 0;
1580 DECL_UNINLINABLE (decl) = 1;
1581 DECL_EXTERNAL (decl) = 0;
1582 DECL_CONTEXT (decl) = NULL_TREE;
1583 DECL_INITIAL (decl) = make_node (BLOCK);
1585 t = build_decl (DECL_SOURCE_LOCATION (decl),
1586 RESULT_DECL, NULL_TREE, void_type_node);
1587 DECL_ARTIFICIAL (t) = 1;
1588 DECL_IGNORED_P (t) = 1;
1589 DECL_CONTEXT (t) = decl;
1590 DECL_RESULT (decl) = t;
1592 t = build_decl (DECL_SOURCE_LOCATION (decl),
1593 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1594 DECL_ARTIFICIAL (t) = 1;
1595 DECL_NAMELESS (t) = 1;
1596 DECL_ARG_TYPE (t) = ptr_type_node;
1597 DECL_CONTEXT (t) = current_function_decl;
1598 TREE_USED (t) = 1;
1599 DECL_ARGUMENTS (decl) = t;
1600 if (!task_copy)
1601 ctx->receiver_decl = t;
1602 else
1604 t = build_decl (DECL_SOURCE_LOCATION (decl),
1605 PARM_DECL, get_identifier (".omp_data_o"),
1606 ptr_type_node);
1607 DECL_ARTIFICIAL (t) = 1;
1608 DECL_NAMELESS (t) = 1;
1609 DECL_ARG_TYPE (t) = ptr_type_node;
1610 DECL_CONTEXT (t) = current_function_decl;
1611 TREE_USED (t) = 1;
1612 TREE_ADDRESSABLE (t) = 1;
1613 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1614 DECL_ARGUMENTS (decl) = t;
1617 /* Allocate memory for the function structure. The call to
1618 allocate_struct_function clobbers CFUN, so we need to restore
1619 it afterward. */
1620 push_struct_function (decl);
1621 cfun->function_end_locus = gimple_location (ctx->stmt);
1622 pop_cfun ();
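/* Editorial sketch of the decl built above (names illustrative): for a
   parallel region in foo () the result is roughly equivalent to

       static void foo._omp_fn.0 (void *.omp_data_i);

   i.e. a static, artificial, uninlinable function with a single pointer
   parameter; fixup_child_record_type later narrows the parameter's type
   to the remapped "struct .omp_data_s *".  A task copy function gets a
   second ".omp_data_o" pointer parameter.  */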
1626 /* Scan an OpenMP parallel directive. */
1628 static void
1629 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1631 omp_context *ctx;
1632 tree name;
1633 gimple stmt = gsi_stmt (*gsi);
1635 /* Ignore parallel directives with empty bodies, unless there
1636 are copyin clauses. */
1637 if (optimize > 0
1638 && empty_body_p (gimple_omp_body (stmt))
1639 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1640 OMP_CLAUSE_COPYIN) == NULL)
1642 gsi_replace (gsi, gimple_build_nop (), false);
1643 return;
1646 ctx = new_omp_context (stmt, outer_ctx);
1647 if (taskreg_nesting_level > 1)
1648 ctx->is_nested = true;
1649 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1650 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1651 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1652 name = create_tmp_var_name (".omp_data_s");
1653 name = build_decl (gimple_location (stmt),
1654 TYPE_DECL, name, ctx->record_type);
1655 DECL_ARTIFICIAL (name) = 1;
1656 DECL_NAMELESS (name) = 1;
1657 TYPE_NAME (ctx->record_type) = name;
1658 create_omp_child_function (ctx, false);
1659 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1661 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1662 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1664 if (TYPE_FIELDS (ctx->record_type) == NULL)
1665 ctx->record_type = ctx->receiver_decl = NULL;
1666 else
1668 layout_type (ctx->record_type);
1669 fixup_child_record_type (ctx);
1673 /* Scan an OpenMP task directive. */
1675 static void
1676 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1678 omp_context *ctx;
1679 tree name, t;
1680 gimple stmt = gsi_stmt (*gsi);
1681 location_t loc = gimple_location (stmt);
1683 /* Ignore task directives with empty bodies. */
1684 if (optimize > 0
1685 && empty_body_p (gimple_omp_body (stmt)))
1687 gsi_replace (gsi, gimple_build_nop (), false);
1688 return;
1691 ctx = new_omp_context (stmt, outer_ctx);
1692 if (taskreg_nesting_level > 1)
1693 ctx->is_nested = true;
1694 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1695 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1696 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1697 name = create_tmp_var_name (".omp_data_s");
1698 name = build_decl (gimple_location (stmt),
1699 TYPE_DECL, name, ctx->record_type);
1700 DECL_ARTIFICIAL (name) = 1;
1701 DECL_NAMELESS (name) = 1;
1702 TYPE_NAME (ctx->record_type) = name;
1703 create_omp_child_function (ctx, false);
1704 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1706 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1708 if (ctx->srecord_type)
1710 name = create_tmp_var_name (".omp_data_a");
1711 name = build_decl (gimple_location (stmt),
1712 TYPE_DECL, name, ctx->srecord_type);
1713 DECL_ARTIFICIAL (name) = 1;
1714 DECL_NAMELESS (name) = 1;
1715 TYPE_NAME (ctx->srecord_type) = name;
1716 create_omp_child_function (ctx, true);
1719 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1721 if (TYPE_FIELDS (ctx->record_type) == NULL)
1723 ctx->record_type = ctx->receiver_decl = NULL;
1724 t = build_int_cst (long_integer_type_node, 0);
1725 gimple_omp_task_set_arg_size (stmt, t);
1726 t = build_int_cst (long_integer_type_node, 1);
1727 gimple_omp_task_set_arg_align (stmt, t);
1729 else
1731 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1732 /* Move VLA fields to the end. */
1733 p = &TYPE_FIELDS (ctx->record_type);
1734 while (*p)
1735 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1736 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1738 *q = *p;
1739 *p = TREE_CHAIN (*p);
1740 TREE_CHAIN (*q) = NULL_TREE;
1741 q = &TREE_CHAIN (*q);
1743 else
1744 p = &DECL_CHAIN (*p);
1745 *p = vla_fields;
1746 layout_type (ctx->record_type);
1747 fixup_child_record_type (ctx);
1748 if (ctx->srecord_type)
1749 layout_type (ctx->srecord_type);
1750 t = fold_convert_loc (loc, long_integer_type_node,
1751 TYPE_SIZE_UNIT (ctx->record_type));
1752 gimple_omp_task_set_arg_size (stmt, t);
1753 t = build_int_cst (long_integer_type_node,
1754 TYPE_ALIGN_UNIT (ctx->record_type));
1755 gimple_omp_task_set_arg_align (stmt, t);
1760 /* Scan an OpenMP loop directive. */
1762 static void
1763 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1765 omp_context *ctx;
1766 size_t i;
1768 ctx = new_omp_context (stmt, outer_ctx);
1770 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1772 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
1773 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1775 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1776 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1777 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1778 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1780 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1783 /* Scan an OpenMP sections directive. */
1785 static void
1786 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1788 omp_context *ctx;
1790 ctx = new_omp_context (stmt, outer_ctx);
1791 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1792 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1795 /* Scan an OpenMP single directive. */
1797 static void
1798 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1800 omp_context *ctx;
1801 tree name;
1803 ctx = new_omp_context (stmt, outer_ctx);
1804 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1805 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1806 name = create_tmp_var_name (".omp_copy_s");
1807 name = build_decl (gimple_location (stmt),
1808 TYPE_DECL, name, ctx->record_type);
1809 TYPE_NAME (ctx->record_type) = name;
1811 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1812 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1814 if (TYPE_FIELDS (ctx->record_type) == NULL)
1815 ctx->record_type = NULL;
1816 else
1817 layout_type (ctx->record_type);
1821 /* Check OpenMP nesting restrictions. */
1822 static bool
1823 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1825 switch (gimple_code (stmt))
1827 case GIMPLE_OMP_FOR:
1828 case GIMPLE_OMP_SECTIONS:
1829 case GIMPLE_OMP_SINGLE:
1830 case GIMPLE_CALL:
1831 for (; ctx != NULL; ctx = ctx->outer)
1832 switch (gimple_code (ctx->stmt))
1834 case GIMPLE_OMP_FOR:
1835 case GIMPLE_OMP_SECTIONS:
1836 case GIMPLE_OMP_SINGLE:
1837 case GIMPLE_OMP_ORDERED:
1838 case GIMPLE_OMP_MASTER:
1839 case GIMPLE_OMP_TASK:
1840 if (is_gimple_call (stmt))
1842 error_at (gimple_location (stmt),
1843 "barrier region may not be closely nested inside "
1844 "of work-sharing, critical, ordered, master or "
1845 "explicit task region");
1846 return false;
1848 error_at (gimple_location (stmt),
1849 "work-sharing region may not be closely nested inside "
1850 "of work-sharing, critical, ordered, master or explicit "
1851 "task region");
1852 return false;
1853 case GIMPLE_OMP_PARALLEL:
1854 return true;
1855 default:
1856 break;
1858 break;
1859 case GIMPLE_OMP_MASTER:
1860 for (; ctx != NULL; ctx = ctx->outer)
1861 switch (gimple_code (ctx->stmt))
1863 case GIMPLE_OMP_FOR:
1864 case GIMPLE_OMP_SECTIONS:
1865 case GIMPLE_OMP_SINGLE:
1866 case GIMPLE_OMP_TASK:
1867 error_at (gimple_location (stmt),
1868 "master region may not be closely nested inside "
1869 "of work-sharing or explicit task region");
1870 return false;
1871 case GIMPLE_OMP_PARALLEL:
1872 return true;
1873 default:
1874 break;
1876 break;
1877 case GIMPLE_OMP_ORDERED:
1878 for (; ctx != NULL; ctx = ctx->outer)
1879 switch (gimple_code (ctx->stmt))
1881 case GIMPLE_OMP_CRITICAL:
1882 case GIMPLE_OMP_TASK:
1883 error_at (gimple_location (stmt),
1884 "ordered region may not be closely nested inside "
1885 "of critical or explicit task region");
1886 return false;
1887 case GIMPLE_OMP_FOR:
1888 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1889 OMP_CLAUSE_ORDERED) == NULL)
1891 error_at (gimple_location (stmt),
1892 "ordered region must be closely nested inside "
1893 "a loop region with an ordered clause");
1894 return false;
1896 return true;
1897 case GIMPLE_OMP_PARALLEL:
1898 return true;
1899 default:
1900 break;
1902 break;
1903 case GIMPLE_OMP_CRITICAL:
1904 for (; ctx != NULL; ctx = ctx->outer)
1905 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1906 && (gimple_omp_critical_name (stmt)
1907 == gimple_omp_critical_name (ctx->stmt)))
1909 error_at (gimple_location (stmt),
1910 "critical region may not be nested inside a critical "
1911 "region with the same name");
1912 return false;
1914 break;
1915 default:
1916 break;
1918 return true;
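/* Editorial example of a violation diagnosed above:

       #pragma omp parallel
       #pragma omp for
       for (i = 0; i < n; i++)
         {
       #pragma omp for        <-- work-sharing nested in work-sharing
           for (j = 0; j < n; j++)
             body (i, j);
         }

   The inner directive is rejected with "work-sharing region may not be
   closely nested inside of work-sharing, critical, ordered, master or
   explicit task region", and scan_omp_1_stmt then replaces the
   offending statement with a nop.  */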
1922 /* Helper function for scan_omp.
1924 Callback for walk_tree or operators in walk_gimple_stmt used to
1925 scan for OpenMP directives in TP. */
1927 static tree
1928 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1930 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1931 omp_context *ctx = (omp_context *) wi->info;
1932 tree t = *tp;
1934 switch (TREE_CODE (t))
1936 case VAR_DECL:
1937 case PARM_DECL:
1938 case LABEL_DECL:
1939 case RESULT_DECL:
1940 if (ctx)
1941 *tp = remap_decl (t, &ctx->cb);
1942 break;
1944 default:
1945 if (ctx && TYPE_P (t))
1946 *tp = remap_type (t, &ctx->cb);
1947 else if (!DECL_P (t))
1949 *walk_subtrees = 1;
1950 if (ctx)
1952 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1953 if (tem != TREE_TYPE (t))
1955 if (TREE_CODE (t) == INTEGER_CST)
1956 *tp = build_int_cst_wide (tem,
1957 TREE_INT_CST_LOW (t),
1958 TREE_INT_CST_HIGH (t));
1959 else
1960 TREE_TYPE (t) = tem;
1964 break;
1967 return NULL_TREE;
1971 /* Helper function for scan_omp.
1973 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1974 the current statement in GSI. */
1976 static tree
1977 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1978 struct walk_stmt_info *wi)
1980 gimple stmt = gsi_stmt (*gsi);
1981 omp_context *ctx = (omp_context *) wi->info;
1983 if (gimple_has_location (stmt))
1984 input_location = gimple_location (stmt);
1986 /* Check the OpenMP nesting restrictions. */
1987 if (ctx != NULL)
1989 bool remove = false;
1990 if (is_gimple_omp (stmt))
1991 remove = !check_omp_nesting_restrictions (stmt, ctx);
1992 else if (is_gimple_call (stmt))
1994 tree fndecl = gimple_call_fndecl (stmt);
1995 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1996 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1997 remove = !check_omp_nesting_restrictions (stmt, ctx);
1999 if (remove)
2001 stmt = gimple_build_nop ();
2002 gsi_replace (gsi, stmt, false);
2006 *handled_ops_p = true;
2008 switch (gimple_code (stmt))
2010 case GIMPLE_OMP_PARALLEL:
2011 taskreg_nesting_level++;
2012 scan_omp_parallel (gsi, ctx);
2013 taskreg_nesting_level--;
2014 break;
2016 case GIMPLE_OMP_TASK:
2017 taskreg_nesting_level++;
2018 scan_omp_task (gsi, ctx);
2019 taskreg_nesting_level--;
2020 break;
2022 case GIMPLE_OMP_FOR:
2023 scan_omp_for (stmt, ctx);
2024 break;
2026 case GIMPLE_OMP_SECTIONS:
2027 scan_omp_sections (stmt, ctx);
2028 break;
2030 case GIMPLE_OMP_SINGLE:
2031 scan_omp_single (stmt, ctx);
2032 break;
2034 case GIMPLE_OMP_SECTION:
2035 case GIMPLE_OMP_MASTER:
2036 case GIMPLE_OMP_ORDERED:
2037 case GIMPLE_OMP_CRITICAL:
2038 ctx = new_omp_context (stmt, ctx);
2039 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2040 break;
2042 case GIMPLE_BIND:
2044 tree var;
2046 *handled_ops_p = false;
2047 if (ctx)
2048 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2049 insert_decl_map (&ctx->cb, var, var);
2051 break;
2052 default:
2053 *handled_ops_p = false;
2054 break;
2057 return NULL_TREE;
2061 /* Scan all the statements starting at the current statement. CTX
2062 contains context information about the OpenMP directives and
2063 clauses found during the scan. */
2065 static void
2066 scan_omp (gimple_seq *body_p, omp_context *ctx)
2068 location_t saved_location;
2069 struct walk_stmt_info wi;
2071 memset (&wi, 0, sizeof (wi));
2072 wi.info = ctx;
2073 wi.want_locations = true;
2075 saved_location = input_location;
2076 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
2077 input_location = saved_location;
2080 /* Re-gimplification and code generation routines. */
2082 /* Build a call to GOMP_barrier. */
2084 static tree
2085 build_omp_barrier (void)
2087 return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
2090 /* If a context was created for STMT when it was scanned, return it. */
2092 static omp_context *
2093 maybe_lookup_ctx (gimple stmt)
2095 splay_tree_node n;
2096 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2097 return n ? (omp_context *) n->value : NULL;
2101 /* Find the mapping for DECL in CTX or the immediately enclosing
2102 context that has a mapping for DECL.
2104 If CTX is a nested parallel directive, we may have to use the decl
2105 mappings created in CTX's parent context. Suppose that we have the
2106 following parallel nesting (variable UIDs shown for clarity):
2108 iD.1562 = 0;
2109 #omp parallel shared(iD.1562) -> outer parallel
2110 iD.1562 = iD.1562 + 1;
2112 #omp parallel shared (iD.1562) -> inner parallel
2113 iD.1562 = iD.1562 - 1;
2115 Each parallel structure will create a distinct .omp_data_s structure
2116 for copying iD.1562 in/out of the directive:
2118 outer parallel .omp_data_s.1.i -> iD.1562
2119 inner parallel .omp_data_s.2.i -> iD.1562
2121 A shared variable mapping will produce a copy-out operation before
2122 the parallel directive and a copy-in operation after it. So, in
2123 this case we would have:
2125 iD.1562 = 0;
2126 .omp_data_o.1.i = iD.1562;
2127 #omp parallel shared(iD.1562) -> outer parallel
2128 .omp_data_i.1 = &.omp_data_o.1
2129 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2131 .omp_data_o.2.i = iD.1562; -> **
2132 #omp parallel shared(iD.1562) -> inner parallel
2133 .omp_data_i.2 = &.omp_data_o.2
2134 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2137 ** This is a problem. The symbol iD.1562 cannot be referenced
2138 inside the body of the outer parallel region. But since we are
2139 emitting this copy operation while expanding the inner parallel
2140 directive, we need to access the CTX structure of the outer
2141 parallel directive to get the correct mapping:
2143 .omp_data_o.2.i = .omp_data_i.1->i
2145 Since there may be other workshare or parallel directives enclosing
2146 the parallel directive, it may be necessary to walk up the context
2147 parent chain. This is not a problem in general because nested
2148 parallelism happens only rarely. */
2150 static tree
2151 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2153 tree t;
2154 omp_context *up;
2156 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2157 t = maybe_lookup_decl (decl, up);
2159 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2161 return t ? t : decl;
2165 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2166 in outer contexts. */
2168 static tree
2169 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2171 tree t = NULL;
2172 omp_context *up;
2174 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2175 t = maybe_lookup_decl (decl, up);
2177 return t ? t : decl;
2181 /* Construct the initialization value for reduction CLAUSE. */
2183 tree
2184 omp_reduction_init (tree clause, tree type)
2186 location_t loc = OMP_CLAUSE_LOCATION (clause);
2187 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2189 case PLUS_EXPR:
2190 case MINUS_EXPR:
2191 case BIT_IOR_EXPR:
2192 case BIT_XOR_EXPR:
2193 case TRUTH_OR_EXPR:
2194 case TRUTH_ORIF_EXPR:
2195 case TRUTH_XOR_EXPR:
2196 case NE_EXPR:
2197 return build_zero_cst (type);
2199 case MULT_EXPR:
2200 case TRUTH_AND_EXPR:
2201 case TRUTH_ANDIF_EXPR:
2202 case EQ_EXPR:
2203 return fold_convert_loc (loc, type, integer_one_node);
2205 case BIT_AND_EXPR:
2206 return fold_convert_loc (loc, type, integer_minus_one_node);
2208 case MAX_EXPR:
2209 if (SCALAR_FLOAT_TYPE_P (type))
2211 REAL_VALUE_TYPE max, min;
2212 if (HONOR_INFINITIES (TYPE_MODE (type)))
2214 real_inf (&max);
2215 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2217 else
2218 real_maxval (&min, 1, TYPE_MODE (type));
2219 return build_real (type, min);
2221 else
2223 gcc_assert (INTEGRAL_TYPE_P (type));
2224 return TYPE_MIN_VALUE (type);
2227 case MIN_EXPR:
2228 if (SCALAR_FLOAT_TYPE_P (type))
2230 REAL_VALUE_TYPE max;
2231 if (HONOR_INFINITIES (TYPE_MODE (type)))
2232 real_inf (&max);
2233 else
2234 real_maxval (&max, 0, TYPE_MODE (type));
2235 return build_real (type, max);
2237 else
2239 gcc_assert (INTEGRAL_TYPE_P (type));
2240 return TYPE_MAX_VALUE (type);
2243 default:
2244 gcc_unreachable ();
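/* A concrete instance of the initializers above: with placeholders A
   and N, in

       int s = 10;
       #pragma omp parallel for reduction(+:s)
       for (i = 0; i < N; i++)
         s += a[i];

   each thread's private S starts at 0 (the PLUS_EXPR value built here),
   a reduction(max:m) on a signed integer would start at TYPE_MIN_VALUE,
   and a reduction(&:mask) would start at -1 (all bits set), so that
   merging the private copies with the reduction operator leaves the
   original value (here 10) unaffected by the initializer.  For
   floating-point max/min the start value is -inf/+inf when the mode
   honors infinities, otherwise the extreme finite value.  */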
2248 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2249 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2250 private variables. Initialization statements go in ILIST, while calls
2251 to destructors go in DLIST. */
2253 static void
2254 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2255 omp_context *ctx)
2257 tree c, dtor, copyin_seq, x, ptr;
2258 bool copyin_by_ref = false;
2259 bool lastprivate_firstprivate = false;
2260 int pass;
2262 copyin_seq = NULL;
2264 /* Do all the fixed sized types in the first pass, and the variable sized
2265 types in the second pass. This makes sure that the scalar arguments to
2266 the variable sized types are processed before we use them in the
2267 variable sized operations. */
2268 for (pass = 0; pass < 2; ++pass)
2270 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2272 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2273 tree var, new_var;
2274 bool by_ref;
2275 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2277 switch (c_kind)
2279 case OMP_CLAUSE_PRIVATE:
2280 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2281 continue;
2282 break;
2283 case OMP_CLAUSE_SHARED:
2284 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2286 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2287 continue;
2289 case OMP_CLAUSE_FIRSTPRIVATE:
2290 case OMP_CLAUSE_COPYIN:
2291 case OMP_CLAUSE_REDUCTION:
2292 break;
2293 case OMP_CLAUSE_LASTPRIVATE:
2294 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2296 lastprivate_firstprivate = true;
2297 if (pass != 0)
2298 continue;
2300 break;
2301 default:
2302 continue;
2305 new_var = var = OMP_CLAUSE_DECL (c);
2306 if (c_kind != OMP_CLAUSE_COPYIN)
2307 new_var = lookup_decl (var, ctx);
2309 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2311 if (pass != 0)
2312 continue;
2314 else if (is_variable_sized (var))
2316 /* For variable sized types, we need to allocate the
2317 actual storage here. Call alloca and store the
2318 result in the pointer decl that we created elsewhere. */
2319 if (pass == 0)
2320 continue;
2322 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2324 gimple stmt;
2325 tree tmp, atmp;
2327 ptr = DECL_VALUE_EXPR (new_var);
2328 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2329 ptr = TREE_OPERAND (ptr, 0);
2330 gcc_assert (DECL_P (ptr));
2331 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2333 /* void *tmp = __builtin_alloca (x); */
2334 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2335 stmt = gimple_build_call (atmp, 1, x);
2336 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2337 gimple_add_tmp_var (tmp);
2338 gimple_call_set_lhs (stmt, tmp);
2340 gimple_seq_add_stmt (ilist, stmt);
2342 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2343 gimplify_assign (ptr, x, ilist);
2346 else if (is_reference (var))
2348 /* For references that are being privatized for Fortran,
2349 allocate new backing storage for the new pointer
2350 variable. This allows us to avoid changing all the
2351 code that expects a pointer to something that expects
2352 a direct variable. Note that this doesn't apply to
2353 C++, since reference types are disallowed in data
2354 sharing clauses there, except for NRV optimized
2355 return values. */
2356 if (pass == 0)
2357 continue;
2359 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2360 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2362 x = build_receiver_ref (var, false, ctx);
2363 x = build_fold_addr_expr_loc (clause_loc, x);
2365 else if (TREE_CONSTANT (x))
2367 const char *name = NULL;
2368 if (DECL_NAME (var))
2369 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2371 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2372 name);
2373 gimple_add_tmp_var (x);
2374 TREE_ADDRESSABLE (x) = 1;
2375 x = build_fold_addr_expr_loc (clause_loc, x);
2377 else
2379 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2380 x = build_call_expr_loc (clause_loc, atmp, 1, x);
2383 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2384 gimplify_assign (new_var, x, ilist);
2386 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2388 else if (c_kind == OMP_CLAUSE_REDUCTION
2389 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2391 if (pass == 0)
2392 continue;
2394 else if (pass != 0)
2395 continue;
2397 switch (OMP_CLAUSE_CODE (c))
2399 case OMP_CLAUSE_SHARED:
2400 /* Shared global vars are just accessed directly. */
2401 if (is_global_var (new_var))
2402 break;
2403 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2404 needs to be delayed until after fixup_child_record_type so
2405 that we get the correct type during the dereference. */
2406 by_ref = use_pointer_for_field (var, ctx);
2407 x = build_receiver_ref (var, by_ref, ctx);
2408 SET_DECL_VALUE_EXPR (new_var, x);
2409 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2411 /* ??? If VAR is not passed by reference, and the variable
2412 hasn't been initialized yet, then we'll get a warning for
2413 the store into the omp_data_s structure. Ideally, we'd be
2414 able to notice this and not store anything at all, but
2415 we're generating code too early. Suppress the warning. */
2416 if (!by_ref)
2417 TREE_NO_WARNING (var) = 1;
2418 break;
2420 case OMP_CLAUSE_LASTPRIVATE:
2421 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2422 break;
2423 /* FALLTHRU */
2425 case OMP_CLAUSE_PRIVATE:
2426 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2427 x = build_outer_var_ref (var, ctx);
2428 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2430 if (is_task_ctx (ctx))
2431 x = build_receiver_ref (var, false, ctx);
2432 else
2433 x = build_outer_var_ref (var, ctx);
2435 else
2436 x = NULL;
2437 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2438 if (x)
2439 gimplify_and_add (x, ilist);
2440 /* FALLTHRU */
2442 do_dtor:
2443 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2444 if (x)
2446 gimple_seq tseq = NULL;
2448 dtor = x;
2449 gimplify_stmt (&dtor, &tseq);
2450 gimple_seq_add_seq (dlist, tseq);
2452 break;
2454 case OMP_CLAUSE_FIRSTPRIVATE:
2455 if (is_task_ctx (ctx))
2457 if (is_reference (var) || is_variable_sized (var))
2458 goto do_dtor;
2459 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2460 ctx))
2461 || use_pointer_for_field (var, NULL))
2463 x = build_receiver_ref (var, false, ctx);
2464 SET_DECL_VALUE_EXPR (new_var, x);
2465 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2466 goto do_dtor;
2469 x = build_outer_var_ref (var, ctx);
2470 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2471 gimplify_and_add (x, ilist);
2472 goto do_dtor;
2473 break;
2475 case OMP_CLAUSE_COPYIN:
2476 by_ref = use_pointer_for_field (var, NULL);
2477 x = build_receiver_ref (var, by_ref, ctx);
2478 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2479 append_to_statement_list (x, &copyin_seq);
2480 copyin_by_ref |= by_ref;
2481 break;
2483 case OMP_CLAUSE_REDUCTION:
2484 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2486 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2487 x = build_outer_var_ref (var, ctx);
2489 if (is_reference (var))
2490 x = build_fold_addr_expr_loc (clause_loc, x);
2491 SET_DECL_VALUE_EXPR (placeholder, x);
2492 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2493 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2494 gimple_seq_add_seq (ilist,
2495 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2496 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2497 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2499 else
2501 x = omp_reduction_init (c, TREE_TYPE (new_var));
2502 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2503 gimplify_assign (new_var, x, ilist);
2505 break;
2507 default:
2508 gcc_unreachable ();
2513 /* The copyin sequence is not to be executed by the main thread, since
2514 that would result in self-copies. Perhaps not visible to scalars,
2515 but it certainly is to C++ operator=. */
2516 if (copyin_seq)
2518 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
2520 x = build2 (NE_EXPR, boolean_type_node, x,
2521 build_int_cst (TREE_TYPE (x), 0));
2522 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2523 gimplify_and_add (x, ilist);
2526 /* If any copyin variable is passed by reference, we must ensure the
2527 master thread doesn't modify it before it is copied over in all
2528 threads. Similarly for variables in both firstprivate and
2529 lastprivate clauses we need to ensure the lastprivate copying
2530 happens after firstprivate copying in all threads. */
2531 if (copyin_by_ref || lastprivate_firstprivate)
2532 gimplify_and_add (build_omp_barrier (), ilist);
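/* A sketch of when the trailing barrier above is needed: given

       int t;
       #pragma omp threadprivate (t)
       ...
       #pragma omp parallel copyin(t)

   every non-master thread assigns its threadprivate T from the master's
   copy in the receiver-side code built here.  If T is communicated by
   reference, the master must not modify it until all threads have
   copied it, hence the barrier.  The same holds when a variable is both
   firstprivate and lastprivate: the lastprivate store may not happen
   before every thread has read the firstprivate value.  */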
2536 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2537 both parallel and workshare constructs. PREDICATE may be NULL if it's
2538 always true. */
2540 static void
2541 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2542 omp_context *ctx)
2544 tree x, c, label = NULL;
2545 bool par_clauses = false;
2547 /* Early exit if there are no lastprivate clauses. */
2548 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2549 if (clauses == NULL)
2551 /* If this was a workshare clause, see if it had been combined
2552 with its parallel. In that case, look for the clauses on the
2553 parallel statement itself. */
2554 if (is_parallel_ctx (ctx))
2555 return;
2557 ctx = ctx->outer;
2558 if (ctx == NULL || !is_parallel_ctx (ctx))
2559 return;
2561 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2562 OMP_CLAUSE_LASTPRIVATE);
2563 if (clauses == NULL)
2564 return;
2565 par_clauses = true;
2568 if (predicate)
2570 gimple stmt;
2571 tree label_true, arm1, arm2;
2573 label = create_artificial_label (UNKNOWN_LOCATION);
2574 label_true = create_artificial_label (UNKNOWN_LOCATION);
2575 arm1 = TREE_OPERAND (predicate, 0);
2576 arm2 = TREE_OPERAND (predicate, 1);
2577 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2578 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2579 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2580 label_true, label);
2581 gimple_seq_add_stmt (stmt_list, stmt);
2582 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2585 for (c = clauses; c ;)
2587 tree var, new_var;
2588 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2590 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2592 var = OMP_CLAUSE_DECL (c);
2593 new_var = lookup_decl (var, ctx);
2595 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2597 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2598 gimple_seq_add_seq (stmt_list,
2599 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2601 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2603 x = build_outer_var_ref (var, ctx);
2604 if (is_reference (var))
2605 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2606 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2607 gimplify_and_add (x, stmt_list);
2609 c = OMP_CLAUSE_CHAIN (c);
2610 if (c == NULL && !par_clauses)
2612 /* If this was a workshare clause, see if it had been combined
2613 with its parallel. In that case, continue looking for the
2614 clauses also on the parallel statement itself. */
2615 if (is_parallel_ctx (ctx))
2616 break;
2618 ctx = ctx->outer;
2619 if (ctx == NULL || !is_parallel_ctx (ctx))
2620 break;
2622 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2623 OMP_CLAUSE_LASTPRIVATE);
2624 par_clauses = true;
2628 if (label)
2629 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
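/* Roughly, for

       #pragma omp for lastprivate(x)
       for (i = 0; i < N; i++)
         x = f (i);

   (N and f () being placeholders) the sequence built above amounts to

       if (<this thread ran the sequentially last iteration>)   <- PREDICATE
         original x = private x;

   so only one thread writes its private copy back; the exact form of
   PREDICATE is supplied by the caller from the loop or sections
   expansion.  */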
2633 /* Generate code to implement the REDUCTION clauses. */
2635 static void
2636 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2638 gimple_seq sub_seq = NULL;
2639 gimple stmt;
2640 tree x, c;
2641 int count = 0;
2643 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2644 update in that case, otherwise use a lock. */
2645 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2646 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2648 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2650 /* Never use OMP_ATOMIC for array reductions. */
2651 count = -1;
2652 break;
2654 count++;
2657 if (count == 0)
2658 return;
2660 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2662 tree var, ref, new_var;
2663 enum tree_code code;
2664 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2666 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2667 continue;
2669 var = OMP_CLAUSE_DECL (c);
2670 new_var = lookup_decl (var, ctx);
2671 if (is_reference (var))
2672 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2673 ref = build_outer_var_ref (var, ctx);
2674 code = OMP_CLAUSE_REDUCTION_CODE (c);
2676 /* reduction(-:var) sums up the partial results, so it acts
2677 identically to reduction(+:var). */
2678 if (code == MINUS_EXPR)
2679 code = PLUS_EXPR;
2681 if (count == 1)
2683 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2685 addr = save_expr (addr);
2686 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2687 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2688 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2689 gimplify_and_add (x, stmt_seqp);
2690 return;
2693 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2695 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2697 if (is_reference (var))
2698 ref = build_fold_addr_expr_loc (clause_loc, ref);
2699 SET_DECL_VALUE_EXPR (placeholder, ref);
2700 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2701 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2702 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2703 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2704 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2706 else
2708 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2709 ref = build_outer_var_ref (var, ctx);
2710 gimplify_assign (ref, x, &sub_seq);
2714 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
2716 gimple_seq_add_stmt (stmt_seqp, stmt);
2718 gimple_seq_add_seq (stmt_seqp, sub_seq);
2720 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
2722 gimple_seq_add_stmt (stmt_seqp, stmt);
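/* Sketch of the two merge strategies chosen above.  For a single scalar
   clause such as reduction(+:s), the combine is one atomic update,
   roughly

       #pragma omp atomic
       s = s + s_priv;

   where S_PRIV stands for the thread's private copy.  With more than
   one reduction clause, or with a placeholder (e.g. array) reduction,
   the whole block of combine statements is instead wrapped in
   GOMP_atomic_start ()/GOMP_atomic_end (), serializing the merges.  */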
2726 /* Generate code to implement the COPYPRIVATE clauses. */
2728 static void
2729 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2730 omp_context *ctx)
2732 tree c;
2734 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2736 tree var, new_var, ref, x;
2737 bool by_ref;
2738 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2740 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2741 continue;
2743 var = OMP_CLAUSE_DECL (c);
2744 by_ref = use_pointer_for_field (var, NULL);
2746 ref = build_sender_ref (var, ctx);
2747 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2748 if (by_ref)
2750 x = build_fold_addr_expr_loc (clause_loc, new_var);
2751 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2753 gimplify_assign (ref, x, slist);
2755 ref = build_receiver_ref (var, false, ctx);
2756 if (by_ref)
2758 ref = fold_convert_loc (clause_loc,
2759 build_pointer_type (TREE_TYPE (new_var)),
2760 ref);
2761 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2763 if (is_reference (var))
2765 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2766 ref = build_simple_mem_ref_loc (clause_loc, ref);
2767 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2769 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2770 gimplify_and_add (x, rlist);
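/* Illustration: for

       #pragma omp single copyprivate(x)
         x = compute ();

   (compute () being a placeholder) the sender sequence SLIST stores the
   executing thread's X -- or its address, when passed by reference --
   into the broadcast record, and the receiver sequence RLIST run by the
   other threads assigns from it, so every thread leaves the single
   region with the same value of X.  */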
2775 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2776 and REDUCTION from the sender (aka parent) side. */
2778 static void
2779 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2780 omp_context *ctx)
2782 tree c;
2784 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2786 tree val, ref, x, var;
2787 bool by_ref, do_in = false, do_out = false;
2788 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2790 switch (OMP_CLAUSE_CODE (c))
2792 case OMP_CLAUSE_PRIVATE:
2793 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2794 break;
2795 continue;
2796 case OMP_CLAUSE_FIRSTPRIVATE:
2797 case OMP_CLAUSE_COPYIN:
2798 case OMP_CLAUSE_LASTPRIVATE:
2799 case OMP_CLAUSE_REDUCTION:
2800 break;
2801 default:
2802 continue;
2805 val = OMP_CLAUSE_DECL (c);
2806 var = lookup_decl_in_outer_ctx (val, ctx);
2808 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2809 && is_global_var (var))
2810 continue;
2811 if (is_variable_sized (val))
2812 continue;
2813 by_ref = use_pointer_for_field (val, NULL);
2815 switch (OMP_CLAUSE_CODE (c))
2817 case OMP_CLAUSE_PRIVATE:
2818 case OMP_CLAUSE_FIRSTPRIVATE:
2819 case OMP_CLAUSE_COPYIN:
2820 do_in = true;
2821 break;
2823 case OMP_CLAUSE_LASTPRIVATE:
2824 if (by_ref || is_reference (val))
2826 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2827 continue;
2828 do_in = true;
2830 else
2832 do_out = true;
2833 if (lang_hooks.decls.omp_private_outer_ref (val))
2834 do_in = true;
2836 break;
2838 case OMP_CLAUSE_REDUCTION:
2839 do_in = true;
2840 do_out = !(by_ref || is_reference (val));
2841 break;
2843 default:
2844 gcc_unreachable ();
2847 if (do_in)
2849 ref = build_sender_ref (val, ctx);
2850 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2851 gimplify_assign (ref, x, ilist);
2852 if (is_task_ctx (ctx))
2853 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2856 if (do_out)
2858 ref = build_sender_ref (val, ctx);
2859 gimplify_assign (var, ref, olist);
2864 /* Generate code to implement SHARED from the sender (aka parent)
2865 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2866 list things that got automatically shared. */
2868 static void
2869 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2871 tree var, ovar, nvar, f, x, record_type;
2873 if (ctx->record_type == NULL)
2874 return;
2876 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2877 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
2879 ovar = DECL_ABSTRACT_ORIGIN (f);
2880 nvar = maybe_lookup_decl (ovar, ctx);
2881 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2882 continue;
2884 /* If CTX is a nested parallel directive, find the immediately
2885 enclosing parallel or workshare construct that contains a
2886 mapping for OVAR. */
2887 var = lookup_decl_in_outer_ctx (ovar, ctx);
2889 if (use_pointer_for_field (ovar, ctx))
2891 x = build_sender_ref (ovar, ctx);
2892 var = build_fold_addr_expr (var);
2893 gimplify_assign (x, var, ilist);
2895 else
2897 x = build_sender_ref (ovar, ctx);
2898 gimplify_assign (x, var, ilist);
2900 if (!TREE_READONLY (var)
2901 /* We don't need to receive a new reference to a result
2902 or parm decl. In fact we may not store to it as we will
2903 invalidate any pending RSO and generate wrong gimple
2904 during inlining. */
2905 && !((TREE_CODE (var) == RESULT_DECL
2906 || TREE_CODE (var) == PARM_DECL)
2907 && DECL_BY_REFERENCE (var)))
2909 x = build_sender_ref (ovar, ctx);
2910 gimplify_assign (var, x, olist);
2917 /* A convenience function to build an empty GIMPLE_COND with just the
2918 condition. */
2920 static gimple
2921 gimple_build_cond_empty (tree cond)
2923 enum tree_code pred_code;
2924 tree lhs, rhs;
2926 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2927 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2931 /* Build the function calls to GOMP_parallel_start etc to actually
2932 generate the parallel operation. REGION is the parallel region
2933 being expanded. BB is the block where to insert the code. WS_ARGS
2934 will be set if this is a call to a combined parallel+workshare
2935 construct, it contains the list of additional arguments needed by
2936 the workshare construct. */
2938 static void
2939 expand_parallel_call (struct omp_region *region, basic_block bb,
2940 gimple entry_stmt, VEC(tree,gc) *ws_args)
2942 tree t, t1, t2, val, cond, c, clauses;
2943 gimple_stmt_iterator gsi;
2944 gimple stmt;
2945 enum built_in_function start_ix;
2946 int start_ix2;
2947 location_t clause_loc;
2948 VEC(tree,gc) *args;
2950 clauses = gimple_omp_parallel_clauses (entry_stmt);
2952 /* Determine what flavor of GOMP_parallel_start we will be
2953 emitting. */
2954 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2955 if (is_combined_parallel (region))
2957 switch (region->inner->type)
2959 case GIMPLE_OMP_FOR:
2960 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2961 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2962 + (region->inner->sched_kind
2963 == OMP_CLAUSE_SCHEDULE_RUNTIME
2964 ? 3 : region->inner->sched_kind));
2965 start_ix = (enum built_in_function)start_ix2;
2966 break;
2967 case GIMPLE_OMP_SECTIONS:
2968 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2969 break;
2970 default:
2971 gcc_unreachable ();
2975 /* By default, the value of NUM_THREADS is zero (selected at run time)
2976 and there is no conditional. */
2977 cond = NULL_TREE;
2978 val = build_int_cst (unsigned_type_node, 0);
2980 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2981 if (c)
2982 cond = OMP_CLAUSE_IF_EXPR (c);
2984 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2985 if (c)
2987 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2988 clause_loc = OMP_CLAUSE_LOCATION (c);
2990 else
2991 clause_loc = gimple_location (entry_stmt);
2993 /* Ensure 'val' is of the correct type. */
2994 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
2996 /* If we found the clause 'if (cond)', build either
2997 (cond != 0) or (cond ? val : 1u). */
2998 if (cond)
3000 gimple_stmt_iterator gsi;
3002 cond = gimple_boolify (cond);
3004 if (integer_zerop (val))
3005 val = fold_build2_loc (clause_loc,
3006 EQ_EXPR, unsigned_type_node, cond,
3007 build_int_cst (TREE_TYPE (cond), 0));
3008 else
3010 basic_block cond_bb, then_bb, else_bb;
3011 edge e, e_then, e_else;
3012 tree tmp_then, tmp_else, tmp_join, tmp_var;
3014 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
3015 if (gimple_in_ssa_p (cfun))
3017 tmp_then = make_ssa_name (tmp_var, NULL);
3018 tmp_else = make_ssa_name (tmp_var, NULL);
3019 tmp_join = make_ssa_name (tmp_var, NULL);
3021 else
3023 tmp_then = tmp_var;
3024 tmp_else = tmp_var;
3025 tmp_join = tmp_var;
3028 e = split_block (bb, NULL);
3029 cond_bb = e->src;
3030 bb = e->dest;
3031 remove_edge (e);
3033 then_bb = create_empty_bb (cond_bb);
3034 else_bb = create_empty_bb (then_bb);
3035 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3036 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3038 stmt = gimple_build_cond_empty (cond);
3039 gsi = gsi_start_bb (cond_bb);
3040 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3042 gsi = gsi_start_bb (then_bb);
3043 stmt = gimple_build_assign (tmp_then, val);
3044 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3046 gsi = gsi_start_bb (else_bb);
3047 stmt = gimple_build_assign
3048 (tmp_else, build_int_cst (unsigned_type_node, 1));
3049 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3051 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3052 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3053 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3054 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3056 if (gimple_in_ssa_p (cfun))
3058 gimple phi = create_phi_node (tmp_join, bb);
3059 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3060 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3063 val = tmp_join;
3066 gsi = gsi_start_bb (bb);
3067 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3068 false, GSI_CONTINUE_LINKING);
3071 gsi = gsi_last_bb (bb);
3072 t = gimple_omp_parallel_data_arg (entry_stmt);
3073 if (t == NULL)
3074 t1 = null_pointer_node;
3075 else
3076 t1 = build_fold_addr_expr (t);
3077 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3079 args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
3080 VEC_quick_push (tree, args, t2);
3081 VEC_quick_push (tree, args, t1);
3082 VEC_quick_push (tree, args, val);
3083 VEC_splice (tree, args, ws_args);
3085 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3086 builtin_decl_explicit (start_ix), args);
3088 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3089 false, GSI_CONTINUE_LINKING);
3091 t = gimple_omp_parallel_data_arg (entry_stmt);
3092 if (t == NULL)
3093 t = null_pointer_node;
3094 else
3095 t = build_fold_addr_expr (t);
3096 t = build_call_expr_loc (gimple_location (entry_stmt),
3097 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3098 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3099 false, GSI_CONTINUE_LINKING);
3101 t = build_call_expr_loc (gimple_location (entry_stmt),
3102 builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
3104 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3105 false, GSI_CONTINUE_LINKING);
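/* The emitted sequence for a plain #pragma omp parallel therefore looks
   roughly like

       GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
       child_fn (&.omp_data_o);
       GOMP_parallel_end ();

   where NUM_THREADS is 0 (runtime choice), the num_threads clause value,
   or the conditional value computed above for an if clause.  For a
   combined parallel loop or parallel sections, the corresponding
   GOMP_parallel_loop_*_start or GOMP_parallel_sections_start entry point
   is called instead, with the extra workshare arguments from WS_ARGS
   appended.  */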
3109 /* Build the function call to GOMP_task to actually
3110 generate the task operation. BB is the block where to insert the code. */
3112 static void
3113 expand_task_call (basic_block bb, gimple entry_stmt)
3115 tree t, t1, t2, t3, flags, cond, c, c2, clauses;
3116 gimple_stmt_iterator gsi;
3117 location_t loc = gimple_location (entry_stmt);
3119 clauses = gimple_omp_task_clauses (entry_stmt);
3121 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3122 if (c)
3123 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3124 else
3125 cond = boolean_true_node;
3127 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3128 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
3129 flags = build_int_cst (unsigned_type_node,
3130 (c ? 1 : 0) + (c2 ? 4 : 0));
3132 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
3133 if (c)
3135 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
3136 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
3137 build_int_cst (unsigned_type_node, 2),
3138 build_int_cst (unsigned_type_node, 0));
3139 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
3142 gsi = gsi_last_bb (bb);
3143 t = gimple_omp_task_data_arg (entry_stmt);
3144 if (t == NULL)
3145 t2 = null_pointer_node;
3146 else
3147 t2 = build_fold_addr_expr_loc (loc, t);
3148 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3149 t = gimple_omp_task_copy_fn (entry_stmt);
3150 if (t == NULL)
3151 t3 = null_pointer_node;
3152 else
3153 t3 = build_fold_addr_expr_loc (loc, t);
3155 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
3156 7, t1, t2, t3,
3157 gimple_omp_task_arg_size (entry_stmt),
3158 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3160 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3161 false, GSI_CONTINUE_LINKING);
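/* The FLAGS argument built above packs the clause bits: 1 for untied,
   4 for mergeable, and 2 added at run time when the final clause
   expression is true.  For example (D a placeholder),

       #pragma omp task final(d > 20) mergeable

   passes flags = 4 + (d > 20 ? 2 : 0) to GOMP_task, alongside the child
   function, the argument block and its copy function, the block's size
   and alignment, and the if clause condition.  */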
3165 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3166 catch handler and return it. This prevents programs from violating the
3167 structured block semantics with throws. */
3169 static gimple_seq
3170 maybe_catch_exception (gimple_seq body)
3172 gimple g;
3173 tree decl;
3175 if (!flag_exceptions)
3176 return body;
3178 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3179 decl = lang_hooks.eh_protect_cleanup_actions ();
3180 else
3181 decl = builtin_decl_explicit (BUILT_IN_TRAP);
3183 g = gimple_build_eh_must_not_throw (decl);
3184 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3185 GIMPLE_TRY_CATCH);
3187 return gimple_seq_alloc_with_stmt (g);
3190 /* Chain all the DECLs in the vector V by their DECL_CHAIN fields. */
3192 static tree
3193 vec2chain (VEC(tree,gc) *v)
3195 tree chain = NULL_TREE, t;
3196 unsigned ix;
3198 FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
3200 DECL_CHAIN (t) = chain;
3201 chain = t;
3204 return chain;
3208 /* Remove barriers in REGION->EXIT's block. Note that this is only
3209 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3210 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3211 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
3212 removed. */
3214 static void
3215 remove_exit_barrier (struct omp_region *region)
3217 gimple_stmt_iterator gsi;
3218 basic_block exit_bb;
3219 edge_iterator ei;
3220 edge e;
3221 gimple stmt;
3222 int any_addressable_vars = -1;
3224 exit_bb = region->exit;
3226 /* If the parallel region doesn't return, we don't have REGION->EXIT
3227 block at all. */
3228 if (! exit_bb)
3229 return;
3231 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3232 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3233 statements that can appear in between are extremely limited -- no
3234 memory operations at all. Here, we allow nothing at all, so the
3235 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3236 gsi = gsi_last_bb (exit_bb);
3237 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3238 gsi_prev (&gsi);
3239 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3240 return;
3242 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3244 gsi = gsi_last_bb (e->src);
3245 if (gsi_end_p (gsi))
3246 continue;
3247 stmt = gsi_stmt (gsi);
3248 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3249 && !gimple_omp_return_nowait_p (stmt))
3251 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3252 in many cases. If there could be tasks queued, the barrier
3253 might be needed to let the tasks run before some local
3254 variable of the parallel that the task uses as shared
3255 runs out of scope. The task can be spawned either
3256 from within current function (this would be easy to check)
3257 or from some function it calls and gets passed an address
3258 of such a variable. */
3259 if (any_addressable_vars < 0)
3261 gimple parallel_stmt = last_stmt (region->entry);
3262 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3263 tree local_decls, block, decl;
3264 unsigned ix;
3266 any_addressable_vars = 0;
3267 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3268 if (TREE_ADDRESSABLE (decl))
3270 any_addressable_vars = 1;
3271 break;
3273 for (block = gimple_block (stmt);
3274 !any_addressable_vars
3275 && block
3276 && TREE_CODE (block) == BLOCK;
3277 block = BLOCK_SUPERCONTEXT (block))
3279 for (local_decls = BLOCK_VARS (block);
3280 local_decls;
3281 local_decls = DECL_CHAIN (local_decls))
3282 if (TREE_ADDRESSABLE (local_decls))
3284 any_addressable_vars = 1;
3285 break;
3287 if (block == gimple_block (parallel_stmt))
3288 break;
3291 if (!any_addressable_vars)
3292 gimple_omp_return_set_nowait (stmt);
3297 static void
3298 remove_exit_barriers (struct omp_region *region)
3300 if (region->type == GIMPLE_OMP_PARALLEL)
3301 remove_exit_barrier (region);
3303 if (region->inner)
3305 region = region->inner;
3306 remove_exit_barriers (region);
3307 while (region->next)
3309 region = region->next;
3310 remove_exit_barriers (region);
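/* Example of the elimination performed above: in

       #pragma omp parallel
       {
         #pragma omp for
         for (i = 0; i < N; i++)
           a[i] = i;
       }

   (A and N placeholders) the implicit barrier at the end of the for
   region is immediately followed by the parallel region's own join
   barrier, so the workshare's GIMPLE_OMP_RETURN can be marked nowait --
   unless queued tasks might still reference addressable locals of the
   parallel, in which case the barrier is kept.  */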
3315 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3316 calls. These can't be declared as const functions, but
3317 within one parallel body they are constant, so they can be
3318 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3319 which are declared const. Similarly for task body, except
3320 that in untied task omp_get_thread_num () can change at any task
3321 scheduling point. */
3323 static void
3324 optimize_omp_library_calls (gimple entry_stmt)
3326 basic_block bb;
3327 gimple_stmt_iterator gsi;
3328 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3329 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
3330 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3331 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
3332 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3333 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3334 OMP_CLAUSE_UNTIED) != NULL);
3336 FOR_EACH_BB (bb)
3337 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3339 gimple call = gsi_stmt (gsi);
3340 tree decl;
3342 if (is_gimple_call (call)
3343 && (decl = gimple_call_fndecl (call))
3344 && DECL_EXTERNAL (decl)
3345 && TREE_PUBLIC (decl)
3346 && DECL_INITIAL (decl) == NULL)
3348 tree built_in;
3350 if (DECL_NAME (decl) == thr_num_id)
3352 /* In #pragma omp task untied, omp_get_thread_num () can change
3353 during the execution of the task region. */
3354 if (untied_task)
3355 continue;
3356 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3358 else if (DECL_NAME (decl) == num_thr_id)
3359 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3360 else
3361 continue;
3363 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3364 || gimple_call_num_args (call) != 0)
3365 continue;
3367 if (flag_exceptions && !TREE_NOTHROW (decl))
3368 continue;
3370 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3371 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3372 TREE_TYPE (TREE_TYPE (built_in))))
3373 continue;
3375 gimple_call_set_fndecl (call, built_in);
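/* Effect of the replacement above: within one parallel body,

       tid1 = omp_get_thread_num ();
       ...
       tid2 = omp_get_thread_num ();

   both calls are redirected to __builtin_omp_get_thread_num (), which is
   declared const, so later passes may CSE them into a single call.  The
   same is done for omp_get_num_threads (), but not inside untied tasks,
   where the executing thread can change at task scheduling points.  */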
3380 /* Expand the OpenMP parallel or task directive starting at REGION. */
3382 static void
3383 expand_omp_taskreg (struct omp_region *region)
3385 basic_block entry_bb, exit_bb, new_bb;
3386 struct function *child_cfun;
3387 tree child_fn, block, t;
3388 gimple_stmt_iterator gsi;
3389 gimple entry_stmt, stmt;
3390 edge e;
3391 VEC(tree,gc) *ws_args;
3393 entry_stmt = last_stmt (region->entry);
3394 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3395 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3397 entry_bb = region->entry;
3398 exit_bb = region->exit;
3400 if (is_combined_parallel (region))
3401 ws_args = region->ws_args;
3402 else
3403 ws_args = NULL;
3405 if (child_cfun->cfg)
3407 /* Due to inlining, it may happen that we have already outlined
3408 the region, in which case all we need to do is make the
3409 sub-graph unreachable and emit the parallel call. */
3410 edge entry_succ_e, exit_succ_e;
3411 gimple_stmt_iterator gsi;
3413 entry_succ_e = single_succ_edge (entry_bb);
3415 gsi = gsi_last_bb (entry_bb);
3416 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3417 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3418 gsi_remove (&gsi, true);
3420 new_bb = entry_bb;
3421 if (exit_bb)
3423 exit_succ_e = single_succ_edge (exit_bb);
3424 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3426 remove_edge_and_dominated_blocks (entry_succ_e);
3428 else
3430 unsigned srcidx, dstidx, num;
3432 /* If the parallel region needs data sent from the parent
3433 function, then the very first statement (except possible
3434 tree profile counter updates) of the parallel body
3435 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3436 &.OMP_DATA_O is passed as an argument to the child function,
3437 we need to replace it with the argument as seen by the child
3438 function.
3440 In most cases, this will end up being the identity assignment
3441 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3442 a function call that has been inlined, the original PARM_DECL
3443 .OMP_DATA_I may have been converted into a different local
3444 variable, in which case we need to keep the assignment. */
3445 if (gimple_omp_taskreg_data_arg (entry_stmt))
3447 basic_block entry_succ_bb = single_succ (entry_bb);
3448 gimple_stmt_iterator gsi;
3449 tree arg, narg;
3450 gimple parcopy_stmt = NULL;
3452 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3454 gimple stmt;
3456 gcc_assert (!gsi_end_p (gsi));
3457 stmt = gsi_stmt (gsi);
3458 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3459 continue;
3461 if (gimple_num_ops (stmt) == 2)
3463 tree arg = gimple_assign_rhs1 (stmt);
3465 /* We're ignoring the subcode because we're
3466 effectively doing a STRIP_NOPS. */
3468 if (TREE_CODE (arg) == ADDR_EXPR
3469 && TREE_OPERAND (arg, 0)
3470 == gimple_omp_taskreg_data_arg (entry_stmt))
3472 parcopy_stmt = stmt;
3473 break;
3478 gcc_assert (parcopy_stmt != NULL);
3479 arg = DECL_ARGUMENTS (child_fn);
3481 if (!gimple_in_ssa_p (cfun))
3483 if (gimple_assign_lhs (parcopy_stmt) == arg)
3484 gsi_remove (&gsi, true);
3485 else
3487 /* ?? Is setting the subcode really necessary ?? */
3488 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3489 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3492 else
3494 /* If we are in ssa form, we must load the value from the default
3495 definition of the argument. That should not be defined now,
3496 since the argument is not used uninitialized. */
3497 gcc_assert (ssa_default_def (cfun, arg) == NULL);
3498 narg = make_ssa_name (arg, gimple_build_nop ());
3499 set_ssa_default_def (cfun, arg, narg);
3500 /* ?? Is setting the subcode really necessary ?? */
3501 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3502 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3503 update_stmt (parcopy_stmt);
3507 /* Declare local variables needed in CHILD_CFUN. */
3508 block = DECL_INITIAL (child_fn);
3509 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3510 /* The gimplifier could record temporaries in parallel/task block
3511 rather than in containing function's local_decls chain,
3512 which would mean cgraph missed finalizing them. Do it now. */
3513 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3514 if (TREE_CODE (t) == VAR_DECL
3515 && TREE_STATIC (t)
3516 && !DECL_EXTERNAL (t))
3517 varpool_finalize_decl (t);
3518 DECL_SAVED_TREE (child_fn) = NULL;
3519 /* We'll create a CFG for child_fn, so no gimple body is needed. */
3520 gimple_set_body (child_fn, NULL);
3521 TREE_USED (block) = 1;
3523 /* Reset DECL_CONTEXT on function arguments. */
3524 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3525 DECL_CONTEXT (t) = child_fn;
3527 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3528 so that it can be moved to the child function. */
3529 gsi = gsi_last_bb (entry_bb);
3530 stmt = gsi_stmt (gsi);
3531 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3532 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3533 gsi_remove (&gsi, true);
3534 e = split_block (entry_bb, stmt);
3535 entry_bb = e->dest;
3536 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3538 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3539 if (exit_bb)
3541 gsi = gsi_last_bb (exit_bb);
3542 gcc_assert (!gsi_end_p (gsi)
3543 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3544 stmt = gimple_build_return (NULL);
3545 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3546 gsi_remove (&gsi, true);
3549 /* Move the parallel region into CHILD_CFUN. */
3551 if (gimple_in_ssa_p (cfun))
3553 init_tree_ssa (child_cfun);
3554 init_ssa_operands (child_cfun);
3555 child_cfun->gimple_df->in_ssa_p = true;
3556 block = NULL_TREE;
3558 else
3559 block = gimple_block (entry_stmt);
3561 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3562 if (exit_bb)
3563 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3565 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3566 num = VEC_length (tree, child_cfun->local_decls);
3567 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3569 t = VEC_index (tree, child_cfun->local_decls, srcidx);
3570 if (DECL_CONTEXT (t) == cfun->decl)
3571 continue;
3572 if (srcidx != dstidx)
3573 VEC_replace (tree, child_cfun->local_decls, dstidx, t);
3574 dstidx++;
3576 if (dstidx != num)
3577 VEC_truncate (tree, child_cfun->local_decls, dstidx);
3579 /* Inform the callgraph about the new function. */
3580 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3581 = cfun->curr_properties & ~PROP_loops;
3582 cgraph_add_new_function (child_fn, true);
3584 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3585 fixed in a following pass. */
3586 push_cfun (child_cfun);
3587 if (optimize)
3588 optimize_omp_library_calls (entry_stmt);
3589 rebuild_cgraph_edges ();
3591 /* Some EH regions might become dead, see PR34608. If
3592 pass_cleanup_cfg isn't the first pass to happen with the
3593 new child, these dead EH edges might cause problems.
3594 Clean them up now. */
3595 if (flag_exceptions)
3597 basic_block bb;
3598 bool changed = false;
3600 FOR_EACH_BB (bb)
3601 changed |= gimple_purge_dead_eh_edges (bb);
3602 if (changed)
3603 cleanup_tree_cfg ();
3605 if (gimple_in_ssa_p (cfun))
3606 update_ssa (TODO_update_ssa);
3607 pop_cfun ();
3610 /* Emit a library call to launch the children threads. */
3611 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3612 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3613 else
3614 expand_task_call (new_bb, entry_stmt);
3615 if (gimple_in_ssa_p (cfun))
3616 update_ssa (TODO_update_ssa_only_virtuals);
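/* After this expansion a directive such as

       #pragma omp parallel shared(x)
         body;

   no longer exists in its original function: the body has been moved
   into a new child function (named roughly foo._omp_fn.N) that takes a
   single .omp_data_i pointer argument and is registered with the
   callgraph, while the original function keeps only the
   GOMP_parallel_start/child-call/GOMP_parallel_end (or GOMP_task)
   sequence emitted above.  */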
3620 /* A subroutine of expand_omp_for. Generate code for a parallel
3621 loop with any schedule. Given parameters:
3623 for (V = N1; V cond N2; V += STEP) BODY;
3625 where COND is "<" or ">", we generate pseudocode
3627 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3628 if (more) goto L0; else goto L3;
3630 V = istart0;
3631 iend = iend0;
3633 BODY;
3634 V += STEP;
3635 if (V cond iend) goto L1; else goto L2;
3637 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3640 If this is a combined omp parallel loop, instead of the call to
3641 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3643 For collapsed loops, given parameters:
3644 collapse(3)
3645 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3646 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3647 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3648 BODY;
3650 we generate pseudocode
3652 if (cond3 is <)
3653 adj = STEP3 - 1;
3654 else
3655 adj = STEP3 + 1;
3656 count3 = (adj + N32 - N31) / STEP3;
3657 if (cond2 is <)
3658 adj = STEP2 - 1;
3659 else
3660 adj = STEP2 + 1;
3661 count2 = (adj + N22 - N21) / STEP2;
3662 if (cond1 is <)
3663 adj = STEP1 - 1;
3664 else
3665 adj = STEP1 + 1;
3666 count1 = (adj + N12 - N11) / STEP1;
3667 count = count1 * count2 * count3;
3668 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3669 if (more) goto L0; else goto L3;
3671 V = istart0;
3672 T = V;
3673 V3 = N31 + (T % count3) * STEP3;
3674 T = T / count3;
3675 V2 = N21 + (T % count2) * STEP2;
3676 T = T / count2;
3677 V1 = N11 + T * STEP1;
3678 iend = iend0;
3680 BODY;
3681 V += 1;
3682 if (V < iend) goto L10; else goto L2;
3683 L10:
3684 V3 += STEP3;
3685 if (V3 cond3 N32) goto L1; else goto L11;
3686 L11:
3687 V3 = N31;
3688 V2 += STEP2;
3689 if (V2 cond2 N22) goto L1; else goto L12;
3690 L12:
3691 V2 = N21;
3692 V1 += STEP1;
3693 goto L1;
3695 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
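    As a concrete instance of the decoding above, for collapse(2) with

    for (V1 = 0; V1 < 4; V1++)
    for (V2 = 0; V2 < 3; V2++)

    we get count2 = 3, count1 = 4 and count = 12.  A logical iteration
    T = 7 handed out by the runtime is decoded as V2 = 0 + (7 % 3) * 1 = 1,
    then T = 7 / 3 = 2, then V1 = 0 + 2 * 1 = 2, i.e. the eighth iteration
    of the collapsed space is (V1, V2) = (2, 1).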
3700 static void
3701 expand_omp_for_generic (struct omp_region *region,
3702 struct omp_for_data *fd,
3703 enum built_in_function start_fn,
3704 enum built_in_function next_fn)
3706 tree type, istart0, iend0, iend;
3707 tree t, vmain, vback, bias = NULL_TREE;
3708 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3709 basic_block l2_bb = NULL, l3_bb = NULL;
3710 gimple_stmt_iterator gsi;
3711 gimple stmt;
3712 bool in_combined_parallel = is_combined_parallel (region);
3713 bool broken_loop = region->cont == NULL;
3714 edge e, ne;
3715 tree *counts = NULL;
3716 int i;
3718 gcc_assert (!broken_loop || !in_combined_parallel);
3719 gcc_assert (fd->iter_type == long_integer_type_node
3720 || !in_combined_parallel);
3722 type = TREE_TYPE (fd->loop.v);
3723 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3724 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3725 TREE_ADDRESSABLE (istart0) = 1;
3726 TREE_ADDRESSABLE (iend0) = 1;
3728 /* See if we need to bias by LLONG_MIN. */
3729 if (fd->iter_type == long_long_unsigned_type_node
3730 && TREE_CODE (type) == INTEGER_TYPE
3731 && !TYPE_UNSIGNED (type))
3733 tree n1, n2;
3735 if (fd->loop.cond_code == LT_EXPR)
3737 n1 = fd->loop.n1;
3738 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3740 else
3742 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3743 n2 = fd->loop.n1;
3745 if (TREE_CODE (n1) != INTEGER_CST
3746 || TREE_CODE (n2) != INTEGER_CST
3747 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3748 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3751 entry_bb = region->entry;
3752 cont_bb = region->cont;
3753 collapse_bb = NULL;
3754 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3755 gcc_assert (broken_loop
3756 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3757 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3758 l1_bb = single_succ (l0_bb);
3759 if (!broken_loop)
3761 l2_bb = create_empty_bb (cont_bb);
3762 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3763 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3765 else
3766 l2_bb = NULL;
3767 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3768 exit_bb = region->exit;
3770 gsi = gsi_last_bb (entry_bb);
3772 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3773 if (fd->collapse > 1)
3775 /* collapsed loops need work for expansion in SSA form. */
3776 gcc_assert (!gimple_in_ssa_p (cfun));
3777 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3778 for (i = 0; i < fd->collapse; i++)
3780 tree itype = TREE_TYPE (fd->loops[i].v);
3782 if (POINTER_TYPE_P (itype))
3783 itype = signed_type_for (itype);
3784 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3785 ? -1 : 1));
3786 t = fold_build2 (PLUS_EXPR, itype,
3787 fold_convert (itype, fd->loops[i].step), t);
3788 t = fold_build2 (PLUS_EXPR, itype, t,
3789 fold_convert (itype, fd->loops[i].n2));
3790 t = fold_build2 (MINUS_EXPR, itype, t,
3791 fold_convert (itype, fd->loops[i].n1));
3792 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3793 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3794 fold_build1 (NEGATE_EXPR, itype, t),
3795 fold_build1 (NEGATE_EXPR, itype,
3796 fold_convert (itype,
3797 fd->loops[i].step)));
3798 else
3799 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3800 fold_convert (itype, fd->loops[i].step));
3801 t = fold_convert (type, t);
3802 if (TREE_CODE (t) == INTEGER_CST)
3803 counts[i] = t;
3804 else
3806 counts[i] = create_tmp_reg (type, ".count");
3807 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3808 true, GSI_SAME_STMT);
3809 stmt = gimple_build_assign (counts[i], t);
3810 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3812 if (SSA_VAR_P (fd->loop.n2))
3814 if (i == 0)
3815 t = counts[0];
3816 else
3818 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3819 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3820 true, GSI_SAME_STMT);
3822 stmt = gimple_build_assign (fd->loop.n2, t);
3823 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3827 if (in_combined_parallel)
3829 /* In a combined parallel loop, emit a call to
3830 GOMP_loop_foo_next. */
3831 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
3832 build_fold_addr_expr (istart0),
3833 build_fold_addr_expr (iend0));
3835 else
3837 tree t0, t1, t2, t3, t4;
3838 /* If this is not a combined parallel loop, emit a call to
3839 GOMP_loop_foo_start in ENTRY_BB. */
3840 t4 = build_fold_addr_expr (iend0);
3841 t3 = build_fold_addr_expr (istart0);
3842 t2 = fold_convert (fd->iter_type, fd->loop.step);
3843 if (POINTER_TYPE_P (type)
3844 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3846 /* Avoid casting pointers to integer of a different size. */
3847 tree itype = signed_type_for (type);
3848 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3849 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3851 else
3853 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3854 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3856 if (bias)
3858 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3859 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3861 if (fd->iter_type == long_integer_type_node)
3863 if (fd->chunk_size)
3865 t = fold_convert (fd->iter_type, fd->chunk_size);
3866 t = build_call_expr (builtin_decl_explicit (start_fn),
3867 6, t0, t1, t2, t, t3, t4);
3869 else
3870 t = build_call_expr (builtin_decl_explicit (start_fn),
3871 5, t0, t1, t2, t3, t4);
3873 else
3875 tree t5;
3876 tree c_bool_type;
3877 tree bfn_decl;
3879 /* The GOMP_loop_ull_*start functions have an additional boolean
3880 argument, true for < loops and false for > loops.
3881 In Fortran, the C bool type can be different from
3882 boolean_type_node. */
3883 bfn_decl = builtin_decl_explicit (start_fn);
3884 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
3885 t5 = build_int_cst (c_bool_type,
3886 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3887 if (fd->chunk_size)
3889 tree bfn_decl = builtin_decl_explicit (start_fn);
3890 t = fold_convert (fd->iter_type, fd->chunk_size);
3891 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
3893 else
3894 t = build_call_expr (builtin_decl_explicit (start_fn),
3895 6, t5, t0, t1, t2, t3, t4);
3898 if (TREE_TYPE (t) != boolean_type_node)
3899 t = fold_build2 (NE_EXPR, boolean_type_node,
3900 t, build_int_cst (TREE_TYPE (t), 0));
3901 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3902 true, GSI_SAME_STMT);
3903 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3905 /* Remove the GIMPLE_OMP_FOR statement. */
3906 gsi_remove (&gsi, true);
3908 /* Iteration setup for sequential loop goes in L0_BB. */
3909 gsi = gsi_start_bb (l0_bb);
3910 t = istart0;
3911 if (bias)
3912 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3913 if (POINTER_TYPE_P (type))
3914 t = fold_convert (signed_type_for (type), t);
3915 t = fold_convert (type, t);
3916 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3917 false, GSI_CONTINUE_LINKING);
3918 stmt = gimple_build_assign (fd->loop.v, t);
3919 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3921 t = iend0;
3922 if (bias)
3923 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3924 if (POINTER_TYPE_P (type))
3925 t = fold_convert (signed_type_for (type), t);
3926 t = fold_convert (type, t);
3927 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3928 false, GSI_CONTINUE_LINKING);
3929 if (fd->collapse > 1)
3931 tree tem = create_tmp_reg (type, ".tem");
3932 stmt = gimple_build_assign (tem, fd->loop.v);
3933 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3934 for (i = fd->collapse - 1; i >= 0; i--)
3936 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3937 itype = vtype;
3938 if (POINTER_TYPE_P (vtype))
3939 itype = signed_type_for (vtype);
3940 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3941 t = fold_convert (itype, t);
3942 t = fold_build2 (MULT_EXPR, itype, t,
3943 fold_convert (itype, fd->loops[i].step));
3944 if (POINTER_TYPE_P (vtype))
3945 t = fold_build_pointer_plus (fd->loops[i].n1, t);
3946 else
3947 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3948 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3949 false, GSI_CONTINUE_LINKING);
3950 stmt = gimple_build_assign (fd->loops[i].v, t);
3951 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3952 if (i != 0)
3954 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3955 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3956 false, GSI_CONTINUE_LINKING);
3957 stmt = gimple_build_assign (tem, t);
3958 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3963 if (!broken_loop)
3965 /* Code to control the increment and predicate for the sequential
3966 loop goes in the CONT_BB. */
3967 gsi = gsi_last_bb (cont_bb);
3968 stmt = gsi_stmt (gsi);
3969 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3970 vmain = gimple_omp_continue_control_use (stmt);
3971 vback = gimple_omp_continue_control_def (stmt);
3973 if (POINTER_TYPE_P (type))
3974 t = fold_build_pointer_plus (vmain, fd->loop.step);
3975 else
3976 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3977 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3978 true, GSI_SAME_STMT);
3979 stmt = gimple_build_assign (vback, t);
3980 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3982 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3983 stmt = gimple_build_cond_empty (t);
3984 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3986 /* Remove GIMPLE_OMP_CONTINUE. */
3987 gsi_remove (&gsi, true);
3989 if (fd->collapse > 1)
3991 basic_block last_bb, bb;
3993 last_bb = cont_bb;
3994 for (i = fd->collapse - 1; i >= 0; i--)
3996 tree vtype = TREE_TYPE (fd->loops[i].v);
3998 bb = create_empty_bb (last_bb);
3999 gsi = gsi_start_bb (bb);
4001 if (i < fd->collapse - 1)
4003 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
4004 e->probability = REG_BR_PROB_BASE / 8;
4006 t = fd->loops[i + 1].n1;
4007 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4008 false, GSI_CONTINUE_LINKING);
4009 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
4010 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4012 else
4013 collapse_bb = bb;
4015 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
4017 if (POINTER_TYPE_P (vtype))
4018 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
4019 else
4020 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
4021 fd->loops[i].step);
4022 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4023 false, GSI_CONTINUE_LINKING);
4024 stmt = gimple_build_assign (fd->loops[i].v, t);
4025 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4027 if (i > 0)
4029 t = fd->loops[i].n2;
4030 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4031 false, GSI_CONTINUE_LINKING);
4032 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
4033 fd->loops[i].v, t);
4034 stmt = gimple_build_cond_empty (t);
4035 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4036 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4037 e->probability = REG_BR_PROB_BASE * 7 / 8;
4039 else
4040 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4041 last_bb = bb;
4045 /* Emit code to get the next parallel iteration in L2_BB. */
4046 gsi = gsi_start_bb (l2_bb);
4048 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
4049 build_fold_addr_expr (istart0),
4050 build_fold_addr_expr (iend0));
4051 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4052 false, GSI_CONTINUE_LINKING);
4053 if (TREE_TYPE (t) != boolean_type_node)
4054 t = fold_build2 (NE_EXPR, boolean_type_node,
4055 t, build_int_cst (TREE_TYPE (t), 0));
4056 stmt = gimple_build_cond_empty (t);
4057 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4060 /* Add the loop cleanup function. */
4061 gsi = gsi_last_bb (exit_bb);
4062 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4063 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
4064 else
4065 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
4066 stmt = gimple_build_call (t, 0);
4067 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4068 gsi_remove (&gsi, true);
4070 /* Connect the new blocks. */
4071 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4072 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4074 if (!broken_loop)
4076 gimple_seq phis;
4078 e = find_edge (cont_bb, l3_bb);
4079 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4081 phis = phi_nodes (l3_bb);
4082 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4084 gimple phi = gsi_stmt (gsi);
4085 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4086 PHI_ARG_DEF_FROM_EDGE (phi, e));
4088 remove_edge (e);
4090 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4091 if (fd->collapse > 1)
4093 e = find_edge (cont_bb, l1_bb);
4094 remove_edge (e);
4095 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4097 else
4099 e = find_edge (cont_bb, l1_bb);
4100 e->flags = EDGE_TRUE_VALUE;
4102 e->probability = REG_BR_PROB_BASE * 7 / 8;
4103 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4104 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4106 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4107 recompute_dominator (CDI_DOMINATORS, l2_bb));
4108 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4109 recompute_dominator (CDI_DOMINATORS, l3_bb));
4110 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4111 recompute_dominator (CDI_DOMINATORS, l0_bb));
4112 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4113 recompute_dominator (CDI_DOMINATORS, l1_bb));
4118 /* A subroutine of expand_omp_for. Generate code for a parallel
4119 loop with static schedule and no specified chunk size. Given
4120 parameters:
4122 for (V = N1; V cond N2; V += STEP) BODY;
4124 where COND is "<" or ">", we generate pseudocode
4126 if (cond is <)
4127 adj = STEP - 1;
4128 else
4129 adj = STEP + 1;
4130 if ((__typeof (V)) -1 > 0 && cond is >)
4131 n = -(adj + N2 - N1) / -STEP;
4132 else
4133 n = (adj + N2 - N1) / STEP;
4134 q = n / nthreads;
4135 tt = n % nthreads;
4136 if (threadid < tt) goto L3; else goto L4;
4137 L3:
4138 tt = 0;
4139 q = q + 1;
4140 L4:
4141 s0 = q * threadid + tt;
4142 e0 = s0 + q;
4143 V = s0 * STEP + N1;
4144 if (s0 >= e0) goto L2; else goto L0;
4145 L0:
4146 e = e0 * STEP + N1;
4147 L1:
4148 BODY;
4149 V += STEP;
4150 if (V cond e) goto L1;
4151 L2:
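/* Editorial illustration, not part of the original comment: with
   nthreads = 4 and n = 10 the pseudocode above computes q = 2 and tt = 2,
   so threads 0 and 1 (threadid < tt) each run q + 1 = 3 iterations,
   [0,3) and [3,6), while threads 2 and 3 each run 2, [6,8) and [8,10).  */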
4154 static void
4155 expand_omp_for_static_nochunk (struct omp_region *region,
4156 struct omp_for_data *fd)
4158 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
4159 tree type, itype, vmain, vback;
4160 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4161 basic_block body_bb, cont_bb;
4162 basic_block fin_bb;
4163 gimple_stmt_iterator gsi;
4164 gimple stmt;
4165 edge ep;
4167 itype = type = TREE_TYPE (fd->loop.v);
4168 if (POINTER_TYPE_P (type))
4169 itype = signed_type_for (type);
4171 entry_bb = region->entry;
4172 cont_bb = region->cont;
4173 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4174 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4175 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4176 body_bb = single_succ (seq_start_bb);
4177 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4178 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4179 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4180 exit_bb = region->exit;
4182 /* Iteration space partitioning goes in ENTRY_BB. */
4183 gsi = gsi_last_bb (entry_bb);
4184 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4186 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4187 t = fold_convert (itype, t);
4188 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4189 true, GSI_SAME_STMT);
4191 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4192 t = fold_convert (itype, t);
4193 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4194 true, GSI_SAME_STMT);
4196 fd->loop.n1
4197 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4198 true, NULL_TREE, true, GSI_SAME_STMT);
4199 fd->loop.n2
4200 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4201 true, NULL_TREE, true, GSI_SAME_STMT);
4202 fd->loop.step
4203 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4204 true, NULL_TREE, true, GSI_SAME_STMT);
4206 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4207 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4208 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4209 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4210 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4211 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4212 fold_build1 (NEGATE_EXPR, itype, t),
4213 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4214 else
4215 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4216 t = fold_convert (itype, t);
4217 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4219 q = create_tmp_reg (itype, "q");
4220 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4221 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4222 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4224 tt = create_tmp_reg (itype, "tt");
4225 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4226 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4227 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
4229 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4230 stmt = gimple_build_cond_empty (t);
4231 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4233 second_bb = split_block (entry_bb, stmt)->dest;
4234 gsi = gsi_last_bb (second_bb);
4235 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4237 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4238 GSI_SAME_STMT);
4239 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4240 build_int_cst (itype, 1));
4241 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4243 third_bb = split_block (second_bb, stmt)->dest;
4244 gsi = gsi_last_bb (third_bb);
4245 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4247 t = build2 (MULT_EXPR, itype, q, threadid);
4248 t = build2 (PLUS_EXPR, itype, t, tt);
4249 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4251 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4252 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4254 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4255 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4257 /* Remove the GIMPLE_OMP_FOR statement. */
4258 gsi_remove (&gsi, true);
4260 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4261 gsi = gsi_start_bb (seq_start_bb);
4263 t = fold_convert (itype, s0);
4264 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4265 if (POINTER_TYPE_P (type))
4266 t = fold_build_pointer_plus (fd->loop.n1, t);
4267 else
4268 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4269 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4270 false, GSI_CONTINUE_LINKING);
4271 stmt = gimple_build_assign (fd->loop.v, t);
4272 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4274 t = fold_convert (itype, e0);
4275 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4276 if (POINTER_TYPE_P (type))
4277 t = fold_build_pointer_plus (fd->loop.n1, t);
4278 else
4279 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4280 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4281 false, GSI_CONTINUE_LINKING);
4283 /* The code controlling the sequential loop replaces the
4284 GIMPLE_OMP_CONTINUE. */
4285 gsi = gsi_last_bb (cont_bb);
4286 stmt = gsi_stmt (gsi);
4287 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4288 vmain = gimple_omp_continue_control_use (stmt);
4289 vback = gimple_omp_continue_control_def (stmt);
4291 if (POINTER_TYPE_P (type))
4292 t = fold_build_pointer_plus (vmain, fd->loop.step);
4293 else
4294 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4295 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4296 true, GSI_SAME_STMT);
4297 stmt = gimple_build_assign (vback, t);
4298 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4300 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4301 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4303 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4304 gsi_remove (&gsi, true);
4306 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4307 gsi = gsi_last_bb (exit_bb);
4308 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4309 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4310 false, GSI_SAME_STMT);
4311 gsi_remove (&gsi, true);
4313 /* Connect all the blocks. */
4314 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
4315 ep->probability = REG_BR_PROB_BASE / 4 * 3;
4316 ep = find_edge (entry_bb, second_bb);
4317 ep->flags = EDGE_TRUE_VALUE;
4318 ep->probability = REG_BR_PROB_BASE / 4;
4319 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4320 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4322 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4323 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4325 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
4326 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
4327 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
4328 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4329 recompute_dominator (CDI_DOMINATORS, body_bb));
4330 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4331 recompute_dominator (CDI_DOMINATORS, fin_bb));
4335 /* A subroutine of expand_omp_for. Generate code for a parallel
4336 loop with static schedule and a specified chunk size. Given
4337 parameters:
4339 for (V = N1; V cond N2; V += STEP) BODY;
4341 where COND is "<" or ">", we generate pseudocode
4343 if (cond is <)
4344 adj = STEP - 1;
4345 else
4346 adj = STEP + 1;
4347 if ((__typeof (V)) -1 > 0 && cond is >)
4348 n = -(adj + N2 - N1) / -STEP;
4349 else
4350 n = (adj + N2 - N1) / STEP;
4351 trip = 0;
4352 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4353 here so that V is defined
4354 if the loop is not entered
4355 L0:
4356 s0 = (trip * nthreads + threadid) * CHUNK;
4357 e0 = min(s0 + CHUNK, n);
4358 if (s0 < n) goto L1; else goto L4;
4359 L1:
4360 V = s0 * STEP + N1;
4361 e = e0 * STEP + N1;
4362 L2:
4363 BODY;
4364 V += STEP;
4365 if (V cond e) goto L2; else goto L3;
4366 L3:
4367 trip += 1;
4368 goto L0;
4369 L4:
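/* Editorial illustration, not part of the original comment: with
   nthreads = 4, CHUNK = 2 and n = 10, trip 0 hands [0,2), [2,4), [4,6)
   and [6,8) to threads 0..3; on trip 1 only thread 0 still has
   s0 = (1*4 + 0) * 2 = 8 < n and runs [8,10), the others reach L4.  */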
4372 static void
4373 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4375 tree n, s0, e0, e, t;
4376 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4377 tree type, itype, v_main, v_back, v_extra;
4378 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4379 basic_block trip_update_bb, cont_bb, fin_bb;
4380 gimple_stmt_iterator si;
4381 gimple stmt;
4382 edge se;
4384 itype = type = TREE_TYPE (fd->loop.v);
4385 if (POINTER_TYPE_P (type))
4386 itype = signed_type_for (type);
4388 entry_bb = region->entry;
4389 se = split_block (entry_bb, last_stmt (entry_bb));
4390 entry_bb = se->src;
4391 iter_part_bb = se->dest;
4392 cont_bb = region->cont;
4393 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4394 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4395 == FALLTHRU_EDGE (cont_bb)->dest);
4396 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4397 body_bb = single_succ (seq_start_bb);
4398 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4399 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4400 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4401 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4402 exit_bb = region->exit;
4404 /* Trip and adjustment setup goes in ENTRY_BB. */
4405 si = gsi_last_bb (entry_bb);
4406 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4408 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4409 t = fold_convert (itype, t);
4410 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4411 true, GSI_SAME_STMT);
4413 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4414 t = fold_convert (itype, t);
4415 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4416 true, GSI_SAME_STMT);
4418 fd->loop.n1
4419 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4420 true, NULL_TREE, true, GSI_SAME_STMT);
4421 fd->loop.n2
4422 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4423 true, NULL_TREE, true, GSI_SAME_STMT);
4424 fd->loop.step
4425 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4426 true, NULL_TREE, true, GSI_SAME_STMT);
4427 fd->chunk_size
4428 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4429 true, NULL_TREE, true, GSI_SAME_STMT);
4431 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4432 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4433 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4434 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4435 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4436 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4437 fold_build1 (NEGATE_EXPR, itype, t),
4438 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4439 else
4440 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4441 t = fold_convert (itype, t);
4442 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4443 true, GSI_SAME_STMT);
4445 trip_var = create_tmp_reg (itype, ".trip");
4446 if (gimple_in_ssa_p (cfun))
4448 trip_init = make_ssa_name (trip_var, NULL);
4449 trip_main = make_ssa_name (trip_var, NULL);
4450 trip_back = make_ssa_name (trip_var, NULL);
4452 else
4454 trip_init = trip_var;
4455 trip_main = trip_var;
4456 trip_back = trip_var;
4459 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4460 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4462 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4463 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4464 if (POINTER_TYPE_P (type))
4465 t = fold_build_pointer_plus (fd->loop.n1, t);
4466 else
4467 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4468 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4469 true, GSI_SAME_STMT);
4471 /* Remove the GIMPLE_OMP_FOR. */
4472 gsi_remove (&si, true);
4474 /* Iteration space partitioning goes in ITER_PART_BB. */
4475 si = gsi_last_bb (iter_part_bb);
4477 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4478 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4479 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4480 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4481 false, GSI_CONTINUE_LINKING);
4483 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4484 t = fold_build2 (MIN_EXPR, itype, t, n);
4485 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4486 false, GSI_CONTINUE_LINKING);
4488 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4489 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4491 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4492 si = gsi_start_bb (seq_start_bb);
4494 t = fold_convert (itype, s0);
4495 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4496 if (POINTER_TYPE_P (type))
4497 t = fold_build_pointer_plus (fd->loop.n1, t);
4498 else
4499 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4500 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4501 false, GSI_CONTINUE_LINKING);
4502 stmt = gimple_build_assign (fd->loop.v, t);
4503 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4505 t = fold_convert (itype, e0);
4506 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4507 if (POINTER_TYPE_P (type))
4508 t = fold_build_pointer_plus (fd->loop.n1, t);
4509 else
4510 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4511 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4512 false, GSI_CONTINUE_LINKING);
4514 /* The code controlling the sequential loop goes in CONT_BB,
4515 replacing the GIMPLE_OMP_CONTINUE. */
4516 si = gsi_last_bb (cont_bb);
4517 stmt = gsi_stmt (si);
4518 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4519 v_main = gimple_omp_continue_control_use (stmt);
4520 v_back = gimple_omp_continue_control_def (stmt);
4522 if (POINTER_TYPE_P (type))
4523 t = fold_build_pointer_plus (v_main, fd->loop.step);
4524 else
4525 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4526 stmt = gimple_build_assign (v_back, t);
4527 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4529 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4530 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4532 /* Remove GIMPLE_OMP_CONTINUE. */
4533 gsi_remove (&si, true);
4535 /* Trip update code goes into TRIP_UPDATE_BB. */
4536 si = gsi_start_bb (trip_update_bb);
4538 t = build_int_cst (itype, 1);
4539 t = build2 (PLUS_EXPR, itype, trip_main, t);
4540 stmt = gimple_build_assign (trip_back, t);
4541 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4543 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4544 si = gsi_last_bb (exit_bb);
4545 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4546 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4547 false, GSI_SAME_STMT);
4548 gsi_remove (&si, true);
4550 /* Connect the new blocks. */
4551 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4552 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4554 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4555 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4557 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4559 if (gimple_in_ssa_p (cfun))
4561 gimple_stmt_iterator psi;
4562 gimple phi;
4563 edge re, ene;
4564 edge_var_map_vector head;
4565 edge_var_map *vm;
4566 size_t i;
4568 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4569 remove arguments of the phi nodes in fin_bb. We need to create
4570 appropriate phi nodes in iter_part_bb instead. */
4571 se = single_pred_edge (fin_bb);
4572 re = single_succ_edge (trip_update_bb);
4573 head = redirect_edge_var_map_vector (re);
4574 ene = single_succ_edge (entry_bb);
4576 psi = gsi_start_phis (fin_bb);
4577 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4578 gsi_next (&psi), ++i)
4580 gimple nphi;
4581 source_location locus;
4583 phi = gsi_stmt (psi);
4584 t = gimple_phi_result (phi);
4585 gcc_assert (t == redirect_edge_var_map_result (vm));
4586 nphi = create_phi_node (t, iter_part_bb);
4588 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4589 locus = gimple_phi_arg_location_from_edge (phi, se);
4591 /* A special case -- fd->loop.v is not yet computed in
4592 iter_part_bb; we need to use v_extra instead. */
4593 if (t == fd->loop.v)
4594 t = v_extra;
4595 add_phi_arg (nphi, t, ene, locus);
4596 locus = redirect_edge_var_map_location (vm);
4597 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4599 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4600 redirect_edge_var_map_clear (re);
4601 while (1)
4603 psi = gsi_start_phis (fin_bb);
4604 if (gsi_end_p (psi))
4605 break;
4606 remove_phi_node (&psi, false);
4609 /* Make phi node for trip. */
4610 phi = create_phi_node (trip_main, iter_part_bb);
4611 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4612 UNKNOWN_LOCATION);
4613 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4614 UNKNOWN_LOCATION);
4617 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4618 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4619 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4620 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4621 recompute_dominator (CDI_DOMINATORS, fin_bb));
4622 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4623 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4624 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4625 recompute_dominator (CDI_DOMINATORS, body_bb));
4629 /* Expand the OpenMP loop defined by REGION. */
4631 static void
4632 expand_omp_for (struct omp_region *region)
4634 struct omp_for_data fd;
4635 struct omp_for_data_loop *loops;
4637 loops
4638 = (struct omp_for_data_loop *)
4639 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4640 * sizeof (struct omp_for_data_loop));
4641 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4642 region->sched_kind = fd.sched_kind;
4644 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4645 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4646 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4647 if (region->cont)
4649 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4650 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4651 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4654 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4655 && !fd.have_ordered
4656 && fd.collapse == 1
4657 && region->cont != NULL)
4659 if (fd.chunk_size == NULL)
4660 expand_omp_for_static_nochunk (region, &fd);
4661 else
4662 expand_omp_for_static_chunk (region, &fd);
4664 else
4666 int fn_index, start_ix, next_ix;
4668 if (fd.chunk_size == NULL
4669 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
4670 fd.chunk_size = integer_zero_node;
4671 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4672 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4673 ? 3 : fd.sched_kind;
4674 fn_index += fd.have_ordered * 4;
4675 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
4676 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
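/* Editorial note, not part of the original sources: e.g. schedule(guided)
   inside an ordered loop gives fn_index = 2 + 4 = 6, selecting
   BUILT_IN_GOMP_LOOP_ORDERED_GUIDED_START and the matching _NEXT.  */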
4677 if (fd.iter_type == long_long_unsigned_type_node)
4679 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4680 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
4681 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4682 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
4684 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4685 (enum built_in_function) next_ix);
4688 if (gimple_in_ssa_p (cfun))
4689 update_ssa (TODO_update_ssa_only_virtuals);
4693 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4695 v = GOMP_sections_start (n);
4696 L0:
4697 switch (v)
4699 case 0:
4700 goto L2;
4701 case 1:
4702 section 1;
4703 goto L1;
4704 case 2:
4705 ...
4706 case n:
4707 ...
4708 default:
4709 abort ();
4711 L1:
4712 v = GOMP_sections_next ();
4713 goto L0;
4714 L2:
4715 reduction;
4717 If this is a combined parallel sections, replace the call to
4718 GOMP_sections_start with a call to GOMP_sections_next. */
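/* Editorial illustration, not part of the original sources: for a
   construct with two sections, e.g.

     #pragma omp sections
     {
       #pragma omp section
         foo ();
       #pragma omp section
         bar ();
     }

   GOMP_sections_start (2) (or GOMP_sections_next ()) returns the number
   of the section to run next: case 1 runs foo (), case 2 runs bar (),
   and case 0 leaves the dispatch loop.  */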
4720 static void
4721 expand_omp_sections (struct omp_region *region)
4723 tree t, u, vin = NULL, vmain, vnext, l2;
4724 VEC (tree,heap) *label_vec;
4725 unsigned len;
4726 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4727 gimple_stmt_iterator si, switch_si;
4728 gimple sections_stmt, stmt, cont;
4729 edge_iterator ei;
4730 edge e;
4731 struct omp_region *inner;
4732 unsigned i, casei;
4733 bool exit_reachable = region->cont != NULL;
4735 gcc_assert (region->exit != NULL);
4736 entry_bb = region->entry;
4737 l0_bb = single_succ (entry_bb);
4738 l1_bb = region->cont;
4739 l2_bb = region->exit;
4740 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
4741 l2 = gimple_block_label (l2_bb);
4742 else
4744 /* This can happen if there are reductions. */
4745 len = EDGE_COUNT (l0_bb->succs);
4746 gcc_assert (len > 0);
4747 e = EDGE_SUCC (l0_bb, len - 1);
4748 si = gsi_last_bb (e->dest);
4749 l2 = NULL_TREE;
4750 if (gsi_end_p (si)
4751 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4752 l2 = gimple_block_label (e->dest);
4753 else
4754 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4756 si = gsi_last_bb (e->dest);
4757 if (gsi_end_p (si)
4758 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4760 l2 = gimple_block_label (e->dest);
4761 break;
4765 if (exit_reachable)
4766 default_bb = create_empty_bb (l1_bb->prev_bb);
4767 else
4768 default_bb = create_empty_bb (l0_bb);
4770 /* We will build a switch() with enough cases for all the
4771 GIMPLE_OMP_SECTION regions, a '0' case taken when there is no more work
4772 and a default case to abort if something goes wrong. */
4773 len = EDGE_COUNT (l0_bb->succs);
4775 /* Use VEC_quick_push on label_vec throughout, since we know the size
4776 in advance. */
4777 label_vec = VEC_alloc (tree, heap, len);
4779 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4780 GIMPLE_OMP_SECTIONS statement. */
4781 si = gsi_last_bb (entry_bb);
4782 sections_stmt = gsi_stmt (si);
4783 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4784 vin = gimple_omp_sections_control (sections_stmt);
4785 if (!is_combined_parallel (region))
4787 /* If we are not inside a combined parallel+sections region,
4788 call GOMP_sections_start. */
4789 t = build_int_cst (unsigned_type_node,
4790 exit_reachable ? len - 1 : len);
4791 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
4792 stmt = gimple_build_call (u, 1, t);
4794 else
4796 /* Otherwise, call GOMP_sections_next. */
4797 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4798 stmt = gimple_build_call (u, 0);
4800 gimple_call_set_lhs (stmt, vin);
4801 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4802 gsi_remove (&si, true);
4804 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4805 L0_BB. */
4806 switch_si = gsi_last_bb (l0_bb);
4807 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4808 if (exit_reachable)
4810 cont = last_stmt (l1_bb);
4811 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4812 vmain = gimple_omp_continue_control_use (cont);
4813 vnext = gimple_omp_continue_control_def (cont);
4815 else
4817 vmain = vin;
4818 vnext = NULL_TREE;
4821 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
4822 VEC_quick_push (tree, label_vec, t);
4823 i = 1;
4825 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4826 for (inner = region->inner, casei = 1;
4827 inner;
4828 inner = inner->next, i++, casei++)
4830 basic_block s_entry_bb, s_exit_bb;
4832 /* Skip optional reduction region. */
4833 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4835 --i;
4836 --casei;
4837 continue;
4840 s_entry_bb = inner->entry;
4841 s_exit_bb = inner->exit;
4843 t = gimple_block_label (s_entry_bb);
4844 u = build_int_cst (unsigned_type_node, casei);
4845 u = build_case_label (u, NULL, t);
4846 VEC_quick_push (tree, label_vec, u);
4848 si = gsi_last_bb (s_entry_bb);
4849 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4850 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4851 gsi_remove (&si, true);
4852 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4854 if (s_exit_bb == NULL)
4855 continue;
4857 si = gsi_last_bb (s_exit_bb);
4858 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4859 gsi_remove (&si, true);
4861 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4864 /* Error handling code goes in DEFAULT_BB. */
4865 t = gimple_block_label (default_bb);
4866 u = build_case_label (NULL, NULL, t);
4867 make_edge (l0_bb, default_bb, 0);
4869 stmt = gimple_build_switch (vmain, u, label_vec);
4870 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4871 gsi_remove (&switch_si, true);
4872 VEC_free (tree, heap, label_vec);
4874 si = gsi_start_bb (default_bb);
4875 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
4876 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4878 if (exit_reachable)
4880 tree bfn_decl;
4882 /* Code to get the next section goes in L1_BB. */
4883 si = gsi_last_bb (l1_bb);
4884 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4886 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4887 stmt = gimple_build_call (bfn_decl, 0);
4888 gimple_call_set_lhs (stmt, vnext);
4889 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4890 gsi_remove (&si, true);
4892 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4895 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4896 si = gsi_last_bb (l2_bb);
4897 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4898 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
4899 else
4900 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
4901 stmt = gimple_build_call (t, 0);
4902 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4903 gsi_remove (&si, true);
4905 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4909 /* Expand code for an OpenMP single directive. We've already expanded
4910 much of the code; here we simply place the GOMP_barrier call. */
4912 static void
4913 expand_omp_single (struct omp_region *region)
4915 basic_block entry_bb, exit_bb;
4916 gimple_stmt_iterator si;
4917 bool need_barrier = false;
4919 entry_bb = region->entry;
4920 exit_bb = region->exit;
4922 si = gsi_last_bb (entry_bb);
4923 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4924 be removed. We need to ensure that the thread that entered the single
4925 does not exit before the data is copied out by the other threads. */
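/* Editorial illustration, not part of the original sources: e.g.
     #pragma omp single copyprivate (x)
   broadcasts x from the thread that ran the single body to the others,
   so the barrier emitted below must stay even if the region's
   GIMPLE_OMP_RETURN ends up marked nowait.  */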
4926 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4927 OMP_CLAUSE_COPYPRIVATE))
4928 need_barrier = true;
4929 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4930 gsi_remove (&si, true);
4931 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4933 si = gsi_last_bb (exit_bb);
4934 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4935 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4936 false, GSI_SAME_STMT);
4937 gsi_remove (&si, true);
4938 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4942 /* Generic expansion for OpenMP synchronization directives: master,
4943 ordered and critical. All we need to do here is remove the entry
4944 and exit markers for REGION. */
4946 static void
4947 expand_omp_synch (struct omp_region *region)
4949 basic_block entry_bb, exit_bb;
4950 gimple_stmt_iterator si;
4952 entry_bb = region->entry;
4953 exit_bb = region->exit;
4955 si = gsi_last_bb (entry_bb);
4956 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4957 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4958 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4959 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4960 gsi_remove (&si, true);
4961 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4963 if (exit_bb)
4965 si = gsi_last_bb (exit_bb);
4966 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4967 gsi_remove (&si, true);
4968 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4972 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4973 operation as a normal volatile load. */
4975 static bool
4976 expand_omp_atomic_load (basic_block load_bb, tree addr,
4977 tree loaded_val, int index)
4979 enum built_in_function tmpbase;
4980 gimple_stmt_iterator gsi;
4981 basic_block store_bb;
4982 location_t loc;
4983 gimple stmt;
4984 tree decl, call, type, itype;
4986 gsi = gsi_last_bb (load_bb);
4987 stmt = gsi_stmt (gsi);
4988 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
4989 loc = gimple_location (stmt);
4991 /* ??? If the target does not implement atomic_load_optab[mode], and mode
4992 is smaller than word size, then expand_atomic_load assumes that the load
4993 is atomic. We could avoid the builtin entirely in this case. */
4995 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
4996 decl = builtin_decl_explicit (tmpbase);
4997 if (decl == NULL_TREE)
4998 return false;
5000 type = TREE_TYPE (loaded_val);
5001 itype = TREE_TYPE (TREE_TYPE (decl));
5003 call = build_call_expr_loc (loc, decl, 2, addr,
5004 build_int_cst (NULL, MEMMODEL_RELAXED));
5005 if (!useless_type_conversion_p (type, itype))
5006 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5007 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5009 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5010 gsi_remove (&gsi, true);
5012 store_bb = single_succ (load_bb);
5013 gsi = gsi_last_bb (store_bb);
5014 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5015 gsi_remove (&gsi, true);
5017 if (gimple_in_ssa_p (cfun))
5018 update_ssa (TODO_update_ssa_no_phi);
5020 return true;
5023 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5024 operation as a normal volatile store. */
5026 static bool
5027 expand_omp_atomic_store (basic_block load_bb, tree addr,
5028 tree loaded_val, tree stored_val, int index)
5030 enum built_in_function tmpbase;
5031 gimple_stmt_iterator gsi;
5032 basic_block store_bb = single_succ (load_bb);
5033 location_t loc;
5034 gimple stmt;
5035 tree decl, call, type, itype;
5036 enum machine_mode imode;
5037 bool exchange;
5039 gsi = gsi_last_bb (load_bb);
5040 stmt = gsi_stmt (gsi);
5041 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
5043 /* If the load value is needed, then this isn't a store but an exchange. */
5044 exchange = gimple_omp_atomic_need_value_p (stmt);
5046 gsi = gsi_last_bb (store_bb);
5047 stmt = gsi_stmt (gsi);
5048 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
5049 loc = gimple_location (stmt);
5051 /* ??? If the target does not implement atomic_store_optab[mode], and mode
5052 is smaller than word size, then expand_atomic_store assumes that the store
5053 is atomic. We could avoid the builtin entirely in this case. */
5055 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
5056 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
5057 decl = builtin_decl_explicit (tmpbase);
5058 if (decl == NULL_TREE)
5059 return false;
5061 type = TREE_TYPE (stored_val);
5063 /* Dig out the type of the function's second argument. */
5064 itype = TREE_TYPE (decl);
5065 itype = TYPE_ARG_TYPES (itype);
5066 itype = TREE_CHAIN (itype);
5067 itype = TREE_VALUE (itype);
5068 imode = TYPE_MODE (itype);
5070 if (exchange && !can_atomic_exchange_p (imode, true))
5071 return false;
5073 if (!useless_type_conversion_p (itype, type))
5074 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
5075 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
5076 build_int_cst (NULL, MEMMODEL_RELAXED));
5077 if (exchange)
5079 if (!useless_type_conversion_p (type, itype))
5080 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5081 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5084 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5085 gsi_remove (&gsi, true);
5087 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
5088 gsi = gsi_last_bb (load_bb);
5089 gsi_remove (&gsi, true);
5091 if (gimple_in_ssa_p (cfun))
5092 update_ssa (TODO_update_ssa_no_phi);
5094 return true;
5097 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5098 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
5099 size of the data type, and thus usable to find the index of the builtin
5100 decl. Returns false if the expression is not of the proper form. */
5102 static bool
5103 expand_omp_atomic_fetch_op (basic_block load_bb,
5104 tree addr, tree loaded_val,
5105 tree stored_val, int index)
5107 enum built_in_function oldbase, newbase, tmpbase;
5108 tree decl, itype, call;
5109 tree lhs, rhs;
5110 basic_block store_bb = single_succ (load_bb);
5111 gimple_stmt_iterator gsi;
5112 gimple stmt;
5113 location_t loc;
5114 enum tree_code code;
5115 bool need_old, need_new;
5116 enum machine_mode imode;
5118 /* We expect to find the following sequences:
5120 load_bb:
5121 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
5123 store_bb:
5124 val = tmp OP something; (or: something OP tmp)
5125 GIMPLE_OMP_STORE (val)
5127 ???FIXME: Allow a more flexible sequence.
5128 Perhaps use data flow to pick the statements.
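/* Editorial illustration, not part of the original sources: for
     #pragma omp atomic
     x += 1;
   with a 4-byte int X, the sequence above is recognized as a PLUS_EXPR
   and, since neither the old nor the new value is needed, it is replaced
   by a single call along the lines of
     __atomic_fetch_add (&x, 1, MEMMODEL_RELAXED);  */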
5132 gsi = gsi_after_labels (store_bb);
5133 stmt = gsi_stmt (gsi);
5134 loc = gimple_location (stmt);
5135 if (!is_gimple_assign (stmt))
5136 return false;
5137 gsi_next (&gsi);
5138 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
5139 return false;
5140 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
5141 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
5142 gcc_checking_assert (!need_old || !need_new);
5144 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
5145 return false;
5147 /* Check for one of the supported fetch-op operations. */
5148 code = gimple_assign_rhs_code (stmt);
5149 switch (code)
5151 case PLUS_EXPR:
5152 case POINTER_PLUS_EXPR:
5153 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
5154 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
5155 break;
5156 case MINUS_EXPR:
5157 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
5158 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
5159 break;
5160 case BIT_AND_EXPR:
5161 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
5162 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
5163 break;
5164 case BIT_IOR_EXPR:
5165 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
5166 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
5167 break;
5168 case BIT_XOR_EXPR:
5169 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
5170 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
5171 break;
5172 default:
5173 return false;
5176 /* Make sure the expression is of the proper form. */
5177 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5178 rhs = gimple_assign_rhs2 (stmt);
5179 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5180 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5181 rhs = gimple_assign_rhs1 (stmt);
5182 else
5183 return false;
5185 tmpbase = ((enum built_in_function)
5186 ((need_new ? newbase : oldbase) + index + 1));
5187 decl = builtin_decl_explicit (tmpbase);
5188 if (decl == NULL_TREE)
5189 return false;
5190 itype = TREE_TYPE (TREE_TYPE (decl));
5191 imode = TYPE_MODE (itype);
5193 /* We could test all of the various optabs involved, but the fact of the
5194 matter is that (with the exception of i486 vs i586 and xadd) all targets
5195 that support any atomic operation optab also implement compare-and-swap.
5196 Let optabs.c take care of expanding any compare-and-swap loop. */
5197 if (!can_compare_and_swap_p (imode, true))
5198 return false;
5200 gsi = gsi_last_bb (load_bb);
5201 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
5203 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
5204 It only requires that the operation happen atomically. Thus we can
5205 use the RELAXED memory model. */
5206 call = build_call_expr_loc (loc, decl, 3, addr,
5207 fold_convert_loc (loc, itype, rhs),
5208 build_int_cst (NULL, MEMMODEL_RELAXED));
5210 if (need_old || need_new)
5212 lhs = need_old ? loaded_val : stored_val;
5213 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
5214 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
5216 else
5217 call = fold_convert_loc (loc, void_type_node, call);
5218 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5219 gsi_remove (&gsi, true);
5221 gsi = gsi_last_bb (store_bb);
5222 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5223 gsi_remove (&gsi, true);
5224 gsi = gsi_last_bb (store_bb);
5225 gsi_remove (&gsi, true);
5227 if (gimple_in_ssa_p (cfun))
5228 update_ssa (TODO_update_ssa_no_phi);
5230 return true;
5233 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5235 oldval = *addr;
5236 repeat:
5237 newval = rhs; // with oldval replacing *addr in rhs
5238 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
5239 if (oldval != newval)
5240 goto repeat;
5242 INDEX is log2 of the size of the data type, and thus usable to find the
5243 index of the builtin decl. */
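/* Editorial illustration, not part of the original sources: for
     #pragma omp atomic
     f *= 2.0f;
   with "float f" the fetch-op path does not apply, so this routine
   view-converts the value to a 4-byte integer and loops on
   __sync_val_compare_and_swap until the swap succeeds.  */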
5245 static bool
5246 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5247 tree addr, tree loaded_val, tree stored_val,
5248 int index)
5250 tree loadedi, storedi, initial, new_storedi, old_vali;
5251 tree type, itype, cmpxchg, iaddr;
5252 gimple_stmt_iterator si;
5253 basic_block loop_header = single_succ (load_bb);
5254 gimple phi, stmt;
5255 edge e;
5256 enum built_in_function fncode;
5258 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
5259 order to use the RELAXED memory model effectively. */
5260 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
5261 + index + 1);
5262 cmpxchg = builtin_decl_explicit (fncode);
5263 if (cmpxchg == NULL_TREE)
5264 return false;
5265 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5266 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5268 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
5269 return false;
5271 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5272 si = gsi_last_bb (load_bb);
5273 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5275 /* For floating-point values, we'll need to view-convert them to integers
5276 so that we can perform the atomic compare and swap. Simplify the
5277 following code by always setting up the "i"ntegral variables. */
5278 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5280 tree iaddr_val;
5282 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
5283 true), NULL);
5284 iaddr_val
5285 = force_gimple_operand_gsi (&si,
5286 fold_convert (TREE_TYPE (iaddr), addr),
5287 false, NULL_TREE, true, GSI_SAME_STMT);
5288 stmt = gimple_build_assign (iaddr, iaddr_val);
5289 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5290 loadedi = create_tmp_var (itype, NULL);
5291 if (gimple_in_ssa_p (cfun))
5292 loadedi = make_ssa_name (loadedi, NULL);
5294 else
5296 iaddr = addr;
5297 loadedi = loaded_val;
5300 initial
5301 = force_gimple_operand_gsi (&si,
5302 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5303 iaddr,
5304 build_int_cst (TREE_TYPE (iaddr), 0)),
5305 true, NULL_TREE, true, GSI_SAME_STMT);
5307 /* Move the value to the LOADEDI temporary. */
5308 if (gimple_in_ssa_p (cfun))
5310 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5311 phi = create_phi_node (loadedi, loop_header);
5312 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5313 initial);
5315 else
5316 gsi_insert_before (&si,
5317 gimple_build_assign (loadedi, initial),
5318 GSI_SAME_STMT);
5319 if (loadedi != loaded_val)
5321 gimple_stmt_iterator gsi2;
5322 tree x;
5324 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5325 gsi2 = gsi_start_bb (loop_header);
5326 if (gimple_in_ssa_p (cfun))
5328 gimple stmt;
5329 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5330 true, GSI_SAME_STMT);
5331 stmt = gimple_build_assign (loaded_val, x);
5332 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5334 else
5336 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5337 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5338 true, GSI_SAME_STMT);
5341 gsi_remove (&si, true);
5343 si = gsi_last_bb (store_bb);
5344 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5346 if (iaddr == addr)
5347 storedi = stored_val;
5348 else
5349 storedi =
5350 force_gimple_operand_gsi (&si,
5351 build1 (VIEW_CONVERT_EXPR, itype,
5352 stored_val), true, NULL_TREE, true,
5353 GSI_SAME_STMT);
5355 /* Build the compare&swap statement. */
5356 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5357 new_storedi = force_gimple_operand_gsi (&si,
5358 fold_convert (TREE_TYPE (loadedi),
5359 new_storedi),
5360 true, NULL_TREE,
5361 true, GSI_SAME_STMT);
5363 if (gimple_in_ssa_p (cfun))
5364 old_vali = loadedi;
5365 else
5367 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5368 stmt = gimple_build_assign (old_vali, loadedi);
5369 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5371 stmt = gimple_build_assign (loadedi, new_storedi);
5372 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5375 /* Note that we always perform the comparison as an integer, even for
5376 floating point. This allows the atomic operation to properly
5377 succeed even with NaNs and -0.0. */
5378 stmt = gimple_build_cond_empty
5379 (build2 (NE_EXPR, boolean_type_node,
5380 new_storedi, old_vali));
5381 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5383 /* Update cfg. */
5384 e = single_succ_edge (store_bb);
5385 e->flags &= ~EDGE_FALLTHRU;
5386 e->flags |= EDGE_FALSE_VALUE;
5388 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5390 /* Copy the new value to loadedi (we already did that before the condition
5391 if we are not in SSA). */
5392 if (gimple_in_ssa_p (cfun))
5394 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5395 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5398 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5399 gsi_remove (&si, true);
5401 if (gimple_in_ssa_p (cfun))
5402 update_ssa (TODO_update_ssa_no_phi);
5404 return true;
5407 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5409 GOMP_atomic_start ();
5410 *addr = rhs;
5411 GOMP_atomic_end ();
5413 The result is not globally atomic, but works so long as all parallel
5414 references are within #pragma omp atomic directives. According to
5415 responses received from omp@openmp.org, this appears to be within spec,
5416 which makes sense, since that's how several other compilers handle
5417 this situation as well.
5418 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5419 expanding. STORED_VAL is the operand of the matching
5420 GIMPLE_OMP_ATOMIC_STORE.
5422 We replace
5423 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5424 loaded_val = *addr;
5426 and replace
5427 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5428 *addr = stored_val;
5431 static bool
5432 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5433 tree addr, tree loaded_val, tree stored_val)
5435 gimple_stmt_iterator si;
5436 gimple stmt;
5437 tree t;
5439 si = gsi_last_bb (load_bb);
5440 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5442 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
5443 t = build_call_expr (t, 0);
5444 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5446 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
5447 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5448 gsi_remove (&si, true);
5450 si = gsi_last_bb (store_bb);
5451 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5453 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5454 stored_val);
5455 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5457 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
5458 t = build_call_expr (t, 0);
5459 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5460 gsi_remove (&si, true);
5462 if (gimple_in_ssa_p (cfun))
5463 update_ssa (TODO_update_ssa_no_phi);
5464 return true;
5467 /* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand
5468 using expand_omp_atomic_fetch_op. If that fails, we try to
5469 call expand_omp_atomic_pipeline, and if that fails too, the
5470 ultimate fallback is wrapping the operation in a mutex
5471 (expand_omp_atomic_mutex). REGION is the atomic region built
5472 by build_omp_regions_1(). */
5474 static void
5475 expand_omp_atomic (struct omp_region *region)
5477 basic_block load_bb = region->entry, store_bb = region->exit;
5478 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5479 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5480 tree addr = gimple_omp_atomic_load_rhs (load);
5481 tree stored_val = gimple_omp_atomic_store_val (store);
5482 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5483 HOST_WIDE_INT index;
5485 /* Make sure the type is one of the supported sizes. */
5486 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5487 index = exact_log2 (index);
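/* Editorial note, not part of the original sources: INDEX is now the
   log2 of the access size, e.g. 2 for a 4-byte int; the test below
   accepts only the 1-, 2-, 4-, 8- and 16-byte cases.  */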
5488 if (index >= 0 && index <= 4)
5490 unsigned int align = TYPE_ALIGN_UNIT (type);
5492 /* __sync builtins require strict data alignment. */
5493 if (exact_log2 (align) >= index)
5495 /* Atomic load. */
5496 if (loaded_val == stored_val
5497 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5498 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5499 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5500 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
5501 return;
5503 /* Atomic store. */
5504 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5505 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5506 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5507 && store_bb == single_succ (load_bb)
5508 && first_stmt (store_bb) == store
5509 && expand_omp_atomic_store (load_bb, addr, loaded_val,
5510 stored_val, index))
5511 return;
5513 /* When possible, use specialized atomic update functions. */
5514 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5515 && store_bb == single_succ (load_bb)
5516 && expand_omp_atomic_fetch_op (load_bb, addr,
5517 loaded_val, stored_val, index))
5518 return;
5520 /* If we don't have specialized __sync builtins, try to implement
5521 it as a compare and swap loop. */
5522 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5523 loaded_val, stored_val, index))
5524 return;
5528 /* The ultimate fallback is wrapping the operation in a mutex. */
5529 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5533 /* Expand the parallel region tree rooted at REGION. Expansion
5534 proceeds in depth-first order. Innermost regions are expanded
5535 first. This way, parallel regions that require a new function to
5536 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5537 internal dependencies in their body. */
5539 static void
5540 expand_omp (struct omp_region *region)
5542 while (region)
5544 location_t saved_location;
5546 /* First, determine whether this is a combined parallel+workshare
5547 region. */
5548 if (region->type == GIMPLE_OMP_PARALLEL)
5549 determine_parallel_type (region);
5551 if (region->inner)
5552 expand_omp (region->inner);
5554 saved_location = input_location;
5555 if (gimple_has_location (last_stmt (region->entry)))
5556 input_location = gimple_location (last_stmt (region->entry));
5558 switch (region->type)
5560 case GIMPLE_OMP_PARALLEL:
5561 case GIMPLE_OMP_TASK:
5562 expand_omp_taskreg (region);
5563 break;
5565 case GIMPLE_OMP_FOR:
5566 expand_omp_for (region);
5567 break;
5569 case GIMPLE_OMP_SECTIONS:
5570 expand_omp_sections (region);
5571 break;
5573 case GIMPLE_OMP_SECTION:
5574 /* Individual omp sections are handled together with their
5575 parent GIMPLE_OMP_SECTIONS region. */
5576 break;
5578 case GIMPLE_OMP_SINGLE:
5579 expand_omp_single (region);
5580 break;
5582 case GIMPLE_OMP_MASTER:
5583 case GIMPLE_OMP_ORDERED:
5584 case GIMPLE_OMP_CRITICAL:
5585 expand_omp_synch (region);
5586 break;
5588 case GIMPLE_OMP_ATOMIC_LOAD:
5589 expand_omp_atomic (region);
5590 break;
5592 default:
5593 gcc_unreachable ();
5596 input_location = saved_location;
5597 region = region->next;
5602 /* Helper for build_omp_regions. Scan the dominator tree starting at
5603 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5604 true, the function ends once a single tree is built (otherwise, a whole
5605 forest of OMP constructs may be built). */
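/* Editorial illustration, not part of the original sources: for

     #pragma omp parallel
     #pragma omp for
     for (...) ...

   this builds a GIMPLE_OMP_PARALLEL region whose inner region is the
   GIMPLE_OMP_FOR; each construct's GIMPLE_OMP_RETURN marks the exit
   block of the corresponding region.  */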
5607 static void
5608 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5609 bool single_tree)
5611 gimple_stmt_iterator gsi;
5612 gimple stmt;
5613 basic_block son;
5615 gsi = gsi_last_bb (bb);
5616 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5618 struct omp_region *region;
5619 enum gimple_code code;
5621 stmt = gsi_stmt (gsi);
5622 code = gimple_code (stmt);
5623 if (code == GIMPLE_OMP_RETURN)
5625 /* STMT is the return point out of region PARENT. Mark it
5626 as the exit point and make PARENT the immediately
5627 enclosing region. */
5628 gcc_assert (parent);
5629 region = parent;
5630 region->exit = bb;
5631 parent = parent->outer;
5633 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5635 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5636 GIMPLE_OMP_RETURN, but matches with
5637 GIMPLE_OMP_ATOMIC_LOAD. */
5638 gcc_assert (parent);
5639 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5640 region = parent;
5641 region->exit = bb;
5642 parent = parent->outer;
5645 else if (code == GIMPLE_OMP_CONTINUE)
5647 gcc_assert (parent);
5648 parent->cont = bb;
5650 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5652 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5653 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5656 else
5658 /* Otherwise, this directive becomes the parent for a new
5659 region. */
5660 region = new_omp_region (bb, code, parent);
5661 parent = region;
5665 if (single_tree && !parent)
5666 return;
5668 for (son = first_dom_son (CDI_DOMINATORS, bb);
5669 son;
5670 son = next_dom_son (CDI_DOMINATORS, son))
5671 build_omp_regions_1 (son, parent, single_tree);
5674 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5675 root_omp_region. */
5677 static void
5678 build_omp_regions_root (basic_block root)
5680 gcc_assert (root_omp_region == NULL);
5681 build_omp_regions_1 (root, NULL, true);
5682 gcc_assert (root_omp_region != NULL);
5685 /* Expands the omp construct (and its subconstructs) starting in HEAD. */
5687 void
5688 omp_expand_local (basic_block head)
5690 build_omp_regions_root (head);
5691 if (dump_file && (dump_flags & TDF_DETAILS))
5693 fprintf (dump_file, "\nOMP region tree\n\n");
5694 dump_omp_region (dump_file, root_omp_region, 0);
5695 fprintf (dump_file, "\n");
5698 remove_exit_barriers (root_omp_region);
5699 expand_omp (root_omp_region);
5701 free_omp_regions ();
5704 /* Scan the CFG and build a tree of OMP regions. The root of the
5705 tree is stored in root_omp_region. */
5707 static void
5708 build_omp_regions (void)
5710 gcc_assert (root_omp_region == NULL);
5711 calculate_dominance_info (CDI_DOMINATORS);
5712 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5715 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5717 static unsigned int
5718 execute_expand_omp (void)
5720 build_omp_regions ();
5722 if (!root_omp_region)
5723 return 0;
5725 if (dump_file)
5727 fprintf (dump_file, "\nOMP region tree\n\n");
5728 dump_omp_region (dump_file, root_omp_region, 0);
5729 fprintf (dump_file, "\n");
5732 remove_exit_barriers (root_omp_region);
5734 expand_omp (root_omp_region);
5736 cleanup_tree_cfg ();
5738 free_omp_regions ();
5740 return 0;
5743 /* OMP expansion -- the default pass, run before creation of SSA form. */
5745 static bool
5746 gate_expand_omp (void)
5748 return (flag_openmp != 0 && !seen_error ());
5751 struct gimple_opt_pass pass_expand_omp =
5754 GIMPLE_PASS,
5755 "ompexp", /* name */
5756 gate_expand_omp, /* gate */
5757 execute_expand_omp, /* execute */
5758 NULL, /* sub */
5759 NULL, /* next */
5760 0, /* static_pass_number */
5761 TV_NONE, /* tv_id */
5762 PROP_gimple_any, /* properties_required */
5763 0, /* properties_provided */
5764 0, /* properties_destroyed */
5765 0, /* todo_flags_start */
5766 0 /* todo_flags_finish */
5770 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5772 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5773 CTX is the enclosing OMP context for the current statement. */
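/* A rough sketch of the lowered form built below (illustrative only,
   not exact GIMPLE):

     <ilist: input clause setup>
     GIMPLE_OMP_SECTIONS <clauses, control variable ".section">
     GIMPLE_OMP_SECTIONS_SWITCH
     GIMPLE_BIND holding each GIMPLE_OMP_SECTION, its lowered body and a
       GIMPLE_OMP_RETURN; lastprivate handling is appended to the last
       section
     GIMPLE_OMP_CONTINUE (.section, .section)
     <olist: reductions>  <dlist: lastprivate/destructor code>
     GIMPLE_OMP_RETURN (nowait if the NOWAIT clause is present)  */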
5775 static void
5776 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5778 tree block, control;
5779 gimple_stmt_iterator tgsi;
5780 gimple stmt, new_stmt, bind, t;
5781 gimple_seq ilist, dlist, olist, new_body;
5782 struct gimplify_ctx gctx;
5784 stmt = gsi_stmt (*gsi_p);
5786 push_gimplify_context (&gctx);
5788 dlist = NULL;
5789 ilist = NULL;
5790 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5791 &ilist, &dlist, ctx);
5793 new_body = gimple_omp_body (stmt);
5794 gimple_omp_set_body (stmt, NULL);
5795 tgsi = gsi_start (new_body);
5796 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
5798 omp_context *sctx;
5799 gimple sec_start;
5801 sec_start = gsi_stmt (tgsi);
5802 sctx = maybe_lookup_ctx (sec_start);
5803 gcc_assert (sctx);
5805 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
5806 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
5807 GSI_CONTINUE_LINKING);
5808 gimple_omp_set_body (sec_start, NULL);
5810 if (gsi_one_before_end_p (tgsi))
5812 gimple_seq l = NULL;
5813 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5814 &l, ctx);
5815 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
5816 gimple_omp_section_set_last (sec_start);
5819 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
5820 GSI_CONTINUE_LINKING);
5823 block = make_node (BLOCK);
5824 bind = gimple_build_bind (NULL, new_body, block);
5826 olist = NULL;
5827 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5829 block = make_node (BLOCK);
5830 new_stmt = gimple_build_bind (NULL, NULL, block);
5831 gsi_replace (gsi_p, new_stmt, true);
5833 pop_gimplify_context (new_stmt);
5834 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5835 BLOCK_VARS (block) = gimple_bind_vars (bind);
5836 if (BLOCK_VARS (block))
5837 TREE_USED (block) = 1;
5839 new_body = NULL;
5840 gimple_seq_add_seq (&new_body, ilist);
5841 gimple_seq_add_stmt (&new_body, stmt);
5842 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5843 gimple_seq_add_stmt (&new_body, bind);
5845 control = create_tmp_var (unsigned_type_node, ".section");
5846 t = gimple_build_omp_continue (control, control);
5847 gimple_omp_sections_set_control (stmt, control);
5848 gimple_seq_add_stmt (&new_body, t);
5850 gimple_seq_add_seq (&new_body, olist);
5851 gimple_seq_add_seq (&new_body, dlist);
5853 new_body = maybe_catch_exception (new_body);
5855 t = gimple_build_omp_return
5856 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5857 OMP_CLAUSE_NOWAIT));
5858 gimple_seq_add_stmt (&new_body, t);
5860 gimple_bind_set_body (new_stmt, new_body);
5864 /* A subroutine of lower_omp_single. Expand the simple form of
5865 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5867 if (GOMP_single_start ())
5868 BODY;
5869 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5871 FIXME. It may be better to delay expanding the logic of this until
5872 pass_expand_omp. The expanded logic may make the job more difficult
5873 for a synchronization analysis pass. */
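/* In GIMPLE form this is emitted roughly as (illustrative only; the
   temporary and label names are made up):

     D.1 = GOMP_single_start ();
     if (D.1 == 1) goto tlabel; else goto flabel;
     tlabel:
     BODY;
     flabel:

   The barrier itself is not emitted here; lower_omp_single adds a
   GIMPLE_OMP_RETURN whose nowait flag controls it later.  */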
5875 static void
5876 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5878 location_t loc = gimple_location (single_stmt);
5879 tree tlabel = create_artificial_label (loc);
5880 tree flabel = create_artificial_label (loc);
5881 gimple call, cond;
5882 tree lhs, decl;
5884 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
5885 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5886 call = gimple_build_call (decl, 0);
5887 gimple_call_set_lhs (call, lhs);
5888 gimple_seq_add_stmt (pre_p, call);
5890 cond = gimple_build_cond (EQ_EXPR, lhs,
5891 fold_convert_loc (loc, TREE_TYPE (lhs),
5892 boolean_true_node),
5893 tlabel, flabel);
5894 gimple_seq_add_stmt (pre_p, cond);
5895 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5896 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5897 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5901 /* A subroutine of lower_omp_single. Expand the simple form of
5902 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
5904 #pragma omp single copyprivate (a, b, c)
5906 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5909 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5911 BODY;
5912 copyout.a = a;
5913 copyout.b = b;
5914 copyout.c = c;
5915 GOMP_single_copy_end (&copyout);
5917 else
5919 a = copyout_p->a;
5920 b = copyout_p->b;
5921 c = copyout_p->c;
5923 GOMP_barrier ();
5926 FIXME. It may be better to delay expanding the logic of this until
5927 pass_expand_omp. The expanded logic may make the job more difficult
5928 for a synchronization analysis pass. */
5930 static void
5931 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5933 tree ptr_type, t, l0, l1, l2, bfn_decl;
5934 gimple_seq copyin_seq;
5935 location_t loc = gimple_location (single_stmt);
5937 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5939 ptr_type = build_pointer_type (ctx->record_type);
5940 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5942 l0 = create_artificial_label (loc);
5943 l1 = create_artificial_label (loc);
5944 l2 = create_artificial_label (loc);
5946 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
5947 t = build_call_expr_loc (loc, bfn_decl, 0);
5948 t = fold_convert_loc (loc, ptr_type, t);
5949 gimplify_assign (ctx->receiver_decl, t, pre_p);
5951 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5952 build_int_cst (ptr_type, 0));
5953 t = build3 (COND_EXPR, void_type_node, t,
5954 build_and_jump (&l0), build_and_jump (&l1));
5955 gimplify_and_add (t, pre_p);
5957 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5959 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5961 copyin_seq = NULL;
5962 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5963 &copyin_seq, ctx);
5965 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
5966 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
5967 t = build_call_expr_loc (loc, bfn_decl, 1, t);
5968 gimplify_and_add (t, pre_p);
5970 t = build_and_jump (&l2);
5971 gimplify_and_add (t, pre_p);
5973 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5975 gimple_seq_add_seq (pre_p, copyin_seq);
5977 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5981 /* Expand code for an OpenMP single directive. */
5983 static void
5984 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5986 tree block;
5987 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5988 gimple_seq bind_body, dlist;
5989 struct gimplify_ctx gctx;
5991 push_gimplify_context (&gctx);
5993 block = make_node (BLOCK);
5994 bind = gimple_build_bind (NULL, NULL, block);
5995 gsi_replace (gsi_p, bind, true);
5996 bind_body = NULL;
5997 dlist = NULL;
5998 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5999 &bind_body, &dlist, ctx);
6000 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
6002 gimple_seq_add_stmt (&bind_body, single_stmt);
6004 if (ctx->record_type)
6005 lower_omp_single_copy (single_stmt, &bind_body, ctx);
6006 else
6007 lower_omp_single_simple (single_stmt, &bind_body);
6009 gimple_omp_set_body (single_stmt, NULL);
6011 gimple_seq_add_seq (&bind_body, dlist);
6013 bind_body = maybe_catch_exception (bind_body);
6015 t = gimple_build_omp_return
6016 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
6017 OMP_CLAUSE_NOWAIT));
6018 gimple_seq_add_stmt (&bind_body, t);
6019 gimple_bind_set_body (bind, bind_body);
6021 pop_gimplify_context (bind);
6023 gimple_bind_append_vars (bind, ctx->block_vars);
6024 BLOCK_VARS (block) = ctx->block_vars;
6025 if (BLOCK_VARS (block))
6026 TREE_USED (block) = 1;
6030 /* Expand code for an OpenMP master directive. */
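/* Roughly (illustrative only), the master body is guarded by a check of
   the calling thread:

     if (omp_get_thread_num () != 0) goto lab;
     BODY;
     lab:

   followed by a GIMPLE_OMP_RETURN region exit marker.  */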
6032 static void
6033 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6035 tree block, lab = NULL, x, bfn_decl;
6036 gimple stmt = gsi_stmt (*gsi_p), bind;
6037 location_t loc = gimple_location (stmt);
6038 gimple_seq tseq;
6039 struct gimplify_ctx gctx;
6041 push_gimplify_context (&gctx);
6043 block = make_node (BLOCK);
6044 bind = gimple_build_bind (NULL, NULL, block);
6045 gsi_replace (gsi_p, bind, true);
6046 gimple_bind_add_stmt (bind, stmt);
6048 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
6049 x = build_call_expr_loc (loc, bfn_decl, 0);
6050 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
6051 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
6052 tseq = NULL;
6053 gimplify_and_add (x, &tseq);
6054 gimple_bind_add_seq (bind, tseq);
6056 lower_omp (gimple_omp_body_ptr (stmt), ctx);
6057 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6058 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6059 gimple_omp_set_body (stmt, NULL);
6061 gimple_bind_add_stmt (bind, gimple_build_label (lab));
6063 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6065 pop_gimplify_context (bind);
6067 gimple_bind_append_vars (bind, ctx->block_vars);
6068 BLOCK_VARS (block) = ctx->block_vars;
6072 /* Expand code for an OpenMP ordered directive. */
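/* Roughly (illustrative only), the body is bracketed by runtime calls:

     GOMP_ordered_start ();
     BODY;
     GOMP_ordered_end ();

   followed by a GIMPLE_OMP_RETURN region exit marker.  */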
6074 static void
6075 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6077 tree block;
6078 gimple stmt = gsi_stmt (*gsi_p), bind, x;
6079 struct gimplify_ctx gctx;
6081 push_gimplify_context (&gctx);
6083 block = make_node (BLOCK);
6084 bind = gimple_build_bind (NULL, NULL, block);
6085 gsi_replace (gsi_p, bind, true);
6086 gimple_bind_add_stmt (bind, stmt);
6088 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START), 0);
6090 gimple_bind_add_stmt (bind, x);
6092 lower_omp (gimple_omp_body_ptr (stmt), ctx);
6093 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6094 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6095 gimple_omp_set_body (stmt, NULL);
6097 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
6098 gimple_bind_add_stmt (bind, x);
6100 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6102 pop_gimplify_context (bind);
6104 gimple_bind_append_vars (bind, ctx->block_vars);
6105 BLOCK_VARS (block) = gimple_bind_vars (bind);
6109 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
6110 substitution of a couple of function calls. But the NAMED case
6111 requires that languages coordinate a symbol name. It is therefore
6112 best put here in common code. */
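/* Roughly (illustrative only), an unnamed critical lowers to

     GOMP_critical_start (); BODY; GOMP_critical_end ();

   while "#pragma omp critical (foo)" uses the *_name_start/_name_end
   entry points with the address of a public, common pointer-sized
   variable named ".gomp_critical_user_foo" ("foo" here is made up).  */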
6114 static GTY((param1_is (tree), param2_is (tree)))
6115 splay_tree critical_name_mutexes;
6117 static void
6118 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6120 tree block;
6121 tree name, lock, unlock;
6122 gimple stmt = gsi_stmt (*gsi_p), bind;
6123 location_t loc = gimple_location (stmt);
6124 gimple_seq tbody;
6125 struct gimplify_ctx gctx;
6127 name = gimple_omp_critical_name (stmt);
6128 if (name)
6130 tree decl;
6131 splay_tree_node n;
6133 if (!critical_name_mutexes)
6134 critical_name_mutexes
6135 = splay_tree_new_ggc (splay_tree_compare_pointers,
6136 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
6137 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
6139 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
6140 if (n == NULL)
6142 char *new_str;
6144 decl = create_tmp_var_raw (ptr_type_node, NULL);
6146 new_str = ACONCAT ((".gomp_critical_user_",
6147 IDENTIFIER_POINTER (name), NULL));
6148 DECL_NAME (decl) = get_identifier (new_str);
6149 TREE_PUBLIC (decl) = 1;
6150 TREE_STATIC (decl) = 1;
6151 DECL_COMMON (decl) = 1;
6152 DECL_ARTIFICIAL (decl) = 1;
6153 DECL_IGNORED_P (decl) = 1;
6154 varpool_finalize_decl (decl);
6156 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
6157 (splay_tree_value) decl);
6159 else
6160 decl = (tree) n->value;
6162 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
6163 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
6165 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
6166 unlock = build_call_expr_loc (loc, unlock, 1,
6167 build_fold_addr_expr_loc (loc, decl));
6169 else
6171 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
6172 lock = build_call_expr_loc (loc, lock, 0);
6174 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
6175 unlock = build_call_expr_loc (loc, unlock, 0);
6178 push_gimplify_context (&gctx);
6180 block = make_node (BLOCK);
6181 bind = gimple_build_bind (NULL, NULL, block);
6182 gsi_replace (gsi_p, bind, true);
6183 gimple_bind_add_stmt (bind, stmt);
6185 tbody = gimple_bind_body (bind);
6186 gimplify_and_add (lock, &tbody);
6187 gimple_bind_set_body (bind, tbody);
6189 lower_omp (gimple_omp_body_ptr (stmt), ctx);
6190 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6191 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6192 gimple_omp_set_body (stmt, NULL);
6194 tbody = gimple_bind_body (bind);
6195 gimplify_and_add (unlock, &tbody);
6196 gimple_bind_set_body (bind, tbody);
6198 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6200 pop_gimplify_context (bind);
6201 gimple_bind_append_vars (bind, ctx->block_vars);
6202 BLOCK_VARS (block) = gimple_bind_vars (bind);
6206 /* A subroutine of lower_omp_for. Generate code to emit the predicate
6207 for a lastprivate clause. Given a loop control predicate of (V
6208 cond N2), we gate the clause on (!(V cond N2)). The lowered form
6209 is appended to *DLIST, iterator initialization is appended to
6210 *BODY_P. */
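/* For example (illustrative only), with a loop of the form
   "for (V = N1; V < N2; V = V + STEP)" the lastprivate code is guarded
   by "if (V >= N2)"; when STEP is known to be 1 or -1, the stricter
   guard "if (V == N2)" is used instead.  */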
6212 static void
6213 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
6214 gimple_seq *dlist, struct omp_context *ctx)
6216 tree clauses, cond, vinit;
6217 enum tree_code cond_code;
6218 gimple_seq stmts;
6220 cond_code = fd->loop.cond_code;
6221 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6223 /* When possible, use a strict equality expression. This can let
6224 VRP-type optimizations deduce the value and remove a copy. */
6225 if (host_integerp (fd->loop.step, 0))
6227 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
6228 if (step == 1 || step == -1)
6229 cond_code = EQ_EXPR;
6232 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
6234 clauses = gimple_omp_for_clauses (fd->for_stmt);
6235 stmts = NULL;
6236 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
6237 if (!gimple_seq_empty_p (stmts))
6239 gimple_seq_add_seq (&stmts, *dlist);
6240 *dlist = stmts;
6242 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
6243 vinit = fd->loop.n1;
6244 if (cond_code == EQ_EXPR
6245 && host_integerp (fd->loop.n2, 0)
6246 && ! integer_zerop (fd->loop.n2))
6247 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
6249 /* Initialize the iterator variable, so that threads that don't execute
6250 any iterations don't execute the lastprivate clauses by accident. */
6251 gimplify_assign (fd->loop.v, vinit, body_p);
6256 /* Lower code for an OpenMP loop directive. */
6258 static void
6259 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6261 tree *rhs_p, block;
6262 struct omp_for_data fd;
6263 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6264 gimple_seq omp_for_body, body, dlist;
6265 size_t i;
6266 struct gimplify_ctx gctx;
6268 push_gimplify_context (&gctx);
6270 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
6271 lower_omp (gimple_omp_body_ptr (stmt), ctx);
6273 block = make_node (BLOCK);
6274 new_stmt = gimple_build_bind (NULL, NULL, block);
6275 /* Replace at gsi right away, so that 'stmt' is no longer a member
6276 of a sequence, as we're going to add it to a different
6277 one below. */
6278 gsi_replace (gsi_p, new_stmt, true);
6280 /* Move declarations of temporaries in the loop body before we make
6281 it go away. */
6282 omp_for_body = gimple_omp_body (stmt);
6283 if (!gimple_seq_empty_p (omp_for_body)
6284 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6286 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6287 gimple_bind_append_vars (new_stmt, vars);
6290 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
6291 dlist = NULL;
6292 body = NULL;
6293 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6294 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6296 /* Lower the header expressions. At this point, we can assume that
6297 the header is of the form:
6299 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6301 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6302 using the .omp_data_s mapping, if needed. */
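/* For example (illustrative only; the temporary name is made up), if
   VAL2 is the non-invariant expression "n * 4", it is replaced by a
   formal temporary:

     D.2 = n * 4;
     #pragma omp for (i = 0; i < D.2; i = i + 1)  */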
6303 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6305 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6306 if (!is_gimple_min_invariant (*rhs_p))
6307 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6309 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6310 if (!is_gimple_min_invariant (*rhs_p))
6311 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6313 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6314 if (!is_gimple_min_invariant (*rhs_p))
6315 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6318 /* Once lowered, extract the bounds and clauses. */
6319 extract_omp_for_data (stmt, &fd, NULL);
6321 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6323 gimple_seq_add_stmt (&body, stmt);
6324 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6326 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6327 fd.loop.v));
6329 /* After the loop, add exit clauses. */
6330 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6331 gimple_seq_add_seq (&body, dlist);
6333 body = maybe_catch_exception (body);
6335 /* Region exit marker goes at the end of the loop body. */
6336 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6338 pop_gimplify_context (new_stmt);
6340 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6341 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6342 if (BLOCK_VARS (block))
6343 TREE_USED (block) = 1;
6345 gimple_bind_set_body (new_stmt, body);
6346 gimple_omp_set_body (stmt, NULL);
6347 gimple_omp_for_set_pre_body (stmt, NULL);
6350 /* Callback for walk_stmts. Check if the current statement only contains
6351 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
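/* *INFO (WI->info) starts at 0, becomes 1 when exactly one workshare
   statement has been seen, and -1 otherwise; lower_omp_taskreg marks
   the parallel as combined only when the final count is 1.  */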
6353 static tree
6354 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6355 bool *handled_ops_p,
6356 struct walk_stmt_info *wi)
6358 int *info = (int *) wi->info;
6359 gimple stmt = gsi_stmt (*gsi_p);
6361 *handled_ops_p = true;
6362 switch (gimple_code (stmt))
6364 WALK_SUBSTMTS;
6366 case GIMPLE_OMP_FOR:
6367 case GIMPLE_OMP_SECTIONS:
6368 *info = *info == 0 ? 1 : -1;
6369 break;
6370 default:
6371 *info = -1;
6372 break;
6374 return NULL;
6377 struct omp_taskcopy_context
6379 /* This field must be at the beginning, as we do "inheritance": Some
6380 callback functions for tree-inline.c (e.g., omp_copy_decl)
6381 receive a copy_body_data pointer that is up-casted to an
6382 omp_context pointer. */
6383 copy_body_data cb;
6384 omp_context *ctx;
6387 static tree
6388 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6390 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6392 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6393 return create_tmp_var (TREE_TYPE (var), NULL);
6395 return var;
6398 static tree
6399 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6401 tree name, new_fields = NULL, type, f;
6403 type = lang_hooks.types.make_type (RECORD_TYPE);
6404 name = DECL_NAME (TYPE_NAME (orig_type));
6405 name = build_decl (gimple_location (tcctx->ctx->stmt),
6406 TYPE_DECL, name, type);
6407 TYPE_NAME (type) = name;
6409 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6411 tree new_f = copy_node (f);
6412 DECL_CONTEXT (new_f) = type;
6413 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6414 TREE_CHAIN (new_f) = new_fields;
6415 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6416 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6417 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6418 &tcctx->cb, NULL);
6419 new_fields = new_f;
6420 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6422 TYPE_FIELDS (type) = nreverse (new_fields);
6423 layout_type (type);
6424 return type;
6427 /* Create task copyfn. */
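/* The generated copyfn receives two pointer arguments: ARG points to
   the destination record, SARG to the source record the fields are
   copied from.  A rough outline of the passes below: initialize
   size/offset temporaries, copy shared variable pointers, copy
   construct non-VLA firstprivate variables, then handle variable-sized
   firstprivate variables last.  */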
6429 static void
6430 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6432 struct function *child_cfun;
6433 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6434 tree record_type, srecord_type, bind, list;
6435 bool record_needs_remap = false, srecord_needs_remap = false;
6436 splay_tree_node n;
6437 struct omp_taskcopy_context tcctx;
6438 struct gimplify_ctx gctx;
6439 location_t loc = gimple_location (task_stmt);
6441 child_fn = gimple_omp_task_copy_fn (task_stmt);
6442 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6443 gcc_assert (child_cfun->cfg == NULL);
6444 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6446 /* Reset DECL_CONTEXT on function arguments. */
6447 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
6448 DECL_CONTEXT (t) = child_fn;
6450 /* Populate the function. */
6451 push_gimplify_context (&gctx);
6452 push_cfun (child_cfun);
6454 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6455 TREE_SIDE_EFFECTS (bind) = 1;
6456 list = NULL;
6457 DECL_SAVED_TREE (child_fn) = bind;
6458 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6460 /* Remap src and dst argument types if needed. */
6461 record_type = ctx->record_type;
6462 srecord_type = ctx->srecord_type;
6463 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
6464 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6466 record_needs_remap = true;
6467 break;
6469 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
6470 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6472 srecord_needs_remap = true;
6473 break;
6476 if (record_needs_remap || srecord_needs_remap)
6478 memset (&tcctx, '\0', sizeof (tcctx));
6479 tcctx.cb.src_fn = ctx->cb.src_fn;
6480 tcctx.cb.dst_fn = child_fn;
6481 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
6482 gcc_checking_assert (tcctx.cb.src_node);
6483 tcctx.cb.dst_node = tcctx.cb.src_node;
6484 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6485 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6486 tcctx.cb.eh_lp_nr = 0;
6487 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6488 tcctx.cb.decl_map = pointer_map_create ();
6489 tcctx.ctx = ctx;
6491 if (record_needs_remap)
6492 record_type = task_copyfn_remap_type (&tcctx, record_type);
6493 if (srecord_needs_remap)
6494 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6496 else
6497 tcctx.cb.decl_map = NULL;
6499 arg = DECL_ARGUMENTS (child_fn);
6500 TREE_TYPE (arg) = build_pointer_type (record_type);
6501 sarg = DECL_CHAIN (arg);
6502 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6504 /* First pass: initialize temporaries used in record_type and srecord_type
6505 sizes and field offsets. */
6506 if (tcctx.cb.decl_map)
6507 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6508 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6510 tree *p;
6512 decl = OMP_CLAUSE_DECL (c);
6513 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6514 if (p == NULL)
6515 continue;
6516 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6517 sf = (tree) n->value;
6518 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6519 src = build_simple_mem_ref_loc (loc, sarg);
6520 src = omp_build_component_ref (src, sf);
6521 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6522 append_to_statement_list (t, &list);
6525 /* Second pass: copy shared var pointers and copy construct non-VLA
6526 firstprivate vars. */
6527 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6528 switch (OMP_CLAUSE_CODE (c))
6530 case OMP_CLAUSE_SHARED:
6531 decl = OMP_CLAUSE_DECL (c);
6532 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6533 if (n == NULL)
6534 break;
6535 f = (tree) n->value;
6536 if (tcctx.cb.decl_map)
6537 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6538 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6539 sf = (tree) n->value;
6540 if (tcctx.cb.decl_map)
6541 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6542 src = build_simple_mem_ref_loc (loc, sarg);
6543 src = omp_build_component_ref (src, sf);
6544 dst = build_simple_mem_ref_loc (loc, arg);
6545 dst = omp_build_component_ref (dst, f);
6546 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6547 append_to_statement_list (t, &list);
6548 break;
6549 case OMP_CLAUSE_FIRSTPRIVATE:
6550 decl = OMP_CLAUSE_DECL (c);
6551 if (is_variable_sized (decl))
6552 break;
6553 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6554 if (n == NULL)
6555 break;
6556 f = (tree) n->value;
6557 if (tcctx.cb.decl_map)
6558 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6559 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6560 if (n != NULL)
6562 sf = (tree) n->value;
6563 if (tcctx.cb.decl_map)
6564 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6565 src = build_simple_mem_ref_loc (loc, sarg);
6566 src = omp_build_component_ref (src, sf);
6567 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6568 src = build_simple_mem_ref_loc (loc, src);
6570 else
6571 src = decl;
6572 dst = build_simple_mem_ref_loc (loc, arg);
6573 dst = omp_build_component_ref (dst, f);
6574 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6575 append_to_statement_list (t, &list);
6576 break;
6577 case OMP_CLAUSE_PRIVATE:
6578 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6579 break;
6580 decl = OMP_CLAUSE_DECL (c);
6581 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6582 f = (tree) n->value;
6583 if (tcctx.cb.decl_map)
6584 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6585 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6586 if (n != NULL)
6588 sf = (tree) n->value;
6589 if (tcctx.cb.decl_map)
6590 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6591 src = build_simple_mem_ref_loc (loc, sarg);
6592 src = omp_build_component_ref (src, sf);
6593 if (use_pointer_for_field (decl, NULL))
6594 src = build_simple_mem_ref_loc (loc, src);
6596 else
6597 src = decl;
6598 dst = build_simple_mem_ref_loc (loc, arg);
6599 dst = omp_build_component_ref (dst, f);
6600 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6601 append_to_statement_list (t, &list);
6602 break;
6603 default:
6604 break;
6607 /* Last pass: handle VLA firstprivates. */
6608 if (tcctx.cb.decl_map)
6609 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6610 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6612 tree ind, ptr, df;
6614 decl = OMP_CLAUSE_DECL (c);
6615 if (!is_variable_sized (decl))
6616 continue;
6617 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6618 if (n == NULL)
6619 continue;
6620 f = (tree) n->value;
6621 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6622 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6623 ind = DECL_VALUE_EXPR (decl);
6624 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6625 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6626 n = splay_tree_lookup (ctx->sfield_map,
6627 (splay_tree_key) TREE_OPERAND (ind, 0));
6628 sf = (tree) n->value;
6629 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6630 src = build_simple_mem_ref_loc (loc, sarg);
6631 src = omp_build_component_ref (src, sf);
6632 src = build_simple_mem_ref_loc (loc, src);
6633 dst = build_simple_mem_ref_loc (loc, arg);
6634 dst = omp_build_component_ref (dst, f);
6635 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6636 append_to_statement_list (t, &list);
6637 n = splay_tree_lookup (ctx->field_map,
6638 (splay_tree_key) TREE_OPERAND (ind, 0));
6639 df = (tree) n->value;
6640 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6641 ptr = build_simple_mem_ref_loc (loc, arg);
6642 ptr = omp_build_component_ref (ptr, df);
6643 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6644 build_fold_addr_expr_loc (loc, dst));
6645 append_to_statement_list (t, &list);
6648 t = build1 (RETURN_EXPR, void_type_node, NULL);
6649 append_to_statement_list (t, &list);
6651 if (tcctx.cb.decl_map)
6652 pointer_map_destroy (tcctx.cb.decl_map);
6653 pop_gimplify_context (NULL);
6654 BIND_EXPR_BODY (bind) = list;
6655 pop_cfun ();
6658 /* Lower the OpenMP parallel or task directive in the current statement
6659 in GSI_P. CTX holds context information for the directive. */
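/* A rough sketch of the replacement built below (illustrative only):

     GIMPLE_BIND {
       <ilist: send clauses filling the .omp_data_o record>
       GIMPLE_OMP_PARALLEL/TASK <clauses, data arg = .omp_data_o>
         receiver_decl = &.omp_data_o;   (only if ctx->record_type)
         <par_ilist>  <lowered body>  <par_olist>
         GIMPLE_OMP_RETURN
       <olist: send clause copy-back>
     }  */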
6661 static void
6662 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6664 tree clauses;
6665 tree child_fn, t;
6666 gimple stmt = gsi_stmt (*gsi_p);
6667 gimple par_bind, bind;
6668 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6669 struct gimplify_ctx gctx;
6670 location_t loc = gimple_location (stmt);
6672 clauses = gimple_omp_taskreg_clauses (stmt);
6673 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6674 par_body = gimple_bind_body (par_bind);
6675 child_fn = ctx->cb.dst_fn;
6676 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6677 && !gimple_omp_parallel_combined_p (stmt))
6679 struct walk_stmt_info wi;
6680 int ws_num = 0;
6682 memset (&wi, 0, sizeof (wi));
6683 wi.info = &ws_num;
6684 wi.val_only = true;
6685 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6686 if (ws_num == 1)
6687 gimple_omp_parallel_set_combined_p (stmt, true);
6689 if (ctx->srecord_type)
6690 create_task_copyfn (stmt, ctx);
6692 push_gimplify_context (&gctx);
6694 par_olist = NULL;
6695 par_ilist = NULL;
6696 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6697 lower_omp (&par_body, ctx);
6698 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6699 lower_reduction_clauses (clauses, &par_olist, ctx);
6701 /* Declare all the variables created by mapping and the variables
6702 declared in the scope of the parallel body. */
6703 record_vars_into (ctx->block_vars, child_fn);
6704 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6706 if (ctx->record_type)
6708 ctx->sender_decl
6709 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6710 : ctx->record_type, ".omp_data_o");
6711 DECL_NAMELESS (ctx->sender_decl) = 1;
6712 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6713 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6716 olist = NULL;
6717 ilist = NULL;
6718 lower_send_clauses (clauses, &ilist, &olist, ctx);
6719 lower_send_shared_vars (&ilist, &olist, ctx);
6721 /* Once all the expansions are done, sequence all the different
6722 fragments inside gimple_omp_body. */
6724 new_body = NULL;
6726 if (ctx->record_type)
6728 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6729 /* fixup_child_record_type might have changed receiver_decl's type. */
6730 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
6731 gimple_seq_add_stmt (&new_body,
6732 gimple_build_assign (ctx->receiver_decl, t));
6735 gimple_seq_add_seq (&new_body, par_ilist);
6736 gimple_seq_add_seq (&new_body, par_body);
6737 gimple_seq_add_seq (&new_body, par_olist);
6738 new_body = maybe_catch_exception (new_body);
6739 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6740 gimple_omp_set_body (stmt, new_body);
6742 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6743 gsi_replace (gsi_p, bind, true);
6744 gimple_bind_add_seq (bind, ilist);
6745 gimple_bind_add_stmt (bind, stmt);
6746 gimple_bind_add_seq (bind, olist);
6748 pop_gimplify_context (NULL);
6751 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6752 regimplified. If DATA is non-NULL, lower_omp_1 is outside
6753 of OpenMP context, but with task_shared_vars set. */
6755 static tree
6756 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6757 void *data)
6759 tree t = *tp;
6761 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6762 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6763 return t;
6765 if (task_shared_vars
6766 && DECL_P (t)
6767 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6768 return t;
6770 /* If a global variable has been privatized, TREE_CONSTANT on
6771 ADDR_EXPR might be wrong. */
6772 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6773 recompute_tree_invariant_for_addr_expr (t);
6775 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6776 return NULL_TREE;
6779 static void
6780 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6782 gimple stmt = gsi_stmt (*gsi_p);
6783 struct walk_stmt_info wi;
6785 if (gimple_has_location (stmt))
6786 input_location = gimple_location (stmt);
6788 if (task_shared_vars)
6789 memset (&wi, '\0', sizeof (wi));
6791 /* If we have issued syntax errors, avoid doing any heavy lifting.
6792 Just replace the OpenMP directives with a NOP to avoid
6793 confusing RTL expansion. */
6794 if (seen_error () && is_gimple_omp (stmt))
6796 gsi_replace (gsi_p, gimple_build_nop (), true);
6797 return;
6800 switch (gimple_code (stmt))
6802 case GIMPLE_COND:
6803 if ((ctx || task_shared_vars)
6804 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6805 ctx ? NULL : &wi, NULL)
6806 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6807 ctx ? NULL : &wi, NULL)))
6808 gimple_regimplify_operands (stmt, gsi_p);
6809 break;
6810 case GIMPLE_CATCH:
6811 lower_omp (gimple_catch_handler_ptr (stmt), ctx);
6812 break;
6813 case GIMPLE_EH_FILTER:
6814 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
6815 break;
6816 case GIMPLE_TRY:
6817 lower_omp (gimple_try_eval_ptr (stmt), ctx);
6818 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
6819 break;
6820 case GIMPLE_TRANSACTION:
6821 lower_omp (gimple_transaction_body_ptr (stmt), ctx);
6822 break;
6823 case GIMPLE_BIND:
6824 lower_omp (gimple_bind_body_ptr (stmt), ctx);
6825 break;
6826 case GIMPLE_OMP_PARALLEL:
6827 case GIMPLE_OMP_TASK:
6828 ctx = maybe_lookup_ctx (stmt);
6829 lower_omp_taskreg (gsi_p, ctx);
6830 break;
6831 case GIMPLE_OMP_FOR:
6832 ctx = maybe_lookup_ctx (stmt);
6833 gcc_assert (ctx);
6834 lower_omp_for (gsi_p, ctx);
6835 break;
6836 case GIMPLE_OMP_SECTIONS:
6837 ctx = maybe_lookup_ctx (stmt);
6838 gcc_assert (ctx);
6839 lower_omp_sections (gsi_p, ctx);
6840 break;
6841 case GIMPLE_OMP_SINGLE:
6842 ctx = maybe_lookup_ctx (stmt);
6843 gcc_assert (ctx);
6844 lower_omp_single (gsi_p, ctx);
6845 break;
6846 case GIMPLE_OMP_MASTER:
6847 ctx = maybe_lookup_ctx (stmt);
6848 gcc_assert (ctx);
6849 lower_omp_master (gsi_p, ctx);
6850 break;
6851 case GIMPLE_OMP_ORDERED:
6852 ctx = maybe_lookup_ctx (stmt);
6853 gcc_assert (ctx);
6854 lower_omp_ordered (gsi_p, ctx);
6855 break;
6856 case GIMPLE_OMP_CRITICAL:
6857 ctx = maybe_lookup_ctx (stmt);
6858 gcc_assert (ctx);
6859 lower_omp_critical (gsi_p, ctx);
6860 break;
6861 case GIMPLE_OMP_ATOMIC_LOAD:
6862 if ((ctx || task_shared_vars)
6863 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6864 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6865 gimple_regimplify_operands (stmt, gsi_p);
6866 break;
6867 default:
6868 if ((ctx || task_shared_vars)
6869 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6870 ctx ? NULL : &wi))
6871 gimple_regimplify_operands (stmt, gsi_p);
6872 break;
6876 static void
6877 lower_omp (gimple_seq *body, omp_context *ctx)
6879 location_t saved_location = input_location;
6880 gimple_stmt_iterator gsi;
6881 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
6882 lower_omp_1 (&gsi, ctx);
6883 input_location = saved_location;
6886 /* Main entry point. */
6888 static unsigned int
6889 execute_lower_omp (void)
6891 gimple_seq body;
6893 /* This pass always runs, to provide PROP_gimple_lomp.
6894 But there is nothing to do unless -fopenmp is given. */
6895 if (flag_openmp == 0)
6896 return 0;
6898 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6899 delete_omp_context);
6901 body = gimple_body (current_function_decl);
6902 scan_omp (&body, NULL);
6903 gcc_assert (taskreg_nesting_level == 0);
6905 if (all_contexts->root)
6907 struct gimplify_ctx gctx;
6909 if (task_shared_vars)
6910 push_gimplify_context (&gctx);
6911 lower_omp (&body, NULL);
6912 if (task_shared_vars)
6913 pop_gimplify_context (NULL);
6916 if (all_contexts)
6918 splay_tree_delete (all_contexts);
6919 all_contexts = NULL;
6921 BITMAP_FREE (task_shared_vars);
6922 return 0;
6925 struct gimple_opt_pass pass_lower_omp =
6928 GIMPLE_PASS,
6929 "omplower", /* name */
6930 NULL, /* gate */
6931 execute_lower_omp, /* execute */
6932 NULL, /* sub */
6933 NULL, /* next */
6934 0, /* static_pass_number */
6935 TV_NONE, /* tv_id */
6936 PROP_gimple_any, /* properties_required */
6937 PROP_gimple_lomp, /* properties_provided */
6938 0, /* properties_destroyed */
6939 0, /* todo_flags_start */
6940 0 /* todo_flags_finish */
6944 /* The following is a utility to diagnose OpenMP structured block violations.
6945 It is not part of the "omplower" pass, as that's invoked too late. It
6946 should be invoked by the respective front ends after gimplification. */
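/* For example (illustrative only), a jump such as

     goto l;
     #pragma omp parallel
     { ... l: ... }

   is reported as an invalid entry to an OpenMP structured block, while
   other context mismatches get the generic "invalid branch to/from an
   OpenMP structured block" error.  */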
6948 static splay_tree all_labels;
6950 /* Check for mismatched contexts and generate an error if needed. Return
6951 true if an error is detected. */
6953 static bool
6954 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6955 gimple branch_ctx, gimple label_ctx)
6957 if (label_ctx == branch_ctx)
6958 return false;
6962 Previously we kept track of the label's entire context in diagnose_sb_[12]
6963 so we could traverse it and issue a correct "exit" or "enter" error
6964 message upon a structured block violation.
6966 We built the context by building a list with tree_cons'ing, but there is
6967 no easy counterpart in gimple tuples. It seems like far too much work
6968 for issuing exit/enter error messages. If someone really misses the
6969 distinct error message... patches welcome.
6972 #if 0
6973 /* Try to avoid confusing the user by producing an error message
6974 with correct "exit" or "enter" verbiage. We prefer "exit"
6975 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6976 if (branch_ctx == NULL)
6977 exit_p = false;
6978 else
6980 while (label_ctx)
6982 if (TREE_VALUE (label_ctx) == branch_ctx)
6984 exit_p = false;
6985 break;
6987 label_ctx = TREE_CHAIN (label_ctx);
6991 if (exit_p)
6992 error ("invalid exit from OpenMP structured block");
6993 else
6994 error ("invalid entry to OpenMP structured block");
6995 #endif
6997 /* If it's obvious we have an invalid entry, be specific about the error. */
6998 if (branch_ctx == NULL)
6999 error ("invalid entry to OpenMP structured block");
7000 else
7001 /* Otherwise, be vague and lazy, but efficient. */
7002 error ("invalid branch to/from an OpenMP structured block");
7004 gsi_replace (gsi_p, gimple_build_nop (), false);
7005 return true;
7008 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
7009 where each label is found. */
7011 static tree
7012 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7013 struct walk_stmt_info *wi)
7015 gimple context = (gimple) wi->info;
7016 gimple inner_context;
7017 gimple stmt = gsi_stmt (*gsi_p);
7019 *handled_ops_p = true;
7021 switch (gimple_code (stmt))
7023 WALK_SUBSTMTS;
7025 case GIMPLE_OMP_PARALLEL:
7026 case GIMPLE_OMP_TASK:
7027 case GIMPLE_OMP_SECTIONS:
7028 case GIMPLE_OMP_SINGLE:
7029 case GIMPLE_OMP_SECTION:
7030 case GIMPLE_OMP_MASTER:
7031 case GIMPLE_OMP_ORDERED:
7032 case GIMPLE_OMP_CRITICAL:
7033 /* The minimal context here is just the current OMP construct. */
7034 inner_context = stmt;
7035 wi->info = inner_context;
7036 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
7037 wi->info = context;
7038 break;
7040 case GIMPLE_OMP_FOR:
7041 inner_context = stmt;
7042 wi->info = inner_context;
7043 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7044 walk them. */
7045 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
7046 diagnose_sb_1, NULL, wi);
7047 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
7048 wi->info = context;
7049 break;
7051 case GIMPLE_LABEL:
7052 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
7053 (splay_tree_value) context);
7054 break;
7056 default:
7057 break;
7060 return NULL_TREE;
7063 /* Pass 2: Check each branch and see if its context differs from that of
7064 the destination label's context. */
7066 static tree
7067 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7068 struct walk_stmt_info *wi)
7070 gimple context = (gimple) wi->info;
7071 splay_tree_node n;
7072 gimple stmt = gsi_stmt (*gsi_p);
7074 *handled_ops_p = true;
7076 switch (gimple_code (stmt))
7078 WALK_SUBSTMTS;
7080 case GIMPLE_OMP_PARALLEL:
7081 case GIMPLE_OMP_TASK:
7082 case GIMPLE_OMP_SECTIONS:
7083 case GIMPLE_OMP_SINGLE:
7084 case GIMPLE_OMP_SECTION:
7085 case GIMPLE_OMP_MASTER:
7086 case GIMPLE_OMP_ORDERED:
7087 case GIMPLE_OMP_CRITICAL:
7088 wi->info = stmt;
7089 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
7090 wi->info = context;
7091 break;
7093 case GIMPLE_OMP_FOR:
7094 wi->info = stmt;
7095 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7096 walk them. */
7097 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
7098 diagnose_sb_2, NULL, wi);
7099 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
7100 wi->info = context;
7101 break;
7103 case GIMPLE_COND:
7105 tree lab = gimple_cond_true_label (stmt);
7106 if (lab)
7108 n = splay_tree_lookup (all_labels,
7109 (splay_tree_key) lab);
7110 diagnose_sb_0 (gsi_p, context,
7111 n ? (gimple) n->value : NULL);
7113 lab = gimple_cond_false_label (stmt);
7114 if (lab)
7116 n = splay_tree_lookup (all_labels,
7117 (splay_tree_key) lab);
7118 diagnose_sb_0 (gsi_p, context,
7119 n ? (gimple) n->value : NULL);
7122 break;
7124 case GIMPLE_GOTO:
7126 tree lab = gimple_goto_dest (stmt);
7127 if (TREE_CODE (lab) != LABEL_DECL)
7128 break;
7130 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
7131 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
7133 break;
7135 case GIMPLE_SWITCH:
7137 unsigned int i;
7138 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
7140 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
7141 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
7142 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
7143 break;
7146 break;
7148 case GIMPLE_RETURN:
7149 diagnose_sb_0 (gsi_p, context, NULL);
7150 break;
7152 default:
7153 break;
7156 return NULL_TREE;
7159 static unsigned int
7160 diagnose_omp_structured_block_errors (void)
7162 struct walk_stmt_info wi;
7163 gimple_seq body = gimple_body (current_function_decl);
7165 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
7167 memset (&wi, 0, sizeof (wi));
7168 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
7170 memset (&wi, 0, sizeof (wi));
7171 wi.want_locations = true;
7172 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
7174 gimple_set_body (current_function_decl, body);
7176 splay_tree_delete (all_labels);
7177 all_labels = NULL;
7179 return 0;
7182 static bool
7183 gate_diagnose_omp_blocks (void)
7185 return flag_openmp != 0;
7188 struct gimple_opt_pass pass_diagnose_omp_blocks =
7191 GIMPLE_PASS,
7192 "*diagnose_omp_blocks", /* name */
7193 gate_diagnose_omp_blocks, /* gate */
7194 diagnose_omp_structured_block_errors, /* execute */
7195 NULL, /* sub */
7196 NULL, /* next */
7197 0, /* static_pass_number */
7198 TV_NONE, /* tv_id */
7199 PROP_gimple_any, /* properties_required */
7200 0, /* properties_provided */
7201 0, /* properties_destroyed */
7202 0, /* todo_flags_start */
7203 0, /* todo_flags_finish */
7207 #include "gt-omp-low.h"