gcc/omp-low.c
1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
6 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
7 Free Software Foundation, Inc.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "gimple.h"
32 #include "tree-iterator.h"
33 #include "tree-inline.h"
34 #include "langhooks.h"
35 #include "diagnostic-core.h"
36 #include "tree-flow.h"
37 #include "timevar.h"
38 #include "flags.h"
39 #include "function.h"
40 #include "expr.h"
41 #include "tree-pass.h"
42 #include "ggc.h"
43 #include "except.h"
44 #include "splay-tree.h"
45 #include "optabs.h"
46 #include "cfgloop.h"
49 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
50 phases. The first phase scans the function looking for OMP statements
51 and then for variables that must be replaced to satisfy data sharing
52 clauses. The second phase expands code for the constructs, as well as
53 re-gimplifying things when variables have been replaced with complex
54 expressions.
56 Final code generation is done by pass_expand_omp. The flowgraph is
57 scanned for parallel regions which are then moved to a new
58 function, to be invoked by the thread library. */
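/* Editorial sketch, not part of the original sources: roughly what the two
   phases produce for a trivial parallel region.  The names foo._omp_fn.0,
   .omp_data_o and .omp_data_s below are illustrative only.

       int x = 0;
       #pragma omp parallel shared (x)
         x = x + 1;

   is lowered, and later expanded by pass_expand_omp, into code of the form

       .omp_data_o.x = x;
       __builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
       foo._omp_fn.0 (&.omp_data_o);
       __builtin_GOMP_parallel_end ();
       x = .omp_data_o.x;

   where foo._omp_fn.0 is the outlined child function that receives the
   marshalled data through a pointer to the .omp_data_s record.  */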
60 /* Context structure. Used to store information about each parallel
61 directive in the code. */
63 typedef struct omp_context
65 /* This field must be at the beginning, as we do "inheritance": Some
66 callback functions for tree-inline.c (e.g., omp_copy_decl)
67 receive a copy_body_data pointer that is up-casted to an
68 omp_context pointer. */
69 copy_body_data cb;
71 /* The tree of contexts corresponding to the encountered constructs. */
72 struct omp_context *outer;
73 gimple stmt;
75 /* Map variables to fields in a structure that allows communication
76 between sending and receiving threads. */
77 splay_tree field_map;
78 tree record_type;
79 tree sender_decl;
80 tree receiver_decl;
82 /* These are used just by task contexts, if the task firstprivate fn is
83 needed. srecord_type is used to communicate from the thread that
84 encountered the task construct to the task firstprivate fn;
85 record_type is allocated by GOMP_task, initialized by the task
86 firstprivate fn, and passed to the task body fn. */
87 splay_tree sfield_map;
88 tree srecord_type;
90 /* A chain of variables to add to the top-level block surrounding the
91 construct. In the case of a parallel, this is in the child function. */
92 tree block_vars;
94 /* What to do with variables with implicitly determined sharing
95 attributes. */
96 enum omp_clause_default_kind default_kind;
98 /* Nesting depth of this context. Used to beautify error messages re
99 invalid gotos. The outermost ctx is depth 1, with depth 0 being
100 reserved for the main body of the function. */
101 int depth;
103 /* True if this parallel directive is nested within another. */
104 bool is_nested;
105 } omp_context;
108 struct omp_for_data_loop
110 tree v, n1, n2, step;
111 enum tree_code cond_code;
114 /* A structure describing the main elements of a parallel loop. */
116 struct omp_for_data
118 struct omp_for_data_loop loop;
119 tree chunk_size;
120 gimple for_stmt;
121 tree pre, iter_type;
122 int collapse;
123 bool have_nowait, have_ordered;
124 enum omp_clause_schedule_kind sched_kind;
125 struct omp_for_data_loop *loops;
129 static splay_tree all_contexts;
130 static int taskreg_nesting_level;
131 struct omp_region *root_omp_region;
132 static bitmap task_shared_vars;
134 static void scan_omp (gimple_seq, omp_context *);
135 static tree scan_omp_1_op (tree *, int *, void *);
137 #define WALK_SUBSTMTS \
138 case GIMPLE_BIND: \
139 case GIMPLE_TRY: \
140 case GIMPLE_CATCH: \
141 case GIMPLE_EH_FILTER: \
142 case GIMPLE_TRANSACTION: \
143 /* The sub-statements for these should be walked. */ \
144 *handled_ops_p = false; \
145 break;
147 /* Convenience function for calling scan_omp_1_op on tree operands. */
149 static inline tree
150 scan_omp_op (tree *tp, omp_context *ctx)
152 struct walk_stmt_info wi;
154 memset (&wi, 0, sizeof (wi));
155 wi.info = ctx;
156 wi.want_locations = true;
158 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
161 static void lower_omp (gimple_seq, omp_context *);
162 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
163 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
165 /* Find an OpenMP clause of type KIND within CLAUSES. */
167 tree
168 find_omp_clause (tree clauses, enum omp_clause_code kind)
170 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
171 if (OMP_CLAUSE_CODE (clauses) == kind)
172 return clauses;
174 return NULL_TREE;
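/* Editorial example, not part of the original file: a typical use of
   find_omp_clause, mirroring determine_parallel_type further below.

       tree clauses = gimple_omp_for_clauses (ws_stmt);
       tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
       if (c == NULL
           || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC)
         ...;               -- no schedule clause, or an explicit static one

   The clause tree itself is returned, or NULL_TREE when no clause of the
   requested kind appears in the list.  */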
177 /* Return true if CTX is for an omp parallel. */
179 static inline bool
180 is_parallel_ctx (omp_context *ctx)
182 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
186 /* Return true if CTX is for an omp task. */
188 static inline bool
189 is_task_ctx (omp_context *ctx)
191 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
195 /* Return true if CTX is for an omp parallel or omp task. */
197 static inline bool
198 is_taskreg_ctx (omp_context *ctx)
200 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
201 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
205 /* Return true if REGION is a combined parallel+workshare region. */
207 static inline bool
208 is_combined_parallel (struct omp_region *region)
210 return region->is_combined_parallel;
214 /* Extract the header elements of parallel loop FOR_STMT and store
215 them into *FD. */
217 static void
218 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
219 struct omp_for_data_loop *loops)
221 tree t, var, *collapse_iter, *collapse_count;
222 tree count = NULL_TREE, iter_type = long_integer_type_node;
223 struct omp_for_data_loop *loop;
224 int i;
225 struct omp_for_data_loop dummy_loop;
226 location_t loc = gimple_location (for_stmt);
228 fd->for_stmt = for_stmt;
229 fd->pre = NULL;
230 fd->collapse = gimple_omp_for_collapse (for_stmt);
231 if (fd->collapse > 1)
232 fd->loops = loops;
233 else
234 fd->loops = &fd->loop;
236 fd->have_nowait = fd->have_ordered = false;
237 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
238 fd->chunk_size = NULL_TREE;
239 collapse_iter = NULL;
240 collapse_count = NULL;
242 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
243 switch (OMP_CLAUSE_CODE (t))
245 case OMP_CLAUSE_NOWAIT:
246 fd->have_nowait = true;
247 break;
248 case OMP_CLAUSE_ORDERED:
249 fd->have_ordered = true;
250 break;
251 case OMP_CLAUSE_SCHEDULE:
252 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
253 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
254 break;
255 case OMP_CLAUSE_COLLAPSE:
256 if (fd->collapse > 1)
258 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
259 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
261 default:
262 break;
265 /* FIXME: for now map schedule(auto) to schedule(static).
266 There should be analysis to determine whether all iterations
267 are approximately the same amount of work (then schedule(static)
268 is best) or if it varies (then schedule(dynamic,N) is better). */
269 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
271 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
272 gcc_assert (fd->chunk_size == NULL);
274 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
275 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
276 gcc_assert (fd->chunk_size == NULL);
277 else if (fd->chunk_size == NULL)
279 /* We only need to compute a default chunk size for ordered
280 static loops and dynamic loops. */
281 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
282 || fd->have_ordered
283 || fd->collapse > 1)
284 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
285 ? integer_zero_node : integer_one_node;
288 for (i = 0; i < fd->collapse; i++)
290 if (fd->collapse == 1)
291 loop = &fd->loop;
292 else if (loops != NULL)
293 loop = loops + i;
294 else
295 loop = &dummy_loop;
298 loop->v = gimple_omp_for_index (for_stmt, i);
299 gcc_assert (SSA_VAR_P (loop->v));
300 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
301 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
302 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
303 loop->n1 = gimple_omp_for_initial (for_stmt, i);
305 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
306 loop->n2 = gimple_omp_for_final (for_stmt, i);
307 switch (loop->cond_code)
309 case LT_EXPR:
310 case GT_EXPR:
311 break;
312 case LE_EXPR:
313 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
314 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
315 else
316 loop->n2 = fold_build2_loc (loc,
317 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
318 build_int_cst (TREE_TYPE (loop->n2), 1));
319 loop->cond_code = LT_EXPR;
320 break;
321 case GE_EXPR:
322 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
323 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
324 else
325 loop->n2 = fold_build2_loc (loc,
326 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
327 build_int_cst (TREE_TYPE (loop->n2), 1));
328 loop->cond_code = GT_EXPR;
329 break;
330 default:
331 gcc_unreachable ();
334 t = gimple_omp_for_incr (for_stmt, i);
335 gcc_assert (TREE_OPERAND (t, 0) == var);
336 switch (TREE_CODE (t))
338 case PLUS_EXPR:
339 case POINTER_PLUS_EXPR:
340 loop->step = TREE_OPERAND (t, 1);
341 break;
342 case MINUS_EXPR:
343 loop->step = TREE_OPERAND (t, 1);
344 loop->step = fold_build1_loc (loc,
345 NEGATE_EXPR, TREE_TYPE (loop->step),
346 loop->step);
347 break;
348 default:
349 gcc_unreachable ();
352 if (iter_type != long_long_unsigned_type_node)
354 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
355 iter_type = long_long_unsigned_type_node;
356 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
357 && TYPE_PRECISION (TREE_TYPE (loop->v))
358 >= TYPE_PRECISION (iter_type))
360 tree n;
362 if (loop->cond_code == LT_EXPR)
363 n = fold_build2_loc (loc,
364 PLUS_EXPR, TREE_TYPE (loop->v),
365 loop->n2, loop->step);
366 else
367 n = loop->n1;
368 if (TREE_CODE (n) != INTEGER_CST
369 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
370 iter_type = long_long_unsigned_type_node;
372 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
373 > TYPE_PRECISION (iter_type))
375 tree n1, n2;
377 if (loop->cond_code == LT_EXPR)
379 n1 = loop->n1;
380 n2 = fold_build2_loc (loc,
381 PLUS_EXPR, TREE_TYPE (loop->v),
382 loop->n2, loop->step);
384 else
386 n1 = fold_build2_loc (loc,
387 MINUS_EXPR, TREE_TYPE (loop->v),
388 loop->n2, loop->step);
389 n2 = loop->n1;
391 if (TREE_CODE (n1) != INTEGER_CST
392 || TREE_CODE (n2) != INTEGER_CST
393 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
394 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
395 iter_type = long_long_unsigned_type_node;
399 if (collapse_count && *collapse_count == NULL)
401 if ((i == 0 || count != NULL_TREE)
402 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
403 && TREE_CONSTANT (loop->n1)
404 && TREE_CONSTANT (loop->n2)
405 && TREE_CODE (loop->step) == INTEGER_CST)
407 tree itype = TREE_TYPE (loop->v);
409 if (POINTER_TYPE_P (itype))
410 itype = signed_type_for (itype);
411 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
412 t = fold_build2_loc (loc,
413 PLUS_EXPR, itype,
414 fold_convert_loc (loc, itype, loop->step), t);
415 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
416 fold_convert_loc (loc, itype, loop->n2));
417 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
418 fold_convert_loc (loc, itype, loop->n1));
419 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
420 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
421 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
422 fold_build1_loc (loc, NEGATE_EXPR, itype,
423 fold_convert_loc (loc, itype,
424 loop->step)));
425 else
426 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
427 fold_convert_loc (loc, itype, loop->step));
428 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
429 if (count != NULL_TREE)
430 count = fold_build2_loc (loc,
431 MULT_EXPR, long_long_unsigned_type_node,
432 count, t);
433 else
434 count = t;
435 if (TREE_CODE (count) != INTEGER_CST)
436 count = NULL_TREE;
438 else
439 count = NULL_TREE;
443 if (count)
445 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
446 iter_type = long_long_unsigned_type_node;
447 else
448 iter_type = long_integer_type_node;
450 else if (collapse_iter && *collapse_iter != NULL)
451 iter_type = TREE_TYPE (*collapse_iter);
452 fd->iter_type = iter_type;
453 if (collapse_iter && *collapse_iter == NULL)
454 *collapse_iter = create_tmp_var (iter_type, ".iter");
455 if (collapse_count && *collapse_count == NULL)
457 if (count)
458 *collapse_count = fold_convert_loc (loc, iter_type, count);
459 else
460 *collapse_count = create_tmp_var (iter_type, ".count");
463 if (fd->collapse > 1)
465 fd->loop.v = *collapse_iter;
466 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
467 fd->loop.n2 = *collapse_count;
468 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
469 fd->loop.cond_code = LT_EXPR;
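/* Editorial worked example, not part of the original file: for a loop such as

       #pragma omp for
       for (i = 0; i <= n; i++)

   the code above normalizes the LE_EXPR condition to LT_EXPR with
   n2 = n + 1, so the header is recorded as n1 = 0, n2 = n + 1, step = 1,
   cond_code = LT_EXPR.  A decrement like i -= 2 yields step = -2, and a
   GE_EXPR bound becomes GT_EXPR with n2 decremented by one.  When every
   bound of a collapsed nest is constant, the total iteration count is
   precomputed as (n2 - n1 + step - 1) / step per level (with the signs
   flipped for descending loops) and multiplied into COUNT.  */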
474 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
475 is the immediate dominator of PAR_ENTRY_BB, return true if there
476 are no data dependencies that would prevent expanding the parallel
477 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
479 When expanding a combined parallel+workshare region, the call to
480 the child function may need additional arguments in the case of
481 GIMPLE_OMP_FOR regions. In some cases, these arguments are
482 computed out of variables passed in from the parent to the child
483 via 'struct .omp_data_s'. For instance:
485 #pragma omp parallel for schedule (guided, i * 4)
486 for (j ...)
488 Is lowered into:
490 # BLOCK 2 (PAR_ENTRY_BB)
491 .omp_data_o.i = i;
492 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
494 # BLOCK 3 (WS_ENTRY_BB)
495 .omp_data_i = &.omp_data_o;
496 D.1667 = .omp_data_i->i;
497 D.1598 = D.1667 * 4;
498 #pragma omp for schedule (guided, D.1598)
500 When we outline the parallel region, the call to the child function
501 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
502 that value is computed *after* the call site. So, in principle we
503 cannot do the transformation.
505 To see whether the code in WS_ENTRY_BB blocks the combined
506 parallel+workshare call, we collect all the variables used in the
507 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
508 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
509 call.
511 FIXME. If we had the SSA form built at this point, we could merely
512 hoist the code in block 3 into block 2 and be done with it. But at
513 this point we don't have dataflow information and though we could
514 hack something up here, it is really not worth the aggravation. */
516 static bool
517 workshare_safe_to_combine_p (basic_block ws_entry_bb)
519 struct omp_for_data fd;
520 gimple ws_stmt = last_stmt (ws_entry_bb);
522 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
523 return true;
525 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
527 extract_omp_for_data (ws_stmt, &fd, NULL);
529 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
530 return false;
531 if (fd.iter_type != long_integer_type_node)
532 return false;
534 /* FIXME. We give up too easily here. If any of these arguments
535 are not constants, they will likely involve variables that have
536 been mapped into fields of .omp_data_s for sharing with the child
537 function. With appropriate data flow, it would be possible to
538 see through this. */
539 if (!is_gimple_min_invariant (fd.loop.n1)
540 || !is_gimple_min_invariant (fd.loop.n2)
541 || !is_gimple_min_invariant (fd.loop.step)
542 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
543 return false;
545 return true;
549 /* Collect additional arguments needed to emit a combined
550 parallel+workshare call. WS_STMT is the workshare directive being
551 expanded. */
553 static VEC(tree,gc) *
554 get_ws_args_for (gimple ws_stmt)
556 tree t;
557 location_t loc = gimple_location (ws_stmt);
558 VEC(tree,gc) *ws_args;
560 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
562 struct omp_for_data fd;
564 extract_omp_for_data (ws_stmt, &fd, NULL);
566 ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));
568 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
569 VEC_quick_push (tree, ws_args, t);
571 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
572 VEC_quick_push (tree, ws_args, t);
574 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
575 VEC_quick_push (tree, ws_args, t);
577 if (fd.chunk_size)
579 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
580 VEC_quick_push (tree, ws_args, t);
583 return ws_args;
585 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
587 /* The number of sections is equal to the number of edges from the
588 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
589 the exit of the sections region. */
590 basic_block bb = single_succ (gimple_bb (ws_stmt));
591 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
592 ws_args = VEC_alloc (tree, gc, 1);
593 VEC_quick_push (tree, ws_args, t);
594 return ws_args;
597 gcc_unreachable ();
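/* Editorial example, not part of the original file: for a combined region

       #pragma omp parallel for schedule (dynamic, 4)
       for (i = 0; i < n; i++)

   get_ws_args_for returns the loop bounds converted to long, here the
   vector { (long) 0, (long) n, (long) 1, (long) 4 }.  These extra
   arguments are appended to the combined GOMP_parallel_loop_*_start call
   when the parallel region is expanded, so the runtime can set up the
   schedule without a separate per-thread GOMP_loop_*_start call.  For a
   sections region the single extra argument is the section count.  */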
601 /* Discover whether REGION is a combined parallel+workshare region. */
603 static void
604 determine_parallel_type (struct omp_region *region)
606 basic_block par_entry_bb, par_exit_bb;
607 basic_block ws_entry_bb, ws_exit_bb;
609 if (region == NULL || region->inner == NULL
610 || region->exit == NULL || region->inner->exit == NULL
611 || region->inner->cont == NULL)
612 return;
614 /* We only support parallel+for and parallel+sections. */
615 if (region->type != GIMPLE_OMP_PARALLEL
616 || (region->inner->type != GIMPLE_OMP_FOR
617 && region->inner->type != GIMPLE_OMP_SECTIONS))
618 return;
620 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
621 WS_EXIT_BB -> PAR_EXIT_BB. */
622 par_entry_bb = region->entry;
623 par_exit_bb = region->exit;
624 ws_entry_bb = region->inner->entry;
625 ws_exit_bb = region->inner->exit;
627 if (single_succ (par_entry_bb) == ws_entry_bb
628 && single_succ (ws_exit_bb) == par_exit_bb
629 && workshare_safe_to_combine_p (ws_entry_bb)
630 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
631 || (last_and_only_stmt (ws_entry_bb)
632 && last_and_only_stmt (par_exit_bb))))
634 gimple ws_stmt = last_stmt (ws_entry_bb);
636 if (region->inner->type == GIMPLE_OMP_FOR)
638 /* If this is a combined parallel loop, we need to determine
639 whether or not to use the combined library calls. There
640 are two cases where we do not apply the transformation:
641 static loops and any kind of ordered loop. In the first
642 case, we already open code the loop so there is no need
643 to do anything else. In the second case, the combined
644 parallel loop call would still need extra synchronization
645 to implement ordered semantics, so there would not be any
646 gain in using the combined call. */
647 tree clauses = gimple_omp_for_clauses (ws_stmt);
648 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
649 if (c == NULL
650 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
651 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
653 region->is_combined_parallel = false;
654 region->inner->is_combined_parallel = false;
655 return;
659 region->is_combined_parallel = true;
660 region->inner->is_combined_parallel = true;
661 region->ws_args = get_ws_args_for (ws_stmt);
666 /* Return true if EXPR is variable sized. */
668 static inline bool
669 is_variable_sized (const_tree expr)
671 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
674 /* Return true if DECL is a reference type. */
676 static inline bool
677 is_reference (tree decl)
679 return lang_hooks.decls.omp_privatize_by_reference (decl);
682 /* Look up variables in the decl or field splay trees. The "maybe" form
683 allows the variable not to have been entered, otherwise we assert
684 that the variable must have been entered. */
686 static inline tree
687 lookup_decl (tree var, omp_context *ctx)
689 tree *n;
690 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
691 return *n;
694 static inline tree
695 maybe_lookup_decl (const_tree var, omp_context *ctx)
697 tree *n;
698 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
699 return n ? *n : NULL_TREE;
702 static inline tree
703 lookup_field (tree var, omp_context *ctx)
705 splay_tree_node n;
706 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
707 return (tree) n->value;
710 static inline tree
711 lookup_sfield (tree var, omp_context *ctx)
713 splay_tree_node n;
714 n = splay_tree_lookup (ctx->sfield_map
715 ? ctx->sfield_map : ctx->field_map,
716 (splay_tree_key) var);
717 return (tree) n->value;
720 static inline tree
721 maybe_lookup_field (tree var, omp_context *ctx)
723 splay_tree_node n;
724 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
725 return n ? (tree) n->value : NULL_TREE;
728 /* Return true if DECL should be copied by pointer. SHARED_CTX is
729 the parallel context if DECL is to be shared. */
731 static bool
732 use_pointer_for_field (tree decl, omp_context *shared_ctx)
734 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
735 return true;
737 /* We can only use copy-in/copy-out semantics for shared variables
738 when we know the value is not accessible from an outer scope. */
739 if (shared_ctx)
741 /* ??? Trivially accessible from anywhere. But why would we even
742 be passing an address in this case? Should we simply assert
743 this to be false, or should we have a cleanup pass that removes
744 these from the list of mappings? */
745 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
746 return true;
748 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
749 without analyzing the expression whether or not its location
750 is accessible to anyone else. In the case of nested parallel
751 regions it certainly may be. */
752 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
753 return true;
755 /* Do not use copy-in/copy-out for variables that have their
756 address taken. */
757 if (TREE_ADDRESSABLE (decl))
758 return true;
760 /* Disallow copy-in/out in nested parallel if
761 decl is shared in outer parallel, otherwise
762 each thread could store the shared variable
763 in its own copy-in location, making the
764 variable no longer really shared. */
765 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
767 omp_context *up;
769 for (up = shared_ctx->outer; up; up = up->outer)
770 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
771 break;
773 if (up)
775 tree c;
777 for (c = gimple_omp_taskreg_clauses (up->stmt);
778 c; c = OMP_CLAUSE_CHAIN (c))
779 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
780 && OMP_CLAUSE_DECL (c) == decl)
781 break;
783 if (c)
784 goto maybe_mark_addressable_and_ret;
788 /* For tasks avoid using copy-in/out, unless they are readonly
789 (in which case just copy-in is used). As tasks can be
790 deferred or executed in a different thread, the task need not have
791 terminated by the time GOMP_task returns. */
792 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
794 tree outer;
795 maybe_mark_addressable_and_ret:
796 outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
797 if (is_gimple_reg (outer))
799 /* Taking address of OUTER in lower_send_shared_vars
800 might need regimplification of everything that uses the
801 variable. */
802 if (!task_shared_vars)
803 task_shared_vars = BITMAP_ALLOC (NULL);
804 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
805 TREE_ADDRESSABLE (outer) = 1;
807 return true;
811 return false;
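/* Editorial summary, not part of the original file, of the decision above:

       int x;                  scalar, address never taken
       #pragma omp parallel shared (x)   -> false: copy-in/copy-out through
                                            the .omp_data_s field is safe

       int a[16];              aggregate type
       #pragma omp parallel shared (a)   -> true: passed by pointer

       int y;  ... &y ...      TREE_ADDRESSABLE
       #pragma omp parallel shared (y)   -> true: passed by pointer

   For task constructs a writable shared scalar is likewise forced to be
   addressable and passed by pointer, because the task may still be running
   after GOMP_task returns.  */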
814 /* Create a new VAR_DECL and copy information from VAR to it. */
816 tree
817 copy_var_decl (tree var, tree name, tree type)
819 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
821 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
822 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
823 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
824 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
825 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
826 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
827 TREE_USED (copy) = 1;
828 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
830 return copy;
833 /* Construct a new automatic decl similar to VAR. */
835 static tree
836 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
838 tree copy = copy_var_decl (var, name, type);
840 DECL_CONTEXT (copy) = current_function_decl;
841 DECL_CHAIN (copy) = ctx->block_vars;
842 ctx->block_vars = copy;
844 return copy;
847 static tree
848 omp_copy_decl_1 (tree var, omp_context *ctx)
850 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
853 /* Build tree nodes to access the field for VAR on the receiver side. */
855 static tree
856 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
858 tree x, field = lookup_field (var, ctx);
860 /* If the receiver record type was remapped in the child function,
861 remap the field into the new record type. */
862 x = maybe_lookup_field (field, ctx);
863 if (x != NULL)
864 field = x;
866 x = build_simple_mem_ref (ctx->receiver_decl);
867 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
868 if (by_ref)
869 x = build_simple_mem_ref (x);
871 return x;
874 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
875 of a parallel, this is a component reference; for workshare constructs
876 this is some variable. */
878 static tree
879 build_outer_var_ref (tree var, omp_context *ctx)
881 tree x;
883 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
884 x = var;
885 else if (is_variable_sized (var))
887 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
888 x = build_outer_var_ref (x, ctx);
889 x = build_simple_mem_ref (x);
891 else if (is_taskreg_ctx (ctx))
893 bool by_ref = use_pointer_for_field (var, NULL);
894 x = build_receiver_ref (var, by_ref, ctx);
896 else if (ctx->outer)
897 x = lookup_decl (var, ctx->outer);
898 else if (is_reference (var))
899 /* This can happen with orphaned constructs. If var is reference, it is
900 possible it is shared and as such valid. */
901 x = var;
902 else
903 gcc_unreachable ();
905 if (is_reference (var))
906 x = build_simple_mem_ref (x);
908 return x;
911 /* Build tree nodes to access the field for VAR on the sender side. */
913 static tree
914 build_sender_ref (tree var, omp_context *ctx)
916 tree field = lookup_sfield (var, ctx);
917 return build3 (COMPONENT_REF, TREE_TYPE (field),
918 ctx->sender_decl, field, NULL);
921 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
923 static void
924 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
926 tree field, type, sfield = NULL_TREE;
928 gcc_assert ((mask & 1) == 0
929 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
930 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
931 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
933 type = TREE_TYPE (var);
934 if (by_ref)
935 type = build_pointer_type (type);
936 else if ((mask & 3) == 1 && is_reference (var))
937 type = TREE_TYPE (type);
939 field = build_decl (DECL_SOURCE_LOCATION (var),
940 FIELD_DECL, DECL_NAME (var), type);
942 /* Remember what variable this field was created for. This does have a
943 side effect of making dwarf2out ignore this member, so for helpful
944 debugging we clear it later in delete_omp_context. */
945 DECL_ABSTRACT_ORIGIN (field) = var;
946 if (type == TREE_TYPE (var))
948 DECL_ALIGN (field) = DECL_ALIGN (var);
949 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
950 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
952 else
953 DECL_ALIGN (field) = TYPE_ALIGN (type);
955 if ((mask & 3) == 3)
957 insert_field_into_struct (ctx->record_type, field);
958 if (ctx->srecord_type)
960 sfield = build_decl (DECL_SOURCE_LOCATION (var),
961 FIELD_DECL, DECL_NAME (var), type);
962 DECL_ABSTRACT_ORIGIN (sfield) = var;
963 DECL_ALIGN (sfield) = DECL_ALIGN (field);
964 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
965 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
966 insert_field_into_struct (ctx->srecord_type, sfield);
969 else
971 if (ctx->srecord_type == NULL_TREE)
973 tree t;
975 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
976 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
977 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
979 sfield = build_decl (DECL_SOURCE_LOCATION (var),
980 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
981 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
982 insert_field_into_struct (ctx->srecord_type, sfield);
983 splay_tree_insert (ctx->sfield_map,
984 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
985 (splay_tree_value) sfield);
988 sfield = field;
989 insert_field_into_struct ((mask & 1) ? ctx->record_type
990 : ctx->srecord_type, field);
993 if (mask & 1)
994 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
995 (splay_tree_value) field);
996 if ((mask & 2) && ctx->sfield_map)
997 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
998 (splay_tree_value) sfield);
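/* Editorial note, not part of the original file: MASK above selects which
   communication record receives the new field.  Bit 0 adds a field to
   CTX->RECORD_TYPE (recorded in FIELD_MAP, seen by the receiver side) and
   bit 1 adds one to CTX->SRECORD_TYPE (SFIELD_MAP, the sender-side record
   used by task constructs).  Typical calls, mirroring scan_sharing_clauses
   below:

       install_var_field (decl, by_ref, 3, ctx);   -- shared: both records
       install_var_field (decl, false, 1, ctx);    -- task-local copy only
       install_var_field (decl, by_ref, 2, ctx);   -- sender record only   */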
1001 static tree
1002 install_var_local (tree var, omp_context *ctx)
1004 tree new_var = omp_copy_decl_1 (var, ctx);
1005 insert_decl_map (&ctx->cb, var, new_var);
1006 return new_var;
1009 /* Adjust the replacement for DECL in CTX for the new context. This means
1010 copying the DECL_VALUE_EXPR, and fixing up the type. */
1012 static void
1013 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1015 tree new_decl, size;
1017 new_decl = lookup_decl (decl, ctx);
1019 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1021 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1022 && DECL_HAS_VALUE_EXPR_P (decl))
1024 tree ve = DECL_VALUE_EXPR (decl);
1025 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1026 SET_DECL_VALUE_EXPR (new_decl, ve);
1027 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1030 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1032 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1033 if (size == error_mark_node)
1034 size = TYPE_SIZE (TREE_TYPE (new_decl));
1035 DECL_SIZE (new_decl) = size;
1037 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1038 if (size == error_mark_node)
1039 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1040 DECL_SIZE_UNIT (new_decl) = size;
1044 /* The callback for remap_decl. Search all containing contexts for a
1045 mapping of the variable; this avoids having to duplicate the splay
1046 tree ahead of time. We know a mapping doesn't already exist in the
1047 given context. Create new mappings to implement default semantics. */
1049 static tree
1050 omp_copy_decl (tree var, copy_body_data *cb)
1052 omp_context *ctx = (omp_context *) cb;
1053 tree new_var;
1055 if (TREE_CODE (var) == LABEL_DECL)
1057 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1058 DECL_CONTEXT (new_var) = current_function_decl;
1059 insert_decl_map (&ctx->cb, var, new_var);
1060 return new_var;
1063 while (!is_taskreg_ctx (ctx))
1065 ctx = ctx->outer;
1066 if (ctx == NULL)
1067 return var;
1068 new_var = maybe_lookup_decl (var, ctx);
1069 if (new_var)
1070 return new_var;
1073 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1074 return var;
1076 return error_mark_node;
1080 /* Return the parallel region associated with STMT. */
1082 /* Debugging dumps for parallel regions. */
1083 void dump_omp_region (FILE *, struct omp_region *, int);
1084 void debug_omp_region (struct omp_region *);
1085 void debug_all_omp_regions (void);
1087 /* Dump the parallel region tree rooted at REGION. */
1089 void
1090 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1092 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1093 gimple_code_name[region->type]);
1095 if (region->inner)
1096 dump_omp_region (file, region->inner, indent + 4);
1098 if (region->cont)
1100 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1101 region->cont->index);
1104 if (region->exit)
1105 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1106 region->exit->index);
1107 else
1108 fprintf (file, "%*s[no exit marker]\n", indent, "");
1110 if (region->next)
1111 dump_omp_region (file, region->next, indent);
1114 DEBUG_FUNCTION void
1115 debug_omp_region (struct omp_region *region)
1117 dump_omp_region (stderr, region, 0);
1120 DEBUG_FUNCTION void
1121 debug_all_omp_regions (void)
1123 dump_omp_region (stderr, root_omp_region, 0);
1127 /* Create a new parallel region starting at STMT inside region PARENT. */
1129 struct omp_region *
1130 new_omp_region (basic_block bb, enum gimple_code type,
1131 struct omp_region *parent)
1133 struct omp_region *region = XCNEW (struct omp_region);
1135 region->outer = parent;
1136 region->entry = bb;
1137 region->type = type;
1139 if (parent)
1141 /* This is a nested region. Add it to the list of inner
1142 regions in PARENT. */
1143 region->next = parent->inner;
1144 parent->inner = region;
1146 else
1148 /* This is a toplevel region. Add it to the list of toplevel
1149 regions in ROOT_OMP_REGION. */
1150 region->next = root_omp_region;
1151 root_omp_region = region;
1154 return region;
1157 /* Release the memory associated with the region tree rooted at REGION. */
1159 static void
1160 free_omp_region_1 (struct omp_region *region)
1162 struct omp_region *i, *n;
1164 for (i = region->inner; i ; i = n)
1166 n = i->next;
1167 free_omp_region_1 (i);
1170 free (region);
1173 /* Release the memory for the entire omp region tree. */
1175 void
1176 free_omp_regions (void)
1178 struct omp_region *r, *n;
1179 for (r = root_omp_region; r ; r = n)
1181 n = r->next;
1182 free_omp_region_1 (r);
1184 root_omp_region = NULL;
1188 /* Create a new context, with OUTER_CTX being the surrounding context. */
1190 static omp_context *
1191 new_omp_context (gimple stmt, omp_context *outer_ctx)
1193 omp_context *ctx = XCNEW (omp_context);
1195 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1196 (splay_tree_value) ctx);
1197 ctx->stmt = stmt;
1199 if (outer_ctx)
1201 ctx->outer = outer_ctx;
1202 ctx->cb = outer_ctx->cb;
1203 ctx->cb.block = NULL;
1204 ctx->depth = outer_ctx->depth + 1;
1206 else
1208 ctx->cb.src_fn = current_function_decl;
1209 ctx->cb.dst_fn = current_function_decl;
1210 ctx->cb.src_node = cgraph_get_node (current_function_decl);
1211 gcc_checking_assert (ctx->cb.src_node);
1212 ctx->cb.dst_node = ctx->cb.src_node;
1213 ctx->cb.src_cfun = cfun;
1214 ctx->cb.copy_decl = omp_copy_decl;
1215 ctx->cb.eh_lp_nr = 0;
1216 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1217 ctx->depth = 1;
1220 ctx->cb.decl_map = pointer_map_create ();
1222 return ctx;
1225 static gimple_seq maybe_catch_exception (gimple_seq);
1227 /* Finalize task copyfn. */
1229 static void
1230 finalize_task_copyfn (gimple task_stmt)
1232 struct function *child_cfun;
1233 tree child_fn, old_fn;
1234 gimple_seq seq, new_seq;
1235 gimple bind;
1237 child_fn = gimple_omp_task_copy_fn (task_stmt);
1238 if (child_fn == NULL_TREE)
1239 return;
1241 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1243 /* Inform the callgraph about the new function. */
1244 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1245 = cfun->curr_properties;
1247 old_fn = current_function_decl;
1248 push_cfun (child_cfun);
1249 current_function_decl = child_fn;
1250 bind = gimplify_body (child_fn, false);
1251 seq = gimple_seq_alloc ();
1252 gimple_seq_add_stmt (&seq, bind);
1253 new_seq = maybe_catch_exception (seq);
1254 if (new_seq != seq)
1256 bind = gimple_build_bind (NULL, new_seq, NULL);
1257 seq = gimple_seq_alloc ();
1258 gimple_seq_add_stmt (&seq, bind);
1260 gimple_set_body (child_fn, seq);
1261 pop_cfun ();
1262 current_function_decl = old_fn;
1264 cgraph_add_new_function (child_fn, false);
1267 /* Destroy an omp_context data structure. Called through the splay tree
1268 value delete callback. */
1270 static void
1271 delete_omp_context (splay_tree_value value)
1273 omp_context *ctx = (omp_context *) value;
1275 pointer_map_destroy (ctx->cb.decl_map);
1277 if (ctx->field_map)
1278 splay_tree_delete (ctx->field_map);
1279 if (ctx->sfield_map)
1280 splay_tree_delete (ctx->sfield_map);
1282 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We must clear it again,
1283 otherwise it would produce corrupt debug information. */
1284 if (ctx->record_type)
1286 tree t;
1287 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1288 DECL_ABSTRACT_ORIGIN (t) = NULL;
1290 if (ctx->srecord_type)
1292 tree t;
1293 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1294 DECL_ABSTRACT_ORIGIN (t) = NULL;
1297 if (is_task_ctx (ctx))
1298 finalize_task_copyfn (ctx->stmt);
1300 XDELETE (ctx);
1303 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1304 context. */
1306 static void
1307 fixup_child_record_type (omp_context *ctx)
1309 tree f, type = ctx->record_type;
1311 /* ??? It isn't sufficient to just call remap_type here, because
1312 variably_modified_type_p doesn't work the way we expect for
1313 record types. Testing each field for whether it needs remapping
1314 and creating a new record by hand works, however. */
1315 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1316 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1317 break;
1318 if (f)
1320 tree name, new_fields = NULL;
1322 type = lang_hooks.types.make_type (RECORD_TYPE);
1323 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1324 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1325 TYPE_DECL, name, type);
1326 TYPE_NAME (type) = name;
1328 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1330 tree new_f = copy_node (f);
1331 DECL_CONTEXT (new_f) = type;
1332 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1333 DECL_CHAIN (new_f) = new_fields;
1334 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1335 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1336 &ctx->cb, NULL);
1337 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1338 &ctx->cb, NULL);
1339 new_fields = new_f;
1341 /* Arrange to be able to look up the receiver field
1342 given the sender field. */
1343 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1344 (splay_tree_value) new_f);
1346 TYPE_FIELDS (type) = nreverse (new_fields);
1347 layout_type (type);
1350 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1353 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1354 specified by CLAUSES. */
1356 static void
1357 scan_sharing_clauses (tree clauses, omp_context *ctx)
1359 tree c, decl;
1360 bool scan_array_reductions = false;
1362 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1364 bool by_ref;
1366 switch (OMP_CLAUSE_CODE (c))
1368 case OMP_CLAUSE_PRIVATE:
1369 decl = OMP_CLAUSE_DECL (c);
1370 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1371 goto do_private;
1372 else if (!is_variable_sized (decl))
1373 install_var_local (decl, ctx);
1374 break;
1376 case OMP_CLAUSE_SHARED:
1377 gcc_assert (is_taskreg_ctx (ctx));
1378 decl = OMP_CLAUSE_DECL (c);
1379 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1380 || !is_variable_sized (decl));
1381 /* Global variables don't need to be copied,
1382 the receiver side will use them directly. */
1383 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1384 break;
1385 by_ref = use_pointer_for_field (decl, ctx);
1386 if (! TREE_READONLY (decl)
1387 || TREE_ADDRESSABLE (decl)
1388 || by_ref
1389 || is_reference (decl))
1391 install_var_field (decl, by_ref, 3, ctx);
1392 install_var_local (decl, ctx);
1393 break;
1395 /* We don't need to copy const scalar vars back. */
1396 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1397 goto do_private;
1399 case OMP_CLAUSE_LASTPRIVATE:
1400 /* Let the corresponding firstprivate clause create
1401 the variable. */
1402 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1403 break;
1404 /* FALLTHRU */
1406 case OMP_CLAUSE_FIRSTPRIVATE:
1407 case OMP_CLAUSE_REDUCTION:
1408 decl = OMP_CLAUSE_DECL (c);
1409 do_private:
1410 if (is_variable_sized (decl))
1412 if (is_task_ctx (ctx))
1413 install_var_field (decl, false, 1, ctx);
1414 break;
1416 else if (is_taskreg_ctx (ctx))
1418 bool global
1419 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1420 by_ref = use_pointer_for_field (decl, NULL);
1422 if (is_task_ctx (ctx)
1423 && (global || by_ref || is_reference (decl)))
1425 install_var_field (decl, false, 1, ctx);
1426 if (!global)
1427 install_var_field (decl, by_ref, 2, ctx);
1429 else if (!global)
1430 install_var_field (decl, by_ref, 3, ctx);
1432 install_var_local (decl, ctx);
1433 break;
1435 case OMP_CLAUSE_COPYPRIVATE:
1436 case OMP_CLAUSE_COPYIN:
1437 decl = OMP_CLAUSE_DECL (c);
1438 by_ref = use_pointer_for_field (decl, NULL);
1439 install_var_field (decl, by_ref, 3, ctx);
1440 break;
1442 case OMP_CLAUSE_DEFAULT:
1443 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1444 break;
1446 case OMP_CLAUSE_FINAL:
1447 case OMP_CLAUSE_IF:
1448 case OMP_CLAUSE_NUM_THREADS:
1449 case OMP_CLAUSE_SCHEDULE:
1450 if (ctx->outer)
1451 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1452 break;
1454 case OMP_CLAUSE_NOWAIT:
1455 case OMP_CLAUSE_ORDERED:
1456 case OMP_CLAUSE_COLLAPSE:
1457 case OMP_CLAUSE_UNTIED:
1458 case OMP_CLAUSE_MERGEABLE:
1459 break;
1461 default:
1462 gcc_unreachable ();
1466 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1468 switch (OMP_CLAUSE_CODE (c))
1470 case OMP_CLAUSE_LASTPRIVATE:
1471 /* Let the corresponding firstprivate clause create
1472 the variable. */
1473 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1474 scan_array_reductions = true;
1475 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1476 break;
1477 /* FALLTHRU */
1479 case OMP_CLAUSE_PRIVATE:
1480 case OMP_CLAUSE_FIRSTPRIVATE:
1481 case OMP_CLAUSE_REDUCTION:
1482 decl = OMP_CLAUSE_DECL (c);
1483 if (is_variable_sized (decl))
1484 install_var_local (decl, ctx);
1485 fixup_remapped_decl (decl, ctx,
1486 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1487 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1488 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1489 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1490 scan_array_reductions = true;
1491 break;
1493 case OMP_CLAUSE_SHARED:
1494 decl = OMP_CLAUSE_DECL (c);
1495 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1496 fixup_remapped_decl (decl, ctx, false);
1497 break;
1499 case OMP_CLAUSE_COPYPRIVATE:
1500 case OMP_CLAUSE_COPYIN:
1501 case OMP_CLAUSE_DEFAULT:
1502 case OMP_CLAUSE_IF:
1503 case OMP_CLAUSE_NUM_THREADS:
1504 case OMP_CLAUSE_SCHEDULE:
1505 case OMP_CLAUSE_NOWAIT:
1506 case OMP_CLAUSE_ORDERED:
1507 case OMP_CLAUSE_COLLAPSE:
1508 case OMP_CLAUSE_UNTIED:
1509 case OMP_CLAUSE_FINAL:
1510 case OMP_CLAUSE_MERGEABLE:
1511 break;
1513 default:
1514 gcc_unreachable ();
1518 if (scan_array_reductions)
1519 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1520 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1521 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1523 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1524 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1526 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1527 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1528 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1531 /* Create a new name for the omp child function. Returns an identifier. */
1533 static GTY(()) unsigned int tmp_ompfn_id_num;
1535 static tree
1536 create_omp_child_function_name (bool task_copy)
1538 return (clone_function_name (current_function_decl,
1539 task_copy ? "_omp_cpyfn" : "_omp_fn"));
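/* Editorial example, not part of the original file: for a containing
   function foo, clone_function_name produces identifiers of the form
   foo._omp_fn.N for outlined parallel/task bodies and foo._omp_cpyfn.N
   for task firstprivate copy functions (the separator character is
   target dependent).  */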
1542 /* Build a decl for the omp child function. It will not contain a body
1543 yet, just the bare decl. */
1545 static void
1546 create_omp_child_function (omp_context *ctx, bool task_copy)
1548 tree decl, type, name, t;
1550 name = create_omp_child_function_name (task_copy);
1551 if (task_copy)
1552 type = build_function_type_list (void_type_node, ptr_type_node,
1553 ptr_type_node, NULL_TREE);
1554 else
1555 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1557 decl = build_decl (gimple_location (ctx->stmt),
1558 FUNCTION_DECL, name, type);
1560 if (!task_copy)
1561 ctx->cb.dst_fn = decl;
1562 else
1563 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1565 TREE_STATIC (decl) = 1;
1566 TREE_USED (decl) = 1;
1567 DECL_ARTIFICIAL (decl) = 1;
1568 DECL_NAMELESS (decl) = 1;
1569 DECL_IGNORED_P (decl) = 0;
1570 TREE_PUBLIC (decl) = 0;
1571 DECL_UNINLINABLE (decl) = 1;
1572 DECL_EXTERNAL (decl) = 0;
1573 DECL_CONTEXT (decl) = NULL_TREE;
1574 DECL_INITIAL (decl) = make_node (BLOCK);
1576 t = build_decl (DECL_SOURCE_LOCATION (decl),
1577 RESULT_DECL, NULL_TREE, void_type_node);
1578 DECL_ARTIFICIAL (t) = 1;
1579 DECL_IGNORED_P (t) = 1;
1580 DECL_CONTEXT (t) = decl;
1581 DECL_RESULT (decl) = t;
1583 t = build_decl (DECL_SOURCE_LOCATION (decl),
1584 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1585 DECL_ARTIFICIAL (t) = 1;
1586 DECL_NAMELESS (t) = 1;
1587 DECL_ARG_TYPE (t) = ptr_type_node;
1588 DECL_CONTEXT (t) = current_function_decl;
1589 TREE_USED (t) = 1;
1590 DECL_ARGUMENTS (decl) = t;
1591 if (!task_copy)
1592 ctx->receiver_decl = t;
1593 else
1595 t = build_decl (DECL_SOURCE_LOCATION (decl),
1596 PARM_DECL, get_identifier (".omp_data_o"),
1597 ptr_type_node);
1598 DECL_ARTIFICIAL (t) = 1;
1599 DECL_NAMELESS (t) = 1;
1600 DECL_ARG_TYPE (t) = ptr_type_node;
1601 DECL_CONTEXT (t) = current_function_decl;
1602 TREE_USED (t) = 1;
1603 TREE_ADDRESSABLE (t) = 1;
1604 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1605 DECL_ARGUMENTS (decl) = t;
1608 /* Allocate memory for the function structure. The call to
1609 allocate_struct_function clobbers CFUN, so we need to restore
1610 it afterward. */
1611 push_struct_function (decl);
1612 cfun->function_end_locus = gimple_location (ctx->stmt);
1613 pop_cfun ();
1617 /* Scan an OpenMP parallel directive. */
1619 static void
1620 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1622 omp_context *ctx;
1623 tree name;
1624 gimple stmt = gsi_stmt (*gsi);
1626 /* Ignore parallel directives with empty bodies, unless there
1627 are copyin clauses. */
1628 if (optimize > 0
1629 && empty_body_p (gimple_omp_body (stmt))
1630 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1631 OMP_CLAUSE_COPYIN) == NULL)
1633 gsi_replace (gsi, gimple_build_nop (), false);
1634 return;
1637 ctx = new_omp_context (stmt, outer_ctx);
1638 if (taskreg_nesting_level > 1)
1639 ctx->is_nested = true;
1640 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1641 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1642 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1643 name = create_tmp_var_name (".omp_data_s");
1644 name = build_decl (gimple_location (stmt),
1645 TYPE_DECL, name, ctx->record_type);
1646 DECL_ARTIFICIAL (name) = 1;
1647 DECL_NAMELESS (name) = 1;
1648 TYPE_NAME (ctx->record_type) = name;
1649 create_omp_child_function (ctx, false);
1650 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1652 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1653 scan_omp (gimple_omp_body (stmt), ctx);
1655 if (TYPE_FIELDS (ctx->record_type) == NULL)
1656 ctx->record_type = ctx->receiver_decl = NULL;
1657 else
1659 layout_type (ctx->record_type);
1660 fixup_child_record_type (ctx);
1664 /* Scan an OpenMP task directive. */
1666 static void
1667 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1669 omp_context *ctx;
1670 tree name, t;
1671 gimple stmt = gsi_stmt (*gsi);
1672 location_t loc = gimple_location (stmt);
1674 /* Ignore task directives with empty bodies. */
1675 if (optimize > 0
1676 && empty_body_p (gimple_omp_body (stmt)))
1678 gsi_replace (gsi, gimple_build_nop (), false);
1679 return;
1682 ctx = new_omp_context (stmt, outer_ctx);
1683 if (taskreg_nesting_level > 1)
1684 ctx->is_nested = true;
1685 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1686 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1687 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1688 name = create_tmp_var_name (".omp_data_s");
1689 name = build_decl (gimple_location (stmt),
1690 TYPE_DECL, name, ctx->record_type);
1691 DECL_ARTIFICIAL (name) = 1;
1692 DECL_NAMELESS (name) = 1;
1693 TYPE_NAME (ctx->record_type) = name;
1694 create_omp_child_function (ctx, false);
1695 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1697 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1699 if (ctx->srecord_type)
1701 name = create_tmp_var_name (".omp_data_a");
1702 name = build_decl (gimple_location (stmt),
1703 TYPE_DECL, name, ctx->srecord_type);
1704 DECL_ARTIFICIAL (name) = 1;
1705 DECL_NAMELESS (name) = 1;
1706 TYPE_NAME (ctx->srecord_type) = name;
1707 create_omp_child_function (ctx, true);
1710 scan_omp (gimple_omp_body (stmt), ctx);
1712 if (TYPE_FIELDS (ctx->record_type) == NULL)
1714 ctx->record_type = ctx->receiver_decl = NULL;
1715 t = build_int_cst (long_integer_type_node, 0);
1716 gimple_omp_task_set_arg_size (stmt, t);
1717 t = build_int_cst (long_integer_type_node, 1);
1718 gimple_omp_task_set_arg_align (stmt, t);
1720 else
1722 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1723 /* Move VLA fields to the end. */
1724 p = &TYPE_FIELDS (ctx->record_type);
1725 while (*p)
1726 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1727 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1729 *q = *p;
1730 *p = TREE_CHAIN (*p);
1731 TREE_CHAIN (*q) = NULL_TREE;
1732 q = &TREE_CHAIN (*q);
1734 else
1735 p = &DECL_CHAIN (*p);
1736 *p = vla_fields;
1737 layout_type (ctx->record_type);
1738 fixup_child_record_type (ctx);
1739 if (ctx->srecord_type)
1740 layout_type (ctx->srecord_type);
1741 t = fold_convert_loc (loc, long_integer_type_node,
1742 TYPE_SIZE_UNIT (ctx->record_type));
1743 gimple_omp_task_set_arg_size (stmt, t);
1744 t = build_int_cst (long_integer_type_node,
1745 TYPE_ALIGN_UNIT (ctx->record_type));
1746 gimple_omp_task_set_arg_align (stmt, t);
1751 /* Scan an OpenMP loop directive. */
1753 static void
1754 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1756 omp_context *ctx;
1757 size_t i;
1759 ctx = new_omp_context (stmt, outer_ctx);
1761 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1763 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1764 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1766 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1767 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1768 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1769 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1771 scan_omp (gimple_omp_body (stmt), ctx);
1774 /* Scan an OpenMP sections directive. */
1776 static void
1777 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1779 omp_context *ctx;
1781 ctx = new_omp_context (stmt, outer_ctx);
1782 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1783 scan_omp (gimple_omp_body (stmt), ctx);
1786 /* Scan an OpenMP single directive. */
1788 static void
1789 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1791 omp_context *ctx;
1792 tree name;
1794 ctx = new_omp_context (stmt, outer_ctx);
1795 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1796 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1797 name = create_tmp_var_name (".omp_copy_s");
1798 name = build_decl (gimple_location (stmt),
1799 TYPE_DECL, name, ctx->record_type);
1800 TYPE_NAME (ctx->record_type) = name;
1802 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1803 scan_omp (gimple_omp_body (stmt), ctx);
1805 if (TYPE_FIELDS (ctx->record_type) == NULL)
1806 ctx->record_type = NULL;
1807 else
1808 layout_type (ctx->record_type);
1812 /* Check OpenMP nesting restrictions. */
1813 static void
1814 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1816 switch (gimple_code (stmt))
1818 case GIMPLE_OMP_FOR:
1819 case GIMPLE_OMP_SECTIONS:
1820 case GIMPLE_OMP_SINGLE:
1821 case GIMPLE_CALL:
1822 for (; ctx != NULL; ctx = ctx->outer)
1823 switch (gimple_code (ctx->stmt))
1825 case GIMPLE_OMP_FOR:
1826 case GIMPLE_OMP_SECTIONS:
1827 case GIMPLE_OMP_SINGLE:
1828 case GIMPLE_OMP_ORDERED:
1829 case GIMPLE_OMP_MASTER:
1830 case GIMPLE_OMP_TASK:
1831 if (is_gimple_call (stmt))
1833 warning (0, "barrier region may not be closely nested inside "
1834 "of work-sharing, critical, ordered, master or "
1835 "explicit task region");
1836 return;
1838 warning (0, "work-sharing region may not be closely nested inside "
1839 "of work-sharing, critical, ordered, master or explicit "
1840 "task region");
1841 return;
1842 case GIMPLE_OMP_PARALLEL:
1843 return;
1844 default:
1845 break;
1847 break;
1848 case GIMPLE_OMP_MASTER:
1849 for (; ctx != NULL; ctx = ctx->outer)
1850 switch (gimple_code (ctx->stmt))
1852 case GIMPLE_OMP_FOR:
1853 case GIMPLE_OMP_SECTIONS:
1854 case GIMPLE_OMP_SINGLE:
1855 case GIMPLE_OMP_TASK:
1856 warning (0, "master region may not be closely nested inside "
1857 "of work-sharing or explicit task region");
1858 return;
1859 case GIMPLE_OMP_PARALLEL:
1860 return;
1861 default:
1862 break;
1864 break;
1865 case GIMPLE_OMP_ORDERED:
1866 for (; ctx != NULL; ctx = ctx->outer)
1867 switch (gimple_code (ctx->stmt))
1869 case GIMPLE_OMP_CRITICAL:
1870 case GIMPLE_OMP_TASK:
1871 warning (0, "ordered region may not be closely nested inside "
1872 "of critical or explicit task region");
1873 return;
1874 case GIMPLE_OMP_FOR:
1875 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1876 OMP_CLAUSE_ORDERED) == NULL)
1877 warning (0, "ordered region must be closely nested inside "
1878 "a loop region with an ordered clause");
1879 return;
1880 case GIMPLE_OMP_PARALLEL:
1881 return;
1882 default:
1883 break;
1885 break;
1886 case GIMPLE_OMP_CRITICAL:
1887 for (; ctx != NULL; ctx = ctx->outer)
1888 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1889 && (gimple_omp_critical_name (stmt)
1890 == gimple_omp_critical_name (ctx->stmt)))
1892 warning (0, "critical region may not be nested inside a critical "
1893 "region with the same name");
1894 return;
1896 break;
1897 default:
1898 break;
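/* Editorial example, not part of the original file, of a nesting the checks
   above warn about: a work-sharing region closely nested inside another
   work-sharing region with no intervening parallel.

       #pragma omp for
       for (i = 0; i < n; i++)
         {
       #pragma omp single           -- "work-sharing region may not be
           f (i);                      closely nested inside of ..." warning
         }

   Wrapping the inner construct in its own "#pragma omp parallel" makes the
   walk above stop at the GIMPLE_OMP_PARALLEL context, so no warning is
   issued.  */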
1903 /* Helper function for scan_omp.
1905 Callback for walk_tree or operators in walk_gimple_stmt used to
1906 scan for OpenMP directives in TP. */
1908 static tree
1909 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1911 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1912 omp_context *ctx = (omp_context *) wi->info;
1913 tree t = *tp;
1915 switch (TREE_CODE (t))
1917 case VAR_DECL:
1918 case PARM_DECL:
1919 case LABEL_DECL:
1920 case RESULT_DECL:
1921 if (ctx)
1922 *tp = remap_decl (t, &ctx->cb);
1923 break;
1925 default:
1926 if (ctx && TYPE_P (t))
1927 *tp = remap_type (t, &ctx->cb);
1928 else if (!DECL_P (t))
1930 *walk_subtrees = 1;
1931 if (ctx)
1933 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1934 if (tem != TREE_TYPE (t))
1936 if (TREE_CODE (t) == INTEGER_CST)
1937 *tp = build_int_cst_wide (tem,
1938 TREE_INT_CST_LOW (t),
1939 TREE_INT_CST_HIGH (t));
1940 else
1941 TREE_TYPE (t) = tem;
1945 break;
1948 return NULL_TREE;
1952 /* Helper function for scan_omp.
1954 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1955 the current statement in GSI. */
1957 static tree
1958 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1959 struct walk_stmt_info *wi)
1961 gimple stmt = gsi_stmt (*gsi);
1962 omp_context *ctx = (omp_context *) wi->info;
1964 if (gimple_has_location (stmt))
1965 input_location = gimple_location (stmt);
1967 /* Check the OpenMP nesting restrictions. */
1968 if (ctx != NULL)
1970 if (is_gimple_omp (stmt))
1971 check_omp_nesting_restrictions (stmt, ctx);
1972 else if (is_gimple_call (stmt))
1974 tree fndecl = gimple_call_fndecl (stmt);
1975 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1976 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1977 check_omp_nesting_restrictions (stmt, ctx);
1981 *handled_ops_p = true;
1983 switch (gimple_code (stmt))
1985 case GIMPLE_OMP_PARALLEL:
1986 taskreg_nesting_level++;
1987 scan_omp_parallel (gsi, ctx);
1988 taskreg_nesting_level--;
1989 break;
1991 case GIMPLE_OMP_TASK:
1992 taskreg_nesting_level++;
1993 scan_omp_task (gsi, ctx);
1994 taskreg_nesting_level--;
1995 break;
1997 case GIMPLE_OMP_FOR:
1998 scan_omp_for (stmt, ctx);
1999 break;
2001 case GIMPLE_OMP_SECTIONS:
2002 scan_omp_sections (stmt, ctx);
2003 break;
2005 case GIMPLE_OMP_SINGLE:
2006 scan_omp_single (stmt, ctx);
2007 break;
2009 case GIMPLE_OMP_SECTION:
2010 case GIMPLE_OMP_MASTER:
2011 case GIMPLE_OMP_ORDERED:
2012 case GIMPLE_OMP_CRITICAL:
2013 ctx = new_omp_context (stmt, ctx);
2014 scan_omp (gimple_omp_body (stmt), ctx);
2015 break;
2017 case GIMPLE_BIND:
2019 tree var;
2021 *handled_ops_p = false;
2022 if (ctx)
2023 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2024 insert_decl_map (&ctx->cb, var, var);
2026 break;
2027 default:
2028 *handled_ops_p = false;
2029 break;
2032 return NULL_TREE;
2036 /* Scan all the statements starting at the current statement. CTX
2037 contains context information about the OpenMP directives and
2038 clauses found during the scan. */
2040 static void
2041 scan_omp (gimple_seq body, omp_context *ctx)
2043 location_t saved_location;
2044 struct walk_stmt_info wi;
2046 memset (&wi, 0, sizeof (wi));
2047 wi.info = ctx;
2048 wi.want_locations = true;
2050 saved_location = input_location;
2051 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2052 input_location = saved_location;
2055 /* Re-gimplification and code generation routines. */
2057 /* Build a call to GOMP_barrier. */
2059 static tree
2060 build_omp_barrier (void)
2062 return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
2065 /* If a context was created for STMT when it was scanned, return it. */
2067 static omp_context *
2068 maybe_lookup_ctx (gimple stmt)
2070 splay_tree_node n;
2071 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2072 return n ? (omp_context *) n->value : NULL;
2076 /* Find the mapping for DECL in CTX or the immediately enclosing
2077 context that has a mapping for DECL.
2079 If CTX is a nested parallel directive, we may have to use the decl
2080 mappings created in CTX's parent context. Suppose that we have the
2081 following parallel nesting (variable UIDs shown for clarity):
2083 iD.1562 = 0;
2084 #omp parallel shared(iD.1562) -> outer parallel
2085 iD.1562 = iD.1562 + 1;
2087 #omp parallel shared (iD.1562) -> inner parallel
2088 iD.1562 = iD.1562 - 1;
2090 Each parallel structure will create a distinct .omp_data_s structure
2091 for copying iD.1562 in/out of the directive:
2093 outer parallel .omp_data_s.1.i -> iD.1562
2094 inner parallel .omp_data_s.2.i -> iD.1562
2096 A shared variable mapping will produce a copy-out operation before
2097 the parallel directive and a copy-in operation after it. So, in
2098 this case we would have:
2100 iD.1562 = 0;
2101 .omp_data_o.1.i = iD.1562;
2102 #omp parallel shared(iD.1562) -> outer parallel
2103 .omp_data_i.1 = &.omp_data_o.1
2104 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2106 .omp_data_o.2.i = iD.1562; -> **
2107 #omp parallel shared(iD.1562) -> inner parallel
2108 .omp_data_i.2 = &.omp_data_o.2
2109 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2112 ** This is a problem. The symbol iD.1562 cannot be referenced
2113 inside the body of the outer parallel region. But since we are
2114 emitting this copy operation while expanding the inner parallel
2115 directive, we need to access the CTX structure of the outer
2116 parallel directive to get the correct mapping:
2118 .omp_data_o.2.i = .omp_data_i.1->i
2120 Since there may be other workshare or parallel directives enclosing
2121 the parallel directive, it may be necessary to walk up the context
2122 parent chain. This is not a problem in general because nested
2123 parallelism happens only rarely. */
2125 static tree
2126 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2128 tree t;
2129 omp_context *up;
2131 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2132 t = maybe_lookup_decl (decl, up);
2134 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2136 return t ? t : decl;
2140 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2141 in outer contexts. */
2143 static tree
2144 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2146 tree t = NULL;
2147 omp_context *up;
2149 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2150 t = maybe_lookup_decl (decl, up);
2152 return t ? t : decl;
2156 /* Construct the initialization value for reduction CLAUSE. */
2158 tree
2159 omp_reduction_init (tree clause, tree type)
2161 location_t loc = OMP_CLAUSE_LOCATION (clause);
2162 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2164 case PLUS_EXPR:
2165 case MINUS_EXPR:
2166 case BIT_IOR_EXPR:
2167 case BIT_XOR_EXPR:
2168 case TRUTH_OR_EXPR:
2169 case TRUTH_ORIF_EXPR:
2170 case TRUTH_XOR_EXPR:
2171 case NE_EXPR:
2172 return build_zero_cst (type);
2174 case MULT_EXPR:
2175 case TRUTH_AND_EXPR:
2176 case TRUTH_ANDIF_EXPR:
2177 case EQ_EXPR:
2178 return fold_convert_loc (loc, type, integer_one_node);
2180 case BIT_AND_EXPR:
2181 return fold_convert_loc (loc, type, integer_minus_one_node);
2183 case MAX_EXPR:
2184 if (SCALAR_FLOAT_TYPE_P (type))
2186 REAL_VALUE_TYPE max, min;
2187 if (HONOR_INFINITIES (TYPE_MODE (type)))
2189 real_inf (&max);
2190 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2192 else
2193 real_maxval (&min, 1, TYPE_MODE (type));
2194 return build_real (type, min);
2196 else
2198 gcc_assert (INTEGRAL_TYPE_P (type));
2199 return TYPE_MIN_VALUE (type);
2202 case MIN_EXPR:
2203 if (SCALAR_FLOAT_TYPE_P (type))
2205 REAL_VALUE_TYPE max;
2206 if (HONOR_INFINITIES (TYPE_MODE (type)))
2207 real_inf (&max);
2208 else
2209 real_maxval (&max, 0, TYPE_MODE (type));
2210 return build_real (type, max);
2212 else
2214 gcc_assert (INTEGRAL_TYPE_P (type));
2215 return TYPE_MAX_VALUE (type);
2218 default:
2219 gcc_unreachable ();
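/* For illustration, the identity values produced above for some common
   reductions on a plain int (or double) variable are:

     reduction (+:x)  reduction (-:x)  reduction (|:x)  ->  0
     reduction (*:x)  reduction (&&:x)                  ->  1
     reduction (&:x)                                    ->  ~0 (all ones)
     reduction (max:x)  ->  INT_MIN, or -INF (or -DBL_MAX) for floats
     reduction (min:x)  ->  INT_MAX, or +INF (or DBL_MAX) for floats

   Each thread's private copy starts from the identity so the later merge
   of partial results is correct however the work was divided.  */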
2223 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2224 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2225 private variables. Initialization statements go in ILIST, while calls
2226 to destructors go in DLIST. */
2228 static void
2229 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2230 omp_context *ctx)
2232 gimple_stmt_iterator diter;
2233 tree c, dtor, copyin_seq, x, ptr;
2234 bool copyin_by_ref = false;
2235 bool lastprivate_firstprivate = false;
2236 int pass;
2238 *dlist = gimple_seq_alloc ();
2239 diter = gsi_start (*dlist);
2240 copyin_seq = NULL;
2242 /* Do all the fixed sized types in the first pass, and the variable sized
2243 types in the second pass. This makes sure that the scalar arguments to
2244 the variable sized types are processed before we use them in the
2245 variable sized operations. */
2246 for (pass = 0; pass < 2; ++pass)
2248 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2250 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2251 tree var, new_var;
2252 bool by_ref;
2253 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2255 switch (c_kind)
2257 case OMP_CLAUSE_PRIVATE:
2258 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2259 continue;
2260 break;
2261 case OMP_CLAUSE_SHARED:
2262 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2264 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2265 continue;
2267 case OMP_CLAUSE_FIRSTPRIVATE:
2268 case OMP_CLAUSE_COPYIN:
2269 case OMP_CLAUSE_REDUCTION:
2270 break;
2271 case OMP_CLAUSE_LASTPRIVATE:
2272 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2274 lastprivate_firstprivate = true;
2275 if (pass != 0)
2276 continue;
2278 break;
2279 default:
2280 continue;
2283 new_var = var = OMP_CLAUSE_DECL (c);
2284 if (c_kind != OMP_CLAUSE_COPYIN)
2285 new_var = lookup_decl (var, ctx);
2287 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2289 if (pass != 0)
2290 continue;
2292 else if (is_variable_sized (var))
2294 /* For variable sized types, we need to allocate the
2295 actual storage here. Call alloca and store the
2296 result in the pointer decl that we created elsewhere. */
2297 if (pass == 0)
2298 continue;
2300 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2302 gimple stmt;
2303 tree tmp, atmp;
2305 ptr = DECL_VALUE_EXPR (new_var);
2306 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2307 ptr = TREE_OPERAND (ptr, 0);
2308 gcc_assert (DECL_P (ptr));
2309 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2311 /* void *tmp = __builtin_alloca */
2312 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2313 stmt = gimple_build_call (atmp, 1, x);
2314 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2315 gimple_add_tmp_var (tmp);
2316 gimple_call_set_lhs (stmt, tmp);
2318 gimple_seq_add_stmt (ilist, stmt);
2320 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2321 gimplify_assign (ptr, x, ilist);
2324 else if (is_reference (var))
2326 /* For references that are being privatized for Fortran,
2327 allocate new backing storage for the new pointer
2328 variable. This allows us to avoid changing all the
2329 code that expects a pointer to something that expects
2330 a direct variable. Note that this doesn't apply to
2331 C++, since reference types are disallowed in data
2332 sharing clauses there, except for NRV optimized
2333 return values. */
2334 if (pass == 0)
2335 continue;
2337 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2338 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2340 x = build_receiver_ref (var, false, ctx);
2341 x = build_fold_addr_expr_loc (clause_loc, x);
2343 else if (TREE_CONSTANT (x))
2345 const char *name = NULL;
2346 if (DECL_NAME (var))
2347 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2349 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2350 name);
2351 gimple_add_tmp_var (x);
2352 TREE_ADDRESSABLE (x) = 1;
2353 x = build_fold_addr_expr_loc (clause_loc, x);
2355 else
2357 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2358 x = build_call_expr_loc (clause_loc, atmp, 1, x);
2361 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2362 gimplify_assign (new_var, x, ilist);
2364 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2366 else if (c_kind == OMP_CLAUSE_REDUCTION
2367 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2369 if (pass == 0)
2370 continue;
2372 else if (pass != 0)
2373 continue;
2375 switch (OMP_CLAUSE_CODE (c))
2377 case OMP_CLAUSE_SHARED:
2378 /* Shared global vars are just accessed directly. */
2379 if (is_global_var (new_var))
2380 break;
2381 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2382 needs to be delayed until after fixup_child_record_type so
2383 that we get the correct type during the dereference. */
2384 by_ref = use_pointer_for_field (var, ctx);
2385 x = build_receiver_ref (var, by_ref, ctx);
2386 SET_DECL_VALUE_EXPR (new_var, x);
2387 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2389 /* ??? If VAR is not passed by reference, and the variable
2390 hasn't been initialized yet, then we'll get a warning for
2391 the store into the omp_data_s structure. Ideally, we'd be
2392 able to notice this and not store anything at all, but
2393 we're generating code too early. Suppress the warning. */
2394 if (!by_ref)
2395 TREE_NO_WARNING (var) = 1;
2396 break;
2398 case OMP_CLAUSE_LASTPRIVATE:
2399 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2400 break;
2401 /* FALLTHRU */
2403 case OMP_CLAUSE_PRIVATE:
2404 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2405 x = build_outer_var_ref (var, ctx);
2406 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2408 if (is_task_ctx (ctx))
2409 x = build_receiver_ref (var, false, ctx);
2410 else
2411 x = build_outer_var_ref (var, ctx);
2413 else
2414 x = NULL;
2415 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2416 if (x)
2417 gimplify_and_add (x, ilist);
2418 /* FALLTHRU */
2420 do_dtor:
2421 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2422 if (x)
2424 gimple_seq tseq = NULL;
2426 dtor = x;
2427 gimplify_stmt (&dtor, &tseq);
2428 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2430 break;
2432 case OMP_CLAUSE_FIRSTPRIVATE:
2433 if (is_task_ctx (ctx))
2435 if (is_reference (var) || is_variable_sized (var))
2436 goto do_dtor;
2437 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2438 ctx))
2439 || use_pointer_for_field (var, NULL))
2441 x = build_receiver_ref (var, false, ctx);
2442 SET_DECL_VALUE_EXPR (new_var, x);
2443 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2444 goto do_dtor;
2447 x = build_outer_var_ref (var, ctx);
2448 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2449 gimplify_and_add (x, ilist);
2450 goto do_dtor;
2451 break;
2453 case OMP_CLAUSE_COPYIN:
2454 by_ref = use_pointer_for_field (var, NULL);
2455 x = build_receiver_ref (var, by_ref, ctx);
2456 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2457 append_to_statement_list (x, &copyin_seq);
2458 copyin_by_ref |= by_ref;
2459 break;
2461 case OMP_CLAUSE_REDUCTION:
2462 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2464 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2465 x = build_outer_var_ref (var, ctx);
2467 if (is_reference (var))
2468 x = build_fold_addr_expr_loc (clause_loc, x);
2469 SET_DECL_VALUE_EXPR (placeholder, x);
2470 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2471 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2472 gimple_seq_add_seq (ilist,
2473 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2474 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2475 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2477 else
2479 x = omp_reduction_init (c, TREE_TYPE (new_var));
2480 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2481 gimplify_assign (new_var, x, ilist);
2483 break;
2485 default:
2486 gcc_unreachable ();
2491 /* The copyin sequence is not to be executed by the main thread, since
2492 that would result in self-copies. For scalars the self-copy may not
2493 be observable, but for C++ operator= it certainly is. */
2494 if (copyin_seq)
2496 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
2497 0);
2498 x = build2 (NE_EXPR, boolean_type_node, x,
2499 build_int_cst (TREE_TYPE (x), 0));
2500 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2501 gimplify_and_add (x, ilist);
2504 /* If any copyin variable is passed by reference, we must ensure the
2505 master thread doesn't modify it before it is copied over in all
2506 threads. Similarly for variables in both firstprivate and
2507 lastprivate clauses we need to ensure the lastprivate copying
2508 happens after firstprivate copying in all threads. */
2509 if (copyin_by_ref || lastprivate_firstprivate)
2510 gimplify_and_add (build_omp_barrier (), ilist);
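/* As a rough sketch, for "#pragma omp parallel firstprivate (a) copyin (t)"
   the receiver-side ILIST built here looks conceptually like:

     a = .omp_data_i->a;                    initialize the private copy
     if (omp_get_thread_num () != 0)
       t = <master's t via .omp_data_i>;    copyin, skipped on the master
     GOMP_barrier ();                       only when a copyin is by
                                            reference or firstprivate and
                                            lastprivate are combined

   where A and T stand for the remapped private decls of the child; the
   exact references depend on use_pointer_for_field.  */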
2514 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2515 both parallel and workshare constructs. PREDICATE may be NULL if it's
2516 always true. */
2518 static void
2519 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2520 omp_context *ctx)
2522 tree x, c, label = NULL;
2523 bool par_clauses = false;
2525 /* Early exit if there are no lastprivate clauses. */
2526 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2527 if (clauses == NULL)
2529 /* If this was a workshare clause, see if it had been combined
2530 with its parallel. In that case, look for the clauses on the
2531 parallel statement itself. */
2532 if (is_parallel_ctx (ctx))
2533 return;
2535 ctx = ctx->outer;
2536 if (ctx == NULL || !is_parallel_ctx (ctx))
2537 return;
2539 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2540 OMP_CLAUSE_LASTPRIVATE);
2541 if (clauses == NULL)
2542 return;
2543 par_clauses = true;
2546 if (predicate)
2548 gimple stmt;
2549 tree label_true, arm1, arm2;
2551 label = create_artificial_label (UNKNOWN_LOCATION);
2552 label_true = create_artificial_label (UNKNOWN_LOCATION);
2553 arm1 = TREE_OPERAND (predicate, 0);
2554 arm2 = TREE_OPERAND (predicate, 1);
2555 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2556 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2557 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2558 label_true, label);
2559 gimple_seq_add_stmt (stmt_list, stmt);
2560 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2563 for (c = clauses; c ;)
2565 tree var, new_var;
2566 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2568 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2570 var = OMP_CLAUSE_DECL (c);
2571 new_var = lookup_decl (var, ctx);
2573 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2575 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2576 gimple_seq_add_seq (stmt_list,
2577 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2579 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2581 x = build_outer_var_ref (var, ctx);
2582 if (is_reference (var))
2583 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2584 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2585 gimplify_and_add (x, stmt_list);
2587 c = OMP_CLAUSE_CHAIN (c);
2588 if (c == NULL && !par_clauses)
2590 /* If this was a workshare clause, see if it had been combined
2591 with its parallel. In that case, continue looking for the
2592 clauses also on the parallel statement itself. */
2593 if (is_parallel_ctx (ctx))
2594 break;
2596 ctx = ctx->outer;
2597 if (ctx == NULL || !is_parallel_ctx (ctx))
2598 break;
2600 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2601 OMP_CLAUSE_LASTPRIVATE);
2602 par_clauses = true;
2606 if (label)
2607 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
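/* A rough sketch of the sequence built here for
   "#pragma omp for lastprivate (x)", with PREDICATE testing whether this
   thread executed the sequentially last iteration:

     if (<predicate>) goto lab_true; else goto lab;
   lab_true:
     <original x> = x;                      copy the private value back
   lab:

   Without a predicate the copy-back is emitted unconditionally, and for
   a combined parallel workshare the clauses may live on the enclosing
   parallel statement, as handled above.  */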
2611 /* Generate code to implement the REDUCTION clauses. */
2613 static void
2614 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2616 gimple_seq sub_seq = NULL;
2617 gimple stmt;
2618 tree x, c;
2619 int count = 0;
2621 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2622 update in that case, otherwise use a lock. */
2623 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2624 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2626 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2628 /* Never use OMP_ATOMIC for array reductions. */
2629 count = -1;
2630 break;
2632 count++;
2635 if (count == 0)
2636 return;
2638 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2640 tree var, ref, new_var;
2641 enum tree_code code;
2642 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2644 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2645 continue;
2647 var = OMP_CLAUSE_DECL (c);
2648 new_var = lookup_decl (var, ctx);
2649 if (is_reference (var))
2650 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2651 ref = build_outer_var_ref (var, ctx);
2652 code = OMP_CLAUSE_REDUCTION_CODE (c);
2654 /* reduction(-:var) sums up the partial results, so it acts
2655 identically to reduction(+:var). */
2656 if (code == MINUS_EXPR)
2657 code = PLUS_EXPR;
2659 if (count == 1)
2661 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2663 addr = save_expr (addr);
2664 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2665 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2666 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2667 gimplify_and_add (x, stmt_seqp);
2668 return;
2671 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2673 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2675 if (is_reference (var))
2676 ref = build_fold_addr_expr_loc (clause_loc, ref);
2677 SET_DECL_VALUE_EXPR (placeholder, ref);
2678 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2679 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2680 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2681 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2682 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2684 else
2686 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2687 ref = build_outer_var_ref (var, ctx);
2688 gimplify_assign (ref, x, &sub_seq);
2692 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
2693 0);
2694 gimple_seq_add_stmt (stmt_seqp, stmt);
2696 gimple_seq_add_seq (stmt_seqp, sub_seq);
2698 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
2699 0);
2700 gimple_seq_add_stmt (stmt_seqp, stmt);
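/* As an illustration, for "#pragma omp parallel for reduction (+:sum)"
   with exactly one reduction clause the merge built here is an atomic
   update, conceptually

     #pragma omp atomic
     sum = sum + sum_priv;

   while with several clauses, or a placeholder (array) reduction, the
   merges are serialized instead:

     GOMP_atomic_start ();
     sum = sum + sum_priv;
     ...
     GOMP_atomic_end ();  */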
2704 /* Generate code to implement the COPYPRIVATE clauses. */
2706 static void
2707 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2708 omp_context *ctx)
2710 tree c;
2712 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2714 tree var, new_var, ref, x;
2715 bool by_ref;
2716 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2718 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2719 continue;
2721 var = OMP_CLAUSE_DECL (c);
2722 by_ref = use_pointer_for_field (var, NULL);
2724 ref = build_sender_ref (var, ctx);
2725 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2726 if (by_ref)
2728 x = build_fold_addr_expr_loc (clause_loc, new_var);
2729 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2731 gimplify_assign (ref, x, slist);
2733 ref = build_receiver_ref (var, false, ctx);
2734 if (by_ref)
2736 ref = fold_convert_loc (clause_loc,
2737 build_pointer_type (TREE_TYPE (new_var)),
2738 ref);
2739 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2741 if (is_reference (var))
2743 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2744 ref = build_simple_mem_ref_loc (clause_loc, ref);
2745 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2747 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2748 gimplify_and_add (x, rlist);
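/* A rough sketch for "#pragma omp single copyprivate (x)": the thread
   that executed the single region stores X (or its address, when passed
   by reference) into the communication record via SLIST, and the other
   threads copy it back out via RLIST:

     <record>.x = x;         executing thread, before the broadcast
     ...
     x = <record>.x;         receiving threads, after the broadcast

   The record and field names here are only illustrative.  */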
2753 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2754 and REDUCTION from the sender (aka parent) side. */
2756 static void
2757 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2758 omp_context *ctx)
2760 tree c;
2762 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2764 tree val, ref, x, var;
2765 bool by_ref, do_in = false, do_out = false;
2766 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2768 switch (OMP_CLAUSE_CODE (c))
2770 case OMP_CLAUSE_PRIVATE:
2771 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2772 break;
2773 continue;
2774 case OMP_CLAUSE_FIRSTPRIVATE:
2775 case OMP_CLAUSE_COPYIN:
2776 case OMP_CLAUSE_LASTPRIVATE:
2777 case OMP_CLAUSE_REDUCTION:
2778 break;
2779 default:
2780 continue;
2783 val = OMP_CLAUSE_DECL (c);
2784 var = lookup_decl_in_outer_ctx (val, ctx);
2786 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2787 && is_global_var (var))
2788 continue;
2789 if (is_variable_sized (val))
2790 continue;
2791 by_ref = use_pointer_for_field (val, NULL);
2793 switch (OMP_CLAUSE_CODE (c))
2795 case OMP_CLAUSE_PRIVATE:
2796 case OMP_CLAUSE_FIRSTPRIVATE:
2797 case OMP_CLAUSE_COPYIN:
2798 do_in = true;
2799 break;
2801 case OMP_CLAUSE_LASTPRIVATE:
2802 if (by_ref || is_reference (val))
2804 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2805 continue;
2806 do_in = true;
2808 else
2810 do_out = true;
2811 if (lang_hooks.decls.omp_private_outer_ref (val))
2812 do_in = true;
2814 break;
2816 case OMP_CLAUSE_REDUCTION:
2817 do_in = true;
2818 do_out = !(by_ref || is_reference (val));
2819 break;
2821 default:
2822 gcc_unreachable ();
2825 if (do_in)
2827 ref = build_sender_ref (val, ctx);
2828 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2829 gimplify_assign (ref, x, ilist);
2830 if (is_task_ctx (ctx))
2831 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2834 if (do_out)
2836 ref = build_sender_ref (val, ctx);
2837 gimplify_assign (var, ref, olist);
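/* A minimal sketch of the sender side built here for
   "#pragma omp parallel firstprivate (a) reduction (+:sum)":

     .omp_data_o.a = a;           do_in: value sent to the children
     .omp_data_o.sum = sum;       do_in: reduction input
     <parallel region runs>
     sum = .omp_data_o.sum;       do_out: merged result copied back

   When use_pointer_for_field is true, the address of the variable is
   stored instead of its value.  */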
2842 /* Generate code to implement SHARED from the sender (aka parent)
2843 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2844 list things that got automatically shared. */
2846 static void
2847 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2849 tree var, ovar, nvar, f, x, record_type;
2851 if (ctx->record_type == NULL)
2852 return;
2854 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2855 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
2857 ovar = DECL_ABSTRACT_ORIGIN (f);
2858 nvar = maybe_lookup_decl (ovar, ctx);
2859 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2860 continue;
2862 /* If CTX is a nested parallel directive, find the immediately
2863 enclosing parallel or workshare construct that contains a
2864 mapping for OVAR. */
2865 var = lookup_decl_in_outer_ctx (ovar, ctx);
2867 if (use_pointer_for_field (ovar, ctx))
2869 x = build_sender_ref (ovar, ctx);
2870 var = build_fold_addr_expr (var);
2871 gimplify_assign (x, var, ilist);
2873 else
2875 x = build_sender_ref (ovar, ctx);
2876 gimplify_assign (x, var, ilist);
2878 if (!TREE_READONLY (var)
2879 /* We don't need to receive a new reference to a result
2880 or parm decl. In fact we may not store to it as we will
2881 invalidate any pending RSO and generate wrong gimple
2882 during inlining. */
2883 && !((TREE_CODE (var) == RESULT_DECL
2884 || TREE_CODE (var) == PARM_DECL)
2885 && DECL_BY_REFERENCE (var)))
2887 x = build_sender_ref (ovar, ctx);
2888 gimplify_assign (var, x, olist);
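/* For illustration, with "#pragma omp parallel shared (i)" where I is a
   local variable of the parent, the code built here is roughly:

     .omp_data_o.i = i;           or &i, when use_pointer_for_field
     <parallel region runs>
     i = .omp_data_o.i;           copy-back, skipped for read-only vars

   mirroring the .omp_data_o/.omp_data_i scheme described above for
   lookup_decl_in_outer_ctx.  */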
2895 /* A convenience function to build an empty GIMPLE_COND with just the
2896 condition. */
2898 static gimple
2899 gimple_build_cond_empty (tree cond)
2901 enum tree_code pred_code;
2902 tree lhs, rhs;
2904 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2905 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2909 /* Build the function calls to GOMP_parallel_start etc to actually
2910 generate the parallel operation. REGION is the parallel region
2911 being expanded. BB is the block in which to insert the code. WS_ARGS
2912 will be set if this is a call to a combined parallel+workshare
2913 construct; it contains the list of additional arguments needed by
2914 the workshare construct. */
2916 static void
2917 expand_parallel_call (struct omp_region *region, basic_block bb,
2918 gimple entry_stmt, VEC(tree,gc) *ws_args)
2920 tree t, t1, t2, val, cond, c, clauses;
2921 gimple_stmt_iterator gsi;
2922 gimple stmt;
2923 enum built_in_function start_ix;
2924 int start_ix2;
2925 location_t clause_loc;
2926 VEC(tree,gc) *args;
2928 clauses = gimple_omp_parallel_clauses (entry_stmt);
2930 /* Determine what flavor of GOMP_parallel_start we will be
2931 emitting. */
2932 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2933 if (is_combined_parallel (region))
2935 switch (region->inner->type)
2937 case GIMPLE_OMP_FOR:
2938 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2939 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2940 + (region->inner->sched_kind
2941 == OMP_CLAUSE_SCHEDULE_RUNTIME
2942 ? 3 : region->inner->sched_kind));
2943 start_ix = (enum built_in_function)start_ix2;
2944 break;
2945 case GIMPLE_OMP_SECTIONS:
2946 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2947 break;
2948 default:
2949 gcc_unreachable ();
2953 /* By default, the value of NUM_THREADS is zero (selected at run time)
2954 and there is no conditional. */
2955 cond = NULL_TREE;
2956 val = build_int_cst (unsigned_type_node, 0);
2958 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2959 if (c)
2960 cond = OMP_CLAUSE_IF_EXPR (c);
2962 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2963 if (c)
2965 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2966 clause_loc = OMP_CLAUSE_LOCATION (c);
2968 else
2969 clause_loc = gimple_location (entry_stmt);
2971 /* Ensure 'val' is of the correct type. */
2972 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
2974 /* If we found the clause 'if (cond)', build either
2975 (cond != 0) or (cond ? val : 1u). */
2976 if (cond)
2978 gimple_stmt_iterator gsi;
2980 cond = gimple_boolify (cond);
2982 if (integer_zerop (val))
2983 val = fold_build2_loc (clause_loc,
2984 EQ_EXPR, unsigned_type_node, cond,
2985 build_int_cst (TREE_TYPE (cond), 0));
2986 else
2988 basic_block cond_bb, then_bb, else_bb;
2989 edge e, e_then, e_else;
2990 tree tmp_then, tmp_else, tmp_join, tmp_var;
2992 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2993 if (gimple_in_ssa_p (cfun))
2995 tmp_then = make_ssa_name (tmp_var, NULL);
2996 tmp_else = make_ssa_name (tmp_var, NULL);
2997 tmp_join = make_ssa_name (tmp_var, NULL);
2999 else
3001 tmp_then = tmp_var;
3002 tmp_else = tmp_var;
3003 tmp_join = tmp_var;
3006 e = split_block (bb, NULL);
3007 cond_bb = e->src;
3008 bb = e->dest;
3009 remove_edge (e);
3011 then_bb = create_empty_bb (cond_bb);
3012 else_bb = create_empty_bb (then_bb);
3013 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3014 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3016 stmt = gimple_build_cond_empty (cond);
3017 gsi = gsi_start_bb (cond_bb);
3018 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3020 gsi = gsi_start_bb (then_bb);
3021 stmt = gimple_build_assign (tmp_then, val);
3022 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3024 gsi = gsi_start_bb (else_bb);
3025 stmt = gimple_build_assign
3026 (tmp_else, build_int_cst (unsigned_type_node, 1));
3027 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3029 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3030 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3031 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3032 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3034 if (gimple_in_ssa_p (cfun))
3036 gimple phi = create_phi_node (tmp_join, bb);
3037 SSA_NAME_DEF_STMT (tmp_join) = phi;
3038 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3039 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3042 val = tmp_join;
3045 gsi = gsi_start_bb (bb);
3046 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3047 false, GSI_CONTINUE_LINKING);
3050 gsi = gsi_last_bb (bb);
3051 t = gimple_omp_parallel_data_arg (entry_stmt);
3052 if (t == NULL)
3053 t1 = null_pointer_node;
3054 else
3055 t1 = build_fold_addr_expr (t);
3056 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3058 args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
3059 VEC_quick_push (tree, args, t2);
3060 VEC_quick_push (tree, args, t1);
3061 VEC_quick_push (tree, args, val);
3062 VEC_splice (tree, args, ws_args);
3064 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3065 builtin_decl_explicit (start_ix), args);
3067 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3068 false, GSI_CONTINUE_LINKING);
3070 t = gimple_omp_parallel_data_arg (entry_stmt);
3071 if (t == NULL)
3072 t = null_pointer_node;
3073 else
3074 t = build_fold_addr_expr (t);
3075 t = build_call_expr_loc (gimple_location (entry_stmt),
3076 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3077 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3078 false, GSI_CONTINUE_LINKING);
3080 t = build_call_expr_loc (gimple_location (entry_stmt),
3081 builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
3082 0);
3083 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3084 false, GSI_CONTINUE_LINKING);
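/* Putting it together, for "#pragma omp parallel num_threads (n) if (cond)"
   the call sequence emitted into BB is roughly:

     GOMP_parallel_start (child_fn, &.omp_data_o, cond ? n : 1);
     child_fn (&.omp_data_o);     the encountering thread runs the body too
     GOMP_parallel_end ();

   For a combined parallel loop or sections region, the start call is the
   matching GOMP_parallel_loop_*_start or GOMP_parallel_sections_start
   variant with WS_ARGS appended.  */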
3088 /* Build the function call to GOMP_task to actually
3089 generate the task operation. BB is the block in which to insert the code. */
3091 static void
3092 expand_task_call (basic_block bb, gimple entry_stmt)
3094 tree t, t1, t2, t3, flags, cond, c, c2, clauses;
3095 gimple_stmt_iterator gsi;
3096 location_t loc = gimple_location (entry_stmt);
3098 clauses = gimple_omp_task_clauses (entry_stmt);
3100 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3101 if (c)
3102 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3103 else
3104 cond = boolean_true_node;
3106 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3107 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
3108 flags = build_int_cst (unsigned_type_node,
3109 (c ? 1 : 0) + (c2 ? 4 : 0));
3111 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
3112 if (c)
3114 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
3115 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
3116 build_int_cst (unsigned_type_node, 2),
3117 build_int_cst (unsigned_type_node, 0));
3118 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
3121 gsi = gsi_last_bb (bb);
3122 t = gimple_omp_task_data_arg (entry_stmt);
3123 if (t == NULL)
3124 t2 = null_pointer_node;
3125 else
3126 t2 = build_fold_addr_expr_loc (loc, t);
3127 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3128 t = gimple_omp_task_copy_fn (entry_stmt);
3129 if (t == NULL)
3130 t3 = null_pointer_node;
3131 else
3132 t3 = build_fold_addr_expr_loc (loc, t);
3134 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
3135 7, t1, t2, t3,
3136 gimple_omp_task_arg_size (entry_stmt),
3137 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3139 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3140 false, GSI_CONTINUE_LINKING);
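/* For reference, the single call built above has the shape

     GOMP_task (child_fn, &.omp_data_o, copy_fn, arg_size, arg_align,
                if_cond, flags);

   where FLAGS packs the untied (bit 0), final (bit 1) and mergeable
   (bit 2) clauses, and COPY_FN is a null pointer when no task
   firstprivate copy function is needed.  */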
3144 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3145 catch handler and return it. This prevents programs from violating the
3146 structured block semantics with throws. */
3148 static gimple_seq
3149 maybe_catch_exception (gimple_seq body)
3151 gimple g;
3152 tree decl;
3154 if (!flag_exceptions)
3155 return body;
3157 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3158 decl = lang_hooks.eh_protect_cleanup_actions ();
3159 else
3160 decl = builtin_decl_explicit (BUILT_IN_TRAP);
3162 g = gimple_build_eh_must_not_throw (decl);
3163 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3164 GIMPLE_TRY_CATCH);
3166 return gimple_seq_alloc_with_stmt (g);
3169 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
3171 static tree
3172 vec2chain (VEC(tree,gc) *v)
3174 tree chain = NULL_TREE, t;
3175 unsigned ix;
3177 FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
3179 DECL_CHAIN (t) = chain;
3180 chain = t;
3183 return chain;
3187 /* Remove barriers in REGION->EXIT's block. Note that this is only
3188 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3189 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3190 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
3191 removed. */
3193 static void
3194 remove_exit_barrier (struct omp_region *region)
3196 gimple_stmt_iterator gsi;
3197 basic_block exit_bb;
3198 edge_iterator ei;
3199 edge e;
3200 gimple stmt;
3201 int any_addressable_vars = -1;
3203 exit_bb = region->exit;
3205 /* If the parallel region doesn't return, we don't have REGION->EXIT
3206 block at all. */
3207 if (! exit_bb)
3208 return;
3210 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3211 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3212 statements that can appear in between are extremely limited -- no
3213 memory operations at all. Here, we allow nothing at all, so the
3214 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3215 gsi = gsi_last_bb (exit_bb);
3216 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3217 gsi_prev (&gsi);
3218 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3219 return;
3221 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3223 gsi = gsi_last_bb (e->src);
3224 if (gsi_end_p (gsi))
3225 continue;
3226 stmt = gsi_stmt (gsi);
3227 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3228 && !gimple_omp_return_nowait_p (stmt))
3230 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3231 in many cases. If there could be tasks queued, the barrier
3232 might be needed to let the tasks run before some local
3233 variable of the parallel that the task uses as shared
3234 runs out of scope. The task can be spawned either
3235 from within current function (this would be easy to check)
3236 or from some function it calls and gets passed an address
3237 of such a variable. */
3238 if (any_addressable_vars < 0)
3240 gimple parallel_stmt = last_stmt (region->entry);
3241 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3242 tree local_decls, block, decl;
3243 unsigned ix;
3245 any_addressable_vars = 0;
3246 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3247 if (TREE_ADDRESSABLE (decl))
3249 any_addressable_vars = 1;
3250 break;
3252 for (block = gimple_block (stmt);
3253 !any_addressable_vars
3254 && block
3255 && TREE_CODE (block) == BLOCK;
3256 block = BLOCK_SUPERCONTEXT (block))
3258 for (local_decls = BLOCK_VARS (block);
3259 local_decls;
3260 local_decls = DECL_CHAIN (local_decls))
3261 if (TREE_ADDRESSABLE (local_decls))
3263 any_addressable_vars = 1;
3264 break;
3266 if (block == gimple_block (parallel_stmt))
3267 break;
3270 if (!any_addressable_vars)
3271 gimple_omp_return_set_nowait (stmt);
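/* As an example of the transformation, in

     #pragma omp parallel
     {
       #pragma omp for
       for (i = 0; i < n; i++)
         a[i] = f (i);
     }

   the barrier implied at the end of the for loop is immediately followed
   by the implicit barrier of the parallel, so its GIMPLE_OMP_RETURN is
   marked nowait here -- unless addressable locals might still be needed
   by queued tasks, as explained above.  */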
3276 static void
3277 remove_exit_barriers (struct omp_region *region)
3279 if (region->type == GIMPLE_OMP_PARALLEL)
3280 remove_exit_barrier (region);
3282 if (region->inner)
3284 region = region->inner;
3285 remove_exit_barriers (region);
3286 while (region->next)
3288 region = region->next;
3289 remove_exit_barriers (region);
3294 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3295 calls. These can't be declared as const functions, but
3296 within one parallel body they are constant, so they can be
3297 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3298 which are declared const. Similarly for a task body, except
3299 that in an untied task omp_get_thread_num () can change at any task
3300 scheduling point. */
3302 static void
3303 optimize_omp_library_calls (gimple entry_stmt)
3305 basic_block bb;
3306 gimple_stmt_iterator gsi;
3307 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3308 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
3309 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3310 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
3311 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3312 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3313 OMP_CLAUSE_UNTIED) != NULL);
3315 FOR_EACH_BB (bb)
3316 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3318 gimple call = gsi_stmt (gsi);
3319 tree decl;
3321 if (is_gimple_call (call)
3322 && (decl = gimple_call_fndecl (call))
3323 && DECL_EXTERNAL (decl)
3324 && TREE_PUBLIC (decl)
3325 && DECL_INITIAL (decl) == NULL)
3327 tree built_in;
3329 if (DECL_NAME (decl) == thr_num_id)
3331 /* In #pragma omp task untied omp_get_thread_num () can change
3332 during the execution of the task region. */
3333 if (untied_task)
3334 continue;
3335 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3337 else if (DECL_NAME (decl) == num_thr_id)
3338 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3339 else
3340 continue;
3342 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3343 || gimple_call_num_args (call) != 0)
3344 continue;
3346 if (flag_exceptions && !TREE_NOTHROW (decl))
3347 continue;
3349 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3350 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3351 TREE_TYPE (TREE_TYPE (built_in))))
3352 continue;
3354 gimple_call_set_fndecl (call, built_in);
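/* For illustration, inside an outlined parallel body a call such as

     nthreads = omp_get_num_threads ();

   is redirected here to the const builtin

     nthreads = __builtin_omp_get_num_threads ();

   so later passes can CSE or hoist it; omp_get_thread_num () is treated
   the same way, except inside untied tasks where its value may change at
   task scheduling points.  */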
3359 /* Expand the OpenMP parallel or task directive starting at REGION. */
3361 static void
3362 expand_omp_taskreg (struct omp_region *region)
3364 basic_block entry_bb, exit_bb, new_bb;
3365 struct function *child_cfun;
3366 tree child_fn, block, t;
3367 tree save_current;
3368 gimple_stmt_iterator gsi;
3369 gimple entry_stmt, stmt;
3370 edge e;
3371 VEC(tree,gc) *ws_args;
3373 entry_stmt = last_stmt (region->entry);
3374 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3375 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3376 /* If this function has already been instrumented, make sure
3377 the child function isn't instrumented again. */
3378 child_cfun->after_tree_profile = cfun->after_tree_profile;
3380 entry_bb = region->entry;
3381 exit_bb = region->exit;
3383 if (is_combined_parallel (region))
3384 ws_args = region->ws_args;
3385 else
3386 ws_args = NULL;
3388 if (child_cfun->cfg)
3390 /* Due to inlining, it may happen that we have already outlined
3391 the region, in which case all we need to do is make the
3392 sub-graph unreachable and emit the parallel call. */
3393 edge entry_succ_e, exit_succ_e;
3394 gimple_stmt_iterator gsi;
3396 entry_succ_e = single_succ_edge (entry_bb);
3398 gsi = gsi_last_bb (entry_bb);
3399 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3400 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3401 gsi_remove (&gsi, true);
3403 new_bb = entry_bb;
3404 if (exit_bb)
3406 exit_succ_e = single_succ_edge (exit_bb);
3407 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3409 remove_edge_and_dominated_blocks (entry_succ_e);
3411 else
3413 unsigned srcidx, dstidx, num;
3415 /* If the parallel region needs data sent from the parent
3416 function, then the very first statement (except possible
3417 tree profile counter updates) of the parallel body
3418 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3419 &.OMP_DATA_O is passed as an argument to the child function,
3420 we need to replace it with the argument as seen by the child
3421 function.
3423 In most cases, this will end up being the identity assignment
3424 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3425 a function call that has been inlined, the original PARM_DECL
3426 .OMP_DATA_I may have been converted into a different local
3427 variable, in which case we need to keep the assignment. */
3428 if (gimple_omp_taskreg_data_arg (entry_stmt))
3430 basic_block entry_succ_bb = single_succ (entry_bb);
3431 gimple_stmt_iterator gsi;
3432 tree arg, narg;
3433 gimple parcopy_stmt = NULL;
3435 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3437 gimple stmt;
3439 gcc_assert (!gsi_end_p (gsi));
3440 stmt = gsi_stmt (gsi);
3441 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3442 continue;
3444 if (gimple_num_ops (stmt) == 2)
3446 tree arg = gimple_assign_rhs1 (stmt);
3448 /* We ignore the subcode because we're
3449 effectively doing a STRIP_NOPS. */
3451 if (TREE_CODE (arg) == ADDR_EXPR
3452 && TREE_OPERAND (arg, 0)
3453 == gimple_omp_taskreg_data_arg (entry_stmt))
3455 parcopy_stmt = stmt;
3456 break;
3461 gcc_assert (parcopy_stmt != NULL);
3462 arg = DECL_ARGUMENTS (child_fn);
3464 if (!gimple_in_ssa_p (cfun))
3466 if (gimple_assign_lhs (parcopy_stmt) == arg)
3467 gsi_remove (&gsi, true);
3468 else
3470 /* ?? Is setting the subcode really necessary ?? */
3471 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3472 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3475 else
3477 /* If we are in ssa form, we must load the value from the default
3478 definition of the argument. That should not be defined now,
3479 since the argument is not used uninitialized. */
3480 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3481 narg = make_ssa_name (arg, gimple_build_nop ());
3482 set_default_def (arg, narg);
3483 /* ?? Is setting the subcode really necessary ?? */
3484 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3485 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3486 update_stmt (parcopy_stmt);
3490 /* Declare local variables needed in CHILD_CFUN. */
3491 block = DECL_INITIAL (child_fn);
3492 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3493 /* The gimplifier could record temporaries in parallel/task block
3494 rather than in the containing function's local_decls chain,
3495 which would mean cgraph missed finalizing them. Do it now. */
3496 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3497 if (TREE_CODE (t) == VAR_DECL
3498 && TREE_STATIC (t)
3499 && !DECL_EXTERNAL (t))
3500 varpool_finalize_decl (t);
3501 DECL_SAVED_TREE (child_fn) = NULL;
3502 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3503 TREE_USED (block) = 1;
3505 /* Reset DECL_CONTEXT on function arguments. */
3506 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3507 DECL_CONTEXT (t) = child_fn;
3509 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3510 so that it can be moved to the child function. */
3511 gsi = gsi_last_bb (entry_bb);
3512 stmt = gsi_stmt (gsi);
3513 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3514 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3515 gsi_remove (&gsi, true);
3516 e = split_block (entry_bb, stmt);
3517 entry_bb = e->dest;
3518 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3520 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3521 if (exit_bb)
3523 gsi = gsi_last_bb (exit_bb);
3524 gcc_assert (!gsi_end_p (gsi)
3525 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3526 stmt = gimple_build_return (NULL);
3527 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3528 gsi_remove (&gsi, true);
3531 /* Move the parallel region into CHILD_CFUN. */
3533 if (gimple_in_ssa_p (cfun))
3535 push_cfun (child_cfun);
3536 init_tree_ssa (child_cfun);
3537 init_ssa_operands ();
3538 cfun->gimple_df->in_ssa_p = true;
3539 pop_cfun ();
3540 block = NULL_TREE;
3542 else
3543 block = gimple_block (entry_stmt);
3545 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3546 if (exit_bb)
3547 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3549 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3550 num = VEC_length (tree, child_cfun->local_decls);
3551 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3553 t = VEC_index (tree, child_cfun->local_decls, srcidx);
3554 if (DECL_CONTEXT (t) == cfun->decl)
3555 continue;
3556 if (srcidx != dstidx)
3557 VEC_replace (tree, child_cfun->local_decls, dstidx, t);
3558 dstidx++;
3560 if (dstidx != num)
3561 VEC_truncate (tree, child_cfun->local_decls, dstidx);
3563 /* Inform the callgraph about the new function. */
3564 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3565 = cfun->curr_properties;
3566 cgraph_add_new_function (child_fn, true);
3568 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3569 fixed in a following pass. */
3570 push_cfun (child_cfun);
3571 save_current = current_function_decl;
3572 current_function_decl = child_fn;
3573 if (optimize)
3574 optimize_omp_library_calls (entry_stmt);
3575 rebuild_cgraph_edges ();
3577 /* Some EH regions might become dead, see PR34608. If
3578 pass_cleanup_cfg isn't the first pass to happen with the
3579 new child, these dead EH edges might cause problems.
3580 Clean them up now. */
3581 if (flag_exceptions)
3583 basic_block bb;
3584 bool changed = false;
3586 FOR_EACH_BB (bb)
3587 changed |= gimple_purge_dead_eh_edges (bb);
3588 if (changed)
3589 cleanup_tree_cfg ();
3591 if (gimple_in_ssa_p (cfun))
3592 update_ssa (TODO_update_ssa);
3593 current_function_decl = save_current;
3594 pop_cfun ();
3597 /* Emit a library call to launch the children threads. */
3598 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3599 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3600 else
3601 expand_task_call (new_bb, entry_stmt);
3602 update_ssa (TODO_update_ssa_only_virtuals);
3606 /* A subroutine of expand_omp_for. Generate code for a parallel
3607 loop with any schedule. Given parameters:
3609 for (V = N1; V cond N2; V += STEP) BODY;
3611 where COND is "<" or ">", we generate pseudocode
3613 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3614 if (more) goto L0; else goto L3;
3615 L0:
3616 V = istart0;
3617 iend = iend0;
3618 L1:
3619 BODY;
3620 V += STEP;
3621 if (V cond iend) goto L1; else goto L2;
3622 L2:
3623 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3624 L3:
3626 If this is a combined omp parallel loop, instead of the call to
3627 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3629 For collapsed loops, given parameters:
3630 collapse(3)
3631 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3632 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3633 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3634 BODY;
3636 we generate pseudocode
3638 if (cond3 is <)
3639 adj = STEP3 - 1;
3640 else
3641 adj = STEP3 + 1;
3642 count3 = (adj + N32 - N31) / STEP3;
3643 if (cond2 is <)
3644 adj = STEP2 - 1;
3645 else
3646 adj = STEP2 + 1;
3647 count2 = (adj + N22 - N21) / STEP2;
3648 if (cond1 is <)
3649 adj = STEP1 - 1;
3650 else
3651 adj = STEP1 + 1;
3652 count1 = (adj + N12 - N11) / STEP1;
3653 count = count1 * count2 * count3;
3654 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3655 if (more) goto L0; else goto L3;
3656 L0:
3657 V = istart0;
3658 T = V;
3659 V3 = N31 + (T % count3) * STEP3;
3660 T = T / count3;
3661 V2 = N21 + (T % count2) * STEP2;
3662 T = T / count2;
3663 V1 = N11 + T * STEP1;
3664 iend = iend0;
3665 L1:
3666 BODY;
3667 V += 1;
3668 if (V < iend) goto L10; else goto L2;
3669 L10:
3670 V3 += STEP3;
3671 if (V3 cond3 N32) goto L1; else goto L11;
3672 L11:
3673 V3 = N31;
3674 V2 += STEP2;
3675 if (V2 cond2 N22) goto L1; else goto L12;
3676 L12:
3677 V2 = N21;
3678 V1 += STEP1;
3679 goto L1;
3680 L2:
3681 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3682 L3:
3686 static void
3687 expand_omp_for_generic (struct omp_region *region,
3688 struct omp_for_data *fd,
3689 enum built_in_function start_fn,
3690 enum built_in_function next_fn)
3692 tree type, istart0, iend0, iend;
3693 tree t, vmain, vback, bias = NULL_TREE;
3694 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3695 basic_block l2_bb = NULL, l3_bb = NULL;
3696 gimple_stmt_iterator gsi;
3697 gimple stmt;
3698 bool in_combined_parallel = is_combined_parallel (region);
3699 bool broken_loop = region->cont == NULL;
3700 edge e, ne;
3701 tree *counts = NULL;
3702 int i;
3704 gcc_assert (!broken_loop || !in_combined_parallel);
3705 gcc_assert (fd->iter_type == long_integer_type_node
3706 || !in_combined_parallel);
3708 type = TREE_TYPE (fd->loop.v);
3709 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3710 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3711 TREE_ADDRESSABLE (istart0) = 1;
3712 TREE_ADDRESSABLE (iend0) = 1;
3713 if (gimple_in_ssa_p (cfun))
3715 add_referenced_var (istart0);
3716 add_referenced_var (iend0);
3719 /* See if we need to bias by LLONG_MIN. */
3720 if (fd->iter_type == long_long_unsigned_type_node
3721 && TREE_CODE (type) == INTEGER_TYPE
3722 && !TYPE_UNSIGNED (type))
3724 tree n1, n2;
3726 if (fd->loop.cond_code == LT_EXPR)
3728 n1 = fd->loop.n1;
3729 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3731 else
3733 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3734 n2 = fd->loop.n1;
3736 if (TREE_CODE (n1) != INTEGER_CST
3737 || TREE_CODE (n2) != INTEGER_CST
3738 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3739 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3742 entry_bb = region->entry;
3743 cont_bb = region->cont;
3744 collapse_bb = NULL;
3745 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3746 gcc_assert (broken_loop
3747 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3748 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3749 l1_bb = single_succ (l0_bb);
3750 if (!broken_loop)
3752 l2_bb = create_empty_bb (cont_bb);
3753 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3754 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3756 else
3757 l2_bb = NULL;
3758 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3759 exit_bb = region->exit;
3761 gsi = gsi_last_bb (entry_bb);
3763 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3764 if (fd->collapse > 1)
3766 /* Collapsed loops need work for expansion in SSA form. */
3767 gcc_assert (!gimple_in_ssa_p (cfun));
3768 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3769 for (i = 0; i < fd->collapse; i++)
3771 tree itype = TREE_TYPE (fd->loops[i].v);
3773 if (POINTER_TYPE_P (itype))
3774 itype = signed_type_for (itype);
3775 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3776 ? -1 : 1));
3777 t = fold_build2 (PLUS_EXPR, itype,
3778 fold_convert (itype, fd->loops[i].step), t);
3779 t = fold_build2 (PLUS_EXPR, itype, t,
3780 fold_convert (itype, fd->loops[i].n2));
3781 t = fold_build2 (MINUS_EXPR, itype, t,
3782 fold_convert (itype, fd->loops[i].n1));
3783 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3784 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3785 fold_build1 (NEGATE_EXPR, itype, t),
3786 fold_build1 (NEGATE_EXPR, itype,
3787 fold_convert (itype,
3788 fd->loops[i].step)));
3789 else
3790 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3791 fold_convert (itype, fd->loops[i].step));
3792 t = fold_convert (type, t);
3793 if (TREE_CODE (t) == INTEGER_CST)
3794 counts[i] = t;
3795 else
3797 counts[i] = create_tmp_var (type, ".count");
3798 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3799 true, GSI_SAME_STMT);
3800 stmt = gimple_build_assign (counts[i], t);
3801 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3803 if (SSA_VAR_P (fd->loop.n2))
3805 if (i == 0)
3806 t = counts[0];
3807 else
3809 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3810 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3811 true, GSI_SAME_STMT);
3813 stmt = gimple_build_assign (fd->loop.n2, t);
3814 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3818 if (in_combined_parallel)
3820 /* In a combined parallel loop, emit a call to
3821 GOMP_loop_foo_next. */
3822 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
3823 build_fold_addr_expr (istart0),
3824 build_fold_addr_expr (iend0));
3826 else
3828 tree t0, t1, t2, t3, t4;
3829 /* If this is not a combined parallel loop, emit a call to
3830 GOMP_loop_foo_start in ENTRY_BB. */
3831 t4 = build_fold_addr_expr (iend0);
3832 t3 = build_fold_addr_expr (istart0);
3833 t2 = fold_convert (fd->iter_type, fd->loop.step);
3834 if (POINTER_TYPE_P (type)
3835 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3837 /* Avoid casting pointers to integer of a different size. */
3838 tree itype = signed_type_for (type);
3839 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3840 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3842 else
3844 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3845 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3847 if (bias)
3849 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3850 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3852 if (fd->iter_type == long_integer_type_node)
3854 if (fd->chunk_size)
3856 t = fold_convert (fd->iter_type, fd->chunk_size);
3857 t = build_call_expr (builtin_decl_explicit (start_fn),
3858 6, t0, t1, t2, t, t3, t4);
3860 else
3861 t = build_call_expr (builtin_decl_explicit (start_fn),
3862 5, t0, t1, t2, t3, t4);
3864 else
3866 tree t5;
3867 tree c_bool_type;
3868 tree bfn_decl;
3870 /* The GOMP_loop_ull_*start functions have an additional boolean
3871 argument, true for < loops and false for > loops.
3872 In Fortran, the C bool type can be different from
3873 boolean_type_node. */
3874 bfn_decl = builtin_decl_explicit (start_fn);
3875 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
3876 t5 = build_int_cst (c_bool_type,
3877 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3878 if (fd->chunk_size)
3880 tree bfn_decl = builtin_decl_explicit (start_fn);
3881 t = fold_convert (fd->iter_type, fd->chunk_size);
3882 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
3884 else
3885 t = build_call_expr (builtin_decl_explicit (start_fn),
3886 6, t5, t0, t1, t2, t3, t4);
3889 if (TREE_TYPE (t) != boolean_type_node)
3890 t = fold_build2 (NE_EXPR, boolean_type_node,
3891 t, build_int_cst (TREE_TYPE (t), 0));
3892 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3893 true, GSI_SAME_STMT);
3894 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3896 /* Remove the GIMPLE_OMP_FOR statement. */
3897 gsi_remove (&gsi, true);
3899 /* Iteration setup for sequential loop goes in L0_BB. */
3900 gsi = gsi_start_bb (l0_bb);
3901 t = istart0;
3902 if (bias)
3903 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3904 if (POINTER_TYPE_P (type))
3905 t = fold_convert (signed_type_for (type), t);
3906 t = fold_convert (type, t);
3907 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3908 false, GSI_CONTINUE_LINKING);
3909 stmt = gimple_build_assign (fd->loop.v, t);
3910 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3912 t = iend0;
3913 if (bias)
3914 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3915 if (POINTER_TYPE_P (type))
3916 t = fold_convert (signed_type_for (type), t);
3917 t = fold_convert (type, t);
3918 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3919 false, GSI_CONTINUE_LINKING);
3920 if (fd->collapse > 1)
3922 tree tem = create_tmp_var (type, ".tem");
3924 stmt = gimple_build_assign (tem, fd->loop.v);
3925 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3926 for (i = fd->collapse - 1; i >= 0; i--)
3928 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3929 itype = vtype;
3930 if (POINTER_TYPE_P (vtype))
3931 itype = signed_type_for (vtype);
3932 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3933 t = fold_convert (itype, t);
3934 t = fold_build2 (MULT_EXPR, itype, t,
3935 fold_convert (itype, fd->loops[i].step));
3936 if (POINTER_TYPE_P (vtype))
3937 t = fold_build_pointer_plus (fd->loops[i].n1, t);
3938 else
3939 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3940 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3941 false, GSI_CONTINUE_LINKING);
3942 stmt = gimple_build_assign (fd->loops[i].v, t);
3943 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3944 if (i != 0)
3946 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3947 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3948 false, GSI_CONTINUE_LINKING);
3949 stmt = gimple_build_assign (tem, t);
3950 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3955 if (!broken_loop)
3957 /* Code to control the increment and predicate for the sequential
3958 loop goes in the CONT_BB. */
3959 gsi = gsi_last_bb (cont_bb);
3960 stmt = gsi_stmt (gsi);
3961 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3962 vmain = gimple_omp_continue_control_use (stmt);
3963 vback = gimple_omp_continue_control_def (stmt);
3965 if (POINTER_TYPE_P (type))
3966 t = fold_build_pointer_plus (vmain, fd->loop.step);
3967 else
3968 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3969 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3970 true, GSI_SAME_STMT);
3971 stmt = gimple_build_assign (vback, t);
3972 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3974 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3975 stmt = gimple_build_cond_empty (t);
3976 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3978 /* Remove GIMPLE_OMP_CONTINUE. */
3979 gsi_remove (&gsi, true);
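/* For a collapsed nest, build one basic block per original loop that
   increments that loop's variable: while the variable still satisfies
   its condition control returns straight to the body (L1); when it
   runs out, execution falls through to the next block, which resets
   the inner variable to its N1 and increments the next outer one, and
   so on.  COLLAPSE_BB heads this chain and becomes the target of the
   continue edge below.  */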
3981 if (fd->collapse > 1)
3983 basic_block last_bb, bb;
3985 last_bb = cont_bb;
3986 for (i = fd->collapse - 1; i >= 0; i--)
3988 tree vtype = TREE_TYPE (fd->loops[i].v);
3990 bb = create_empty_bb (last_bb);
3991 gsi = gsi_start_bb (bb);
3993 if (i < fd->collapse - 1)
3995 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3996 e->probability = REG_BR_PROB_BASE / 8;
3998 t = fd->loops[i + 1].n1;
3999 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4000 false, GSI_CONTINUE_LINKING);
4001 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
4002 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4004 else
4005 collapse_bb = bb;
4007 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
4009 if (POINTER_TYPE_P (vtype))
4010 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
4011 else
4012 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
4013 fd->loops[i].step);
4014 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4015 false, GSI_CONTINUE_LINKING);
4016 stmt = gimple_build_assign (fd->loops[i].v, t);
4017 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4019 if (i > 0)
4021 t = fd->loops[i].n2;
4022 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4023 false, GSI_CONTINUE_LINKING);
4024 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
4025 fd->loops[i].v, t);
4026 stmt = gimple_build_cond_empty (t);
4027 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4028 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4029 e->probability = REG_BR_PROB_BASE * 7 / 8;
4031 else
4032 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4033 last_bb = bb;
4037 /* Emit code to get the next parallel iteration in L2_BB. */
4038 gsi = gsi_start_bb (l2_bb);
4040 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
4041 build_fold_addr_expr (istart0),
4042 build_fold_addr_expr (iend0));
4043 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4044 false, GSI_CONTINUE_LINKING);
4045 if (TREE_TYPE (t) != boolean_type_node)
4046 t = fold_build2 (NE_EXPR, boolean_type_node,
4047 t, build_int_cst (TREE_TYPE (t), 0));
4048 stmt = gimple_build_cond_empty (t);
4049 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
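/* GOMP_loop_*_next stores the bounds of the next chunk into istart0
   and iend0 and returns nonzero while iterations remain, so the true
   edge out of L2_BB loops back to L0_BB to set up the new chunk and
   the false edge proceeds to the exit path.  */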
4052 /* Add the loop cleanup function. */
4053 gsi = gsi_last_bb (exit_bb);
4054 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4055 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
4056 else
4057 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
4058 stmt = gimple_build_call (t, 0);
4059 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4060 gsi_remove (&gsi, true);
4062 /* Connect the new blocks. */
4063 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4064 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4066 if (!broken_loop)
4068 gimple_seq phis;
4070 e = find_edge (cont_bb, l3_bb);
4071 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4073 phis = phi_nodes (l3_bb);
4074 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4076 gimple phi = gsi_stmt (gsi);
4077 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4078 PHI_ARG_DEF_FROM_EDGE (phi, e));
4080 remove_edge (e);
4082 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4083 if (fd->collapse > 1)
4085 e = find_edge (cont_bb, l1_bb);
4086 remove_edge (e);
4087 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4089 else
4091 e = find_edge (cont_bb, l1_bb);
4092 e->flags = EDGE_TRUE_VALUE;
4094 e->probability = REG_BR_PROB_BASE * 7 / 8;
4095 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4096 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4098 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4099 recompute_dominator (CDI_DOMINATORS, l2_bb));
4100 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4101 recompute_dominator (CDI_DOMINATORS, l3_bb));
4102 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4103 recompute_dominator (CDI_DOMINATORS, l0_bb));
4104 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4105 recompute_dominator (CDI_DOMINATORS, l1_bb));
4110 /* A subroutine of expand_omp_for. Generate code for a parallel
4111 loop with static schedule and no specified chunk size. Given
4112 parameters:
4114 for (V = N1; V cond N2; V += STEP) BODY;
4116 where COND is "<" or ">", we generate pseudocode
4118 if (cond is <)
4119 adj = STEP - 1;
4120 else
4121 adj = STEP + 1;
4122 if ((__typeof (V)) -1 > 0 && cond is >)
4123 n = -(adj + N2 - N1) / -STEP;
4124 else
4125 n = (adj + N2 - N1) / STEP;
4126 q = n / nthreads;
4127 tt = n % nthreads;
4128 if (threadid < tt) goto L3; else goto L4;
4129 L3:
4130 tt = 0;
4131 q = q + 1;
4132 L4:
4133 s0 = q * threadid + tt;
4134 e0 = s0 + q;
4135 V = s0 * STEP + N1;
4136 if (s0 >= e0) goto L2; else goto L0;
4137 L0:
4138 e = e0 * STEP + N1;
4139 L1:
4140 BODY;
4141 V += STEP;
4142 if (V cond e) goto L1;
4143 L2:
4144 */
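/* For instance, with n = 10 iterations and nthreads = 4 the pseudocode
   above gives q = 2 and tt = 2: threads 0 and 1 take the bumped count
   q + 1 = 3 and cover [0,3) and [3,6), while threads 2 and 3 keep
   q = 2 and cover [6,8) and [8,10).  Every iteration is handed out
   exactly once and the per-thread shares differ by at most one.  */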
4146 static void
4147 expand_omp_for_static_nochunk (struct omp_region *region,
4148 struct omp_for_data *fd)
4150 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
4151 tree type, itype, vmain, vback;
4152 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4153 basic_block body_bb, cont_bb;
4154 basic_block fin_bb;
4155 gimple_stmt_iterator gsi;
4156 gimple stmt;
4157 edge ep;
4159 itype = type = TREE_TYPE (fd->loop.v);
4160 if (POINTER_TYPE_P (type))
4161 itype = signed_type_for (type);
4163 entry_bb = region->entry;
4164 cont_bb = region->cont;
4165 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4166 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4167 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4168 body_bb = single_succ (seq_start_bb);
4169 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4170 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4171 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4172 exit_bb = region->exit;
4174 /* Iteration space partitioning goes in ENTRY_BB. */
4175 gsi = gsi_last_bb (entry_bb);
4176 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4178 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4179 t = fold_convert (itype, t);
4180 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4181 true, GSI_SAME_STMT);
4183 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4184 t = fold_convert (itype, t);
4185 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4186 true, GSI_SAME_STMT);
4188 fd->loop.n1
4189 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4190 true, NULL_TREE, true, GSI_SAME_STMT);
4191 fd->loop.n2
4192 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4193 true, NULL_TREE, true, GSI_SAME_STMT);
4194 fd->loop.step
4195 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4196 true, NULL_TREE, true, GSI_SAME_STMT);
4198 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4199 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4200 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4201 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4202 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4203 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4204 fold_build1 (NEGATE_EXPR, itype, t),
4205 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4206 else
4207 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4208 t = fold_convert (itype, t);
4209 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4211 q = create_tmp_var (itype, "q");
4212 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4213 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4214 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4216 tt = create_tmp_var (itype, "tt");
4217 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4218 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4219 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
4221 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4222 stmt = gimple_build_cond_empty (t);
4223 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4225 second_bb = split_block (entry_bb, stmt)->dest;
4226 gsi = gsi_last_bb (second_bb);
4227 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4229 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4230 GSI_SAME_STMT);
4231 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4232 build_int_cst (itype, 1));
4233 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4235 third_bb = split_block (second_bb, stmt)->dest;
4236 gsi = gsi_last_bb (third_bb);
4237 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4239 t = build2 (MULT_EXPR, itype, q, threadid);
4240 t = build2 (PLUS_EXPR, itype, t, tt);
4241 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4243 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4244 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4246 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4247 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
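/* S0 and E0 delimit this thread's share [s0, e0) in iteration-number
   space; the comparison just emitted sends threads with an empty range
   straight to FIN_BB.  The corresponding values of V and E are derived
   from S0 and E0 in SEQ_START_BB below.  */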
4249 /* Remove the GIMPLE_OMP_FOR statement. */
4250 gsi_remove (&gsi, true);
4252 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4253 gsi = gsi_start_bb (seq_start_bb);
4255 t = fold_convert (itype, s0);
4256 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4257 if (POINTER_TYPE_P (type))
4258 t = fold_build_pointer_plus (fd->loop.n1, t);
4259 else
4260 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4261 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4262 false, GSI_CONTINUE_LINKING);
4263 stmt = gimple_build_assign (fd->loop.v, t);
4264 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4266 t = fold_convert (itype, e0);
4267 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4268 if (POINTER_TYPE_P (type))
4269 t = fold_build_pointer_plus (fd->loop.n1, t);
4270 else
4271 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4272 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4273 false, GSI_CONTINUE_LINKING);
4275 /* The code controlling the sequential loop replaces the
4276 GIMPLE_OMP_CONTINUE. */
4277 gsi = gsi_last_bb (cont_bb);
4278 stmt = gsi_stmt (gsi);
4279 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4280 vmain = gimple_omp_continue_control_use (stmt);
4281 vback = gimple_omp_continue_control_def (stmt);
4283 if (POINTER_TYPE_P (type))
4284 t = fold_build_pointer_plus (vmain, fd->loop.step);
4285 else
4286 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4287 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4288 true, GSI_SAME_STMT);
4289 stmt = gimple_build_assign (vback, t);
4290 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4292 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4293 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4295 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4296 gsi_remove (&gsi, true);
4298 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4299 gsi = gsi_last_bb (exit_bb);
4300 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4301 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4302 false, GSI_SAME_STMT);
4303 gsi_remove (&gsi, true);
4305 /* Connect all the blocks. */
4306 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
4307 ep->probability = REG_BR_PROB_BASE / 4 * 3;
4308 ep = find_edge (entry_bb, second_bb);
4309 ep->flags = EDGE_TRUE_VALUE;
4310 ep->probability = REG_BR_PROB_BASE / 4;
4311 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4312 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4314 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4315 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4317 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
4318 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
4319 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
4320 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4321 recompute_dominator (CDI_DOMINATORS, body_bb));
4322 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4323 recompute_dominator (CDI_DOMINATORS, fin_bb));
4327 /* A subroutine of expand_omp_for. Generate code for a parallel
4328 loop with static schedule and a specified chunk size. Given
4329 parameters:
4331 for (V = N1; V cond N2; V += STEP) BODY;
4333 where COND is "<" or ">", we generate pseudocode
4335 if (cond is <)
4336 adj = STEP - 1;
4337 else
4338 adj = STEP + 1;
4339 if ((__typeof (V)) -1 > 0 && cond is >)
4340 n = -(adj + N2 - N1) / -STEP;
4341 else
4342 n = (adj + N2 - N1) / STEP;
4343 trip = 0;
4344 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4345 here so that V is defined
4346 if the loop is not entered
4347 L0:
4348 s0 = (trip * nthreads + threadid) * CHUNK;
4349 e0 = min(s0 + CHUNK, n);
4350 if (s0 < n) goto L1; else goto L4;
4351 L1:
4352 V = s0 * STEP + N1;
4353 e = e0 * STEP + N1;
4354 L2:
4355 BODY;
4356 V += STEP;
4357 if (V cond e) goto L2; else goto L3;
4358 L3:
4359 trip += 1;
4360 goto L0;
4361 L4:
4362 */
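/* For instance, with n = 10, nthreads = 4 and CHUNK = 2, trip 0 hands
   threads 0..3 the chunks [0,2), [2,4), [4,6) and [6,8); on trip 1 only
   thread 0 finds s0 = 8 < n and takes the final chunk [8,10), while the
   other threads see s0 >= n and leave the loop.  */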
4364 static void
4365 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4367 tree n, s0, e0, e, t;
4368 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4369 tree type, itype, v_main, v_back, v_extra;
4370 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4371 basic_block trip_update_bb, cont_bb, fin_bb;
4372 gimple_stmt_iterator si;
4373 gimple stmt;
4374 edge se;
4376 itype = type = TREE_TYPE (fd->loop.v);
4377 if (POINTER_TYPE_P (type))
4378 itype = signed_type_for (type);
4380 entry_bb = region->entry;
4381 se = split_block (entry_bb, last_stmt (entry_bb));
4382 entry_bb = se->src;
4383 iter_part_bb = se->dest;
4384 cont_bb = region->cont;
4385 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4386 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4387 == FALLTHRU_EDGE (cont_bb)->dest);
4388 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4389 body_bb = single_succ (seq_start_bb);
4390 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4391 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4392 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4393 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4394 exit_bb = region->exit;
4396 /* Trip and adjustment setup goes in ENTRY_BB. */
4397 si = gsi_last_bb (entry_bb);
4398 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4400 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4401 t = fold_convert (itype, t);
4402 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4403 true, GSI_SAME_STMT);
4405 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4406 t = fold_convert (itype, t);
4407 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4408 true, GSI_SAME_STMT);
4410 fd->loop.n1
4411 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4412 true, NULL_TREE, true, GSI_SAME_STMT);
4413 fd->loop.n2
4414 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4415 true, NULL_TREE, true, GSI_SAME_STMT);
4416 fd->loop.step
4417 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4418 true, NULL_TREE, true, GSI_SAME_STMT);
4419 fd->chunk_size
4420 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4421 true, NULL_TREE, true, GSI_SAME_STMT);
4423 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4424 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4425 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4426 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4427 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4428 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4429 fold_build1 (NEGATE_EXPR, itype, t),
4430 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4431 else
4432 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4433 t = fold_convert (itype, t);
4434 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4435 true, GSI_SAME_STMT);
4437 trip_var = create_tmp_var (itype, ".trip");
4438 if (gimple_in_ssa_p (cfun))
4440 add_referenced_var (trip_var);
4441 trip_init = make_ssa_name (trip_var, NULL);
4442 trip_main = make_ssa_name (trip_var, NULL);
4443 trip_back = make_ssa_name (trip_var, NULL);
4445 else
4447 trip_init = trip_var;
4448 trip_main = trip_var;
4449 trip_back = trip_var;
4452 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4453 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4455 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4456 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4457 if (POINTER_TYPE_P (type))
4458 t = fold_build_pointer_plus (fd->loop.n1, t);
4459 else
4460 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4461 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4462 true, GSI_SAME_STMT);
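/* V_EXTRA implements the "extra definition of V" from the comment
   above: it is the value the loop variable would have at the start of
   this thread's first chunk, so V is well defined even if the thread
   never enters the body.  In SSA form it also supplies the PHI
   argument for fd->loop.v on the edge into ITER_PART_BB.  */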
4464 /* Remove the GIMPLE_OMP_FOR. */
4465 gsi_remove (&si, true);
4467 /* Iteration space partitioning goes in ITER_PART_BB. */
4468 si = gsi_last_bb (iter_part_bb);
4470 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4471 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4472 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4473 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4474 false, GSI_CONTINUE_LINKING);
4476 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4477 t = fold_build2 (MIN_EXPR, itype, t, n);
4478 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4479 false, GSI_CONTINUE_LINKING);
4481 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4482 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4484 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4485 si = gsi_start_bb (seq_start_bb);
4487 t = fold_convert (itype, s0);
4488 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4489 if (POINTER_TYPE_P (type))
4490 t = fold_build_pointer_plus (fd->loop.n1, t);
4491 else
4492 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4493 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4494 false, GSI_CONTINUE_LINKING);
4495 stmt = gimple_build_assign (fd->loop.v, t);
4496 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4498 t = fold_convert (itype, e0);
4499 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4500 if (POINTER_TYPE_P (type))
4501 t = fold_build_pointer_plus (fd->loop.n1, t);
4502 else
4503 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4504 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4505 false, GSI_CONTINUE_LINKING);
4507 /* The code controlling the sequential loop goes in CONT_BB,
4508 replacing the GIMPLE_OMP_CONTINUE. */
4509 si = gsi_last_bb (cont_bb);
4510 stmt = gsi_stmt (si);
4511 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4512 v_main = gimple_omp_continue_control_use (stmt);
4513 v_back = gimple_omp_continue_control_def (stmt);
4515 if (POINTER_TYPE_P (type))
4516 t = fold_build_pointer_plus (v_main, fd->loop.step);
4517 else
4518 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4519 stmt = gimple_build_assign (v_back, t);
4520 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4522 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4523 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4525 /* Remove GIMPLE_OMP_CONTINUE. */
4526 gsi_remove (&si, true);
4528 /* Trip update code goes into TRIP_UPDATE_BB. */
4529 si = gsi_start_bb (trip_update_bb);
4531 t = build_int_cst (itype, 1);
4532 t = build2 (PLUS_EXPR, itype, trip_main, t);
4533 stmt = gimple_build_assign (trip_back, t);
4534 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4536 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4537 si = gsi_last_bb (exit_bb);
4538 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4539 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4540 false, GSI_SAME_STMT);
4541 gsi_remove (&si, true);
4543 /* Connect the new blocks. */
4544 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4545 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4547 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4548 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4550 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4552 if (gimple_in_ssa_p (cfun))
4554 gimple_stmt_iterator psi;
4555 gimple phi;
4556 edge re, ene;
4557 edge_var_map_vector head;
4558 edge_var_map *vm;
4559 size_t i;
4561 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4562 remove arguments of the phi nodes in fin_bb. We need to create
4563 appropriate phi nodes in iter_part_bb instead. */
4564 se = single_pred_edge (fin_bb);
4565 re = single_succ_edge (trip_update_bb);
4566 head = redirect_edge_var_map_vector (re);
4567 ene = single_succ_edge (entry_bb);
4569 psi = gsi_start_phis (fin_bb);
4570 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4571 gsi_next (&psi), ++i)
4573 gimple nphi;
4574 source_location locus;
4576 phi = gsi_stmt (psi);
4577 t = gimple_phi_result (phi);
4578 gcc_assert (t == redirect_edge_var_map_result (vm));
4579 nphi = create_phi_node (t, iter_part_bb);
4580 SSA_NAME_DEF_STMT (t) = nphi;
4582 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4583 locus = gimple_phi_arg_location_from_edge (phi, se);
4585 /* A special case -- fd->loop.v is not yet computed in
4586 iter_part_bb; we need to use v_extra instead. */
4587 if (t == fd->loop.v)
4588 t = v_extra;
4589 add_phi_arg (nphi, t, ene, locus);
4590 locus = redirect_edge_var_map_location (vm);
4591 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4593 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4594 redirect_edge_var_map_clear (re);
4595 while (1)
4597 psi = gsi_start_phis (fin_bb);
4598 if (gsi_end_p (psi))
4599 break;
4600 remove_phi_node (&psi, false);
4603 /* Make phi node for trip. */
4604 phi = create_phi_node (trip_main, iter_part_bb);
4605 SSA_NAME_DEF_STMT (trip_main) = phi;
4606 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4607 UNKNOWN_LOCATION);
4608 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4609 UNKNOWN_LOCATION);
4612 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4613 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4614 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4615 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4616 recompute_dominator (CDI_DOMINATORS, fin_bb));
4617 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4618 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4619 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4620 recompute_dominator (CDI_DOMINATORS, body_bb));
4624 /* Expand the OpenMP loop defined by REGION. */
4626 static void
4627 expand_omp_for (struct omp_region *region)
4629 struct omp_for_data fd;
4630 struct omp_for_data_loop *loops;
4632 loops
4633 = (struct omp_for_data_loop *)
4634 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4635 * sizeof (struct omp_for_data_loop));
4636 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4637 region->sched_kind = fd.sched_kind;
4639 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4640 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4641 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4642 if (region->cont)
4644 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4645 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4646 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4649 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4650 && !fd.have_ordered
4651 && fd.collapse == 1
4652 && region->cont != NULL)
4654 if (fd.chunk_size == NULL)
4655 expand_omp_for_static_nochunk (region, &fd);
4656 else
4657 expand_omp_for_static_chunk (region, &fd);
4659 else
4661 int fn_index, start_ix, next_ix;
4663 if (fd.chunk_size == NULL
4664 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
4665 fd.chunk_size = integer_zero_node;
4666 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4667 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4668 ? 3 : fd.sched_kind;
4669 fn_index += fd.have_ordered * 4;
4670 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
4671 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
4672 if (fd.iter_type == long_long_unsigned_type_node)
4674 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4675 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
4676 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4677 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
4679 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4680 (enum built_in_function) next_ix);
4683 update_ssa (TODO_update_ssa_only_virtuals);
4687 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4689 v = GOMP_sections_start (n);
4690 L0:
4691 switch (v)
4692 {
4693 case 0:
4694 goto L2;
4695 case 1:
4696 section 1;
4697 goto L1;
4698 case 2:
4699 ...
4700 case n:
4701 ...
4702 default:
4703 abort ();
4704 }
4705 L1:
4706 v = GOMP_sections_next ();
4707 goto L0;
4708 L2:
4709 reduction;
4711 If this is a combined parallel sections, replace the call to
4712 GOMP_sections_start with a call to GOMP_sections_next. */
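/* As a rough example, a directive with two sections,

	#pragma omp sections
	  {
	  #pragma omp section
	    foo ();
	  #pragma omp section
	    bar ();
	  }

   becomes a loop around a switch seeded by GOMP_sections_start (2) and
   re-fed by GOMP_sections_next (), where case 1 runs foo (), case 2
   runs bar (), case 0 leaves the loop and the default case traps.  */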
4714 static void
4715 expand_omp_sections (struct omp_region *region)
4717 tree t, u, vin = NULL, vmain, vnext, l2;
4718 VEC (tree,heap) *label_vec;
4719 unsigned len;
4720 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4721 gimple_stmt_iterator si, switch_si;
4722 gimple sections_stmt, stmt, cont;
4723 edge_iterator ei;
4724 edge e;
4725 struct omp_region *inner;
4726 unsigned i, casei;
4727 bool exit_reachable = region->cont != NULL;
4729 gcc_assert (exit_reachable == (region->exit != NULL));
4730 entry_bb = region->entry;
4731 l0_bb = single_succ (entry_bb);
4732 l1_bb = region->cont;
4733 l2_bb = region->exit;
4734 if (exit_reachable)
4736 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
4737 l2 = gimple_block_label (l2_bb);
4738 else
4740 /* This can happen if there are reductions. */
4741 len = EDGE_COUNT (l0_bb->succs);
4742 gcc_assert (len > 0);
4743 e = EDGE_SUCC (l0_bb, len - 1);
4744 si = gsi_last_bb (e->dest);
4745 l2 = NULL_TREE;
4746 if (gsi_end_p (si)
4747 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4748 l2 = gimple_block_label (e->dest);
4749 else
4750 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4752 si = gsi_last_bb (e->dest);
4753 if (gsi_end_p (si)
4754 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4756 l2 = gimple_block_label (e->dest);
4757 break;
4761 default_bb = create_empty_bb (l1_bb->prev_bb);
4763 else
4765 default_bb = create_empty_bb (l0_bb);
4766 l2 = gimple_block_label (default_bb);
4769 /* We will build a switch() with enough cases for all the
4770 GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
4771 and a default case to abort if something goes wrong. */
4772 len = EDGE_COUNT (l0_bb->succs);
4774 /* Use VEC_quick_push on label_vec throughout, since we know the size
4775 in advance. */
4776 label_vec = VEC_alloc (tree, heap, len);
4778 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4779 GIMPLE_OMP_SECTIONS statement. */
4780 si = gsi_last_bb (entry_bb);
4781 sections_stmt = gsi_stmt (si);
4782 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4783 vin = gimple_omp_sections_control (sections_stmt);
4784 if (!is_combined_parallel (region))
4786 /* If we are not inside a combined parallel+sections region,
4787 call GOMP_sections_start. */
4788 t = build_int_cst (unsigned_type_node,
4789 exit_reachable ? len - 1 : len);
4790 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
4791 stmt = gimple_build_call (u, 1, t);
4793 else
4795 /* Otherwise, call GOMP_sections_next. */
4796 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4797 stmt = gimple_build_call (u, 0);
4799 gimple_call_set_lhs (stmt, vin);
4800 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4801 gsi_remove (&si, true);
4803 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4804 L0_BB. */
4805 switch_si = gsi_last_bb (l0_bb);
4806 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4807 if (exit_reachable)
4809 cont = last_stmt (l1_bb);
4810 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4811 vmain = gimple_omp_continue_control_use (cont);
4812 vnext = gimple_omp_continue_control_def (cont);
4814 else
4816 vmain = vin;
4817 vnext = NULL_TREE;
4820 i = 0;
4821 if (exit_reachable)
4823 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
4824 VEC_quick_push (tree, label_vec, t);
4825 i++;
4828 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4829 for (inner = region->inner, casei = 1;
4830 inner;
4831 inner = inner->next, i++, casei++)
4833 basic_block s_entry_bb, s_exit_bb;
4835 /* Skip optional reduction region. */
4836 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4838 --i;
4839 --casei;
4840 continue;
4843 s_entry_bb = inner->entry;
4844 s_exit_bb = inner->exit;
4846 t = gimple_block_label (s_entry_bb);
4847 u = build_int_cst (unsigned_type_node, casei);
4848 u = build_case_label (u, NULL, t);
4849 VEC_quick_push (tree, label_vec, u);
4851 si = gsi_last_bb (s_entry_bb);
4852 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4853 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4854 gsi_remove (&si, true);
4855 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4857 if (s_exit_bb == NULL)
4858 continue;
4860 si = gsi_last_bb (s_exit_bb);
4861 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4862 gsi_remove (&si, true);
4864 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4867 /* Error handling code goes in DEFAULT_BB. */
4868 t = gimple_block_label (default_bb);
4869 u = build_case_label (NULL, NULL, t);
4870 make_edge (l0_bb, default_bb, 0);
4872 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4873 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4874 gsi_remove (&switch_si, true);
4875 VEC_free (tree, heap, label_vec);
4877 si = gsi_start_bb (default_bb);
4878 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
4879 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4881 if (exit_reachable)
4883 tree bfn_decl;
4885 /* Code to get the next section goes in L1_BB. */
4886 si = gsi_last_bb (l1_bb);
4887 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4889 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4890 stmt = gimple_build_call (bfn_decl, 0);
4891 gimple_call_set_lhs (stmt, vnext);
4892 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4893 gsi_remove (&si, true);
4895 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4897 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4898 si = gsi_last_bb (l2_bb);
4899 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4900 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
4901 else
4902 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
4903 stmt = gimple_build_call (t, 0);
4904 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4905 gsi_remove (&si, true);
4908 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4912 /* Expand code for an OpenMP single directive. We've already expanded
4913 much of the code; here we simply place the GOMP_barrier call. */
4915 static void
4916 expand_omp_single (struct omp_region *region)
4918 basic_block entry_bb, exit_bb;
4919 gimple_stmt_iterator si;
4920 bool need_barrier = false;
4922 entry_bb = region->entry;
4923 exit_bb = region->exit;
4925 si = gsi_last_bb (entry_bb);
4926 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4927 be removed. We need to ensure that the thread that entered the single
4928 does not exit before the data is copied out by the other threads. */
4929 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4930 OMP_CLAUSE_COPYPRIVATE))
4931 need_barrier = true;
4932 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4933 gsi_remove (&si, true);
4934 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4936 si = gsi_last_bb (exit_bb);
4937 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4938 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4939 false, GSI_SAME_STMT);
4940 gsi_remove (&si, true);
4941 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4945 /* Generic expansion for OpenMP synchronization directives: master,
4946 ordered and critical. All we need to do here is remove the entry
4947 and exit markers for REGION. */
4949 static void
4950 expand_omp_synch (struct omp_region *region)
4952 basic_block entry_bb, exit_bb;
4953 gimple_stmt_iterator si;
4955 entry_bb = region->entry;
4956 exit_bb = region->exit;
4958 si = gsi_last_bb (entry_bb);
4959 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4960 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4961 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4962 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4963 gsi_remove (&si, true);
4964 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4966 if (exit_bb)
4968 si = gsi_last_bb (exit_bb);
4969 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4970 gsi_remove (&si, true);
4971 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4975 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4976 operation as a normal volatile load. */
4978 static bool
4979 expand_omp_atomic_load (basic_block load_bb, tree addr,
4980 tree loaded_val, int index)
4982 enum built_in_function tmpbase;
4983 gimple_stmt_iterator gsi;
4984 basic_block store_bb;
4985 location_t loc;
4986 gimple stmt;
4987 tree decl, call, type, itype;
4989 gsi = gsi_last_bb (load_bb);
4990 stmt = gsi_stmt (gsi);
4991 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
4992 loc = gimple_location (stmt);
4994 /* ??? If the target does not implement atomic_load_optab[mode], and mode
4995 is smaller than word size, then expand_atomic_load assumes that the load
4996 is atomic. We could avoid the builtin entirely in this case. */
4998 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
4999 decl = builtin_decl_explicit (tmpbase);
5000 if (decl == NULL_TREE)
5001 return false;
5003 type = TREE_TYPE (loaded_val);
5004 itype = TREE_TYPE (TREE_TYPE (decl));
5006 call = build_call_expr_loc (loc, decl, 2, addr,
5007 build_int_cst (NULL, MEMMODEL_RELAXED));
5008 if (!useless_type_conversion_p (type, itype))
5009 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5010 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5012 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5013 gsi_remove (&gsi, true);
5015 store_bb = single_succ (load_bb);
5016 gsi = gsi_last_bb (store_bb);
5017 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5018 gsi_remove (&gsi, true);
5020 if (gimple_in_ssa_p (cfun))
5021 update_ssa (TODO_update_ssa_no_phi);
5023 return true;
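/* For a 4-byte location this boils down to something like
       loaded_val = __atomic_load_4 (addr, MEMMODEL_RELAXED);
   with a VIEW_CONVERT_EXPR around the call when the stored type (e.g.
   a float) differs from the builtin's integral return type.  */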
5026 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5027 operation as a normal volatile store. */
5029 static bool
5030 expand_omp_atomic_store (basic_block load_bb, tree addr,
5031 tree loaded_val, tree stored_val, int index)
5033 enum built_in_function tmpbase;
5034 gimple_stmt_iterator gsi;
5035 basic_block store_bb = single_succ (load_bb);
5036 location_t loc;
5037 gimple stmt;
5038 tree decl, call, type, itype;
5039 enum machine_mode imode;
5040 bool exchange;
5042 gsi = gsi_last_bb (load_bb);
5043 stmt = gsi_stmt (gsi);
5044 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
5046 /* If the load value is needed, then this isn't a store but an exchange. */
5047 exchange = gimple_omp_atomic_need_value_p (stmt);
5049 gsi = gsi_last_bb (store_bb);
5050 stmt = gsi_stmt (gsi);
5051 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
5052 loc = gimple_location (stmt);
5054 /* ??? If the target does not implement atomic_store_optab[mode], and mode
5055 is smaller than word size, then expand_atomic_store assumes that the store
5056 is atomic. We could avoid the builtin entirely in this case. */
5058 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
5059 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
5060 decl = builtin_decl_explicit (tmpbase);
5061 if (decl == NULL_TREE)
5062 return false;
5064 type = TREE_TYPE (stored_val);
5066 /* Dig out the type of the function's second argument. */
5067 itype = TREE_TYPE (decl);
5068 itype = TYPE_ARG_TYPES (itype);
5069 itype = TREE_CHAIN (itype);
5070 itype = TREE_VALUE (itype);
5071 imode = TYPE_MODE (itype);
5073 if (exchange && !can_atomic_exchange_p (imode, true))
5074 return false;
5076 if (!useless_type_conversion_p (itype, type))
5077 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
5078 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
5079 build_int_cst (NULL, MEMMODEL_RELAXED));
5080 if (exchange)
5082 if (!useless_type_conversion_p (type, itype))
5083 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5084 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5087 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5088 gsi_remove (&gsi, true);
5090 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
5091 gsi = gsi_last_bb (load_bb);
5092 gsi_remove (&gsi, true);
5094 if (gimple_in_ssa_p (cfun))
5095 update_ssa (TODO_update_ssa_no_phi);
5097 return true;
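/* Likewise, a plain atomic write of a 4-byte location becomes roughly
       __atomic_store_4 (addr, stored_val, MEMMODEL_RELAXED);
   and, when the previous contents are needed as well, an
   __atomic_exchange_4 call whose result is assigned to LOADED_VAL.  */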
5100 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5101 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
5102 size of the data type, and thus usable to find the index of the builtin
5103 decl. Returns false if the expression is not of the proper form. */
5105 static bool
5106 expand_omp_atomic_fetch_op (basic_block load_bb,
5107 tree addr, tree loaded_val,
5108 tree stored_val, int index)
5110 enum built_in_function oldbase, newbase, tmpbase;
5111 tree decl, itype, call;
5112 tree lhs, rhs;
5113 basic_block store_bb = single_succ (load_bb);
5114 gimple_stmt_iterator gsi;
5115 gimple stmt;
5116 location_t loc;
5117 enum tree_code code;
5118 bool need_old, need_new;
5119 enum machine_mode imode;
5121 /* We expect to find the following sequences:
5123 load_bb:
5124 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
5126 store_bb:
5127 val = tmp OP something; (or: something OP tmp)
5128 GIMPLE_OMP_STORE (val)
5130 ???FIXME: Allow a more flexible sequence.
5131 Perhaps use data flow to pick the statements.
5135 gsi = gsi_after_labels (store_bb);
5136 stmt = gsi_stmt (gsi);
5137 loc = gimple_location (stmt);
5138 if (!is_gimple_assign (stmt))
5139 return false;
5140 gsi_next (&gsi);
5141 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
5142 return false;
5143 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
5144 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
5145 gcc_checking_assert (!need_old || !need_new);
5147 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
5148 return false;
5150 /* Check for one of the supported fetch-op operations. */
5151 code = gimple_assign_rhs_code (stmt);
5152 switch (code)
5154 case PLUS_EXPR:
5155 case POINTER_PLUS_EXPR:
5156 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
5157 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
5158 break;
5159 case MINUS_EXPR:
5160 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
5161 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
5162 break;
5163 case BIT_AND_EXPR:
5164 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
5165 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
5166 break;
5167 case BIT_IOR_EXPR:
5168 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
5169 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
5170 break;
5171 case BIT_XOR_EXPR:
5172 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
5173 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
5174 break;
5175 default:
5176 return false;
5179 /* Make sure the expression is of the proper form. */
5180 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5181 rhs = gimple_assign_rhs2 (stmt);
5182 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5183 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5184 rhs = gimple_assign_rhs1 (stmt);
5185 else
5186 return false;
5188 tmpbase = ((enum built_in_function)
5189 ((need_new ? newbase : oldbase) + index + 1));
5190 decl = builtin_decl_explicit (tmpbase);
5191 if (decl == NULL_TREE)
5192 return false;
5193 itype = TREE_TYPE (TREE_TYPE (decl));
5194 imode = TYPE_MODE (itype);
5196 /* We could test all of the various optabs involved, but the fact of the
5197 matter is that (with the exception of i486 vs i586 and xadd) all targets
5198 that support any atomic operation optab also implement compare-and-swap.
5199 Let optabs.c take care of expanding any compare-and-swap loop. */
5200 if (!can_compare_and_swap_p (imode, true))
5201 return false;
5203 gsi = gsi_last_bb (load_bb);
5204 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
5206 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
5207 It only requires that the operation happen atomically. Thus we can
5208 use the RELAXED memory model. */
5209 call = build_call_expr_loc (loc, decl, 3, addr,
5210 fold_convert_loc (loc, itype, rhs),
5211 build_int_cst (NULL, MEMMODEL_RELAXED));
5213 if (need_old || need_new)
5215 lhs = need_old ? loaded_val : stored_val;
5216 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
5217 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
5219 else
5220 call = fold_convert_loc (loc, void_type_node, call);
5221 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5222 gsi_remove (&gsi, true);
5224 gsi = gsi_last_bb (store_bb);
5225 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5226 gsi_remove (&gsi, true);
5227 gsi = gsi_last_bb (store_bb);
5228 gsi_remove (&gsi, true);
5230 if (gimple_in_ssa_p (cfun))
5231 update_ssa (TODO_update_ssa_no_phi);
5233 return true;
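/* For example, an atomic update such as "x = x + expr" on a 4-byte x
   maps to roughly
       __atomic_fetch_add_4 (&x, expr, MEMMODEL_RELAXED);
   the *_ADD_FETCH_* form is picked instead when the value after the
   update needs to be captured, and the other supported operators map
   to the corresponding sub/and/or/xor builtins.  */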
5236 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5238 oldval = *addr;
5239 repeat:
5240 newval = rhs; // with oldval replacing *addr in rhs
5241 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
5242 if (oldval != newval)
5243 goto repeat;
5245 INDEX is log2 of the size of the data type, and thus usable to find the
5246 index of the builtin decl. */
5248 static bool
5249 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5250 tree addr, tree loaded_val, tree stored_val,
5251 int index)
5253 tree loadedi, storedi, initial, new_storedi, old_vali;
5254 tree type, itype, cmpxchg, iaddr;
5255 gimple_stmt_iterator si;
5256 basic_block loop_header = single_succ (load_bb);
5257 gimple phi, stmt;
5258 edge e;
5259 enum built_in_function fncode;
5261 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
5262 order to use the RELAXED memory model effectively. */
5263 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
5264 + index + 1);
5265 cmpxchg = builtin_decl_explicit (fncode);
5266 if (cmpxchg == NULL_TREE)
5267 return false;
5268 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5269 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5271 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
5272 return false;
5274 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5275 si = gsi_last_bb (load_bb);
5276 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5278 /* For floating-point values, we'll need to view-convert them to integers
5279 so that we can perform the atomic compare and swap. Simplify the
5280 following code by always setting up the "i"ntegral variables. */
5281 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5283 tree iaddr_val;
5285 iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
5286 true), NULL);
5287 iaddr_val
5288 = force_gimple_operand_gsi (&si,
5289 fold_convert (TREE_TYPE (iaddr), addr),
5290 false, NULL_TREE, true, GSI_SAME_STMT);
5291 stmt = gimple_build_assign (iaddr, iaddr_val);
5292 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5293 loadedi = create_tmp_var (itype, NULL);
5294 if (gimple_in_ssa_p (cfun))
5296 add_referenced_var (iaddr);
5297 add_referenced_var (loadedi);
5298 loadedi = make_ssa_name (loadedi, NULL);
5301 else
5303 iaddr = addr;
5304 loadedi = loaded_val;
5307 initial
5308 = force_gimple_operand_gsi (&si,
5309 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5310 iaddr,
5311 build_int_cst (TREE_TYPE (iaddr), 0)),
5312 true, NULL_TREE, true, GSI_SAME_STMT);
5314 /* Move the value to the LOADEDI temporary. */
5315 if (gimple_in_ssa_p (cfun))
5317 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5318 phi = create_phi_node (loadedi, loop_header);
5319 SSA_NAME_DEF_STMT (loadedi) = phi;
5320 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5321 initial);
5323 else
5324 gsi_insert_before (&si,
5325 gimple_build_assign (loadedi, initial),
5326 GSI_SAME_STMT);
5327 if (loadedi != loaded_val)
5329 gimple_stmt_iterator gsi2;
5330 tree x;
5332 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5333 gsi2 = gsi_start_bb (loop_header);
5334 if (gimple_in_ssa_p (cfun))
5336 gimple stmt;
5337 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5338 true, GSI_SAME_STMT);
5339 stmt = gimple_build_assign (loaded_val, x);
5340 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5342 else
5344 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5345 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5346 true, GSI_SAME_STMT);
5349 gsi_remove (&si, true);
5351 si = gsi_last_bb (store_bb);
5352 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5354 if (iaddr == addr)
5355 storedi = stored_val;
5356 else
5357 storedi =
5358 force_gimple_operand_gsi (&si,
5359 build1 (VIEW_CONVERT_EXPR, itype,
5360 stored_val), true, NULL_TREE, true,
5361 GSI_SAME_STMT);
5363 /* Build the compare&swap statement. */
5364 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5365 new_storedi = force_gimple_operand_gsi (&si,
5366 fold_convert (TREE_TYPE (loadedi),
5367 new_storedi),
5368 true, NULL_TREE,
5369 true, GSI_SAME_STMT);
5371 if (gimple_in_ssa_p (cfun))
5372 old_vali = loadedi;
5373 else
5375 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5376 if (gimple_in_ssa_p (cfun))
5377 add_referenced_var (old_vali);
5378 stmt = gimple_build_assign (old_vali, loadedi);
5379 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5381 stmt = gimple_build_assign (loadedi, new_storedi);
5382 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5385 /* Note that we always perform the comparison as an integer, even for
5386 floating point. This allows the atomic operation to properly
5387 succeed even with NaNs and -0.0. */
5388 stmt = gimple_build_cond_empty
5389 (build2 (NE_EXPR, boolean_type_node,
5390 new_storedi, old_vali));
5391 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5393 /* Update cfg. */
5394 e = single_succ_edge (store_bb);
5395 e->flags &= ~EDGE_FALLTHRU;
5396 e->flags |= EDGE_FALSE_VALUE;
5398 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5400 /* Copy the new value to loadedi (we already did that before the condition
5401 if we are not in SSA). */
5402 if (gimple_in_ssa_p (cfun))
5404 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5405 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5408 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5409 gsi_remove (&si, true);
5411 if (gimple_in_ssa_p (cfun))
5412 update_ssa (TODO_update_ssa_no_phi);
5414 return true;
5417 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5419 GOMP_atomic_start ();
5420 *addr = rhs;
5421 GOMP_atomic_end ();
5423 The result is not globally atomic, but works so long as all parallel
5424 references are within #pragma omp atomic directives. According to
5425 responses received from omp@openmp.org, this appears to be within spec,
5426 which makes sense, since that's how several other compilers handle
5427 this situation as well.
5428 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5429 expanding. STORED_VAL is the operand of the matching
5430 GIMPLE_OMP_ATOMIC_STORE.
5432 We replace
5433 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5434 loaded_val = *addr;
5436 and replace
5437 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5438 *addr = stored_val;
5441 static bool
5442 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5443 tree addr, tree loaded_val, tree stored_val)
5445 gimple_stmt_iterator si;
5446 gimple stmt;
5447 tree t;
5449 si = gsi_last_bb (load_bb);
5450 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5452 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
5453 t = build_call_expr (t, 0);
5454 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5456 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
5457 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5458 gsi_remove (&si, true);
5460 si = gsi_last_bb (store_bb);
5461 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5463 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5464 stored_val);
5465 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5467 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
5468 t = build_call_expr (t, 0);
5469 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5470 gsi_remove (&si, true);
5472 if (gimple_in_ssa_p (cfun))
5473 update_ssa (TODO_update_ssa_no_phi);
5474 return true;
5477 /* Expand an GIMPLE_OMP_ATOMIC statement. We try to expand
5478 using expand_omp_atomic_fetch_op. If it failed, we try to
5479 call expand_omp_atomic_pipeline, and if it fails too, the
5480 ultimate fallback is wrapping the operation in a mutex
5481 (expand_omp_atomic_mutex). REGION is the atomic region built
5482 by build_omp_regions_1(). */
5484 static void
5485 expand_omp_atomic (struct omp_region *region)
5487 basic_block load_bb = region->entry, store_bb = region->exit;
5488 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5489 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5490 tree addr = gimple_omp_atomic_load_rhs (load);
5491 tree stored_val = gimple_omp_atomic_store_val (store);
5492 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5493 HOST_WIDE_INT index;
5495 /* Make sure the type is one of the supported sizes. */
5496 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5497 index = exact_log2 (index);
5498 if (index >= 0 && index <= 4)
5500 unsigned int align = TYPE_ALIGN_UNIT (type);
5502 /* __sync builtins require strict data alignment. */
5503 if (exact_log2 (align) >= index)
5505 /* Atomic load. */
5506 if (loaded_val == stored_val
5507 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5508 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5509 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5510 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
5511 return;
5513 /* Atomic store. */
5514 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5515 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5516 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5517 && store_bb == single_succ (load_bb)
5518 && first_stmt (store_bb) == store
5519 && expand_omp_atomic_store (load_bb, addr, loaded_val,
5520 stored_val, index))
5521 return;
5523 /* When possible, use specialized atomic update functions. */
5524 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5525 && store_bb == single_succ (load_bb)
5526 && expand_omp_atomic_fetch_op (load_bb, addr,
5527 loaded_val, stored_val, index))
5528 return;
5530 /* If we don't have specialized __sync builtins, try and implement
5531 as a compare and swap loop. */
5532 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5533 loaded_val, stored_val, index))
5534 return;
5538 /* The ultimate fallback is wrapping the operation in a mutex. */
5539 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5543 /* Expand the parallel region tree rooted at REGION. Expansion
5544 proceeds in depth-first order. Innermost regions are expanded
5545 first. This way, parallel regions that require a new function to
5546 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5547 internal dependencies in their body. */
5549 static void
5550 expand_omp (struct omp_region *region)
5552 while (region)
5554 location_t saved_location;
5556 /* First, determine whether this is a combined parallel+workshare
5557 region. */
5558 if (region->type == GIMPLE_OMP_PARALLEL)
5559 determine_parallel_type (region);
5561 if (region->inner)
5562 expand_omp (region->inner);
5564 saved_location = input_location;
5565 if (gimple_has_location (last_stmt (region->entry)))
5566 input_location = gimple_location (last_stmt (region->entry));
5568 switch (region->type)
5570 case GIMPLE_OMP_PARALLEL:
5571 case GIMPLE_OMP_TASK:
5572 expand_omp_taskreg (region);
5573 break;
5575 case GIMPLE_OMP_FOR:
5576 expand_omp_for (region);
5577 break;
5579 case GIMPLE_OMP_SECTIONS:
5580 expand_omp_sections (region);
5581 break;
5583 case GIMPLE_OMP_SECTION:
5584 /* Individual omp sections are handled together with their
5585 parent GIMPLE_OMP_SECTIONS region. */
5586 break;
5588 case GIMPLE_OMP_SINGLE:
5589 expand_omp_single (region);
5590 break;
5592 case GIMPLE_OMP_MASTER:
5593 case GIMPLE_OMP_ORDERED:
5594 case GIMPLE_OMP_CRITICAL:
5595 expand_omp_synch (region);
5596 break;
5598 case GIMPLE_OMP_ATOMIC_LOAD:
5599 expand_omp_atomic (region);
5600 break;
5602 default:
5603 gcc_unreachable ();
5606 input_location = saved_location;
5607 region = region->next;
5612 /* Helper for build_omp_regions. Scan the dominator tree starting at
5613 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5614 true, the function ends once a single tree is built (otherwise, the whole
5615 forest of OMP constructs may be built). */
5617 static void
5618 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5619 bool single_tree)
5621 gimple_stmt_iterator gsi;
5622 gimple stmt;
5623 basic_block son;
5625 gsi = gsi_last_bb (bb);
5626 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5628 struct omp_region *region;
5629 enum gimple_code code;
5631 stmt = gsi_stmt (gsi);
5632 code = gimple_code (stmt);
5633 if (code == GIMPLE_OMP_RETURN)
5635 /* STMT is the return point out of region PARENT. Mark it
5636 as the exit point and make PARENT the immediately
5637 enclosing region. */
5638 gcc_assert (parent);
5639 region = parent;
5640 region->exit = bb;
5641 parent = parent->outer;
5643 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5645 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5646 GIMPLE_OMP_RETURN, but matches with
5647 GIMPLE_OMP_ATOMIC_LOAD. */
5648 gcc_assert (parent);
5649 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5650 region = parent;
5651 region->exit = bb;
5652 parent = parent->outer;
5655 else if (code == GIMPLE_OMP_CONTINUE)
5657 gcc_assert (parent);
5658 parent->cont = bb;
5660 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5662 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5663 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5666 else
5668 /* Otherwise, this directive becomes the parent for a new
5669 region. */
5670 region = new_omp_region (bb, code, parent);
5671 parent = region;
5675 if (single_tree && !parent)
5676 return;
5678 for (son = first_dom_son (CDI_DOMINATORS, bb);
5679 son;
5680 son = next_dom_son (CDI_DOMINATORS, son))
5681 build_omp_regions_1 (son, parent, single_tree);
5684 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5685 root_omp_region. */
5687 static void
5688 build_omp_regions_root (basic_block root)
5690 gcc_assert (root_omp_region == NULL);
5691 build_omp_regions_1 (root, NULL, true);
5692 gcc_assert (root_omp_region != NULL);
5695 /* Expands omp construct (and its subconstructs) starting in HEAD. */
5697 void
5698 omp_expand_local (basic_block head)
5700 build_omp_regions_root (head);
5701 if (dump_file && (dump_flags & TDF_DETAILS))
5703 fprintf (dump_file, "\nOMP region tree\n\n");
5704 dump_omp_region (dump_file, root_omp_region, 0);
5705 fprintf (dump_file, "\n");
5708 remove_exit_barriers (root_omp_region);
5709 expand_omp (root_omp_region);
5711 free_omp_regions ();
5714 /* Scan the CFG and build a tree of OMP regions, storing the root of
5715 the OMP region tree in root_omp_region. */
5717 static void
5718 build_omp_regions (void)
5720 gcc_assert (root_omp_region == NULL);
5721 calculate_dominance_info (CDI_DOMINATORS);
5722 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5725 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5727 static unsigned int
5728 execute_expand_omp (void)
5730 build_omp_regions ();
5732 if (!root_omp_region)
5733 return 0;
5735 if (dump_file)
5737 fprintf (dump_file, "\nOMP region tree\n\n");
5738 dump_omp_region (dump_file, root_omp_region, 0);
5739 fprintf (dump_file, "\n");
5742 remove_exit_barriers (root_omp_region);
5744 expand_omp (root_omp_region);
5746 cleanup_tree_cfg ();
5748 free_omp_regions ();
5750 return 0;
5753 /* OMP expansion -- the default pass, run before creation of SSA form. */
5755 static bool
5756 gate_expand_omp (void)
5758 return (flag_openmp != 0 && !seen_error ());
5761 struct gimple_opt_pass pass_expand_omp =
5764 GIMPLE_PASS,
5765 "ompexp", /* name */
5766 gate_expand_omp, /* gate */
5767 execute_expand_omp, /* execute */
5768 NULL, /* sub */
5769 NULL, /* next */
5770 0, /* static_pass_number */
5771 TV_NONE, /* tv_id */
5772 PROP_gimple_any, /* properties_required */
5773 0, /* properties_provided */
5774 0, /* properties_destroyed */
5775 0, /* todo_flags_start */
5776 0 /* todo_flags_finish */
5780 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5782 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5783 CTX is the enclosing OMP context for the current statement. */
5785 static void
5786 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5788 tree block, control;
5789 gimple_stmt_iterator tgsi;
5790 unsigned i, len;
5791 gimple stmt, new_stmt, bind, t;
5792 gimple_seq ilist, dlist, olist, new_body, body;
5793 struct gimplify_ctx gctx;
5795 stmt = gsi_stmt (*gsi_p);
5797 push_gimplify_context (&gctx);
5799 dlist = NULL;
5800 ilist = NULL;
5801 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5802 &ilist, &dlist, ctx);
5804 tgsi = gsi_start (gimple_omp_body (stmt));
5805 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5806 continue;
5808 tgsi = gsi_start (gimple_omp_body (stmt));
5809 body = NULL;
5810 for (i = 0; i < len; i++, gsi_next (&tgsi))
5812 omp_context *sctx;
5813 gimple sec_start;
5815 sec_start = gsi_stmt (tgsi);
5816 sctx = maybe_lookup_ctx (sec_start);
5817 gcc_assert (sctx);
5819 gimple_seq_add_stmt (&body, sec_start);
5821 lower_omp (gimple_omp_body (sec_start), sctx);
5822 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5823 gimple_omp_set_body (sec_start, NULL);
5825 if (i == len - 1)
5827 gimple_seq l = NULL;
5828 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5829 &l, ctx);
5830 gimple_seq_add_seq (&body, l);
5831 gimple_omp_section_set_last (sec_start);
5834 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5837 block = make_node (BLOCK);
5838 bind = gimple_build_bind (NULL, body, block);
5840 olist = NULL;
5841 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5843 block = make_node (BLOCK);
5844 new_stmt = gimple_build_bind (NULL, NULL, block);
5846 pop_gimplify_context (new_stmt);
5847 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5848 BLOCK_VARS (block) = gimple_bind_vars (bind);
5849 if (BLOCK_VARS (block))
5850 TREE_USED (block) = 1;
5852 new_body = NULL;
5853 gimple_seq_add_seq (&new_body, ilist);
5854 gimple_seq_add_stmt (&new_body, stmt);
5855 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5856 gimple_seq_add_stmt (&new_body, bind);
5858 control = create_tmp_var (unsigned_type_node, ".section");
5859 t = gimple_build_omp_continue (control, control);
5860 gimple_omp_sections_set_control (stmt, control);
5861 gimple_seq_add_stmt (&new_body, t);
5863 gimple_seq_add_seq (&new_body, olist);
5864 gimple_seq_add_seq (&new_body, dlist);
5866 new_body = maybe_catch_exception (new_body);
5868 t = gimple_build_omp_return
5869 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5870 OMP_CLAUSE_NOWAIT));
5871 gimple_seq_add_stmt (&new_body, t);
5873 gimple_bind_set_body (new_stmt, new_body);
5874 gimple_omp_set_body (stmt, NULL);
5876 gsi_replace (gsi_p, new_stmt, true);
5880 /* A subroutine of lower_omp_single. Expand the simple form of
5881 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5883 if (GOMP_single_start ())
5884 BODY;
5885 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5887 FIXME. It may be better to delay expanding the logic of this until
5888 pass_expand_omp. The expanded logic may make the job of a
5889 synchronization analysis pass more difficult. */
5891 static void
5892 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5894 location_t loc = gimple_location (single_stmt);
5895 tree tlabel = create_artificial_label (loc);
5896 tree flabel = create_artificial_label (loc);
5897 gimple call, cond;
5898 tree lhs, decl;
5900 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
5901 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5902 call = gimple_build_call (decl, 0);
5903 gimple_call_set_lhs (call, lhs);
5904 gimple_seq_add_stmt (pre_p, call);
5906 cond = gimple_build_cond (EQ_EXPR, lhs,
5907 fold_convert_loc (loc, TREE_TYPE (lhs),
5908 boolean_true_node),
5909 tlabel, flabel);
5910 gimple_seq_add_stmt (pre_p, cond);
5911 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5912 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5913 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5917 /* A subroutine of lower_omp_single. Expand the simple form of
5918 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
5920 #pragma omp single copyprivate (a, b, c)
5922 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5925 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5927 BODY;
5928 copyout.a = a;
5929 copyout.b = b;
5930 copyout.c = c;
5931 GOMP_single_copy_end (&copyout);
5933 else
5935 a = copyout_p->a;
5936 b = copyout_p->b;
5937 c = copyout_p->c;
5939 GOMP_barrier ();
5942 FIXME. It may be better to delay expanding the logic of this until
5943 pass_expand_omp. The expanded logic may make the job of a
5944 synchronization analysis pass more difficult. */
5946 static void
5947 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5949 tree ptr_type, t, l0, l1, l2, bfn_decl;
5950 gimple_seq copyin_seq;
5951 location_t loc = gimple_location (single_stmt);
5953 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5955 ptr_type = build_pointer_type (ctx->record_type);
5956 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5958 l0 = create_artificial_label (loc);
5959 l1 = create_artificial_label (loc);
5960 l2 = create_artificial_label (loc);
5962 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
5963 t = build_call_expr_loc (loc, bfn_decl, 0);
5964 t = fold_convert_loc (loc, ptr_type, t);
5965 gimplify_assign (ctx->receiver_decl, t, pre_p);
5967 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5968 build_int_cst (ptr_type, 0));
5969 t = build3 (COND_EXPR, void_type_node, t,
5970 build_and_jump (&l0), build_and_jump (&l1));
5971 gimplify_and_add (t, pre_p);
5973 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5975 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5977 copyin_seq = NULL;
5978 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5979 &copyin_seq, ctx);
5981 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
5982 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
5983 t = build_call_expr_loc (loc, bfn_decl, 1, t);
5984 gimplify_and_add (t, pre_p);
5986 t = build_and_jump (&l2);
5987 gimplify_and_add (t, pre_p);
5989 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5991 gimple_seq_add_seq (pre_p, copyin_seq);
5993 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5997 /* Lower code for an OpenMP single directive. */
5999 static void
6000 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6002 tree block;
6003 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
6004 gimple_seq bind_body, dlist;
6005 struct gimplify_ctx gctx;
6007 push_gimplify_context (&gctx);
6009 bind_body = NULL;
6010 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
6011 &bind_body, &dlist, ctx);
6012 lower_omp (gimple_omp_body (single_stmt), ctx);
6014 gimple_seq_add_stmt (&bind_body, single_stmt);
6016 if (ctx->record_type)
6017 lower_omp_single_copy (single_stmt, &bind_body, ctx);
6018 else
6019 lower_omp_single_simple (single_stmt, &bind_body);
6021 gimple_omp_set_body (single_stmt, NULL);
6023 gimple_seq_add_seq (&bind_body, dlist);
6025 bind_body = maybe_catch_exception (bind_body);
6027 t = gimple_build_omp_return
6028 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
6029 OMP_CLAUSE_NOWAIT));
6030 gimple_seq_add_stmt (&bind_body, t);
6032 block = make_node (BLOCK);
6033 bind = gimple_build_bind (NULL, bind_body, block);
6035 pop_gimplify_context (bind);
6037 gimple_bind_append_vars (bind, ctx->block_vars);
6038 BLOCK_VARS (block) = ctx->block_vars;
6039 gsi_replace (gsi_p, bind, true);
6040 if (BLOCK_VARS (block))
6041 TREE_USED (block) = 1;
6047 /* Lower code for an OpenMP master directive. */
6047 static void
6048 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6050 tree block, lab = NULL, x, bfn_decl;
6051 gimple stmt = gsi_stmt (*gsi_p), bind;
6052 location_t loc = gimple_location (stmt);
6053 gimple_seq tseq;
6054 struct gimplify_ctx gctx;
6056 push_gimplify_context (&gctx);
6058 block = make_node (BLOCK);
6059 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
6060 block);
6062 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
6063 x = build_call_expr_loc (loc, bfn_decl, 0);
6064 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
6065 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
6066 tseq = NULL;
6067 gimplify_and_add (x, &tseq);
6068 gimple_bind_add_seq (bind, tseq);
6070 lower_omp (gimple_omp_body (stmt), ctx);
6071 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6072 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6073 gimple_omp_set_body (stmt, NULL);
6075 gimple_bind_add_stmt (bind, gimple_build_label (lab));
6077 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6079 pop_gimplify_context (bind);
6081 gimple_bind_append_vars (bind, ctx->block_vars);
6082 BLOCK_VARS (block) = ctx->block_vars;
6083 gsi_replace (gsi_p, bind, true);
6089 /* Lower code for an OpenMP ordered directive. */
6089 static void
6090 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6092 tree block;
6093 gimple stmt = gsi_stmt (*gsi_p), bind, x;
6094 struct gimplify_ctx gctx;
6096 push_gimplify_context (&gctx);
6098 block = make_node (BLOCK);
6099 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
6100 block);
6102 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
6103 0);
6104 gimple_bind_add_stmt (bind, x);
6106 lower_omp (gimple_omp_body (stmt), ctx);
6107 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6108 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6109 gimple_omp_set_body (stmt, NULL);
6111 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
6112 gimple_bind_add_stmt (bind, x);
6114 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6116 pop_gimplify_context (bind);
6118 gimple_bind_append_vars (bind, ctx->block_vars);
6119 BLOCK_VARS (block) = gimple_bind_vars (bind);
6120 gsi_replace (gsi_p, bind, true);
6124 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
6125 substitution of a couple of function calls. But the NAMED case
6126 requires that languages coordinate a symbol name. It is therefore
6127 best put here in common code. */
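/* As an illustrative sketch (not taken from the sources),

     #pragma omp critical (lck)
       count++;

   is lowered to roughly

     GOMP_critical_name_start (&.gomp_critical_user_lck);
     count++;
     GOMP_critical_name_end (&.gomp_critical_user_lck);

   where .gomp_critical_user_lck is the common, public symbol created
   below, so every translation unit that uses the same name ends up
   sharing one lock; the unnamed form calls GOMP_critical_start and
   GOMP_critical_end instead.  */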
6129 static GTY((param1_is (tree), param2_is (tree)))
6130 splay_tree critical_name_mutexes;
6132 static void
6133 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6135 tree block;
6136 tree name, lock, unlock;
6137 gimple stmt = gsi_stmt (*gsi_p), bind;
6138 location_t loc = gimple_location (stmt);
6139 gimple_seq tbody;
6140 struct gimplify_ctx gctx;
6142 name = gimple_omp_critical_name (stmt);
6143 if (name)
6145 tree decl;
6146 splay_tree_node n;
6148 if (!critical_name_mutexes)
6149 critical_name_mutexes
6150 = splay_tree_new_ggc (splay_tree_compare_pointers,
6151 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
6152 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
6154 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
6155 if (n == NULL)
6157 char *new_str;
6159 decl = create_tmp_var_raw (ptr_type_node, NULL);
6161 new_str = ACONCAT ((".gomp_critical_user_",
6162 IDENTIFIER_POINTER (name), NULL));
6163 DECL_NAME (decl) = get_identifier (new_str);
6164 TREE_PUBLIC (decl) = 1;
6165 TREE_STATIC (decl) = 1;
6166 DECL_COMMON (decl) = 1;
6167 DECL_ARTIFICIAL (decl) = 1;
6168 DECL_IGNORED_P (decl) = 1;
6169 varpool_finalize_decl (decl);
6171 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
6172 (splay_tree_value) decl);
6174 else
6175 decl = (tree) n->value;
6177 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
6178 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
6180 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
6181 unlock = build_call_expr_loc (loc, unlock, 1,
6182 build_fold_addr_expr_loc (loc, decl));
6184 else
6186 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
6187 lock = build_call_expr_loc (loc, lock, 0);
6189 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
6190 unlock = build_call_expr_loc (loc, unlock, 0);
6193 push_gimplify_context (&gctx);
6195 block = make_node (BLOCK);
6196 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
6198 tbody = gimple_bind_body (bind);
6199 gimplify_and_add (lock, &tbody);
6200 gimple_bind_set_body (bind, tbody);
6202 lower_omp (gimple_omp_body (stmt), ctx);
6203 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6204 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6205 gimple_omp_set_body (stmt, NULL);
6207 tbody = gimple_bind_body (bind);
6208 gimplify_and_add (unlock, &tbody);
6209 gimple_bind_set_body (bind, tbody);
6211 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6213 pop_gimplify_context (bind);
6214 gimple_bind_append_vars (bind, ctx->block_vars);
6215 BLOCK_VARS (block) = gimple_bind_vars (bind);
6216 gsi_replace (gsi_p, bind, true);
6220 /* A subroutine of lower_omp_for. Generate code to emit the predicate
6221 for a lastprivate clause. Given a loop control predicate of (V
6222 cond N2), we gate the clause on (!(V cond N2)). The lowered form
6223 is appended to *DLIST, iterator initialization is appended to
6224 *BODY_P. */
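/* For example (an illustrative sketch): for

     #pragma omp for lastprivate (x)
       for (V = N1; V < N2; V += STEP) ...

   the copy-out of x appended to *DLIST is gated on (V >= N2), so only
   the thread whose chunk ran the sequentially last iteration performs
   it; when STEP is known to be +1 or -1 the guard is narrowed to
   (V == N2) below.  */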
6226 static void
6227 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
6228 gimple_seq *dlist, struct omp_context *ctx)
6230 tree clauses, cond, vinit;
6231 enum tree_code cond_code;
6232 gimple_seq stmts;
6234 cond_code = fd->loop.cond_code;
6235 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6237 /* When possible, use a strict equality expression. This can let VRP
6238 type optimizations deduce the value and remove a copy. */
6239 if (host_integerp (fd->loop.step, 0))
6241 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
6242 if (step == 1 || step == -1)
6243 cond_code = EQ_EXPR;
6246 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
6248 clauses = gimple_omp_for_clauses (fd->for_stmt);
6249 stmts = NULL;
6250 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
6251 if (!gimple_seq_empty_p (stmts))
6253 gimple_seq_add_seq (&stmts, *dlist);
6254 *dlist = stmts;
6256 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
6257 vinit = fd->loop.n1;
6258 if (cond_code == EQ_EXPR
6259 && host_integerp (fd->loop.n2, 0)
6260 && ! integer_zerop (fd->loop.n2))
6261 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
6263 /* Initialize the iterator variable, so that threads that don't execute
6264 any iterations don't execute the lastprivate clauses by accident. */
6265 gimplify_assign (fd->loop.v, vinit, body_p);
6270 /* Lower code for an OpenMP loop directive. */
6272 static void
6273 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6275 tree *rhs_p, block;
6276 struct omp_for_data fd;
6277 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6278 gimple_seq omp_for_body, body, dlist;
6279 size_t i;
6280 struct gimplify_ctx gctx;
6282 push_gimplify_context (&gctx);
6284 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
6285 lower_omp (gimple_omp_body (stmt), ctx);
6287 block = make_node (BLOCK);
6288 new_stmt = gimple_build_bind (NULL, NULL, block);
6290 /* Move declaration of temporaries in the loop body before we make
6291 it go away. */
6292 omp_for_body = gimple_omp_body (stmt);
6293 if (!gimple_seq_empty_p (omp_for_body)
6294 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6296 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6297 gimple_bind_append_vars (new_stmt, vars);
6300 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
6301 dlist = NULL;
6302 body = NULL;
6303 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6304 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6306 /* Lower the header expressions. At this point, we can assume that
6307 the header is of the form:
6309 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6311 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6312 using the .omp_data_s mapping, if needed. */
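/* For instance (an illustrative sketch): if VAL2 above is the
   expression n - 1, it is not a gimple invariant, so the loop below
   pulls it into a formal temporary (roughly  .tmp = n - 1;  using the
   mapped copy of n) and the GIMPLE_OMP_FOR header then uses that
   temporary as its bound.  */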
6313 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6315 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6316 if (!is_gimple_min_invariant (*rhs_p))
6317 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6319 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6320 if (!is_gimple_min_invariant (*rhs_p))
6321 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6323 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6324 if (!is_gimple_min_invariant (*rhs_p))
6325 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6328 /* Once lowered, extract the bounds and clauses. */
6329 extract_omp_for_data (stmt, &fd, NULL);
6331 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6333 gimple_seq_add_stmt (&body, stmt);
6334 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6336 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6337 fd.loop.v));
6339 /* After the loop, add exit clauses. */
6340 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6341 gimple_seq_add_seq (&body, dlist);
6343 body = maybe_catch_exception (body);
6345 /* Region exit marker goes at the end of the loop body. */
6346 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6348 pop_gimplify_context (new_stmt);
6350 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6351 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6352 if (BLOCK_VARS (block))
6353 TREE_USED (block) = 1;
6355 gimple_bind_set_body (new_stmt, body);
6356 gimple_omp_set_body (stmt, NULL);
6357 gimple_omp_for_set_pre_body (stmt, NULL);
6358 gsi_replace (gsi_p, new_stmt, true);
6361 /* Callback for walk_stmts. Check if the current statement only contains
6362 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
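/* As a hedged sketch: a parallel body consisting solely of a single
   "#pragma omp for" (or sections) drives *info from 0 to 1 and the
   caller then marks the parallel as combined; a second workshare, or
   any other top-level statement, drives *info to -1 and the parallel
   is left alone.  */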
6364 static tree
6365 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6366 bool *handled_ops_p,
6367 struct walk_stmt_info *wi)
6369 int *info = (int *) wi->info;
6370 gimple stmt = gsi_stmt (*gsi_p);
6372 *handled_ops_p = true;
6373 switch (gimple_code (stmt))
6375 WALK_SUBSTMTS;
6377 case GIMPLE_OMP_FOR:
6378 case GIMPLE_OMP_SECTIONS:
6379 *info = *info == 0 ? 1 : -1;
6380 break;
6381 default:
6382 *info = -1;
6383 break;
6385 return NULL;
6388 struct omp_taskcopy_context
6390 /* This field must be at the beginning, as we do "inheritance": Some
6391 callback functions for tree-inline.c (e.g., omp_copy_decl)
6392 receive a copy_body_data pointer that is up-casted to an
6393 omp_context pointer. */
6394 copy_body_data cb;
6395 omp_context *ctx;
6398 static tree
6399 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6401 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6403 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6404 return create_tmp_var (TREE_TYPE (var), NULL);
6406 return var;
6409 static tree
6410 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6412 tree name, new_fields = NULL, type, f;
6414 type = lang_hooks.types.make_type (RECORD_TYPE);
6415 name = DECL_NAME (TYPE_NAME (orig_type));
6416 name = build_decl (gimple_location (tcctx->ctx->stmt),
6417 TYPE_DECL, name, type);
6418 TYPE_NAME (type) = name;
6420 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6422 tree new_f = copy_node (f);
6423 DECL_CONTEXT (new_f) = type;
6424 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6425 TREE_CHAIN (new_f) = new_fields;
6426 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6427 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6428 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6429 &tcctx->cb, NULL);
6430 new_fields = new_f;
6431 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6433 TYPE_FIELDS (type) = nreverse (new_fields);
6434 layout_type (type);
6435 return type;
6438 /* Create the task copy function, which copies shared variable pointers
6439 and firstprivate variables from the sender record into the task's record. */
6440 static void
6441 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6443 struct function *child_cfun;
6444 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6445 tree record_type, srecord_type, bind, list;
6446 bool record_needs_remap = false, srecord_needs_remap = false;
6447 splay_tree_node n;
6448 struct omp_taskcopy_context tcctx;
6449 struct gimplify_ctx gctx;
6450 location_t loc = gimple_location (task_stmt);
6452 child_fn = gimple_omp_task_copy_fn (task_stmt);
6453 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6454 gcc_assert (child_cfun->cfg == NULL);
6455 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6457 /* Reset DECL_CONTEXT on function arguments. */
6458 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
6459 DECL_CONTEXT (t) = child_fn;
6461 /* Populate the function. */
6462 push_gimplify_context (&gctx);
6463 current_function_decl = child_fn;
6465 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6466 TREE_SIDE_EFFECTS (bind) = 1;
6467 list = NULL;
6468 DECL_SAVED_TREE (child_fn) = bind;
6469 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6471 /* Remap src and dst argument types if needed. */
6472 record_type = ctx->record_type;
6473 srecord_type = ctx->srecord_type;
6474 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
6475 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6477 record_needs_remap = true;
6478 break;
6480 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
6481 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6483 srecord_needs_remap = true;
6484 break;
6487 if (record_needs_remap || srecord_needs_remap)
6489 memset (&tcctx, '\0', sizeof (tcctx));
6490 tcctx.cb.src_fn = ctx->cb.src_fn;
6491 tcctx.cb.dst_fn = child_fn;
6492 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
6493 gcc_checking_assert (tcctx.cb.src_node);
6494 tcctx.cb.dst_node = tcctx.cb.src_node;
6495 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6496 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6497 tcctx.cb.eh_lp_nr = 0;
6498 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6499 tcctx.cb.decl_map = pointer_map_create ();
6500 tcctx.ctx = ctx;
6502 if (record_needs_remap)
6503 record_type = task_copyfn_remap_type (&tcctx, record_type);
6504 if (srecord_needs_remap)
6505 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6507 else
6508 tcctx.cb.decl_map = NULL;
6510 push_cfun (child_cfun);
6512 arg = DECL_ARGUMENTS (child_fn);
6513 TREE_TYPE (arg) = build_pointer_type (record_type);
6514 sarg = DECL_CHAIN (arg);
6515 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6517 /* First pass: initialize temporaries used in record_type and srecord_type
6518 sizes and field offsets. */
6519 if (tcctx.cb.decl_map)
6520 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6521 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6523 tree *p;
6525 decl = OMP_CLAUSE_DECL (c);
6526 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6527 if (p == NULL)
6528 continue;
6529 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6530 sf = (tree) n->value;
6531 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6532 src = build_simple_mem_ref_loc (loc, sarg);
6533 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6534 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6535 append_to_statement_list (t, &list);
6538 /* Second pass: copy shared var pointers and copy construct non-VLA
6539 firstprivate vars. */
6540 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6541 switch (OMP_CLAUSE_CODE (c))
6543 case OMP_CLAUSE_SHARED:
6544 decl = OMP_CLAUSE_DECL (c);
6545 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6546 if (n == NULL)
6547 break;
6548 f = (tree) n->value;
6549 if (tcctx.cb.decl_map)
6550 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6551 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6552 sf = (tree) n->value;
6553 if (tcctx.cb.decl_map)
6554 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6555 src = build_simple_mem_ref_loc (loc, sarg);
6556 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6557 dst = build_simple_mem_ref_loc (loc, arg);
6558 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6559 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6560 append_to_statement_list (t, &list);
6561 break;
6562 case OMP_CLAUSE_FIRSTPRIVATE:
6563 decl = OMP_CLAUSE_DECL (c);
6564 if (is_variable_sized (decl))
6565 break;
6566 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6567 if (n == NULL)
6568 break;
6569 f = (tree) n->value;
6570 if (tcctx.cb.decl_map)
6571 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6572 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6573 if (n != NULL)
6575 sf = (tree) n->value;
6576 if (tcctx.cb.decl_map)
6577 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6578 src = build_simple_mem_ref_loc (loc, sarg);
6579 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6580 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6581 src = build_simple_mem_ref_loc (loc, src);
6583 else
6584 src = decl;
6585 dst = build_simple_mem_ref_loc (loc, arg);
6586 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6587 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6588 append_to_statement_list (t, &list);
6589 break;
6590 case OMP_CLAUSE_PRIVATE:
6591 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6592 break;
6593 decl = OMP_CLAUSE_DECL (c);
6594 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6595 f = (tree) n->value;
6596 if (tcctx.cb.decl_map)
6597 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6598 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6599 if (n != NULL)
6601 sf = (tree) n->value;
6602 if (tcctx.cb.decl_map)
6603 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6604 src = build_simple_mem_ref_loc (loc, sarg);
6605 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6606 if (use_pointer_for_field (decl, NULL))
6607 src = build_simple_mem_ref_loc (loc, src);
6609 else
6610 src = decl;
6611 dst = build_simple_mem_ref_loc (loc, arg);
6612 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6613 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6614 append_to_statement_list (t, &list);
6615 break;
6616 default:
6617 break;
6620 /* Last pass: handle VLA firstprivates. */
6621 if (tcctx.cb.decl_map)
6622 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6623 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6625 tree ind, ptr, df;
6627 decl = OMP_CLAUSE_DECL (c);
6628 if (!is_variable_sized (decl))
6629 continue;
6630 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6631 if (n == NULL)
6632 continue;
6633 f = (tree) n->value;
6634 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6635 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6636 ind = DECL_VALUE_EXPR (decl);
6637 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6638 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6639 n = splay_tree_lookup (ctx->sfield_map,
6640 (splay_tree_key) TREE_OPERAND (ind, 0));
6641 sf = (tree) n->value;
6642 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6643 src = build_simple_mem_ref_loc (loc, sarg);
6644 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6645 src = build_simple_mem_ref_loc (loc, src);
6646 dst = build_simple_mem_ref_loc (loc, arg);
6647 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6648 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6649 append_to_statement_list (t, &list);
6650 n = splay_tree_lookup (ctx->field_map,
6651 (splay_tree_key) TREE_OPERAND (ind, 0));
6652 df = (tree) n->value;
6653 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6654 ptr = build_simple_mem_ref_loc (loc, arg);
6655 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6656 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6657 build_fold_addr_expr_loc (loc, dst));
6658 append_to_statement_list (t, &list);
6661 t = build1 (RETURN_EXPR, void_type_node, NULL);
6662 append_to_statement_list (t, &list);
6664 if (tcctx.cb.decl_map)
6665 pointer_map_destroy (tcctx.cb.decl_map);
6666 pop_gimplify_context (NULL);
6667 BIND_EXPR_BODY (bind) = list;
6668 pop_cfun ();
6669 current_function_decl = ctx->cb.src_fn;
6672 /* Lower the OpenMP parallel or task directive in the current statement
6673 in GSI_P. CTX holds context information for the directive. */
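/* As a rough illustrative sketch (not taken from any testcase),

     #pragma omp parallel shared (a) firstprivate (b)
       use (a, b);

   is lowered so that the encountering thread fills a local
   .omp_data_o record (a by address, b by value) whose address is
   attached to the GIMPLE_OMP_PARALLEL, while the lowered body reads a
   and b back through the matching receiver record; the outlining into
   the child function and the libgomp calls themselves are emitted
   later by pass_expand_omp.  */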
6675 static void
6676 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6678 tree clauses;
6679 tree child_fn, t;
6680 gimple stmt = gsi_stmt (*gsi_p);
6681 gimple par_bind, bind;
6682 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6683 struct gimplify_ctx gctx;
6684 location_t loc = gimple_location (stmt);
6686 clauses = gimple_omp_taskreg_clauses (stmt);
6687 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6688 par_body = gimple_bind_body (par_bind);
6689 child_fn = ctx->cb.dst_fn;
6690 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6691 && !gimple_omp_parallel_combined_p (stmt))
6693 struct walk_stmt_info wi;
6694 int ws_num = 0;
6696 memset (&wi, 0, sizeof (wi));
6697 wi.info = &ws_num;
6698 wi.val_only = true;
6699 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6700 if (ws_num == 1)
6701 gimple_omp_parallel_set_combined_p (stmt, true);
6703 if (ctx->srecord_type)
6704 create_task_copyfn (stmt, ctx);
6706 push_gimplify_context (&gctx);
6708 par_olist = NULL;
6709 par_ilist = NULL;
6710 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6711 lower_omp (par_body, ctx);
6712 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6713 lower_reduction_clauses (clauses, &par_olist, ctx);
6715 /* Declare all the variables created by mapping and the variables
6716 declared in the scope of the parallel body. */
6717 record_vars_into (ctx->block_vars, child_fn);
6718 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6720 if (ctx->record_type)
6722 ctx->sender_decl
6723 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6724 : ctx->record_type, ".omp_data_o");
6725 DECL_NAMELESS (ctx->sender_decl) = 1;
6726 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6727 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6730 olist = NULL;
6731 ilist = NULL;
6732 lower_send_clauses (clauses, &ilist, &olist, ctx);
6733 lower_send_shared_vars (&ilist, &olist, ctx);
6735 /* Once all the expansions are done, sequence all the different
6736 fragments inside gimple_omp_body. */
6738 new_body = NULL;
6740 if (ctx->record_type)
6742 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6743 /* fixup_child_record_type might have changed receiver_decl's type. */
6744 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
6745 gimple_seq_add_stmt (&new_body,
6746 gimple_build_assign (ctx->receiver_decl, t));
6749 gimple_seq_add_seq (&new_body, par_ilist);
6750 gimple_seq_add_seq (&new_body, par_body);
6751 gimple_seq_add_seq (&new_body, par_olist);
6752 new_body = maybe_catch_exception (new_body);
6753 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6754 gimple_omp_set_body (stmt, new_body);
6756 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6757 gimple_bind_add_stmt (bind, stmt);
6758 if (ilist || olist)
6760 gimple_seq_add_stmt (&ilist, bind);
6761 gimple_seq_add_seq (&ilist, olist);
6762 bind = gimple_build_bind (NULL, ilist, NULL);
6765 gsi_replace (gsi_p, bind, true);
6767 pop_gimplify_context (NULL);
6770 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6771 regimplified. If DATA is non-NULL, lower_omp_1 is being called outside
6772 of an OpenMP context, but with task_shared_vars set. */
6774 static tree
6775 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6776 void *data)
6778 tree t = *tp;
6780 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6781 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6782 return t;
6784 if (task_shared_vars
6785 && DECL_P (t)
6786 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6787 return t;
6789 /* If a global variable has been privatized, TREE_CONSTANT on
6790 ADDR_EXPR might be wrong. */
6791 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6792 recompute_tree_invariant_for_addr_expr (t);
6794 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6795 return NULL_TREE;
6798 static void
6799 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6801 gimple stmt = gsi_stmt (*gsi_p);
6802 struct walk_stmt_info wi;
6804 if (gimple_has_location (stmt))
6805 input_location = gimple_location (stmt);
6807 if (task_shared_vars)
6808 memset (&wi, '\0', sizeof (wi));
6810 /* If we have issued syntax errors, avoid doing any heavy lifting.
6811 Just replace the OpenMP directives with a NOP to avoid
6812 confusing RTL expansion. */
6813 if (seen_error () && is_gimple_omp (stmt))
6815 gsi_replace (gsi_p, gimple_build_nop (), true);
6816 return;
6819 switch (gimple_code (stmt))
6821 case GIMPLE_COND:
6822 if ((ctx || task_shared_vars)
6823 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6824 ctx ? NULL : &wi, NULL)
6825 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6826 ctx ? NULL : &wi, NULL)))
6827 gimple_regimplify_operands (stmt, gsi_p);
6828 break;
6829 case GIMPLE_CATCH:
6830 lower_omp (gimple_catch_handler (stmt), ctx);
6831 break;
6832 case GIMPLE_EH_FILTER:
6833 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6834 break;
6835 case GIMPLE_TRY:
6836 lower_omp (gimple_try_eval (stmt), ctx);
6837 lower_omp (gimple_try_cleanup (stmt), ctx);
6838 break;
6839 case GIMPLE_BIND:
6840 lower_omp (gimple_bind_body (stmt), ctx);
6841 break;
6842 case GIMPLE_OMP_PARALLEL:
6843 case GIMPLE_OMP_TASK:
6844 ctx = maybe_lookup_ctx (stmt);
6845 lower_omp_taskreg (gsi_p, ctx);
6846 break;
6847 case GIMPLE_OMP_FOR:
6848 ctx = maybe_lookup_ctx (stmt);
6849 gcc_assert (ctx);
6850 lower_omp_for (gsi_p, ctx);
6851 break;
6852 case GIMPLE_OMP_SECTIONS:
6853 ctx = maybe_lookup_ctx (stmt);
6854 gcc_assert (ctx);
6855 lower_omp_sections (gsi_p, ctx);
6856 break;
6857 case GIMPLE_OMP_SINGLE:
6858 ctx = maybe_lookup_ctx (stmt);
6859 gcc_assert (ctx);
6860 lower_omp_single (gsi_p, ctx);
6861 break;
6862 case GIMPLE_OMP_MASTER:
6863 ctx = maybe_lookup_ctx (stmt);
6864 gcc_assert (ctx);
6865 lower_omp_master (gsi_p, ctx);
6866 break;
6867 case GIMPLE_OMP_ORDERED:
6868 ctx = maybe_lookup_ctx (stmt);
6869 gcc_assert (ctx);
6870 lower_omp_ordered (gsi_p, ctx);
6871 break;
6872 case GIMPLE_OMP_CRITICAL:
6873 ctx = maybe_lookup_ctx (stmt);
6874 gcc_assert (ctx);
6875 lower_omp_critical (gsi_p, ctx);
6876 break;
6877 case GIMPLE_OMP_ATOMIC_LOAD:
6878 if ((ctx || task_shared_vars)
6879 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6880 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6881 gimple_regimplify_operands (stmt, gsi_p);
6882 break;
6883 default:
6884 if ((ctx || task_shared_vars)
6885 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6886 ctx ? NULL : &wi))
6887 gimple_regimplify_operands (stmt, gsi_p);
6888 break;
6892 static void
6893 lower_omp (gimple_seq body, omp_context *ctx)
6895 location_t saved_location = input_location;
6896 gimple_stmt_iterator gsi;
6897 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6898 lower_omp_1 (&gsi, ctx);
6899 input_location = saved_location;
6902 /* Main entry point for the OMP lowering pass. */
6904 static unsigned int
6905 execute_lower_omp (void)
6907 gimple_seq body;
6909 /* This pass always runs, to provide PROP_gimple_lomp.
6910 But there is nothing to do unless -fopenmp is given. */
6911 if (flag_openmp == 0)
6912 return 0;
6914 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6915 delete_omp_context);
6917 body = gimple_body (current_function_decl);
6918 scan_omp (body, NULL);
6919 gcc_assert (taskreg_nesting_level == 0);
6921 if (all_contexts->root)
6923 struct gimplify_ctx gctx;
6925 if (task_shared_vars)
6926 push_gimplify_context (&gctx);
6927 lower_omp (body, NULL);
6928 if (task_shared_vars)
6929 pop_gimplify_context (NULL);
6932 if (all_contexts)
6934 splay_tree_delete (all_contexts);
6935 all_contexts = NULL;
6937 BITMAP_FREE (task_shared_vars);
6938 return 0;
6941 struct gimple_opt_pass pass_lower_omp =
6944 GIMPLE_PASS,
6945 "omplower", /* name */
6946 NULL, /* gate */
6947 execute_lower_omp, /* execute */
6948 NULL, /* sub */
6949 NULL, /* next */
6950 0, /* static_pass_number */
6951 TV_NONE, /* tv_id */
6952 PROP_gimple_any, /* properties_required */
6953 PROP_gimple_lomp, /* properties_provided */
6954 0, /* properties_destroyed */
6955 0, /* todo_flags_start */
6956 0 /* todo_flags_finish */
6960 /* The following is a utility to diagnose OpenMP structured block violations.
6961 It is not part of the "omplower" pass, as that's invoked too late. It
6962 should be invoked by the respective front ends after gimplification. */
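/* For example (a hedged sketch):

     goto l;
     #pragma omp parallel
       { l:; }

   pass 1 below records label l with the GIMPLE_OMP_PARALLEL as its
   context, and pass 2, finding the goto outside any OMP context,
   reports "invalid entry to OpenMP structured block" and replaces the
   branch with a nop.  */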
6964 static splay_tree all_labels;
6966 /* Check for mismatched contexts and generate an error if needed. Return
6967 true if an error is detected. */
6969 static bool
6970 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6971 gimple branch_ctx, gimple label_ctx)
6973 if (label_ctx == branch_ctx)
6974 return false;
6978 Previously we kept track of the label's entire context in diagnose_sb_[12]
6979 so we could traverse it and issue a correct "exit" or "enter" error
6980 message upon a structured block violation.
6982 We built the context by building a list with tree_cons'ing, but there is
6983 no easy counterpart in gimple tuples. It seems like far too much work
6984 for issuing exit/enter error messages. If someone really misses the
6985 distinct error message... patches welcome.
6988 #if 0
6989 /* Try to avoid confusing the user by producing an error message
6990 with correct "exit" or "enter" verbiage. We prefer "exit"
6991 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6992 if (branch_ctx == NULL)
6993 exit_p = false;
6994 else
6996 while (label_ctx)
6998 if (TREE_VALUE (label_ctx) == branch_ctx)
7000 exit_p = false;
7001 break;
7003 label_ctx = TREE_CHAIN (label_ctx);
7007 if (exit_p)
7008 error ("invalid exit from OpenMP structured block");
7009 else
7010 error ("invalid entry to OpenMP structured block");
7011 #endif
7013 /* If it's obvious we have an invalid entry, be specific about the error. */
7014 if (branch_ctx == NULL)
7015 error ("invalid entry to OpenMP structured block");
7016 else
7017 /* Otherwise, be vague and lazy, but efficient. */
7018 error ("invalid branch to/from an OpenMP structured block");
7020 gsi_replace (gsi_p, gimple_build_nop (), false);
7021 return true;
7024 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
7025 where each label is found. */
7027 static tree
7028 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7029 struct walk_stmt_info *wi)
7031 gimple context = (gimple) wi->info;
7032 gimple inner_context;
7033 gimple stmt = gsi_stmt (*gsi_p);
7035 *handled_ops_p = true;
7037 switch (gimple_code (stmt))
7039 WALK_SUBSTMTS;
7041 case GIMPLE_OMP_PARALLEL:
7042 case GIMPLE_OMP_TASK:
7043 case GIMPLE_OMP_SECTIONS:
7044 case GIMPLE_OMP_SINGLE:
7045 case GIMPLE_OMP_SECTION:
7046 case GIMPLE_OMP_MASTER:
7047 case GIMPLE_OMP_ORDERED:
7048 case GIMPLE_OMP_CRITICAL:
7049 /* The minimal context here is just the current OMP construct. */
7050 inner_context = stmt;
7051 wi->info = inner_context;
7052 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
7053 wi->info = context;
7054 break;
7056 case GIMPLE_OMP_FOR:
7057 inner_context = stmt;
7058 wi->info = inner_context;
7059 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7060 walk them. */
7061 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
7062 diagnose_sb_1, NULL, wi);
7063 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
7064 wi->info = context;
7065 break;
7067 case GIMPLE_LABEL:
7068 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
7069 (splay_tree_value) context);
7070 break;
7072 default:
7073 break;
7076 return NULL_TREE;
7079 /* Pass 2: Check each branch and see if its context differs from that of
7080 the destination label's context. */
7082 static tree
7083 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7084 struct walk_stmt_info *wi)
7086 gimple context = (gimple) wi->info;
7087 splay_tree_node n;
7088 gimple stmt = gsi_stmt (*gsi_p);
7090 *handled_ops_p = true;
7092 switch (gimple_code (stmt))
7094 WALK_SUBSTMTS;
7096 case GIMPLE_OMP_PARALLEL:
7097 case GIMPLE_OMP_TASK:
7098 case GIMPLE_OMP_SECTIONS:
7099 case GIMPLE_OMP_SINGLE:
7100 case GIMPLE_OMP_SECTION:
7101 case GIMPLE_OMP_MASTER:
7102 case GIMPLE_OMP_ORDERED:
7103 case GIMPLE_OMP_CRITICAL:
7104 wi->info = stmt;
7105 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
7106 wi->info = context;
7107 break;
7109 case GIMPLE_OMP_FOR:
7110 wi->info = stmt;
7111 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7112 walk them. */
7113 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
7114 diagnose_sb_2, NULL, wi);
7115 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
7116 wi->info = context;
7117 break;
7119 case GIMPLE_COND:
7121 tree lab = gimple_cond_true_label (stmt);
7122 if (lab)
7124 n = splay_tree_lookup (all_labels,
7125 (splay_tree_key) lab);
7126 diagnose_sb_0 (gsi_p, context,
7127 n ? (gimple) n->value : NULL);
7129 lab = gimple_cond_false_label (stmt);
7130 if (lab)
7132 n = splay_tree_lookup (all_labels,
7133 (splay_tree_key) lab);
7134 diagnose_sb_0 (gsi_p, context,
7135 n ? (gimple) n->value : NULL);
7138 break;
7140 case GIMPLE_GOTO:
7142 tree lab = gimple_goto_dest (stmt);
7143 if (TREE_CODE (lab) != LABEL_DECL)
7144 break;
7146 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
7147 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
7149 break;
7151 case GIMPLE_SWITCH:
7153 unsigned int i;
7154 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
7156 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
7157 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
7158 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
7159 break;
7162 break;
7164 case GIMPLE_RETURN:
7165 diagnose_sb_0 (gsi_p, context, NULL);
7166 break;
7168 default:
7169 break;
7172 return NULL_TREE;
7175 static unsigned int
7176 diagnose_omp_structured_block_errors (void)
7178 struct walk_stmt_info wi;
7179 gimple_seq body = gimple_body (current_function_decl);
7181 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
7183 memset (&wi, 0, sizeof (wi));
7184 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
7186 memset (&wi, 0, sizeof (wi));
7187 wi.want_locations = true;
7188 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
7190 splay_tree_delete (all_labels);
7191 all_labels = NULL;
7193 return 0;
7196 static bool
7197 gate_diagnose_omp_blocks (void)
7199 return flag_openmp != 0;
7202 struct gimple_opt_pass pass_diagnose_omp_blocks =
7205 GIMPLE_PASS,
7206 "*diagnose_omp_blocks", /* name */
7207 gate_diagnose_omp_blocks, /* gate */
7208 diagnose_omp_structured_block_errors, /* execute */
7209 NULL, /* sub */
7210 NULL, /* next */
7211 0, /* static_pass_number */
7212 TV_NONE, /* tv_id */
7213 PROP_gimple_any, /* properties_required */
7214 0, /* properties_provided */
7215 0, /* properties_destroyed */
7216 0, /* todo_flags_start */
7217 0, /* todo_flags_finish */
7221 #include "gt-omp-low.h"