[dragonfly.git] / contrib / gcc-4.4 / gcc / omp-low.c (blob 9843e5e7cdc68a1d03bec7cdffcd669cc9babc01)
1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
6 Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "gimple.h"
31 #include "tree-iterator.h"
32 #include "tree-inline.h"
33 #include "langhooks.h"
34 #include "diagnostic.h"
35 #include "tree-flow.h"
36 #include "timevar.h"
37 #include "flags.h"
38 #include "function.h"
39 #include "expr.h"
40 #include "toplev.h"
41 #include "tree-pass.h"
42 #include "ggc.h"
43 #include "except.h"
44 #include "splay-tree.h"
45 #include "optabs.h"
46 #include "cfgloop.h"
49 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
50 phases. The first phase scans the function looking for OMP statements
51 and then for variables that must be replaced to satisfy data sharing
52 clauses. The second phase expands code for the constructs, as well as
53 re-gimplifying things when variables have been replaced with complex
54 expressions.
56 Final code generation is done by pass_expand_omp. The flowgraph is
57 scanned for parallel regions which are then moved to a new
58 function, to be invoked by the thread library. */
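/* As an illustrative sketch (assuming the usual libgomp entry points;
   names and argument lists here are approximate), a region such as

       #pragma omp parallel shared (a)
         body;

   inside function foo ends up, after both phases, as an outlined child
   function plus a call sequence roughly like

       .omp_data_o.a = a;
       __builtin_GOMP_parallel_start (foo.omp_fn.0, &.omp_data_o, 0);
       foo.omp_fn.0 (&.omp_data_o);
       __builtin_GOMP_parallel_end ();
       a = .omp_data_o.a;

   where 0 requests the runtime-default number of threads.  */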
60 /* Context structure. Used to store information about each parallel
61 directive in the code. */
63 typedef struct omp_context
65 /* This field must be at the beginning, as we do "inheritance": Some
66 callback functions for tree-inline.c (e.g., omp_copy_decl)
67 receive a copy_body_data pointer that is up-casted to an
68 omp_context pointer. */
69 copy_body_data cb;
71 /* The tree of contexts corresponding to the encountered constructs. */
72 struct omp_context *outer;
73 gimple stmt;
75 /* Map variables to fields in a structure that allows communication
76 between sending and receiving threads. */
77 splay_tree field_map;
78 tree record_type;
79 tree sender_decl;
80 tree receiver_decl;
82 /* These are used just by task contexts, if task firstprivate fn is
83 needed. srecord_type is used to communicate from the thread
84 that encountered the task construct to task firstprivate fn,
85 record_type is allocated by GOMP_task, initialized by task firstprivate
86 fn and passed to the task body fn. */
87 splay_tree sfield_map;
88 tree srecord_type;
90 /* A chain of variables to add to the top-level block surrounding the
91 construct. In the case of a parallel, this is in the child function. */
92 tree block_vars;
94 /* What to do with variables with implicitly determined sharing
95 attributes. */
96 enum omp_clause_default_kind default_kind;
98 /* Nesting depth of this context. Used to beautify error messages re
99 invalid gotos. The outermost ctx is depth 1, with depth 0 being
100 reserved for the main body of the function. */
101 int depth;
103 /* True if this parallel directive is nested within another. */
104 bool is_nested;
105 } omp_context;
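/* For illustration (approximate, not taken verbatim from any dump): for
   "#pragma omp parallel shared (x) firstprivate (y)" where x has had its
   address taken, record_type is laid out roughly as

       struct .omp_data_s { int *x; int y; };

   field_map then maps x and y to the corresponding FIELD_DECLs, and the
   .omp_data_o object in the parent and the .omp_data_i parameter of the
   child function (sender_decl / receiver_decl) are built around it.  */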
108 struct omp_for_data_loop
110 tree v, n1, n2, step;
111 enum tree_code cond_code;
114 /* A structure describing the main elements of a parallel loop. */
116 struct omp_for_data
118 struct omp_for_data_loop loop;
119 tree chunk_size;
120 gimple for_stmt;
121 tree pre, iter_type;
122 int collapse;
123 bool have_nowait, have_ordered;
124 enum omp_clause_schedule_kind sched_kind;
125 struct omp_for_data_loop *loops;
129 static splay_tree all_contexts;
130 static int taskreg_nesting_level;
131 struct omp_region *root_omp_region;
132 static bitmap task_shared_vars;
134 static void scan_omp (gimple_seq, omp_context *);
135 static tree scan_omp_1_op (tree *, int *, void *);
137 #define WALK_SUBSTMTS \
138 case GIMPLE_BIND: \
139 case GIMPLE_TRY: \
140 case GIMPLE_CATCH: \
141 case GIMPLE_EH_FILTER: \
142 /* The sub-statements for these should be walked. */ \
143 *handled_ops_p = false; \
144 break;
146 /* Convenience function for calling scan_omp_1_op on tree operands. */
148 static inline tree
149 scan_omp_op (tree *tp, omp_context *ctx)
151 struct walk_stmt_info wi;
153 memset (&wi, 0, sizeof (wi));
154 wi.info = ctx;
155 wi.want_locations = true;
157 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
160 static void lower_omp (gimple_seq, omp_context *);
161 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
162 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
164 /* Find an OpenMP clause of type KIND within CLAUSES. */
166 tree
167 find_omp_clause (tree clauses, enum omp_clause_code kind)
169 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
170 if (OMP_CLAUSE_CODE (clauses) == kind)
171 return clauses;
173 return NULL_TREE;
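/* Typical use, as seen later in this file: look up the schedule clause
   on a GIMPLE_OMP_FOR and fall back to the default when it is absent.

       tree c = find_omp_clause (gimple_omp_for_clauses (ws_stmt),
                                 OMP_CLAUSE_SCHEDULE);
       if (c == NULL)
         ... treat the loop as having the default schedule ...         */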
176 /* Return true if CTX is for an omp parallel. */
178 static inline bool
179 is_parallel_ctx (omp_context *ctx)
181 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
185 /* Return true if CTX is for an omp task. */
187 static inline bool
188 is_task_ctx (omp_context *ctx)
190 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
194 /* Return true if CTX is for an omp parallel or omp task. */
196 static inline bool
197 is_taskreg_ctx (omp_context *ctx)
199 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
200 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
204 /* Return true if REGION is a combined parallel+workshare region. */
206 static inline bool
207 is_combined_parallel (struct omp_region *region)
209 return region->is_combined_parallel;
213 /* Extract the header elements of parallel loop FOR_STMT and store
214 them into *FD. */
216 static void
217 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
218 struct omp_for_data_loop *loops)
220 tree t, var, *collapse_iter, *collapse_count;
221 tree count = NULL_TREE, iter_type = long_integer_type_node;
222 struct omp_for_data_loop *loop;
223 int i;
224 struct omp_for_data_loop dummy_loop;
226 fd->for_stmt = for_stmt;
227 fd->pre = NULL;
228 fd->collapse = gimple_omp_for_collapse (for_stmt);
229 if (fd->collapse > 1)
230 fd->loops = loops;
231 else
232 fd->loops = &fd->loop;
234 fd->have_nowait = fd->have_ordered = false;
235 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
236 fd->chunk_size = NULL_TREE;
237 collapse_iter = NULL;
238 collapse_count = NULL;
240 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
241 switch (OMP_CLAUSE_CODE (t))
243 case OMP_CLAUSE_NOWAIT:
244 fd->have_nowait = true;
245 break;
246 case OMP_CLAUSE_ORDERED:
247 fd->have_ordered = true;
248 break;
249 case OMP_CLAUSE_SCHEDULE:
250 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
251 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
252 break;
253 case OMP_CLAUSE_COLLAPSE:
254 if (fd->collapse > 1)
256 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
257 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
259 default:
260 break;
263 /* FIXME: for now map schedule(auto) to schedule(static).
264 There should be analysis to determine whether all iterations
265 are approximately the same amount of work (then schedule(static)
266 is best) or if it varies (then schedule(dynamic,N) is better). */
267 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
269 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
270 gcc_assert (fd->chunk_size == NULL);
272 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
273 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
274 gcc_assert (fd->chunk_size == NULL);
275 else if (fd->chunk_size == NULL)
277 /* We only need to compute a default chunk size for ordered
278 static loops and dynamic loops. */
279 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
280 || fd->have_ordered
281 || fd->collapse > 1)
282 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
283 ? integer_zero_node : integer_one_node;
286 for (i = 0; i < fd->collapse; i++)
288 if (fd->collapse == 1)
289 loop = &fd->loop;
290 else if (loops != NULL)
291 loop = loops + i;
292 else
293 loop = &dummy_loop;
296 loop->v = gimple_omp_for_index (for_stmt, i);
297 gcc_assert (SSA_VAR_P (loop->v));
298 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
299 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
300 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
301 loop->n1 = gimple_omp_for_initial (for_stmt, i);
303 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
304 loop->n2 = gimple_omp_for_final (for_stmt, i);
305 switch (loop->cond_code)
307 case LT_EXPR:
308 case GT_EXPR:
309 break;
310 case LE_EXPR:
311 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
312 loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
313 loop->n2, size_one_node);
314 else
315 loop->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
316 build_int_cst (TREE_TYPE (loop->n2), 1));
317 loop->cond_code = LT_EXPR;
318 break;
319 case GE_EXPR:
320 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
321 loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
322 loop->n2, size_int (-1));
323 else
324 loop->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
325 build_int_cst (TREE_TYPE (loop->n2), 1));
326 loop->cond_code = GT_EXPR;
327 break;
328 default:
329 gcc_unreachable ();
332 t = gimple_omp_for_incr (for_stmt, i);
333 gcc_assert (TREE_OPERAND (t, 0) == var);
334 switch (TREE_CODE (t))
336 case PLUS_EXPR:
337 case POINTER_PLUS_EXPR:
338 loop->step = TREE_OPERAND (t, 1);
339 break;
340 case MINUS_EXPR:
341 loop->step = TREE_OPERAND (t, 1);
342 loop->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (loop->step),
343 loop->step);
344 break;
345 default:
346 gcc_unreachable ();
349 if (iter_type != long_long_unsigned_type_node)
351 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
352 iter_type = long_long_unsigned_type_node;
353 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
354 && TYPE_PRECISION (TREE_TYPE (loop->v))
355 >= TYPE_PRECISION (iter_type))
357 tree n;
359 if (loop->cond_code == LT_EXPR)
360 n = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
361 loop->n2, loop->step);
362 else
363 n = loop->n1;
364 if (TREE_CODE (n) != INTEGER_CST
365 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
366 iter_type = long_long_unsigned_type_node;
368 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
369 > TYPE_PRECISION (iter_type))
371 tree n1, n2;
373 if (loop->cond_code == LT_EXPR)
375 n1 = loop->n1;
376 n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
377 loop->n2, loop->step);
379 else
381 n1 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->v),
382 loop->n2, loop->step);
383 n2 = loop->n1;
385 if (TREE_CODE (n1) != INTEGER_CST
386 || TREE_CODE (n2) != INTEGER_CST
387 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
388 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
389 iter_type = long_long_unsigned_type_node;
393 if (collapse_count && *collapse_count == NULL)
395 if ((i == 0 || count != NULL_TREE)
396 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
397 && TREE_CONSTANT (loop->n1)
398 && TREE_CONSTANT (loop->n2)
399 && TREE_CODE (loop->step) == INTEGER_CST)
401 tree itype = TREE_TYPE (loop->v);
403 if (POINTER_TYPE_P (itype))
404 itype
405 = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
406 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
407 t = fold_build2 (PLUS_EXPR, itype,
408 fold_convert (itype, loop->step), t);
409 t = fold_build2 (PLUS_EXPR, itype, t,
410 fold_convert (itype, loop->n2));
411 t = fold_build2 (MINUS_EXPR, itype, t,
412 fold_convert (itype, loop->n1));
413 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
414 t = fold_build2 (TRUNC_DIV_EXPR, itype,
415 fold_build1 (NEGATE_EXPR, itype, t),
416 fold_build1 (NEGATE_EXPR, itype,
417 fold_convert (itype,
418 loop->step)));
419 else
420 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
421 fold_convert (itype, loop->step));
422 t = fold_convert (long_long_unsigned_type_node, t);
423 if (count != NULL_TREE)
424 count = fold_build2 (MULT_EXPR, long_long_unsigned_type_node,
425 count, t);
426 else
427 count = t;
428 if (TREE_CODE (count) != INTEGER_CST)
429 count = NULL_TREE;
431 else
432 count = NULL_TREE;
436 if (count)
438 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
439 iter_type = long_long_unsigned_type_node;
440 else
441 iter_type = long_integer_type_node;
443 else if (collapse_iter && *collapse_iter != NULL)
444 iter_type = TREE_TYPE (*collapse_iter);
445 fd->iter_type = iter_type;
446 if (collapse_iter && *collapse_iter == NULL)
447 *collapse_iter = create_tmp_var (iter_type, ".iter");
448 if (collapse_count && *collapse_count == NULL)
450 if (count)
451 *collapse_count = fold_convert (iter_type, count);
452 else
453 *collapse_count = create_tmp_var (iter_type, ".count");
456 if (fd->collapse > 1)
458 fd->loop.v = *collapse_iter;
459 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
460 fd->loop.n2 = *collapse_count;
461 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
462 fd->loop.cond_code = LT_EXPR;
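/* A small worked example of the normalization above (illustrative):
   given "#pragma omp for collapse (2)" over

       for (i = 0; i <= 9; i++)
         for (j = 0; j < 4; j++)

   the LE condition becomes LT with n2 + 1, so the outer loop is
   recorded as n1 = 0, n2 = 10, step = 1, cond LT.  Both bounds being
   constant, the collapsed count is 10 * 4 = 40, and fd->loop describes
   a single logical loop running from 0 to 40 with step 1.  */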
467 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
468 is the immediate dominator of PAR_ENTRY_BB, return true if there
469 are no data dependencies that would prevent expanding the parallel
470 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
472 When expanding a combined parallel+workshare region, the call to
473 the child function may need additional arguments in the case of
474 GIMPLE_OMP_FOR regions. In some cases, these arguments are
475 computed out of variables passed in from the parent to the child
476 via 'struct .omp_data_s'. For instance:
478 #pragma omp parallel for schedule (guided, i * 4)
479 for (j ...)
481 Is lowered into:
483 # BLOCK 2 (PAR_ENTRY_BB)
484 .omp_data_o.i = i;
485 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]
487 # BLOCK 3 (WS_ENTRY_BB)
488 .omp_data_i = &.omp_data_o;
489 D.1667 = .omp_data_i->i;
490 D.1598 = D.1667 * 4;
491 #pragma omp for schedule (guided, D.1598)
493 When we outline the parallel region, the call to the child function
494 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
495 that value is computed *after* the call site. So, in principle we
496 cannot do the transformation.
498 To see whether the code in WS_ENTRY_BB blocks the combined
499 parallel+workshare call, we collect all the variables used in the
500 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
501 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
502 call.
504 FIXME. If we had the SSA form built at this point, we could merely
505 hoist the code in block 3 into block 2 and be done with it. But at
506 this point we don't have dataflow information and though we could
507 hack something up here, it is really not worth the aggravation. */
509 static bool
510 workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
512 struct omp_for_data fd;
513 gimple par_stmt, ws_stmt;
515 par_stmt = last_stmt (par_entry_bb);
516 ws_stmt = last_stmt (ws_entry_bb);
518 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
519 return true;
521 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
523 extract_omp_for_data (ws_stmt, &fd, NULL);
525 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
526 return false;
527 if (fd.iter_type != long_integer_type_node)
528 return false;
530 /* FIXME. We give up too easily here. If any of these arguments
531 are not constants, they will likely involve variables that have
532 been mapped into fields of .omp_data_s for sharing with the child
533 function. With appropriate data flow, it would be possible to
534 see through this. */
535 if (!is_gimple_min_invariant (fd.loop.n1)
536 || !is_gimple_min_invariant (fd.loop.n2)
537 || !is_gimple_min_invariant (fd.loop.step)
538 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
539 return false;
541 return true;
545 /* Collect additional arguments needed to emit a combined
546 parallel+workshare call. WS_STMT is the workshare directive being
547 expanded. */
549 static tree
550 get_ws_args_for (gimple ws_stmt)
552 tree t;
554 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
556 struct omp_for_data fd;
557 tree ws_args;
559 extract_omp_for_data (ws_stmt, &fd, NULL);
561 ws_args = NULL_TREE;
562 if (fd.chunk_size)
564 t = fold_convert (long_integer_type_node, fd.chunk_size);
565 ws_args = tree_cons (NULL, t, ws_args);
568 t = fold_convert (long_integer_type_node, fd.loop.step);
569 ws_args = tree_cons (NULL, t, ws_args);
571 t = fold_convert (long_integer_type_node, fd.loop.n2);
572 ws_args = tree_cons (NULL, t, ws_args);
574 t = fold_convert (long_integer_type_node, fd.loop.n1);
575 ws_args = tree_cons (NULL, t, ws_args);
577 return ws_args;
579 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
581 /* Number of sections is equal to the number of edges from the
582 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
583 the exit of the sections region. */
584 basic_block bb = single_succ (gimple_bb (ws_stmt));
585 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
586 t = tree_cons (NULL, t, NULL);
587 return t;
590 gcc_unreachable ();
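/* Illustrative note: tree_cons prepends, so the list built above comes
   out in the order n1, n2, step, chunk_size.  For

       #pragma omp parallel for schedule (dynamic, 4)
       for (i = 0; i < 1000; i++)

   ws_args is roughly (0, 1000, 1, 4); these values are appended to the
   usual arguments when the combined GOMP_parallel_loop_* call is
   emitted during expansion.  */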
594 /* Discover whether REGION is a combined parallel+workshare region. */
596 static void
597 determine_parallel_type (struct omp_region *region)
599 basic_block par_entry_bb, par_exit_bb;
600 basic_block ws_entry_bb, ws_exit_bb;
602 if (region == NULL || region->inner == NULL
603 || region->exit == NULL || region->inner->exit == NULL
604 || region->inner->cont == NULL)
605 return;
607 /* We only support parallel+for and parallel+sections. */
608 if (region->type != GIMPLE_OMP_PARALLEL
609 || (region->inner->type != GIMPLE_OMP_FOR
610 && region->inner->type != GIMPLE_OMP_SECTIONS))
611 return;
613 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
614 WS_EXIT_BB -> PAR_EXIT_BB. */
615 par_entry_bb = region->entry;
616 par_exit_bb = region->exit;
617 ws_entry_bb = region->inner->entry;
618 ws_exit_bb = region->inner->exit;
620 if (single_succ (par_entry_bb) == ws_entry_bb
621 && single_succ (ws_exit_bb) == par_exit_bb
622 && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
623 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
624 || (last_and_only_stmt (ws_entry_bb)
625 && last_and_only_stmt (par_exit_bb))))
627 gimple ws_stmt = last_stmt (ws_entry_bb);
629 if (region->inner->type == GIMPLE_OMP_FOR)
631 /* If this is a combined parallel loop, we need to determine
632 whether or not to use the combined library calls. There
633 are two cases where we do not apply the transformation:
634 static loops and any kind of ordered loop. In the first
635 case, we already open code the loop so there is no need
636 to do anything else. In the latter case, the combined
637 parallel loop call would still need extra synchronization
638 to implement ordered semantics, so there would not be any
639 gain in using the combined call. */
640 tree clauses = gimple_omp_for_clauses (ws_stmt);
641 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
642 if (c == NULL
643 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
644 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
646 region->is_combined_parallel = false;
647 region->inner->is_combined_parallel = false;
648 return;
652 region->is_combined_parallel = true;
653 region->inner->is_combined_parallel = true;
654 region->ws_args = get_ws_args_for (ws_stmt);
659 /* Return true if EXPR is variable sized. */
661 static inline bool
662 is_variable_sized (const_tree expr)
664 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
667 /* Return true if DECL is a reference type. */
669 static inline bool
670 is_reference (tree decl)
672 return lang_hooks.decls.omp_privatize_by_reference (decl);
675 /* Look up variables in the decl or field splay trees.  The "maybe" form
676 allows the variable not to have been entered; otherwise we assert
677 that the variable must have been entered. */
679 static inline tree
680 lookup_decl (tree var, omp_context *ctx)
682 tree *n;
683 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
684 return *n;
687 static inline tree
688 maybe_lookup_decl (const_tree var, omp_context *ctx)
690 tree *n;
691 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
692 return n ? *n : NULL_TREE;
695 static inline tree
696 lookup_field (tree var, omp_context *ctx)
698 splay_tree_node n;
699 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
700 return (tree) n->value;
703 static inline tree
704 lookup_sfield (tree var, omp_context *ctx)
706 splay_tree_node n;
707 n = splay_tree_lookup (ctx->sfield_map
708 ? ctx->sfield_map : ctx->field_map,
709 (splay_tree_key) var);
710 return (tree) n->value;
713 static inline tree
714 maybe_lookup_field (tree var, omp_context *ctx)
716 splay_tree_node n;
717 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
718 return n ? (tree) n->value : NULL_TREE;
721 /* Return true if DECL should be copied by pointer. SHARED_CTX is
722 the parallel context if DECL is to be shared. */
724 static bool
725 use_pointer_for_field (tree decl, omp_context *shared_ctx)
727 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
728 return true;
730 /* We can only use copy-in/copy-out semantics for shared variables
731 when we know the value is not accessible from an outer scope. */
732 if (shared_ctx)
734 /* ??? Trivially accessible from anywhere. But why would we even
735 be passing an address in this case? Should we simply assert
736 this to be false, or should we have a cleanup pass that removes
737 these from the list of mappings? */
738 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
739 return true;
741 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
742 without analyzing the expression whether or not its location
743 is accessible to anyone else. In the case of nested parallel
744 regions it certainly may be. */
745 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
746 return true;
748 /* Do not use copy-in/copy-out for variables that have their
749 address taken. */
750 if (TREE_ADDRESSABLE (decl))
751 return true;
753 /* Disallow copy-in/out in nested parallel if
754 decl is shared in outer parallel, otherwise
755 each thread could store the shared variable
756 in its own copy-in location, making the
757 variable no longer really shared. */
758 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
760 omp_context *up;
762 for (up = shared_ctx->outer; up; up = up->outer)
763 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
764 break;
766 if (up)
768 tree c;
770 for (c = gimple_omp_taskreg_clauses (up->stmt);
771 c; c = OMP_CLAUSE_CHAIN (c))
772 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
773 && OMP_CLAUSE_DECL (c) == decl)
774 break;
776 if (c)
777 return true;
781 /* For tasks avoid using copy-in/out, unless they are readonly
782 (in which case just copy-in is used). As tasks can be
783 deferred or executed in a different thread, when GOMP_task
784 returns, the task hasn't necessarily terminated. */
785 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
787 tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
788 if (is_gimple_reg (outer))
790 /* Taking address of OUTER in lower_send_shared_vars
791 might need regimplification of everything that uses the
792 variable. */
793 if (!task_shared_vars)
794 task_shared_vars = BITMAP_ALLOC (NULL);
795 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
796 TREE_ADDRESSABLE (outer) = 1;
798 return true;
802 return false;
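/* Two illustrative cases for the logic above: a local scalar that is
   shared and never has its address taken can use copy-in/copy-out,
   whereas either of

       static int counter;          int v; ... foo (&v);

   forces pass-by-reference, because the variable remains visible (or
   addressable) outside the construct.  */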
805 /* Create a new VAR_DECL and copy information from VAR to it. */
807 tree
808 copy_var_decl (tree var, tree name, tree type)
810 tree copy = build_decl (VAR_DECL, name, type);
812 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
813 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
814 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
815 DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
816 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
817 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
818 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
819 DECL_SOURCE_LOCATION (copy) = DECL_SOURCE_LOCATION (var);
820 TREE_USED (copy) = 1;
821 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
823 return copy;
826 /* Construct a new automatic decl similar to VAR. */
828 static tree
829 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
831 tree copy = copy_var_decl (var, name, type);
833 DECL_CONTEXT (copy) = current_function_decl;
834 TREE_CHAIN (copy) = ctx->block_vars;
835 ctx->block_vars = copy;
837 return copy;
840 static tree
841 omp_copy_decl_1 (tree var, omp_context *ctx)
843 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
846 /* Build tree nodes to access the field for VAR on the receiver side. */
848 static tree
849 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
851 tree x, field = lookup_field (var, ctx);
853 /* If the receiver record type was remapped in the child function,
854 remap the field into the new record type. */
855 x = maybe_lookup_field (field, ctx);
856 if (x != NULL)
857 field = x;
859 x = build_fold_indirect_ref (ctx->receiver_decl);
860 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
861 if (by_ref)
862 x = build_fold_indirect_ref (x);
864 return x;
867 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
868 of a parallel, this is a component reference; for workshare constructs
869 this is some variable. */
871 static tree
872 build_outer_var_ref (tree var, omp_context *ctx)
874 tree x;
876 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
877 x = var;
878 else if (is_variable_sized (var))
880 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
881 x = build_outer_var_ref (x, ctx);
882 x = build_fold_indirect_ref (x);
884 else if (is_taskreg_ctx (ctx))
886 bool by_ref = use_pointer_for_field (var, NULL);
887 x = build_receiver_ref (var, by_ref, ctx);
889 else if (ctx->outer)
890 x = lookup_decl (var, ctx->outer);
891 else if (is_reference (var))
892 /* This can happen with orphaned constructs. If var is reference, it is
893 possible it is shared and as such valid. */
894 x = var;
895 else
896 gcc_unreachable ();
898 if (is_reference (var))
899 x = build_fold_indirect_ref (x);
901 return x;
904 /* Build tree nodes to access the field for VAR on the sender side. */
906 static tree
907 build_sender_ref (tree var, omp_context *ctx)
909 tree field = lookup_sfield (var, ctx);
910 return build3 (COMPONENT_REF, TREE_TYPE (field),
911 ctx->sender_decl, field, NULL);
914 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
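/* (Descriptive note, inferred from the code below: bit 0 of MASK
   installs the field in record_type/field_map, bit 1 in
   srecord_type/sfield_map, and MASK == 3 installs it in both; BY_REF
   selects a pointer field instead of a by-value copy.)  */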
916 static void
917 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
919 tree field, type, sfield = NULL_TREE;
921 gcc_assert ((mask & 1) == 0
922 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
923 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
924 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
926 type = TREE_TYPE (var);
927 if (by_ref)
928 type = build_pointer_type (type);
929 else if ((mask & 3) == 1 && is_reference (var))
930 type = TREE_TYPE (type);
932 field = build_decl (FIELD_DECL, DECL_NAME (var), type);
934 /* Remember what variable this field was created for. This does have a
935 side effect of making dwarf2out ignore this member, so for helpful
936 debugging we clear it later in delete_omp_context. */
937 DECL_ABSTRACT_ORIGIN (field) = var;
938 if (type == TREE_TYPE (var))
940 DECL_ALIGN (field) = DECL_ALIGN (var);
941 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
942 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
944 else
945 DECL_ALIGN (field) = TYPE_ALIGN (type);
947 if ((mask & 3) == 3)
949 insert_field_into_struct (ctx->record_type, field);
950 if (ctx->srecord_type)
952 sfield = build_decl (FIELD_DECL, DECL_NAME (var), type);
953 DECL_ABSTRACT_ORIGIN (sfield) = var;
954 DECL_ALIGN (sfield) = DECL_ALIGN (field);
955 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
956 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
957 insert_field_into_struct (ctx->srecord_type, sfield);
960 else
962 if (ctx->srecord_type == NULL_TREE)
964 tree t;
966 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
967 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
968 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
970 sfield = build_decl (FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
971 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
972 insert_field_into_struct (ctx->srecord_type, sfield);
973 splay_tree_insert (ctx->sfield_map,
974 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
975 (splay_tree_value) sfield);
978 sfield = field;
979 insert_field_into_struct ((mask & 1) ? ctx->record_type
980 : ctx->srecord_type, field);
983 if (mask & 1)
984 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
985 (splay_tree_value) field);
986 if ((mask & 2) && ctx->sfield_map)
987 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
988 (splay_tree_value) sfield);
991 static tree
992 install_var_local (tree var, omp_context *ctx)
994 tree new_var = omp_copy_decl_1 (var, ctx);
995 insert_decl_map (&ctx->cb, var, new_var);
996 return new_var;
999 /* Adjust the replacement for DECL in CTX for the new context. This means
1000 copying the DECL_VALUE_EXPR, and fixing up the type. */
1002 static void
1003 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1005 tree new_decl, size;
1007 new_decl = lookup_decl (decl, ctx);
1009 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1011 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1012 && DECL_HAS_VALUE_EXPR_P (decl))
1014 tree ve = DECL_VALUE_EXPR (decl);
1015 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1016 SET_DECL_VALUE_EXPR (new_decl, ve);
1017 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1020 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1022 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1023 if (size == error_mark_node)
1024 size = TYPE_SIZE (TREE_TYPE (new_decl));
1025 DECL_SIZE (new_decl) = size;
1027 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1028 if (size == error_mark_node)
1029 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1030 DECL_SIZE_UNIT (new_decl) = size;
1034 /* The callback for remap_decl. Search all containing contexts for a
1035 mapping of the variable; this avoids having to duplicate the splay
1036 tree ahead of time. We know a mapping doesn't already exist in the
1037 given context. Create new mappings to implement default semantics. */
1039 static tree
1040 omp_copy_decl (tree var, copy_body_data *cb)
1042 omp_context *ctx = (omp_context *) cb;
1043 tree new_var;
1045 if (TREE_CODE (var) == LABEL_DECL)
1047 new_var = create_artificial_label ();
1048 DECL_CONTEXT (new_var) = current_function_decl;
1049 insert_decl_map (&ctx->cb, var, new_var);
1050 return new_var;
1053 while (!is_taskreg_ctx (ctx))
1055 ctx = ctx->outer;
1056 if (ctx == NULL)
1057 return var;
1058 new_var = maybe_lookup_decl (var, ctx);
1059 if (new_var)
1060 return new_var;
1063 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1064 return var;
1066 return error_mark_node;
1070 /* Return the parallel region associated with STMT. */
1072 /* Debugging dumps for parallel regions. */
1073 void dump_omp_region (FILE *, struct omp_region *, int);
1074 void debug_omp_region (struct omp_region *);
1075 void debug_all_omp_regions (void);
1077 /* Dump the parallel region tree rooted at REGION. */
1079 void
1080 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1082 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1083 gimple_code_name[region->type]);
1085 if (region->inner)
1086 dump_omp_region (file, region->inner, indent + 4);
1088 if (region->cont)
1090 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1091 region->cont->index);
1094 if (region->exit)
1095 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1096 region->exit->index);
1097 else
1098 fprintf (file, "%*s[no exit marker]\n", indent, "");
1100 if (region->next)
1101 dump_omp_region (file, region->next, indent);
1104 void
1105 debug_omp_region (struct omp_region *region)
1107 dump_omp_region (stderr, region, 0);
1110 void
1111 debug_all_omp_regions (void)
1113 dump_omp_region (stderr, root_omp_region, 0);
1117 /* Create a new parallel region starting at STMT inside region PARENT. */
1119 struct omp_region *
1120 new_omp_region (basic_block bb, enum gimple_code type,
1121 struct omp_region *parent)
1123 struct omp_region *region = XCNEW (struct omp_region);
1125 region->outer = parent;
1126 region->entry = bb;
1127 region->type = type;
1129 if (parent)
1131 /* This is a nested region. Add it to the list of inner
1132 regions in PARENT. */
1133 region->next = parent->inner;
1134 parent->inner = region;
1136 else
1138 /* This is a toplevel region. Add it to the list of toplevel
1139 regions in ROOT_OMP_REGION. */
1140 region->next = root_omp_region;
1141 root_omp_region = region;
1144 return region;
1147 /* Release the memory associated with the region tree rooted at REGION. */
1149 static void
1150 free_omp_region_1 (struct omp_region *region)
1152 struct omp_region *i, *n;
1154 for (i = region->inner; i ; i = n)
1156 n = i->next;
1157 free_omp_region_1 (i);
1160 free (region);
1163 /* Release the memory for the entire omp region tree. */
1165 void
1166 free_omp_regions (void)
1168 struct omp_region *r, *n;
1169 for (r = root_omp_region; r ; r = n)
1171 n = r->next;
1172 free_omp_region_1 (r);
1174 root_omp_region = NULL;
1178 /* Create a new context, with OUTER_CTX being the surrounding context. */
1180 static omp_context *
1181 new_omp_context (gimple stmt, omp_context *outer_ctx)
1183 omp_context *ctx = XCNEW (omp_context);
1185 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1186 (splay_tree_value) ctx);
1187 ctx->stmt = stmt;
1189 if (outer_ctx)
1191 ctx->outer = outer_ctx;
1192 ctx->cb = outer_ctx->cb;
1193 ctx->cb.block = NULL;
1194 ctx->depth = outer_ctx->depth + 1;
1196 else
1198 ctx->cb.src_fn = current_function_decl;
1199 ctx->cb.dst_fn = current_function_decl;
1200 ctx->cb.src_node = cgraph_node (current_function_decl);
1201 ctx->cb.dst_node = ctx->cb.src_node;
1202 ctx->cb.src_cfun = cfun;
1203 ctx->cb.copy_decl = omp_copy_decl;
1204 ctx->cb.eh_region = -1;
1205 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1206 ctx->depth = 1;
1209 ctx->cb.decl_map = pointer_map_create ();
1211 return ctx;
1214 static gimple_seq maybe_catch_exception (gimple_seq);
1216 /* Finalize task copyfn. */
1218 static void
1219 finalize_task_copyfn (gimple task_stmt)
1221 struct function *child_cfun;
1222 tree child_fn, old_fn;
1223 gimple_seq seq, new_seq;
1224 gimple bind;
1226 child_fn = gimple_omp_task_copy_fn (task_stmt);
1227 if (child_fn == NULL_TREE)
1228 return;
1230 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1232 /* Inform the callgraph about the new function. */
1233 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1234 = cfun->curr_properties;
1236 old_fn = current_function_decl;
1237 push_cfun (child_cfun);
1238 current_function_decl = child_fn;
1239 bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
1240 seq = gimple_seq_alloc ();
1241 gimple_seq_add_stmt (&seq, bind);
1242 new_seq = maybe_catch_exception (seq);
1243 if (new_seq != seq)
1245 bind = gimple_build_bind (NULL, new_seq, NULL);
1246 seq = gimple_seq_alloc ();
1247 gimple_seq_add_stmt (&seq, bind);
1249 gimple_set_body (child_fn, seq);
1250 pop_cfun ();
1251 current_function_decl = old_fn;
1253 cgraph_add_new_function (child_fn, false);
1256 /* Destroy an omp_context data structure. Called through the splay tree
1257 value delete callback. */
1259 static void
1260 delete_omp_context (splay_tree_value value)
1262 omp_context *ctx = (omp_context *) value;
1264 pointer_map_destroy (ctx->cb.decl_map);
1266 if (ctx->field_map)
1267 splay_tree_delete (ctx->field_map);
1268 if (ctx->sfield_map)
1269 splay_tree_delete (ctx->sfield_map);
1271 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1272 it produces corrupt debug information. */
1273 if (ctx->record_type)
1275 tree t;
1276 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1277 DECL_ABSTRACT_ORIGIN (t) = NULL;
1279 if (ctx->srecord_type)
1281 tree t;
1282 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
1283 DECL_ABSTRACT_ORIGIN (t) = NULL;
1286 if (is_task_ctx (ctx))
1287 finalize_task_copyfn (ctx->stmt);
1289 XDELETE (ctx);
1292 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1293 context. */
1295 static void
1296 fixup_child_record_type (omp_context *ctx)
1298 tree f, type = ctx->record_type;
1300 /* ??? It isn't sufficient to just call remap_type here, because
1301 variably_modified_type_p doesn't work the way we expect for
1302 record types. Testing each field for whether it needs remapping
1303 and creating a new record by hand works, however. */
1304 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
1305 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1306 break;
1307 if (f)
1309 tree name, new_fields = NULL;
1311 type = lang_hooks.types.make_type (RECORD_TYPE);
1312 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1313 name = build_decl (TYPE_DECL, name, type);
1314 TYPE_NAME (type) = name;
1316 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
1318 tree new_f = copy_node (f);
1319 DECL_CONTEXT (new_f) = type;
1320 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1321 TREE_CHAIN (new_f) = new_fields;
1322 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1323 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1324 &ctx->cb, NULL);
1325 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1326 &ctx->cb, NULL);
1327 new_fields = new_f;
1329 /* Arrange to be able to look up the receiver field
1330 given the sender field. */
1331 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1332 (splay_tree_value) new_f);
1334 TYPE_FIELDS (type) = nreverse (new_fields);
1335 layout_type (type);
1338 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1341 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1342 specified by CLAUSES. */
1344 static void
1345 scan_sharing_clauses (tree clauses, omp_context *ctx)
1347 tree c, decl;
1348 bool scan_array_reductions = false;
1350 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1352 bool by_ref;
1354 switch (OMP_CLAUSE_CODE (c))
1356 case OMP_CLAUSE_PRIVATE:
1357 decl = OMP_CLAUSE_DECL (c);
1358 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1359 goto do_private;
1360 else if (!is_variable_sized (decl))
1361 install_var_local (decl, ctx);
1362 break;
1364 case OMP_CLAUSE_SHARED:
1365 gcc_assert (is_taskreg_ctx (ctx));
1366 decl = OMP_CLAUSE_DECL (c);
1367 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1368 || !is_variable_sized (decl));
1369 /* Global variables don't need to be copied,
1370 the receiver side will use them directly. */
1371 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1372 break;
1373 by_ref = use_pointer_for_field (decl, ctx);
1374 if (! TREE_READONLY (decl)
1375 || TREE_ADDRESSABLE (decl)
1376 || by_ref
1377 || is_reference (decl))
1379 install_var_field (decl, by_ref, 3, ctx);
1380 install_var_local (decl, ctx);
1381 break;
1383 /* We don't need to copy const scalar vars back. */
1384 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1385 goto do_private;
1387 case OMP_CLAUSE_LASTPRIVATE:
1388 /* Let the corresponding firstprivate clause create
1389 the variable. */
1390 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1391 break;
1392 /* FALLTHRU */
1394 case OMP_CLAUSE_FIRSTPRIVATE:
1395 case OMP_CLAUSE_REDUCTION:
1396 decl = OMP_CLAUSE_DECL (c);
1397 do_private:
1398 if (is_variable_sized (decl))
1400 if (is_task_ctx (ctx))
1401 install_var_field (decl, false, 1, ctx);
1402 break;
1404 else if (is_taskreg_ctx (ctx))
1406 bool global
1407 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1408 by_ref = use_pointer_for_field (decl, NULL);
1410 if (is_task_ctx (ctx)
1411 && (global || by_ref || is_reference (decl)))
1413 install_var_field (decl, false, 1, ctx);
1414 if (!global)
1415 install_var_field (decl, by_ref, 2, ctx);
1417 else if (!global)
1418 install_var_field (decl, by_ref, 3, ctx);
1420 install_var_local (decl, ctx);
1421 break;
1423 case OMP_CLAUSE_COPYPRIVATE:
1424 if (ctx->outer)
1425 scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
1426 /* FALLTHRU */
1428 case OMP_CLAUSE_COPYIN:
1429 decl = OMP_CLAUSE_DECL (c);
1430 by_ref = use_pointer_for_field (decl, NULL);
1431 install_var_field (decl, by_ref, 3, ctx);
1432 break;
1434 case OMP_CLAUSE_DEFAULT:
1435 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1436 break;
1438 case OMP_CLAUSE_IF:
1439 case OMP_CLAUSE_NUM_THREADS:
1440 case OMP_CLAUSE_SCHEDULE:
1441 if (ctx->outer)
1442 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1443 break;
1445 case OMP_CLAUSE_NOWAIT:
1446 case OMP_CLAUSE_ORDERED:
1447 case OMP_CLAUSE_COLLAPSE:
1448 case OMP_CLAUSE_UNTIED:
1449 break;
1451 default:
1452 gcc_unreachable ();
1456 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1458 switch (OMP_CLAUSE_CODE (c))
1460 case OMP_CLAUSE_LASTPRIVATE:
1461 /* Let the corresponding firstprivate clause create
1462 the variable. */
1463 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1464 scan_array_reductions = true;
1465 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1466 break;
1467 /* FALLTHRU */
1469 case OMP_CLAUSE_PRIVATE:
1470 case OMP_CLAUSE_FIRSTPRIVATE:
1471 case OMP_CLAUSE_REDUCTION:
1472 decl = OMP_CLAUSE_DECL (c);
1473 if (is_variable_sized (decl))
1474 install_var_local (decl, ctx);
1475 fixup_remapped_decl (decl, ctx,
1476 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1477 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1478 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1479 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1480 scan_array_reductions = true;
1481 break;
1483 case OMP_CLAUSE_SHARED:
1484 decl = OMP_CLAUSE_DECL (c);
1485 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1486 fixup_remapped_decl (decl, ctx, false);
1487 break;
1489 case OMP_CLAUSE_COPYPRIVATE:
1490 case OMP_CLAUSE_COPYIN:
1491 case OMP_CLAUSE_DEFAULT:
1492 case OMP_CLAUSE_IF:
1493 case OMP_CLAUSE_NUM_THREADS:
1494 case OMP_CLAUSE_SCHEDULE:
1495 case OMP_CLAUSE_NOWAIT:
1496 case OMP_CLAUSE_ORDERED:
1497 case OMP_CLAUSE_COLLAPSE:
1498 case OMP_CLAUSE_UNTIED:
1499 break;
1501 default:
1502 gcc_unreachable ();
1506 if (scan_array_reductions)
1507 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1508 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1509 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1511 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1512 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1514 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1515 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1516 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
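/* Illustrative summary of the effect above (approximate): for

       #pragma omp parallel shared (x) firstprivate (y) private (z)

   with x addressable, x receives a pointer field plus a local
   replacement, y a by-value field plus a local, and z only a local
   copy; globals named in shared clauses are left alone and used
   directly by the child function.  */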
1519 /* Create a new name for the omp child function. Returns an identifier. */
1521 static GTY(()) unsigned int tmp_ompfn_id_num;
1523 static tree
1524 create_omp_child_function_name (bool task_copy)
1526 tree name = DECL_ASSEMBLER_NAME (current_function_decl);
1527 size_t len = IDENTIFIER_LENGTH (name);
1528 char *tmp_name, *prefix;
1529 const char *suffix;
1531 suffix = task_copy ? "_omp_cpyfn" : "_omp_fn";
1532 prefix = XALLOCAVEC (char, len + strlen (suffix) + 1);
1533 memcpy (prefix, IDENTIFIER_POINTER (name), len);
1534 strcpy (prefix + len, suffix);
1535 #ifndef NO_DOT_IN_LABEL
1536 prefix[len] = '.';
1537 #elif !defined NO_DOLLAR_IN_LABEL
1538 prefix[len] = '$';
1539 #endif
1540 ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
1541 return get_identifier (tmp_name);
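/* Example of the resulting identifiers (assuming the target allows dots
   in labels, as on typical ELF systems): within function bar the child
   functions are named bar.omp_fn.0, bar.omp_fn.1, ... and task copy
   functions bar.omp_cpyfn.N, matching the "bar.omp_fn.0" used in the
   comments earlier in this file.  */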
1544 /* Build a decl for the omp child function. It'll not contain a body
1545 yet, just the bare decl. */
1547 static void
1548 create_omp_child_function (omp_context *ctx, bool task_copy)
1550 tree decl, type, name, t;
1552 name = create_omp_child_function_name (task_copy);
1553 if (task_copy)
1554 type = build_function_type_list (void_type_node, ptr_type_node,
1555 ptr_type_node, NULL_TREE);
1556 else
1557 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1559 decl = build_decl (FUNCTION_DECL, name, type);
1560 decl = lang_hooks.decls.pushdecl (decl);
1562 if (!task_copy)
1563 ctx->cb.dst_fn = decl;
1564 else
1565 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1567 TREE_STATIC (decl) = 1;
1568 TREE_USED (decl) = 1;
1569 DECL_ARTIFICIAL (decl) = 1;
1570 DECL_IGNORED_P (decl) = 0;
1571 TREE_PUBLIC (decl) = 0;
1572 DECL_UNINLINABLE (decl) = 1;
1573 DECL_EXTERNAL (decl) = 0;
1574 DECL_CONTEXT (decl) = NULL_TREE;
1575 DECL_INITIAL (decl) = make_node (BLOCK);
1577 t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
1578 DECL_ARTIFICIAL (t) = 1;
1579 DECL_IGNORED_P (t) = 1;
1580 DECL_RESULT (decl) = t;
1582 t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1583 DECL_ARTIFICIAL (t) = 1;
1584 DECL_ARG_TYPE (t) = ptr_type_node;
1585 DECL_CONTEXT (t) = current_function_decl;
1586 TREE_USED (t) = 1;
1587 DECL_ARGUMENTS (decl) = t;
1588 if (!task_copy)
1589 ctx->receiver_decl = t;
1590 else
1592 t = build_decl (PARM_DECL, get_identifier (".omp_data_o"),
1593 ptr_type_node);
1594 DECL_ARTIFICIAL (t) = 1;
1595 DECL_ARG_TYPE (t) = ptr_type_node;
1596 DECL_CONTEXT (t) = current_function_decl;
1597 TREE_USED (t) = 1;
1598 TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
1599 DECL_ARGUMENTS (decl) = t;
1602 /* Allocate memory for the function structure. The call to
1603 allocate_struct_function clobbers CFUN, so we need to restore
1604 it afterward. */
1605 push_struct_function (decl);
1606 DECL_SOURCE_LOCATION (decl) = gimple_location (ctx->stmt);
1607 cfun->function_end_locus = gimple_location (ctx->stmt);
1608 pop_cfun ();
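/* The decl built above therefore amounts to, approximately,

       static void bar.omp_fn.0 (void *.omp_data_i);

   or, when TASK_COPY is set, a copy function taking two pointer
   arguments (.omp_data_o, .omp_data_i).  The body is filled in later,
   when pass_expand_omp moves the region's statements into the new
   function.  */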
1612 /* Scan an OpenMP parallel directive. */
1614 static void
1615 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1617 omp_context *ctx;
1618 tree name;
1619 gimple stmt = gsi_stmt (*gsi);
1621 /* Ignore parallel directives with empty bodies, unless there
1622 are copyin clauses. */
1623 if (optimize > 0
1624 && empty_body_p (gimple_omp_body (stmt))
1625 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1626 OMP_CLAUSE_COPYIN) == NULL)
1628 gsi_replace (gsi, gimple_build_nop (), false);
1629 return;
1632 ctx = new_omp_context (stmt, outer_ctx);
1633 if (taskreg_nesting_level > 1)
1634 ctx->is_nested = true;
1635 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1636 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1637 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1638 name = create_tmp_var_name (".omp_data_s");
1639 name = build_decl (TYPE_DECL, name, ctx->record_type);
1640 TYPE_NAME (ctx->record_type) = name;
1641 create_omp_child_function (ctx, false);
1642 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1644 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1645 scan_omp (gimple_omp_body (stmt), ctx);
1647 if (TYPE_FIELDS (ctx->record_type) == NULL)
1648 ctx->record_type = ctx->receiver_decl = NULL;
1649 else
1651 layout_type (ctx->record_type);
1652 fixup_child_record_type (ctx);
1656 /* Scan an OpenMP task directive. */
1658 static void
1659 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1661 omp_context *ctx;
1662 tree name, t;
1663 gimple stmt = gsi_stmt (*gsi);
1665 /* Ignore task directives with empty bodies. */
1666 if (optimize > 0
1667 && empty_body_p (gimple_omp_body (stmt)))
1669 gsi_replace (gsi, gimple_build_nop (), false);
1670 return;
1673 ctx = new_omp_context (stmt, outer_ctx);
1674 if (taskreg_nesting_level > 1)
1675 ctx->is_nested = true;
1676 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1677 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1678 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1679 name = create_tmp_var_name (".omp_data_s");
1680 name = build_decl (TYPE_DECL, name, ctx->record_type);
1681 TYPE_NAME (ctx->record_type) = name;
1682 create_omp_child_function (ctx, false);
1683 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1685 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1687 if (ctx->srecord_type)
1689 name = create_tmp_var_name (".omp_data_a");
1690 name = build_decl (TYPE_DECL, name, ctx->srecord_type);
1691 TYPE_NAME (ctx->srecord_type) = name;
1692 create_omp_child_function (ctx, true);
1695 scan_omp (gimple_omp_body (stmt), ctx);
1697 if (TYPE_FIELDS (ctx->record_type) == NULL)
1699 ctx->record_type = ctx->receiver_decl = NULL;
1700 t = build_int_cst (long_integer_type_node, 0);
1701 gimple_omp_task_set_arg_size (stmt, t);
1702 t = build_int_cst (long_integer_type_node, 1);
1703 gimple_omp_task_set_arg_align (stmt, t);
1705 else
1707 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1708 /* Move VLA fields to the end. */
1709 p = &TYPE_FIELDS (ctx->record_type);
1710 while (*p)
1711 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1712 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1714 *q = *p;
1715 *p = TREE_CHAIN (*p);
1716 TREE_CHAIN (*q) = NULL_TREE;
1717 q = &TREE_CHAIN (*q);
1719 else
1720 p = &TREE_CHAIN (*p);
1721 *p = vla_fields;
1722 layout_type (ctx->record_type);
1723 fixup_child_record_type (ctx);
1724 if (ctx->srecord_type)
1725 layout_type (ctx->srecord_type);
1726 t = fold_convert (long_integer_type_node,
1727 TYPE_SIZE_UNIT (ctx->record_type));
1728 gimple_omp_task_set_arg_size (stmt, t);
1729 t = build_int_cst (long_integer_type_node,
1730 TYPE_ALIGN_UNIT (ctx->record_type));
1731 gimple_omp_task_set_arg_align (stmt, t);
1736 /* Scan an OpenMP loop directive. */
1738 static void
1739 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1741 omp_context *ctx;
1742 size_t i;
1744 ctx = new_omp_context (stmt, outer_ctx);
1746 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1748 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1749 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1751 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1752 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1753 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1754 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1756 scan_omp (gimple_omp_body (stmt), ctx);
1759 /* Scan an OpenMP sections directive. */
1761 static void
1762 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1764 omp_context *ctx;
1766 ctx = new_omp_context (stmt, outer_ctx);
1767 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1768 scan_omp (gimple_omp_body (stmt), ctx);
1771 /* Scan an OpenMP single directive. */
1773 static void
1774 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1776 omp_context *ctx;
1777 tree name;
1779 ctx = new_omp_context (stmt, outer_ctx);
1780 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1781 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1782 name = create_tmp_var_name (".omp_copy_s");
1783 name = build_decl (TYPE_DECL, name, ctx->record_type);
1784 TYPE_NAME (ctx->record_type) = name;
1786 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1787 scan_omp (gimple_omp_body (stmt), ctx);
1789 if (TYPE_FIELDS (ctx->record_type) == NULL)
1790 ctx->record_type = NULL;
1791 else
1792 layout_type (ctx->record_type);
1796 /* Check OpenMP nesting restrictions. */
1797 static void
1798 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1800 switch (gimple_code (stmt))
1802 case GIMPLE_OMP_FOR:
1803 case GIMPLE_OMP_SECTIONS:
1804 case GIMPLE_OMP_SINGLE:
1805 case GIMPLE_CALL:
1806 for (; ctx != NULL; ctx = ctx->outer)
1807 switch (gimple_code (ctx->stmt))
1809 case GIMPLE_OMP_FOR:
1810 case GIMPLE_OMP_SECTIONS:
1811 case GIMPLE_OMP_SINGLE:
1812 case GIMPLE_OMP_ORDERED:
1813 case GIMPLE_OMP_MASTER:
1814 case GIMPLE_OMP_TASK:
1815 if (is_gimple_call (stmt))
1817 warning (0, "barrier region may not be closely nested inside "
1818 "of work-sharing, critical, ordered, master or "
1819 "explicit task region");
1820 return;
1822 warning (0, "work-sharing region may not be closely nested inside "
1823 "of work-sharing, critical, ordered, master or explicit "
1824 "task region");
1825 return;
1826 case GIMPLE_OMP_PARALLEL:
1827 return;
1828 default:
1829 break;
1831 break;
1832 case GIMPLE_OMP_MASTER:
1833 for (; ctx != NULL; ctx = ctx->outer)
1834 switch (gimple_code (ctx->stmt))
1836 case GIMPLE_OMP_FOR:
1837 case GIMPLE_OMP_SECTIONS:
1838 case GIMPLE_OMP_SINGLE:
1839 case GIMPLE_OMP_TASK:
1840 warning (0, "master region may not be closely nested inside "
1841 "of work-sharing or explicit task region");
1842 return;
1843 case GIMPLE_OMP_PARALLEL:
1844 return;
1845 default:
1846 break;
1848 break;
1849 case GIMPLE_OMP_ORDERED:
1850 for (; ctx != NULL; ctx = ctx->outer)
1851 switch (gimple_code (ctx->stmt))
1853 case GIMPLE_OMP_CRITICAL:
1854 case GIMPLE_OMP_TASK:
1855 warning (0, "ordered region may not be closely nested inside "
1856 "of critical or explicit task region");
1857 return;
1858 case GIMPLE_OMP_FOR:
1859 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1860 OMP_CLAUSE_ORDERED) == NULL)
1861 warning (0, "ordered region must be closely nested inside "
1862 "a loop region with an ordered clause");
1863 return;
1864 case GIMPLE_OMP_PARALLEL:
1865 return;
1866 default:
1867 break;
1869 break;
1870 case GIMPLE_OMP_CRITICAL:
1871 for (; ctx != NULL; ctx = ctx->outer)
1872 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1873 && (gimple_omp_critical_name (stmt)
1874 == gimple_omp_critical_name (ctx->stmt)))
1876 warning (0, "critical region may not be nested inside a critical "
1877 "region with the same name");
1878 return;
1880 break;
1881 default:
1882 break;
1887 /* Helper function for scan_omp.
1889 Callback for walk_tree or operators in walk_gimple_stmt used to
1890 scan for OpenMP directives in TP. */
1892 static tree
1893 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1895 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1896 omp_context *ctx = (omp_context *) wi->info;
1897 tree t = *tp;
1899 switch (TREE_CODE (t))
1901 case VAR_DECL:
1902 case PARM_DECL:
1903 case LABEL_DECL:
1904 case RESULT_DECL:
1905 if (ctx)
1906 *tp = remap_decl (t, &ctx->cb);
1907 break;
1909 default:
1910 if (ctx && TYPE_P (t))
1911 *tp = remap_type (t, &ctx->cb);
1912 else if (!DECL_P (t))
1913 *walk_subtrees = 1;
1914 break;
1917 return NULL_TREE;
1921 /* Helper function for scan_omp.
1923 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1924 the current statement in GSI. */
1926 static tree
1927 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1928 struct walk_stmt_info *wi)
1930 gimple stmt = gsi_stmt (*gsi);
1931 omp_context *ctx = (omp_context *) wi->info;
1933 if (gimple_has_location (stmt))
1934 input_location = gimple_location (stmt);
1936 /* Check the OpenMP nesting restrictions. */
1937 if (ctx != NULL)
1939 if (is_gimple_omp (stmt))
1940 check_omp_nesting_restrictions (stmt, ctx);
1941 else if (is_gimple_call (stmt))
1943 tree fndecl = gimple_call_fndecl (stmt);
1944 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1945 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1946 check_omp_nesting_restrictions (stmt, ctx);
1950 *handled_ops_p = true;
1952 switch (gimple_code (stmt))
1954 case GIMPLE_OMP_PARALLEL:
1955 taskreg_nesting_level++;
1956 scan_omp_parallel (gsi, ctx);
1957 taskreg_nesting_level--;
1958 break;
1960 case GIMPLE_OMP_TASK:
1961 taskreg_nesting_level++;
1962 scan_omp_task (gsi, ctx);
1963 taskreg_nesting_level--;
1964 break;
1966 case GIMPLE_OMP_FOR:
1967 scan_omp_for (stmt, ctx);
1968 break;
1970 case GIMPLE_OMP_SECTIONS:
1971 scan_omp_sections (stmt, ctx);
1972 break;
1974 case GIMPLE_OMP_SINGLE:
1975 scan_omp_single (stmt, ctx);
1976 break;
1978 case GIMPLE_OMP_SECTION:
1979 case GIMPLE_OMP_MASTER:
1980 case GIMPLE_OMP_ORDERED:
1981 case GIMPLE_OMP_CRITICAL:
1982 ctx = new_omp_context (stmt, ctx);
1983 scan_omp (gimple_omp_body (stmt), ctx);
1984 break;
1986 case GIMPLE_BIND:
1988 tree var;
1990 *handled_ops_p = false;
1991 if (ctx)
1992 for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
1993 insert_decl_map (&ctx->cb, var, var);
1995 break;
1996 default:
1997 *handled_ops_p = false;
1998 break;
2001 return NULL_TREE;
2005 /* Scan all the statements starting at the current statement. CTX
2006 contains context information about the OpenMP directives and
2007 clauses found during the scan. */
2009 static void
2010 scan_omp (gimple_seq body, omp_context *ctx)
2012 location_t saved_location;
2013 struct walk_stmt_info wi;
2015 memset (&wi, 0, sizeof (wi));
2016 wi.info = ctx;
2017 wi.want_locations = true;
2019 saved_location = input_location;
2020 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2021 input_location = saved_location;
2024 /* Re-gimplification and code generation routines. */
2026 /* Build a call to GOMP_barrier. */
2028 static tree
2029 build_omp_barrier (void)
2031 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
2034 /* If a context was created for STMT when it was scanned, return it. */
2036 static omp_context *
2037 maybe_lookup_ctx (gimple stmt)
2039 splay_tree_node n;
2040 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2041 return n ? (omp_context *) n->value : NULL;
2045 /* Find the mapping for DECL in CTX or the immediately enclosing
2046 context that has a mapping for DECL.
2048 If CTX is a nested parallel directive, we may have to use the decl
2049 mappings created in CTX's parent context. Suppose that we have the
2050 following parallel nesting (variable UIDs shown for clarity):
2052 iD.1562 = 0;
2053 #omp parallel shared(iD.1562) -> outer parallel
2054 iD.1562 = iD.1562 + 1;
2056 #omp parallel shared (iD.1562) -> inner parallel
2057 iD.1562 = iD.1562 - 1;
2059 Each parallel structure will create a distinct .omp_data_s structure
2060 for copying iD.1562 in/out of the directive:
2062 outer parallel .omp_data_s.1.i -> iD.1562
2063 inner parallel .omp_data_s.2.i -> iD.1562
2065 A shared variable mapping will produce a copy-out operation before
2066 the parallel directive and a copy-in operation after it. So, in
2067 this case we would have:
2069 iD.1562 = 0;
2070 .omp_data_o.1.i = iD.1562;
2071 #omp parallel shared(iD.1562) -> outer parallel
2072 .omp_data_i.1 = &.omp_data_o.1
2073 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2075 .omp_data_o.2.i = iD.1562; -> **
2076 #omp parallel shared(iD.1562) -> inner parallel
2077 .omp_data_i.2 = &.omp_data_o.2
2078 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2081 ** This is a problem. The symbol iD.1562 cannot be referenced
2082 inside the body of the outer parallel region. But since we are
2083 emitting this copy operation while expanding the inner parallel
2084 directive, we need to access the CTX structure of the outer
2085 parallel directive to get the correct mapping:
2087 .omp_data_o.2.i = .omp_data_i.1->i
2089 Since there may be other workshare or parallel directives enclosing
2090 the parallel directive, it may be necessary to walk up the context
2091 parent chain. This is not a problem in general because nested
2092 parallelism happens only rarely. */
2094 static tree
2095 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2097 tree t;
2098 omp_context *up;
2100 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2101 t = maybe_lookup_decl (decl, up);
2103 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2105 return t ? t : decl;
2109 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2110 in outer contexts. */
2112 static tree
2113 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2115 tree t = NULL;
2116 omp_context *up;
2118 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2119 t = maybe_lookup_decl (decl, up);
2121 return t ? t : decl;
2125 /* Construct the initialization value for reduction CLAUSE. */
2127 tree
2128 omp_reduction_init (tree clause, tree type)
2130 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2132 case PLUS_EXPR:
2133 case MINUS_EXPR:
2134 case BIT_IOR_EXPR:
2135 case BIT_XOR_EXPR:
2136 case TRUTH_OR_EXPR:
2137 case TRUTH_ORIF_EXPR:
2138 case TRUTH_XOR_EXPR:
2139 case NE_EXPR:
2140 return fold_convert (type, integer_zero_node);
2142 case MULT_EXPR:
2143 case TRUTH_AND_EXPR:
2144 case TRUTH_ANDIF_EXPR:
2145 case EQ_EXPR:
2146 return fold_convert (type, integer_one_node);
2148 case BIT_AND_EXPR:
2149 return fold_convert (type, integer_minus_one_node);
2151 case MAX_EXPR:
2152 if (SCALAR_FLOAT_TYPE_P (type))
2154 REAL_VALUE_TYPE max, min;
2155 if (HONOR_INFINITIES (TYPE_MODE (type)))
2157 real_inf (&max);
2158 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2160 else
2161 real_maxval (&min, 1, TYPE_MODE (type));
2162 return build_real (type, min);
2164 else
2166 gcc_assert (INTEGRAL_TYPE_P (type));
2167 return TYPE_MIN_VALUE (type);
2170 case MIN_EXPR:
2171 if (SCALAR_FLOAT_TYPE_P (type))
2173 REAL_VALUE_TYPE max;
2174 if (HONOR_INFINITIES (TYPE_MODE (type)))
2175 real_inf (&max);
2176 else
2177 real_maxval (&max, 0, TYPE_MODE (type));
2178 return build_real (type, max);
2180 else
2182 gcc_assert (INTEGRAL_TYPE_P (type));
2183 return TYPE_MAX_VALUE (type);
2186 default:
2187 gcc_unreachable ();
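/* A quick, informal reference for the identity values chosen above; the
   clauses and variable names are illustrative only:

     reduction(+:sum)   -> private sum starts at 0
     reduction(*:prod)  -> private prod starts at 1
     reduction(&:mask)  -> private mask starts at ~0 (all bits set)
     reduction(max:m)   -> -inf when the FP mode honors infinities,
                           otherwise the most negative finite value;
                           TYPE_MIN_VALUE for integral types
     reduction(min:m)   -> symmetrically +inf, the largest finite value,
                           or TYPE_MAX_VALUE

   Starting every private copy at the operator's identity lets the merge
   emitted by lower_reduction_clauses reproduce the serial result.  */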
2191 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2192 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2193 private variables. Initialization statements go in ILIST, while calls
2194 to destructors go in DLIST. */
2196 static void
2197 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2198 omp_context *ctx)
2200 gimple_stmt_iterator diter;
2201 tree c, dtor, copyin_seq, x, ptr;
2202 bool copyin_by_ref = false;
2203 bool lastprivate_firstprivate = false;
2204 int pass;
2206 *dlist = gimple_seq_alloc ();
2207 diter = gsi_start (*dlist);
2208 copyin_seq = NULL;
2210 /* Do all the fixed sized types in the first pass, and the variable sized
2211 types in the second pass. This makes sure that the scalar arguments to
2212 the variable sized types are processed before we use them in the
2213 variable sized operations. */
2214 for (pass = 0; pass < 2; ++pass)
2216 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2218 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2219 tree var, new_var;
2220 bool by_ref;
2222 switch (c_kind)
2224 case OMP_CLAUSE_PRIVATE:
2225 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2226 continue;
2227 break;
2228 case OMP_CLAUSE_SHARED:
2229 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2231 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2232 continue;
2234 case OMP_CLAUSE_FIRSTPRIVATE:
2235 case OMP_CLAUSE_COPYIN:
2236 case OMP_CLAUSE_REDUCTION:
2237 break;
2238 case OMP_CLAUSE_LASTPRIVATE:
2239 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2241 lastprivate_firstprivate = true;
2242 if (pass != 0)
2243 continue;
2245 break;
2246 default:
2247 continue;
2250 new_var = var = OMP_CLAUSE_DECL (c);
2251 if (c_kind != OMP_CLAUSE_COPYIN)
2252 new_var = lookup_decl (var, ctx);
2254 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2256 if (pass != 0)
2257 continue;
2259 else if (is_variable_sized (var))
2261 /* For variable sized types, we need to allocate the
2262 actual storage here. Call alloca and store the
2263 result in the pointer decl that we created elsewhere. */
2264 if (pass == 0)
2265 continue;
2267 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2269 gimple stmt;
2270 tree tmp;
2272 ptr = DECL_VALUE_EXPR (new_var);
2273 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2274 ptr = TREE_OPERAND (ptr, 0);
2275 gcc_assert (DECL_P (ptr));
2276 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2278 /* void *tmp = __builtin_alloca (x); */
2279 stmt
2280 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2281 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2282 gimple_add_tmp_var (tmp);
2283 gimple_call_set_lhs (stmt, tmp);
2285 gimple_seq_add_stmt (ilist, stmt);
2287 x = fold_convert (TREE_TYPE (ptr), tmp);
2288 gimplify_assign (ptr, x, ilist);
2291 else if (is_reference (var))
2293 /* For references that are being privatized for Fortran,
2294 allocate new backing storage for the new pointer
2295 variable. This allows us to avoid changing all the
2296 code that expects a pointer to something that expects
2297 a direct variable. Note that this doesn't apply to
2298 C++, since reference types are disallowed in data
2299 sharing clauses there, except for NRV optimized
2300 return values. */
2301 if (pass == 0)
2302 continue;
2304 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2305 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2307 x = build_receiver_ref (var, false, ctx);
2308 x = build_fold_addr_expr (x);
2310 else if (TREE_CONSTANT (x))
2312 const char *name = NULL;
2313 if (DECL_NAME (var))
2314 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2316 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2317 name);
2318 gimple_add_tmp_var (x);
2319 x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
2321 else
2323 x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2324 x = fold_convert (TREE_TYPE (new_var), x);
2327 gimplify_assign (new_var, x, ilist);
2329 new_var = build_fold_indirect_ref (new_var);
2331 else if (c_kind == OMP_CLAUSE_REDUCTION
2332 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2334 if (pass == 0)
2335 continue;
2337 else if (pass != 0)
2338 continue;
2340 switch (OMP_CLAUSE_CODE (c))
2342 case OMP_CLAUSE_SHARED:
2343 /* Shared global vars are just accessed directly. */
2344 if (is_global_var (new_var))
2345 break;
2346 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2347 needs to be delayed until after fixup_child_record_type so
2348 that we get the correct type during the dereference. */
2349 by_ref = use_pointer_for_field (var, ctx);
2350 x = build_receiver_ref (var, by_ref, ctx);
2351 SET_DECL_VALUE_EXPR (new_var, x);
2352 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2354 /* ??? If VAR is not passed by reference, and the variable
2355 hasn't been initialized yet, then we'll get a warning for
2356 the store into the omp_data_s structure. Ideally, we'd be
2357 able to notice this and not store anything at all, but
2358 we're generating code too early. Suppress the warning. */
2359 if (!by_ref)
2360 TREE_NO_WARNING (var) = 1;
2361 break;
2363 case OMP_CLAUSE_LASTPRIVATE:
2364 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2365 break;
2366 /* FALLTHRU */
2368 case OMP_CLAUSE_PRIVATE:
2369 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2370 x = build_outer_var_ref (var, ctx);
2371 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2373 if (is_task_ctx (ctx))
2374 x = build_receiver_ref (var, false, ctx);
2375 else
2376 x = build_outer_var_ref (var, ctx);
2378 else
2379 x = NULL;
2380 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2381 if (x)
2382 gimplify_and_add (x, ilist);
2383 /* FALLTHRU */
2385 do_dtor:
2386 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2387 if (x)
2389 gimple_seq tseq = NULL;
2391 dtor = x;
2392 gimplify_stmt (&dtor, &tseq);
2393 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2395 break;
2397 case OMP_CLAUSE_FIRSTPRIVATE:
2398 if (is_task_ctx (ctx))
2400 if (is_reference (var) || is_variable_sized (var))
2401 goto do_dtor;
2402 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2403 ctx))
2404 || use_pointer_for_field (var, NULL))
2406 x = build_receiver_ref (var, false, ctx);
2407 SET_DECL_VALUE_EXPR (new_var, x);
2408 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2409 goto do_dtor;
2412 x = build_outer_var_ref (var, ctx);
2413 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2414 gimplify_and_add (x, ilist);
2415 goto do_dtor;
2416 break;
2418 case OMP_CLAUSE_COPYIN:
2419 by_ref = use_pointer_for_field (var, NULL);
2420 x = build_receiver_ref (var, by_ref, ctx);
2421 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2422 append_to_statement_list (x, &copyin_seq);
2423 copyin_by_ref |= by_ref;
2424 break;
2426 case OMP_CLAUSE_REDUCTION:
2427 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2429 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2430 x = build_outer_var_ref (var, ctx);
2432 if (is_reference (var))
2433 x = build_fold_addr_expr (x);
2434 SET_DECL_VALUE_EXPR (placeholder, x);
2435 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2436 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2437 gimple_seq_add_seq (ilist,
2438 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2439 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2440 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2442 else
2444 x = omp_reduction_init (c, TREE_TYPE (new_var));
2445 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2446 gimplify_assign (new_var, x, ilist);
2448 break;
2450 default:
2451 gcc_unreachable ();
2456 /* The copyin sequence is not to be executed by the main thread, since
2457 that would result in self-copies. Such a self-copy may be invisible for
2458 scalars, but it certainly is not for C++ operator=. */
2459 if (copyin_seq)
2461 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2462 x = build2 (NE_EXPR, boolean_type_node, x,
2463 build_int_cst (TREE_TYPE (x), 0));
2464 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2465 gimplify_and_add (x, ilist);
2468 /* If any copyin variable is passed by reference, we must ensure the
2469 master thread doesn't modify it before it is copied over in all
2470 threads. Similarly for variables in both firstprivate and
2471 lastprivate clauses we need to ensure the lastprivate copying
2472 happens after firstprivate copying in all threads. */
2473 if (copyin_by_ref || lastprivate_firstprivate)
2474 gimplify_and_add (build_omp_barrier (), ilist);
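/* A minimal sketch of the receiver-side code built here, assuming a
   directive such as the following; all names are illustrative and the
   actual temporaries generated differ:

     int x = 1;
     #pragma omp parallel firstprivate(x) copyin(tp)

   For firstprivate(x), ILIST receives roughly

     x.priv = .omp_data_i->x;

   while the copyin assignment for tp is collected in copyin_seq and, as
   the code right above shows, guarded so the master thread does not copy
   onto itself:

     if (__builtin_omp_get_thread_num () != 0)
       tp = <master's threadprivate tp>;

   If any copyin variable is passed by reference, or a variable is both
   firstprivate and lastprivate, a GOMP_barrier () is appended, for the
   reasons given in the comment just above.  */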
2478 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2479 both parallel and workshare constructs. PREDICATE may be NULL if it's
2480 always true. */
2482 static void
2483 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2484 omp_context *ctx)
2486 tree x, c, label = NULL;
2487 bool par_clauses = false;
2489 /* Early exit if there are no lastprivate clauses. */
2490 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2491 if (clauses == NULL)
2493 /* If this was a workshare clause, see if it had been combined
2494 with its parallel. In that case, look for the clauses on the
2495 parallel statement itself. */
2496 if (is_parallel_ctx (ctx))
2497 return;
2499 ctx = ctx->outer;
2500 if (ctx == NULL || !is_parallel_ctx (ctx))
2501 return;
2503 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2504 OMP_CLAUSE_LASTPRIVATE);
2505 if (clauses == NULL)
2506 return;
2507 par_clauses = true;
2510 if (predicate)
2512 gimple stmt;
2513 tree label_true, arm1, arm2;
2515 label = create_artificial_label ();
2516 label_true = create_artificial_label ();
2517 arm1 = TREE_OPERAND (predicate, 0);
2518 arm2 = TREE_OPERAND (predicate, 1);
2519 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2520 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2521 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2522 label_true, label);
2523 gimple_seq_add_stmt (stmt_list, stmt);
2524 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2527 for (c = clauses; c ;)
2529 tree var, new_var;
2531 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2533 var = OMP_CLAUSE_DECL (c);
2534 new_var = lookup_decl (var, ctx);
2536 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2538 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2539 gimple_seq_add_seq (stmt_list,
2540 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2542 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2544 x = build_outer_var_ref (var, ctx);
2545 if (is_reference (var))
2546 new_var = build_fold_indirect_ref (new_var);
2547 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2548 gimplify_and_add (x, stmt_list);
2550 c = OMP_CLAUSE_CHAIN (c);
2551 if (c == NULL && !par_clauses)
2553 /* If this was a workshare clause, see if it had been combined
2554 with its parallel. In that case, continue looking for the
2555 clauses also on the parallel statement itself. */
2556 if (is_parallel_ctx (ctx))
2557 break;
2559 ctx = ctx->outer;
2560 if (ctx == NULL || !is_parallel_ctx (ctx))
2561 break;
2563 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2564 OMP_CLAUSE_LASTPRIVATE);
2565 par_clauses = true;
2569 if (label)
2570 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
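/* Illustrative sketch only; the exact temporaries differ.  For

     #pragma omp for lastprivate(x)

   PREDICATE, supplied by the caller, is a comparison that holds only in
   the thread that executed the sequentially last iteration, so the
   copy-back built here is, roughly,

     if (PREDICATE) goto L_true; else goto L_done;
   L_true:
     x_outer = x.priv;
   L_done:
     ;

   When the workshare was combined with its enclosing parallel, the
   lastprivate clauses are taken from the GIMPLE_OMP_PARALLEL statement
   instead, as the fallback above does.  */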
2574 /* Generate code to implement the REDUCTION clauses. */
2576 static void
2577 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2579 gimple_seq sub_seq = NULL;
2580 gimple stmt;
2581 tree x, c;
2582 int count = 0;
2584 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2585 update in that case, otherwise use a lock. */
2586 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2587 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2589 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2591 /* Never use OMP_ATOMIC for array reductions. */
2592 count = -1;
2593 break;
2595 count++;
2598 if (count == 0)
2599 return;
2601 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2603 tree var, ref, new_var;
2604 enum tree_code code;
2606 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2607 continue;
2609 var = OMP_CLAUSE_DECL (c);
2610 new_var = lookup_decl (var, ctx);
2611 if (is_reference (var))
2612 new_var = build_fold_indirect_ref (new_var);
2613 ref = build_outer_var_ref (var, ctx);
2614 code = OMP_CLAUSE_REDUCTION_CODE (c);
2616 /* reduction(-:var) sums up the partial results, so it acts
2617 identically to reduction(+:var). */
2618 if (code == MINUS_EXPR)
2619 code = PLUS_EXPR;
2621 if (count == 1)
2623 tree addr = build_fold_addr_expr (ref);
2625 addr = save_expr (addr);
2626 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2627 x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
2628 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2629 gimplify_and_add (x, stmt_seqp);
2630 return;
2633 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2635 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2637 if (is_reference (var))
2638 ref = build_fold_addr_expr (ref);
2639 SET_DECL_VALUE_EXPR (placeholder, ref);
2640 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2641 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2642 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2643 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2644 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2646 else
2648 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2649 ref = build_outer_var_ref (var, ctx);
2650 gimplify_assign (ref, x, &sub_seq);
2654 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2655 gimple_seq_add_stmt (stmt_seqp, stmt);
2657 gimple_seq_add_seq (stmt_seqp, sub_seq);
2659 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2660 gimple_seq_add_stmt (stmt_seqp, stmt);
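/* Rough illustration of the merge code produced above (names invented).
   With exactly one scalar reduction clause, e.g.

     #pragma omp parallel for reduction(+:s)

   the merge is a single atomic update built via OMP_ATOMIC:

     s_outer = s_outer + s.priv;   -- emitted as an atomic update

   With two or more clauses, or an array reduction (the placeholder case),
   the updates are instead wrapped in the global lock:

     GOMP_atomic_start ();
     s_outer = s_outer + s.priv;
     t_outer = t_outer * t.priv;
     GOMP_atomic_end ();  */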
2664 /* Generate code to implement the COPYPRIVATE clauses. */
2666 static void
2667 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2668 omp_context *ctx)
2670 tree c;
2672 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2674 tree var, ref, x;
2675 bool by_ref;
2677 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2678 continue;
2680 var = OMP_CLAUSE_DECL (c);
2681 by_ref = use_pointer_for_field (var, NULL);
2683 ref = build_sender_ref (var, ctx);
2684 x = lookup_decl_in_outer_ctx (var, ctx);
2685 x = by_ref ? build_fold_addr_expr (x) : x;
2686 gimplify_assign (ref, x, slist);
2688 ref = build_receiver_ref (var, by_ref, ctx);
2689 if (is_reference (var))
2691 ref = build_fold_indirect_ref (ref);
2692 var = build_fold_indirect_ref (var);
2694 x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
2695 gimplify_and_add (x, rlist);
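/* Sketch of the two halves built here, using invented names.  For

     #pragma omp single copyprivate(x)

   SLIST, executed by the thread that ran the single block, publishes the
   value (or its address, when passed by reference):

     .omp_data_o.x = x;

   and RLIST, executed by the remaining threads, copies it back out:

     x = .omp_data_i->x;

   The synchronization that makes this broadcast safe comes from the
   lowering of the single construct itself, not from this helper.  */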
2700 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2701 and REDUCTION from the sender (aka parent) side. */
2703 static void
2704 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2705 omp_context *ctx)
2707 tree c;
2709 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2711 tree val, ref, x, var;
2712 bool by_ref, do_in = false, do_out = false;
2714 switch (OMP_CLAUSE_CODE (c))
2716 case OMP_CLAUSE_PRIVATE:
2717 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2718 break;
2719 continue;
2720 case OMP_CLAUSE_FIRSTPRIVATE:
2721 case OMP_CLAUSE_COPYIN:
2722 case OMP_CLAUSE_LASTPRIVATE:
2723 case OMP_CLAUSE_REDUCTION:
2724 break;
2725 default:
2726 continue;
2729 val = OMP_CLAUSE_DECL (c);
2730 var = lookup_decl_in_outer_ctx (val, ctx);
2732 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2733 && is_global_var (var))
2734 continue;
2735 if (is_variable_sized (val))
2736 continue;
2737 by_ref = use_pointer_for_field (val, NULL);
2739 switch (OMP_CLAUSE_CODE (c))
2741 case OMP_CLAUSE_PRIVATE:
2742 case OMP_CLAUSE_FIRSTPRIVATE:
2743 case OMP_CLAUSE_COPYIN:
2744 do_in = true;
2745 break;
2747 case OMP_CLAUSE_LASTPRIVATE:
2748 if (by_ref || is_reference (val))
2750 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2751 continue;
2752 do_in = true;
2754 else
2756 do_out = true;
2757 if (lang_hooks.decls.omp_private_outer_ref (val))
2758 do_in = true;
2760 break;
2762 case OMP_CLAUSE_REDUCTION:
2763 do_in = true;
2764 do_out = !(by_ref || is_reference (val));
2765 break;
2767 default:
2768 gcc_unreachable ();
2771 if (do_in)
2773 ref = build_sender_ref (val, ctx);
2774 x = by_ref ? build_fold_addr_expr (var) : var;
2775 gimplify_assign (ref, x, ilist);
2776 if (is_task_ctx (ctx))
2777 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2780 if (do_out)
2782 ref = build_sender_ref (val, ctx);
2783 gimplify_assign (var, ref, olist);
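/* A small sender-side sketch, assuming illustrative names.  For

     #pragma omp parallel firstprivate(a) lastprivate(b) reduction(+:s)

   ILIST (emitted before the region) stores the inbound values into the
   marshalling record, roughly

     .omp_data_o.a = a;            -- or &a when passed by reference
     .omp_data_o.s = s;

   and OLIST (emitted after the region) copies the outbound ones back,
   e.g. "b = .omp_data_o.b" and likewise for the reduction result when the
   field is not a pointer.  Global variables (other than copyin) and
   variable-sized decls are skipped here, as the checks above show.  */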
2788 /* Generate code to implement SHARED from the sender (aka parent)
2789 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2790 list things that got automatically shared. */
2792 static void
2793 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2795 tree var, ovar, nvar, f, x, record_type;
2797 if (ctx->record_type == NULL)
2798 return;
2800 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2801 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
2803 ovar = DECL_ABSTRACT_ORIGIN (f);
2804 nvar = maybe_lookup_decl (ovar, ctx);
2805 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2806 continue;
2808 /* If CTX is a nested parallel directive, find the immediately
2809 enclosing parallel or workshare construct that contains a
2810 mapping for OVAR. */
2811 var = lookup_decl_in_outer_ctx (ovar, ctx);
2813 if (use_pointer_for_field (ovar, ctx))
2815 x = build_sender_ref (ovar, ctx);
2816 var = build_fold_addr_expr (var);
2817 gimplify_assign (x, var, ilist);
2819 else
2821 x = build_sender_ref (ovar, ctx);
2822 gimplify_assign (x, var, ilist);
2824 if (!TREE_READONLY (var)
2825 /* We don't need to receive a new reference to a result
2826 or parm decl. In fact we may not store to it, as doing so would
2827 invalidate any pending RSO (return slot optimization) and
2828 generate wrong gimple during inlining. */
2829 && !((TREE_CODE (var) == RESULT_DECL
2830 || TREE_CODE (var) == PARM_DECL)
2831 && DECL_BY_REFERENCE (var)))
2833 x = build_sender_ref (ovar, ctx);
2834 gimplify_assign (var, x, olist);
2841 /* A convenience function to build an empty GIMPLE_COND with just the
2842 condition. */
2844 static gimple
2845 gimple_build_cond_empty (tree cond)
2847 enum tree_code pred_code;
2848 tree lhs, rhs;
2850 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2851 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2855 /* Build the function calls to GOMP_parallel_start etc to actually
2856 generate the parallel operation. REGION is the parallel region
2857 being expanded. BB is the block where to insert the code. WS_ARGS
2858 will be set if this is a call to a combined parallel+workshare
2859 construct, it contains the list of additional arguments needed by
2860 the workshare construct. */
2862 static void
2863 expand_parallel_call (struct omp_region *region, basic_block bb,
2864 gimple entry_stmt, tree ws_args)
2866 tree t, t1, t2, val, cond, c, clauses;
2867 gimple_stmt_iterator gsi;
2868 gimple stmt;
2869 int start_ix;
2871 clauses = gimple_omp_parallel_clauses (entry_stmt);
2873 /* Determine what flavor of GOMP_parallel_start we will be
2874 emitting. */
2875 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2876 if (is_combined_parallel (region))
2878 switch (region->inner->type)
2880 case GIMPLE_OMP_FOR:
2881 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2882 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2883 + (region->inner->sched_kind
2884 == OMP_CLAUSE_SCHEDULE_RUNTIME
2885 ? 3 : region->inner->sched_kind);
2886 break;
2887 case GIMPLE_OMP_SECTIONS:
2888 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2889 break;
2890 default:
2891 gcc_unreachable ();
2895 /* By default, the value of NUM_THREADS is zero (selected at run time)
2896 and there is no conditional. */
2897 cond = NULL_TREE;
2898 val = build_int_cst (unsigned_type_node, 0);
2900 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2901 if (c)
2902 cond = OMP_CLAUSE_IF_EXPR (c);
2904 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2905 if (c)
2906 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2908 /* Ensure 'val' is of the correct type. */
2909 val = fold_convert (unsigned_type_node, val);
2911 /* If we found the clause 'if (cond)', build either
2912 (cond == 0) or (cond ? val : 1u). */
2913 if (cond)
2915 gimple_stmt_iterator gsi;
2917 cond = gimple_boolify (cond);
2919 if (integer_zerop (val))
2920 val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
2921 build_int_cst (TREE_TYPE (cond), 0));
2922 else
2924 basic_block cond_bb, then_bb, else_bb;
2925 edge e, e_then, e_else;
2926 tree tmp_then, tmp_else, tmp_join, tmp_var;
2928 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2929 if (gimple_in_ssa_p (cfun))
2931 tmp_then = make_ssa_name (tmp_var, NULL);
2932 tmp_else = make_ssa_name (tmp_var, NULL);
2933 tmp_join = make_ssa_name (tmp_var, NULL);
2935 else
2937 tmp_then = tmp_var;
2938 tmp_else = tmp_var;
2939 tmp_join = tmp_var;
2942 e = split_block (bb, NULL);
2943 cond_bb = e->src;
2944 bb = e->dest;
2945 remove_edge (e);
2947 then_bb = create_empty_bb (cond_bb);
2948 else_bb = create_empty_bb (then_bb);
2949 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2950 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
2952 stmt = gimple_build_cond_empty (cond);
2953 gsi = gsi_start_bb (cond_bb);
2954 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2956 gsi = gsi_start_bb (then_bb);
2957 stmt = gimple_build_assign (tmp_then, val);
2958 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2960 gsi = gsi_start_bb (else_bb);
2961 stmt = gimple_build_assign
2962 (tmp_else, build_int_cst (unsigned_type_node, 1));
2963 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2965 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
2966 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
2967 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
2968 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
2970 if (gimple_in_ssa_p (cfun))
2972 gimple phi = create_phi_node (tmp_join, bb);
2973 SSA_NAME_DEF_STMT (tmp_join) = phi;
2974 add_phi_arg (phi, tmp_then, e_then);
2975 add_phi_arg (phi, tmp_else, e_else);
2978 val = tmp_join;
2981 gsi = gsi_start_bb (bb);
2982 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
2983 false, GSI_CONTINUE_LINKING);
2986 gsi = gsi_last_bb (bb);
2987 t = gimple_omp_parallel_data_arg (entry_stmt);
2988 if (t == NULL)
2989 t1 = null_pointer_node;
2990 else
2991 t1 = build_fold_addr_expr (t);
2992 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
2994 if (ws_args)
2996 tree args = tree_cons (NULL, t2,
2997 tree_cons (NULL, t1,
2998 tree_cons (NULL, val, ws_args)));
2999 t = build_function_call_expr (built_in_decls[start_ix], args);
3001 else
3002 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
3004 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3005 false, GSI_CONTINUE_LINKING);
3007 t = gimple_omp_parallel_data_arg (entry_stmt);
3008 if (t == NULL)
3009 t = null_pointer_node;
3010 else
3011 t = build_fold_addr_expr (t);
3012 t = build_call_expr (gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3013 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3014 false, GSI_CONTINUE_LINKING);
3016 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3017 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3018 false, GSI_CONTINUE_LINKING);
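/* The call sequence emitted above reduces to the following shape; the
   data record and child function were produced earlier by the lowering
   and outlining code, so the names are only illustrative:

     GOMP_parallel_start (child_fn, &.omp_data_o, nthreads);
     child_fn (&.omp_data_o);
     GOMP_parallel_end ();

   Here nthreads is 0 ("let the runtime decide") unless a num_threads
   clause supplied a value, and an if (cond) clause arranges for a single
   thread when cond is false: the value becomes cond ? nthreads : 1u, or
   simply 1 when cond is false and 0 otherwise if no num_threads value was
   given.  For a combined parallel+workshare, one of the
   GOMP_parallel_loop_*_start or GOMP_parallel_sections_start entry points
   is called instead, with the extra arguments from WS_ARGS appended.  */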
3022 /* Build the function call to GOMP_task to actually
3023 generate the task operation. BB is the block where to insert the code. */
3025 static void
3026 expand_task_call (basic_block bb, gimple entry_stmt)
3028 tree t, t1, t2, t3, flags, cond, c, clauses;
3029 gimple_stmt_iterator gsi;
3031 clauses = gimple_omp_task_clauses (entry_stmt);
3033 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3034 if (c)
3035 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3036 else
3037 cond = boolean_true_node;
3039 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3040 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3042 gsi = gsi_last_bb (bb);
3043 t = gimple_omp_task_data_arg (entry_stmt);
3044 if (t == NULL)
3045 t2 = null_pointer_node;
3046 else
3047 t2 = build_fold_addr_expr (t);
3048 t1 = build_fold_addr_expr (gimple_omp_task_child_fn (entry_stmt));
3049 t = gimple_omp_task_copy_fn (entry_stmt);
3050 if (t == NULL)
3051 t3 = null_pointer_node;
3052 else
3053 t3 = build_fold_addr_expr (t);
3055 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3056 gimple_omp_task_arg_size (entry_stmt),
3057 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3059 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3060 false, GSI_CONTINUE_LINKING);
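/* The single call built above has, in sketch form (record and function
   names are illustrative),

     GOMP_task (task_fn, &.omp_data_o, copy_fn,
                arg_size, arg_align, if_cond, flags);

   where copy_fn is the optional firstprivate-copy helper (or NULL),
   arg_size/arg_align describe the marshalling record, if_cond is the
   evaluated if clause (true when the clause is absent), and flags
   currently only encodes the untied clause in bit 0.  */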
3064 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3065 catch handler and return it. This prevents programs from violating the
3066 structured block semantics with throws. */
3068 static gimple_seq
3069 maybe_catch_exception (gimple_seq body)
3071 gimple f, t;
3073 if (!flag_exceptions)
3074 return body;
3076 if (lang_protect_cleanup_actions)
3077 t = lang_protect_cleanup_actions ();
3078 else
3079 t = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
3081 f = gimple_build_eh_filter (NULL, gimple_seq_alloc_with_stmt (t));
3082 gimple_eh_filter_set_must_not_throw (f, true);
3084 t = gimple_build_try (body, gimple_seq_alloc_with_stmt (f),
3085 GIMPLE_TRY_CATCH);
3087 return gimple_seq_alloc_with_stmt (t);
3090 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
3092 static tree
3093 list2chain (tree list)
3095 tree t;
3097 for (t = list; t; t = TREE_CHAIN (t))
3099 tree var = TREE_VALUE (t);
3100 if (TREE_CHAIN (t))
3101 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
3102 else
3103 TREE_CHAIN (var) = NULL_TREE;
3106 return list ? TREE_VALUE (list) : NULL_TREE;
3110 /* Remove barriers in REGION->EXIT's block. Note that this is only
3111 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3112 is an implicit barrier, a barrier that a workshare nested inside the
3113 GIMPLE_OMP_PARALLEL left at the end of the GIMPLE_OMP_PARALLEL region
3114 can now be removed. */
3116 static void
3117 remove_exit_barrier (struct omp_region *region)
3119 gimple_stmt_iterator gsi;
3120 basic_block exit_bb;
3121 edge_iterator ei;
3122 edge e;
3123 gimple stmt;
3124 int any_addressable_vars = -1;
3126 exit_bb = region->exit;
3128 /* If the parallel region doesn't return, we don't have REGION->EXIT
3129 block at all. */
3130 if (! exit_bb)
3131 return;
3133 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3134 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3135 statements that can appear in between are extremely limited -- no
3136 memory operations at all. Here we are even stricter: the only thing
3137 we allow to precede this GIMPLE_OMP_RETURN is a label. */
3138 gsi = gsi_last_bb (exit_bb);
3139 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3140 gsi_prev (&gsi);
3141 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3142 return;
3144 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3146 gsi = gsi_last_bb (e->src);
3147 if (gsi_end_p (gsi))
3148 continue;
3149 stmt = gsi_stmt (gsi);
3150 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3151 && !gimple_omp_return_nowait_p (stmt))
3153 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3154 in many cases. If there could be tasks queued, the barrier
3155 might be needed to let the tasks run before some local
3156 variable of the parallel that the task uses as shared
3157 runs out of scope. The task can be spawned either
3158 from within current function (this would be easy to check)
3159 or from some function it calls and gets passed an address
3160 of such a variable. */
3161 if (any_addressable_vars < 0)
3163 gimple parallel_stmt = last_stmt (region->entry);
3164 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3165 tree local_decls = DECL_STRUCT_FUNCTION (child_fun)->local_decls;
3166 tree block;
3168 any_addressable_vars = 0;
3169 for (; local_decls; local_decls = TREE_CHAIN (local_decls))
3170 if (TREE_ADDRESSABLE (TREE_VALUE (local_decls)))
3172 any_addressable_vars = 1;
3173 break;
3175 for (block = gimple_block (stmt);
3176 !any_addressable_vars
3177 && block
3178 && TREE_CODE (block) == BLOCK;
3179 block = BLOCK_SUPERCONTEXT (block))
3181 for (local_decls = BLOCK_VARS (block);
3182 local_decls;
3183 local_decls = TREE_CHAIN (local_decls))
3184 if (TREE_ADDRESSABLE (local_decls))
3186 any_addressable_vars = 1;
3187 break;
3189 if (block == gimple_block (parallel_stmt))
3190 break;
3193 if (!any_addressable_vars)
3194 gimple_omp_return_set_nowait (stmt);
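/* For illustration (names invented): in

     #pragma omp parallel
       {
         #pragma omp for
         for (i = 0; i < n; i++)
           a[i] = f (i);
       }

   the implicit barrier at the end of the for is immediately followed by
   the implicit barrier at the end of the parallel, so the former is
   marked nowait here.  The scan above deliberately keeps the barrier
   whenever the child function (or the enclosing BLOCKs) contains
   addressable locals, because a queued task could still be using their
   address, as the comment explains.  */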
3199 static void
3200 remove_exit_barriers (struct omp_region *region)
3202 if (region->type == GIMPLE_OMP_PARALLEL)
3203 remove_exit_barrier (region);
3205 if (region->inner)
3207 region = region->inner;
3208 remove_exit_barriers (region);
3209 while (region->next)
3211 region = region->next;
3212 remove_exit_barriers (region);
3217 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3218 calls. These can't be declared as const functions, but
3219 within one parallel body they are constant, so they can be
3220 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3221 which are declared const. Similarly for task body, except
3222 that in untied task omp_get_thread_num () can change at any task
3223 scheduling point. */
3225 static void
3226 optimize_omp_library_calls (gimple entry_stmt)
3228 basic_block bb;
3229 gimple_stmt_iterator gsi;
3230 tree thr_num_id
3231 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3232 tree num_thr_id
3233 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3234 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3235 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3236 OMP_CLAUSE_UNTIED) != NULL);
3238 FOR_EACH_BB (bb)
3239 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3241 gimple call = gsi_stmt (gsi);
3242 tree decl;
3244 if (is_gimple_call (call)
3245 && (decl = gimple_call_fndecl (call))
3246 && DECL_EXTERNAL (decl)
3247 && TREE_PUBLIC (decl)
3248 && DECL_INITIAL (decl) == NULL)
3250 tree built_in;
3252 if (DECL_NAME (decl) == thr_num_id)
3254 /* In #pragma omp task untied omp_get_thread_num () can change
3255 during the execution of the task region. */
3256 if (untied_task)
3257 continue;
3258 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3260 else if (DECL_NAME (decl) == num_thr_id)
3261 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3262 else
3263 continue;
3265 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3266 || gimple_call_num_args (call) != 0)
3267 continue;
3269 if (flag_exceptions && !TREE_NOTHROW (decl))
3270 continue;
3272 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3273 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
3274 != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
3275 continue;
3277 gimple_call_set_fndecl (call, built_in);
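/* Sketch of the effect on user code inside an outlined parallel or tied
   task body (illustrative only):

     tid = omp_get_thread_num ();
     nth = omp_get_num_threads ();

   become calls to __builtin_omp_get_thread_num () and
   __builtin_omp_get_num_threads (), which are declared const, so repeated
   calls can be CSEd within the body.  The thread-num replacement is
   skipped for untied tasks, where the executing thread may change at any
   task scheduling point.  */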
3282 /* Expand the OpenMP parallel or task directive starting at REGION. */
3284 static void
3285 expand_omp_taskreg (struct omp_region *region)
3287 basic_block entry_bb, exit_bb, new_bb;
3288 struct function *child_cfun;
3289 tree child_fn, block, t, ws_args, *tp;
3290 gimple_stmt_iterator gsi;
3291 gimple entry_stmt, stmt;
3292 edge e;
3294 entry_stmt = last_stmt (region->entry);
3295 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3296 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3297 /* If this function has been already instrumented, make sure
3298 the child function isn't instrumented again. */
3299 child_cfun->after_tree_profile = cfun->after_tree_profile;
3301 entry_bb = region->entry;
3302 exit_bb = region->exit;
3304 if (is_combined_parallel (region))
3305 ws_args = region->ws_args;
3306 else
3307 ws_args = NULL_TREE;
3309 if (child_cfun->cfg)
3311 /* Due to inlining, it may happen that we have already outlined
3312 the region, in which case all we need to do is make the
3313 sub-graph unreachable and emit the parallel call. */
3314 edge entry_succ_e, exit_succ_e;
3315 gimple_stmt_iterator gsi;
3317 entry_succ_e = single_succ_edge (entry_bb);
3319 gsi = gsi_last_bb (entry_bb);
3320 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3321 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3322 gsi_remove (&gsi, true);
3324 new_bb = entry_bb;
3325 if (exit_bb)
3327 exit_succ_e = single_succ_edge (exit_bb);
3328 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3330 remove_edge_and_dominated_blocks (entry_succ_e);
3332 else
3334 /* If the parallel region needs data sent from the parent
3335 function, then the very first statement (except possible
3336 tree profile counter updates) of the parallel body
3337 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3338 &.OMP_DATA_O is passed as an argument to the child function,
3339 we need to replace it with the argument as seen by the child
3340 function.
3342 In most cases, this will end up being the identity assignment
3343 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3344 a function call that has been inlined, the original PARM_DECL
3345 .OMP_DATA_I may have been converted into a different local
3346 variable, in which case we need to keep the assignment. */
3347 if (gimple_omp_taskreg_data_arg (entry_stmt))
3349 basic_block entry_succ_bb = single_succ (entry_bb);
3350 gimple_stmt_iterator gsi;
3351 tree arg, narg;
3352 gimple parcopy_stmt = NULL;
3354 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3356 gimple stmt;
3358 gcc_assert (!gsi_end_p (gsi));
3359 stmt = gsi_stmt (gsi);
3360 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3361 continue;
3363 if (gimple_num_ops (stmt) == 2)
3365 tree arg = gimple_assign_rhs1 (stmt);
3367 /* We're ignoring the subcode because we're
3368 effectively doing a STRIP_NOPS. */
3370 if (TREE_CODE (arg) == ADDR_EXPR
3371 && TREE_OPERAND (arg, 0)
3372 == gimple_omp_taskreg_data_arg (entry_stmt))
3374 parcopy_stmt = stmt;
3375 break;
3380 gcc_assert (parcopy_stmt != NULL);
3381 arg = DECL_ARGUMENTS (child_fn);
3383 if (!gimple_in_ssa_p (cfun))
3385 if (gimple_assign_lhs (parcopy_stmt) == arg)
3386 gsi_remove (&gsi, true);
3387 else
3389 /* ?? Is setting the subcode really necessary ?? */
3390 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3391 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3394 else
3396 /* If we are in ssa form, we must load the value from the default
3397 definition of the argument. That should not be defined now,
3398 since the argument is not used uninitialized. */
3399 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3400 narg = make_ssa_name (arg, gimple_build_nop ());
3401 set_default_def (arg, narg);
3402 /* ?? Is setting the subcode really necessary ?? */
3403 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3404 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3405 update_stmt (parcopy_stmt);
3409 /* Declare local variables needed in CHILD_CFUN. */
3410 block = DECL_INITIAL (child_fn);
3411 BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
3412 /* The gimplifier could record temporaries in parallel/task block
3413 rather than in containing function's local_decls chain,
3414 which would mean cgraph missed finalizing them. Do it now. */
3415 for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
3416 if (TREE_CODE (t) == VAR_DECL
3417 && TREE_STATIC (t)
3418 && !DECL_EXTERNAL (t))
3419 varpool_finalize_decl (t);
3420 DECL_SAVED_TREE (child_fn) = NULL;
3421 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3422 TREE_USED (block) = 1;
3424 /* Reset DECL_CONTEXT on function arguments. */
3425 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
3426 DECL_CONTEXT (t) = child_fn;
3428 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3429 so that it can be moved to the child function. */
3430 gsi = gsi_last_bb (entry_bb);
3431 stmt = gsi_stmt (gsi);
3432 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3433 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3434 gsi_remove (&gsi, true);
3435 e = split_block (entry_bb, stmt);
3436 entry_bb = e->dest;
3437 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3439 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3440 if (exit_bb)
3442 gsi = gsi_last_bb (exit_bb);
3443 gcc_assert (!gsi_end_p (gsi)
3444 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3445 stmt = gimple_build_return (NULL);
3446 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3447 gsi_remove (&gsi, true);
3450 /* Move the parallel region into CHILD_CFUN. */
3452 if (gimple_in_ssa_p (cfun))
3454 push_cfun (child_cfun);
3455 init_tree_ssa (child_cfun);
3456 init_ssa_operands ();
3457 cfun->gimple_df->in_ssa_p = true;
3458 pop_cfun ();
3459 block = NULL_TREE;
3461 else
3462 block = gimple_block (entry_stmt);
3464 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3465 if (exit_bb)
3466 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3468 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3469 for (tp = &child_cfun->local_decls; *tp; )
3470 if (DECL_CONTEXT (TREE_VALUE (*tp)) != cfun->decl)
3471 tp = &TREE_CHAIN (*tp);
3472 else
3473 *tp = TREE_CHAIN (*tp);
3475 /* Inform the callgraph about the new function. */
3476 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3477 = cfun->curr_properties;
3478 cgraph_add_new_function (child_fn, true);
3480 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3481 fixed in a following pass. */
3482 push_cfun (child_cfun);
3483 if (optimize)
3484 optimize_omp_library_calls (entry_stmt);
3485 rebuild_cgraph_edges ();
3487 /* Some EH regions might become dead, see PR34608. If
3488 pass_cleanup_cfg isn't the first pass to happen with the
3489 new child, these dead EH edges might cause problems.
3490 Clean them up now. */
3491 if (flag_exceptions)
3493 basic_block bb;
3494 tree save_current = current_function_decl;
3495 bool changed = false;
3497 current_function_decl = child_fn;
3498 FOR_EACH_BB (bb)
3499 changed |= gimple_purge_dead_eh_edges (bb);
3500 if (changed)
3501 cleanup_tree_cfg ();
3502 current_function_decl = save_current;
3504 pop_cfun ();
3507 /* Emit a library call to launch the children threads. */
3508 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3509 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3510 else
3511 expand_task_call (new_bb, entry_stmt);
3512 update_ssa (TODO_update_ssa_only_virtuals);
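/* End-to-end sketch with invented names: for

     #pragma omp parallel shared(a)
       a = f (a);

   the child function declaration and the .omp_data_s record were created
   during scanning and lowering; this routine moves the region's blocks
   into the child CFUN, registers it with the callgraph, and arranges for
   the parent to end up with roughly

     .omp_data_o.a = a;                  -- from lower_send_shared_vars
     GOMP_parallel_start (child_fn, &.omp_data_o, 0);
     child_fn (&.omp_data_o);
     GOMP_parallel_end ();

   (a task region gets a single GOMP_task call instead).  */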
3516 /* A subroutine of expand_omp_for. Generate code for a parallel
3517 loop with any schedule. Given parameters:
3519 for (V = N1; V cond N2; V += STEP) BODY;
3521 where COND is "<" or ">", we generate pseudocode
3523 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3524 if (more) goto L0; else goto L3;
3526 V = istart0;
3527 iend = iend0;
3529 BODY;
3530 V += STEP;
3531 if (V cond iend) goto L1; else goto L2;
3533 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3536 If this is a combined omp parallel loop, instead of the call to
3537 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3539 For collapsed loops, given parameters:
3540 collapse(3)
3541 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3542 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3543 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3544 BODY;
3546 we generate pseudocode
3548 if (cond3 is <)
3549 adj = STEP3 - 1;
3550 else
3551 adj = STEP3 + 1;
3552 count3 = (adj + N32 - N31) / STEP3;
3553 if (cond2 is <)
3554 adj = STEP2 - 1;
3555 else
3556 adj = STEP2 + 1;
3557 count2 = (adj + N22 - N21) / STEP2;
3558 if (cond1 is <)
3559 adj = STEP1 - 1;
3560 else
3561 adj = STEP1 + 1;
3562 count1 = (adj + N12 - N11) / STEP1;
3563 count = count1 * count2 * count3;
3564 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3565 if (more) goto L0; else goto L3;
3567 V = istart0;
3568 T = V;
3569 V3 = N31 + (T % count3) * STEP3;
3570 T = T / count3;
3571 V2 = N21 + (T % count2) * STEP2;
3572 T = T / count2;
3573 V1 = N11 + T * STEP1;
3574 iend = iend0;
3576 BODY;
3577 V += 1;
3578 if (V < iend) goto L10; else goto L2;
3579 L10:
3580 V3 += STEP3;
3581 if (V3 cond3 N32) goto L1; else goto L11;
3582 L11:
3583 V3 = N31;
3584 V2 += STEP2;
3585 if (V2 cond2 N22) goto L1; else goto L12;
3586 L12:
3587 V2 = N21;
3588 V1 += STEP1;
3589 goto L1;
3591 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3596 static void
3597 expand_omp_for_generic (struct omp_region *region,
3598 struct omp_for_data *fd,
3599 enum built_in_function start_fn,
3600 enum built_in_function next_fn)
3602 tree type, istart0, iend0, iend;
3603 tree t, vmain, vback, bias = NULL_TREE;
3604 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3605 basic_block l2_bb = NULL, l3_bb = NULL;
3606 gimple_stmt_iterator gsi;
3607 gimple stmt;
3608 bool in_combined_parallel = is_combined_parallel (region);
3609 bool broken_loop = region->cont == NULL;
3610 edge e, ne;
3611 tree *counts = NULL;
3612 int i;
3614 gcc_assert (!broken_loop || !in_combined_parallel);
3615 gcc_assert (fd->iter_type == long_integer_type_node
3616 || !in_combined_parallel);
3618 type = TREE_TYPE (fd->loop.v);
3619 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3620 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3621 TREE_ADDRESSABLE (istart0) = 1;
3622 TREE_ADDRESSABLE (iend0) = 1;
3623 if (gimple_in_ssa_p (cfun))
3625 add_referenced_var (istart0);
3626 add_referenced_var (iend0);
3629 /* See if we need to bias by LLONG_MIN. */
3630 if (fd->iter_type == long_long_unsigned_type_node
3631 && TREE_CODE (type) == INTEGER_TYPE
3632 && !TYPE_UNSIGNED (type))
3634 tree n1, n2;
3636 if (fd->loop.cond_code == LT_EXPR)
3638 n1 = fd->loop.n1;
3639 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3641 else
3643 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3644 n2 = fd->loop.n1;
3646 if (TREE_CODE (n1) != INTEGER_CST
3647 || TREE_CODE (n2) != INTEGER_CST
3648 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3649 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3652 entry_bb = region->entry;
3653 cont_bb = region->cont;
3654 collapse_bb = NULL;
3655 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3656 gcc_assert (broken_loop
3657 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3658 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3659 l1_bb = single_succ (l0_bb);
3660 if (!broken_loop)
3662 l2_bb = create_empty_bb (cont_bb);
3663 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3664 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3666 else
3667 l2_bb = NULL;
3668 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3669 exit_bb = region->exit;
3671 gsi = gsi_last_bb (entry_bb);
3673 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3674 if (fd->collapse > 1)
3676 /* Collapsed loops need work for expansion in SSA form. */
3677 gcc_assert (!gimple_in_ssa_p (cfun));
3678 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3679 for (i = 0; i < fd->collapse; i++)
3681 tree itype = TREE_TYPE (fd->loops[i].v);
3683 if (POINTER_TYPE_P (itype))
3684 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3685 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3686 ? -1 : 1));
3687 t = fold_build2 (PLUS_EXPR, itype,
3688 fold_convert (itype, fd->loops[i].step), t);
3689 t = fold_build2 (PLUS_EXPR, itype, t,
3690 fold_convert (itype, fd->loops[i].n2));
3691 t = fold_build2 (MINUS_EXPR, itype, t,
3692 fold_convert (itype, fd->loops[i].n1));
3693 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3694 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3695 fold_build1 (NEGATE_EXPR, itype, t),
3696 fold_build1 (NEGATE_EXPR, itype,
3697 fold_convert (itype,
3698 fd->loops[i].step)));
3699 else
3700 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3701 fold_convert (itype, fd->loops[i].step));
3702 t = fold_convert (type, t);
3703 if (TREE_CODE (t) == INTEGER_CST)
3704 counts[i] = t;
3705 else
3707 counts[i] = create_tmp_var (type, ".count");
3708 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3709 true, GSI_SAME_STMT);
3710 stmt = gimple_build_assign (counts[i], t);
3711 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3713 if (SSA_VAR_P (fd->loop.n2))
3715 if (i == 0)
3716 t = counts[0];
3717 else
3719 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3720 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3721 true, GSI_SAME_STMT);
3723 stmt = gimple_build_assign (fd->loop.n2, t);
3724 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3728 if (in_combined_parallel)
3730 /* In a combined parallel loop, emit a call to
3731 GOMP_loop_foo_next. */
3732 t = build_call_expr (built_in_decls[next_fn], 2,
3733 build_fold_addr_expr (istart0),
3734 build_fold_addr_expr (iend0));
3736 else
3738 tree t0, t1, t2, t3, t4;
3739 /* If this is not a combined parallel loop, emit a call to
3740 GOMP_loop_foo_start in ENTRY_BB. */
3741 t4 = build_fold_addr_expr (iend0);
3742 t3 = build_fold_addr_expr (istart0);
3743 t2 = fold_convert (fd->iter_type, fd->loop.step);
3744 if (POINTER_TYPE_P (type)
3745 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3747 /* Avoid casting pointers to an integer of a different size. */
3748 tree itype
3749 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3750 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3751 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3753 else
3755 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3756 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3758 if (bias)
3760 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3761 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3763 if (fd->iter_type == long_integer_type_node)
3765 if (fd->chunk_size)
3767 t = fold_convert (fd->iter_type, fd->chunk_size);
3768 t = build_call_expr (built_in_decls[start_fn], 6,
3769 t0, t1, t2, t, t3, t4);
3771 else
3772 t = build_call_expr (built_in_decls[start_fn], 5,
3773 t0, t1, t2, t3, t4);
3775 else
3777 tree t5;
3778 tree c_bool_type;
3780 /* The GOMP_loop_ull_*start functions have an additional boolean
3781 argument, true for < loops and false for > loops.
3782 In Fortran, the C bool type can be different from
3783 boolean_type_node. */
3784 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3785 t5 = build_int_cst (c_bool_type,
3786 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3787 if (fd->chunk_size)
3789 t = fold_convert (fd->iter_type, fd->chunk_size);
3790 t = build_call_expr (built_in_decls[start_fn], 7,
3791 t5, t0, t1, t2, t, t3, t4);
3793 else
3794 t = build_call_expr (built_in_decls[start_fn], 6,
3795 t5, t0, t1, t2, t3, t4);
3798 if (TREE_TYPE (t) != boolean_type_node)
3799 t = fold_build2 (NE_EXPR, boolean_type_node,
3800 t, build_int_cst (TREE_TYPE (t), 0));
3801 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3802 true, GSI_SAME_STMT);
3803 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3805 /* Remove the GIMPLE_OMP_FOR statement. */
3806 gsi_remove (&gsi, true);
3808 /* Iteration setup for sequential loop goes in L0_BB. */
3809 gsi = gsi_start_bb (l0_bb);
3810 if (bias)
3811 t = fold_convert (type, fold_build2 (MINUS_EXPR, fd->iter_type,
3812 istart0, bias));
3813 else
3814 t = fold_convert (type, istart0);
3815 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3816 false, GSI_CONTINUE_LINKING);
3817 stmt = gimple_build_assign (fd->loop.v, t);
3818 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3820 if (bias)
3821 t = fold_convert (type, fold_build2 (MINUS_EXPR, fd->iter_type,
3822 iend0, bias));
3823 else
3824 t = fold_convert (type, iend0);
3825 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3826 false, GSI_CONTINUE_LINKING);
3827 if (fd->collapse > 1)
3829 tree tem = create_tmp_var (type, ".tem");
3831 stmt = gimple_build_assign (tem, fd->loop.v);
3832 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3833 for (i = fd->collapse - 1; i >= 0; i--)
3835 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3836 itype = vtype;
3837 if (POINTER_TYPE_P (vtype))
3838 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3839 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3840 t = fold_convert (itype, t);
3841 t = fold_build2 (MULT_EXPR, itype, t, fd->loops[i].step);
3842 if (POINTER_TYPE_P (vtype))
3843 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3844 fd->loops[i].n1, fold_convert (sizetype, t));
3845 else
3846 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3847 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3848 false, GSI_CONTINUE_LINKING);
3849 stmt = gimple_build_assign (fd->loops[i].v, t);
3850 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3851 if (i != 0)
3853 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3854 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3855 false, GSI_CONTINUE_LINKING);
3856 stmt = gimple_build_assign (tem, t);
3857 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3862 if (!broken_loop)
3864 /* Code to control the increment and predicate for the sequential
3865 loop goes in the CONT_BB. */
3866 gsi = gsi_last_bb (cont_bb);
3867 stmt = gsi_stmt (gsi);
3868 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3869 vmain = gimple_omp_continue_control_use (stmt);
3870 vback = gimple_omp_continue_control_def (stmt);
3872 if (POINTER_TYPE_P (type))
3873 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3874 fold_convert (sizetype, fd->loop.step));
3875 else
3876 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3877 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3878 true, GSI_SAME_STMT);
3879 stmt = gimple_build_assign (vback, t);
3880 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3882 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3883 stmt = gimple_build_cond_empty (t);
3884 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3886 /* Remove GIMPLE_OMP_CONTINUE. */
3887 gsi_remove (&gsi, true);
3889 if (fd->collapse > 1)
3891 basic_block last_bb, bb;
3893 last_bb = cont_bb;
3894 for (i = fd->collapse - 1; i >= 0; i--)
3896 tree vtype = TREE_TYPE (fd->loops[i].v);
3898 bb = create_empty_bb (last_bb);
3899 gsi = gsi_start_bb (bb);
3901 if (i < fd->collapse - 1)
3903 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3904 e->probability = REG_BR_PROB_BASE / 8;
3906 t = fd->loops[i + 1].n1;
3907 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3908 false, GSI_CONTINUE_LINKING);
3909 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3910 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3912 else
3913 collapse_bb = bb;
3915 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3917 if (POINTER_TYPE_P (vtype))
3918 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3919 fd->loops[i].v,
3920 fold_convert (sizetype, fd->loops[i].step));
3921 else
3922 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3923 fd->loops[i].step);
3924 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3925 false, GSI_CONTINUE_LINKING);
3926 stmt = gimple_build_assign (fd->loops[i].v, t);
3927 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3929 if (i > 0)
3931 t = fd->loops[i].n2;
3932 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3933 false, GSI_CONTINUE_LINKING);
3934 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
3935 fd->loops[i].v, t);
3936 stmt = gimple_build_cond_empty (t);
3937 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3938 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
3939 e->probability = REG_BR_PROB_BASE * 7 / 8;
3941 else
3942 make_edge (bb, l1_bb, EDGE_FALLTHRU);
3943 last_bb = bb;
3947 /* Emit code to get the next parallel iteration in L2_BB. */
3948 gsi = gsi_start_bb (l2_bb);
3950 t = build_call_expr (built_in_decls[next_fn], 2,
3951 build_fold_addr_expr (istart0),
3952 build_fold_addr_expr (iend0));
3953 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3954 false, GSI_CONTINUE_LINKING);
3955 if (TREE_TYPE (t) != boolean_type_node)
3956 t = fold_build2 (NE_EXPR, boolean_type_node,
3957 t, build_int_cst (TREE_TYPE (t), 0));
3958 stmt = gimple_build_cond_empty (t);
3959 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3962 /* Add the loop cleanup function. */
3963 gsi = gsi_last_bb (exit_bb);
3964 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
3965 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
3966 else
3967 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
3968 stmt = gimple_build_call (t, 0);
3969 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3970 gsi_remove (&gsi, true);
3972 /* Connect the new blocks. */
3973 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
3974 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
3976 if (!broken_loop)
3978 gimple_seq phis;
3980 e = find_edge (cont_bb, l3_bb);
3981 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
3983 phis = phi_nodes (l3_bb);
3984 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
3986 gimple phi = gsi_stmt (gsi);
3987 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
3988 PHI_ARG_DEF_FROM_EDGE (phi, e));
3990 remove_edge (e);
3992 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
3993 if (fd->collapse > 1)
3995 e = find_edge (cont_bb, l1_bb);
3996 remove_edge (e);
3997 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
3999 else
4001 e = find_edge (cont_bb, l1_bb);
4002 e->flags = EDGE_TRUE_VALUE;
4004 e->probability = REG_BR_PROB_BASE * 7 / 8;
4005 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4006 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4008 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4009 recompute_dominator (CDI_DOMINATORS, l2_bb));
4010 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4011 recompute_dominator (CDI_DOMINATORS, l3_bb));
4012 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4013 recompute_dominator (CDI_DOMINATORS, l0_bb));
4014 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4015 recompute_dominator (CDI_DOMINATORS, l1_bb));
4020 /* A subroutine of expand_omp_for. Generate code for a parallel
4021 loop with static schedule and no specified chunk size. Given
4022 parameters:
4024 for (V = N1; V cond N2; V += STEP) BODY;
4026 where COND is "<" or ">", we generate pseudocode
4028 if (cond is <)
4029 adj = STEP - 1;
4030 else
4031 adj = STEP + 1;
4032 if ((__typeof (V)) -1 > 0 && cond is >)
4033 n = -(adj + N2 - N1) / -STEP;
4034 else
4035 n = (adj + N2 - N1) / STEP;
4036 q = n / nthreads;
4037 q += (q * nthreads != n);
4038 s0 = q * threadid;
4039 e0 = min(s0 + q, n);
4040 V = s0 * STEP + N1;
4041 if (s0 >= e0) goto L2; else goto L0;
4042 L0:
4043 e = e0 * STEP + N1;
4044 L1:
4045 BODY;
4046 V += STEP;
4047 if (V cond e) goto L1;
4048 L2:
4050 */
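/* For instance, assuming four threads and the loop

       #pragma omp for schedule(static)
       for (i = 0; i < 10; i++)
         body (i);

   the blocking above gives q = 3, so thread 0 runs iterations [0,3),
   thread 1 runs [3,6), thread 2 runs [6,9) and thread 3 runs [9,10);
   a thread whose s0 >= e0 skips straight to L2.  Illustrative
   example only.  */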
4051 static void
4052 expand_omp_for_static_nochunk (struct omp_region *region,
4053 struct omp_for_data *fd)
4055 tree n, q, s0, e0, e, t, nthreads, threadid;
4056 tree type, itype, vmain, vback;
4057 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4058 basic_block fin_bb;
4059 gimple_stmt_iterator gsi;
4060 gimple stmt;
4062 itype = type = TREE_TYPE (fd->loop.v);
4063 if (POINTER_TYPE_P (type))
4064 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4066 entry_bb = region->entry;
4067 cont_bb = region->cont;
4068 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4069 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4070 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4071 body_bb = single_succ (seq_start_bb);
4072 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4073 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4074 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4075 exit_bb = region->exit;
4077 /* Iteration space partitioning goes in ENTRY_BB. */
4078 gsi = gsi_last_bb (entry_bb);
4079 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4081 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4082 t = fold_convert (itype, t);
4083 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4084 true, GSI_SAME_STMT);
4086 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4087 t = fold_convert (itype, t);
4088 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4089 true, GSI_SAME_STMT);
4091 fd->loop.n1
4092 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4093 true, NULL_TREE, true, GSI_SAME_STMT);
4094 fd->loop.n2
4095 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4096 true, NULL_TREE, true, GSI_SAME_STMT);
4097 fd->loop.step
4098 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4099 true, NULL_TREE, true, GSI_SAME_STMT);
4101 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4102 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4103 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4104 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4105 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4106 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4107 fold_build1 (NEGATE_EXPR, itype, t),
4108 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4109 else
4110 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4111 t = fold_convert (itype, t);
4112 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4114 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4115 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4117 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4118 t = fold_build2 (NE_EXPR, itype, t, n);
4119 t = fold_build2 (PLUS_EXPR, itype, q, t);
4120 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4122 t = build2 (MULT_EXPR, itype, q, threadid);
4123 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4125 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4126 t = fold_build2 (MIN_EXPR, itype, t, n);
4127 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4129 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4130 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4132 /* Remove the GIMPLE_OMP_FOR statement. */
4133 gsi_remove (&gsi, true);
4135 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4136 gsi = gsi_start_bb (seq_start_bb);
4138 t = fold_convert (itype, s0);
4139 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4140 if (POINTER_TYPE_P (type))
4141 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4142 fold_convert (sizetype, t));
4143 else
4144 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4145 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4146 false, GSI_CONTINUE_LINKING);
4147 stmt = gimple_build_assign (fd->loop.v, t);
4148 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4150 t = fold_convert (itype, e0);
4151 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4152 if (POINTER_TYPE_P (type))
4153 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4154 fold_convert (sizetype, t));
4155 else
4156 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4157 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4158 false, GSI_CONTINUE_LINKING);
4160 /* The code controlling the sequential loop replaces the
4161 GIMPLE_OMP_CONTINUE. */
4162 gsi = gsi_last_bb (cont_bb);
4163 stmt = gsi_stmt (gsi);
4164 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4165 vmain = gimple_omp_continue_control_use (stmt);
4166 vback = gimple_omp_continue_control_def (stmt);
4168 if (POINTER_TYPE_P (type))
4169 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4170 fold_convert (sizetype, fd->loop.step));
4171 else
4172 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4173 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4174 true, GSI_SAME_STMT);
4175 stmt = gimple_build_assign (vback, t);
4176 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4178 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4179 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4181 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4182 gsi_remove (&gsi, true);
4184 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4185 gsi = gsi_last_bb (exit_bb);
4186 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4187 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4188 false, GSI_SAME_STMT);
4189 gsi_remove (&gsi, true);
4191 /* Connect all the blocks. */
4192 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4193 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4195 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4196 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4198 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4199 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4200 recompute_dominator (CDI_DOMINATORS, body_bb));
4201 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4202 recompute_dominator (CDI_DOMINATORS, fin_bb));
4206 /* A subroutine of expand_omp_for. Generate code for a parallel
4207 loop with static schedule and a specified chunk size. Given
4208 parameters:
4210 for (V = N1; V cond N2; V += STEP) BODY;
4212 where COND is "<" or ">", we generate pseudocode
4214 if (cond is <)
4215 adj = STEP - 1;
4216 else
4217 adj = STEP + 1;
4218 if ((__typeof (V)) -1 > 0 && cond is >)
4219 n = -(adj + N2 - N1) / -STEP;
4220 else
4221 n = (adj + N2 - N1) / STEP;
4222 trip = 0;
4223 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4224 here so that V is defined
4225 if the loop is not entered
4226 L0:
4227 s0 = (trip * nthreads + threadid) * CHUNK;
4228 e0 = min(s0 + CHUNK, n);
4229 if (s0 < n) goto L1; else goto L4;
4230 L1:
4231 V = s0 * STEP + N1;
4232 e = e0 * STEP + N1;
4233 L2:
4234 BODY;
4235 V += STEP;
4236 if (V cond e) goto L2; else goto L3;
4237 L3:
4238 trip += 1;
4239 goto L0;
4240 L4:
4242 */
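/* For instance, assuming four threads and the loop

       #pragma omp for schedule(static, 2)
       for (i = 0; i < 10; i++)
         body (i);

   trip 0 hands out s0 = (0 * 4 + threadid) * 2, i.e. iterations
   [0,2), [2,4), [4,6) and [6,8) to threads 0..3; on trip 1 only
   thread 0 finds s0 = 8 < n and runs [8,10), the others jump to L4.
   Illustrative example only.  */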
4243 static void
4244 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4246 tree n, s0, e0, e, t;
4247 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4248 tree type, itype, v_main, v_back, v_extra;
4249 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4250 basic_block trip_update_bb, cont_bb, fin_bb;
4251 gimple_stmt_iterator si;
4252 gimple stmt;
4253 edge se;
4255 itype = type = TREE_TYPE (fd->loop.v);
4256 if (POINTER_TYPE_P (type))
4257 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4259 entry_bb = region->entry;
4260 se = split_block (entry_bb, last_stmt (entry_bb));
4261 entry_bb = se->src;
4262 iter_part_bb = se->dest;
4263 cont_bb = region->cont;
4264 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4265 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4266 == FALLTHRU_EDGE (cont_bb)->dest);
4267 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4268 body_bb = single_succ (seq_start_bb);
4269 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4270 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4271 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4272 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4273 exit_bb = region->exit;
4275 /* Trip and adjustment setup goes in ENTRY_BB. */
4276 si = gsi_last_bb (entry_bb);
4277 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4279 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4280 t = fold_convert (itype, t);
4281 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4282 true, GSI_SAME_STMT);
4284 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4285 t = fold_convert (itype, t);
4286 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4287 true, GSI_SAME_STMT);
4289 fd->loop.n1
4290 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4291 true, NULL_TREE, true, GSI_SAME_STMT);
4292 fd->loop.n2
4293 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4294 true, NULL_TREE, true, GSI_SAME_STMT);
4295 fd->loop.step
4296 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4297 true, NULL_TREE, true, GSI_SAME_STMT);
4298 fd->chunk_size
4299 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4300 true, NULL_TREE, true, GSI_SAME_STMT);
4302 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4303 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4304 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4305 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4306 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4307 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4308 fold_build1 (NEGATE_EXPR, itype, t),
4309 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4310 else
4311 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4312 t = fold_convert (itype, t);
4313 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4314 true, GSI_SAME_STMT);
4316 trip_var = create_tmp_var (itype, ".trip");
4317 if (gimple_in_ssa_p (cfun))
4319 add_referenced_var (trip_var);
4320 trip_init = make_ssa_name (trip_var, NULL);
4321 trip_main = make_ssa_name (trip_var, NULL);
4322 trip_back = make_ssa_name (trip_var, NULL);
4324 else
4326 trip_init = trip_var;
4327 trip_main = trip_var;
4328 trip_back = trip_var;
4331 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4332 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4334 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4335 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4336 if (POINTER_TYPE_P (type))
4337 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4338 fold_convert (sizetype, t));
4339 else
4340 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4341 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4342 true, GSI_SAME_STMT);
4344 /* Remove the GIMPLE_OMP_FOR. */
4345 gsi_remove (&si, true);
4347 /* Iteration space partitioning goes in ITER_PART_BB. */
4348 si = gsi_last_bb (iter_part_bb);
4350 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4351 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4352 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4353 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4354 false, GSI_CONTINUE_LINKING);
4356 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4357 t = fold_build2 (MIN_EXPR, itype, t, n);
4358 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4359 false, GSI_CONTINUE_LINKING);
4361 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4362 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4364 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4365 si = gsi_start_bb (seq_start_bb);
4367 t = fold_convert (itype, s0);
4368 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4369 if (POINTER_TYPE_P (type))
4370 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4371 fold_convert (sizetype, t));
4372 else
4373 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4374 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4375 false, GSI_CONTINUE_LINKING);
4376 stmt = gimple_build_assign (fd->loop.v, t);
4377 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4379 t = fold_convert (itype, e0);
4380 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4381 if (POINTER_TYPE_P (type))
4382 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4383 fold_convert (sizetype, t));
4384 else
4385 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4386 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4387 false, GSI_CONTINUE_LINKING);
4389 /* The code controlling the sequential loop goes in CONT_BB,
4390 replacing the GIMPLE_OMP_CONTINUE. */
4391 si = gsi_last_bb (cont_bb);
4392 stmt = gsi_stmt (si);
4393 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4394 v_main = gimple_omp_continue_control_use (stmt);
4395 v_back = gimple_omp_continue_control_def (stmt);
4397 if (POINTER_TYPE_P (type))
4398 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4399 fold_convert (sizetype, fd->loop.step));
4400 else
4401 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4402 stmt = gimple_build_assign (v_back, t);
4403 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4405 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4406 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4408 /* Remove GIMPLE_OMP_CONTINUE. */
4409 gsi_remove (&si, true);
4411 /* Trip update code goes into TRIP_UPDATE_BB. */
4412 si = gsi_start_bb (trip_update_bb);
4414 t = build_int_cst (itype, 1);
4415 t = build2 (PLUS_EXPR, itype, trip_main, t);
4416 stmt = gimple_build_assign (trip_back, t);
4417 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4419 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4420 si = gsi_last_bb (exit_bb);
4421 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4422 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4423 false, GSI_SAME_STMT);
4424 gsi_remove (&si, true);
4426 /* Connect the new blocks. */
4427 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4428 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4430 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4431 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4433 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4435 if (gimple_in_ssa_p (cfun))
4437 gimple_stmt_iterator psi;
4438 gimple phi;
4439 edge re, ene;
4440 edge_var_map_vector head;
4441 edge_var_map *vm;
4442 size_t i;
4444 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4445 remove arguments of the phi nodes in fin_bb. We need to create
4446 appropriate phi nodes in iter_part_bb instead. */
4447 se = single_pred_edge (fin_bb);
4448 re = single_succ_edge (trip_update_bb);
4449 head = redirect_edge_var_map_vector (re);
4450 ene = single_succ_edge (entry_bb);
4452 psi = gsi_start_phis (fin_bb);
4453 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4454 gsi_next (&psi), ++i)
4456 gimple nphi;
4458 phi = gsi_stmt (psi);
4459 t = gimple_phi_result (phi);
4460 gcc_assert (t == redirect_edge_var_map_result (vm));
4461 nphi = create_phi_node (t, iter_part_bb);
4462 SSA_NAME_DEF_STMT (t) = nphi;
4464 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4465 /* A special case -- fd->loop.v is not yet computed in
4466 iter_part_bb; we need to use v_extra instead. */
4467 if (t == fd->loop.v)
4468 t = v_extra;
4469 add_phi_arg (nphi, t, ene);
4470 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re);
4472 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4473 redirect_edge_var_map_clear (re);
4474 while (1)
4476 psi = gsi_start_phis (fin_bb);
4477 if (gsi_end_p (psi))
4478 break;
4479 remove_phi_node (&psi, false);
4482 /* Make phi node for trip. */
4483 phi = create_phi_node (trip_main, iter_part_bb);
4484 SSA_NAME_DEF_STMT (trip_main) = phi;
4485 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
4486 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
4489 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4490 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4491 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4492 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4493 recompute_dominator (CDI_DOMINATORS, fin_bb));
4494 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4495 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4496 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4497 recompute_dominator (CDI_DOMINATORS, body_bb));
4501 /* Expand the OpenMP loop defined by REGION. */
4503 static void
4504 expand_omp_for (struct omp_region *region)
4506 struct omp_for_data fd;
4507 struct omp_for_data_loop *loops;
4509 loops
4510 = (struct omp_for_data_loop *)
4511 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4512 * sizeof (struct omp_for_data_loop));
4513 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4514 region->sched_kind = fd.sched_kind;
4516 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4517 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4518 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4519 if (region->cont)
4521 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4522 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4523 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4526 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4527 && !fd.have_ordered
4528 && fd.collapse == 1
4529 && region->cont != NULL)
4531 if (fd.chunk_size == NULL)
4532 expand_omp_for_static_nochunk (region, &fd);
4533 else
4534 expand_omp_for_static_chunk (region, &fd);
4536 else
4538 int fn_index, start_ix, next_ix;
4540 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4541 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4542 ? 3 : fd.sched_kind;
4543 fn_index += fd.have_ordered * 4;
4544 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4545 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
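/* For example, schedule(dynamic) combined with an ordered clause gives
   fn_index = 1 + 4 = 5, selecting GOMP_loop_ordered_dynamic_start and
   GOMP_loop_ordered_dynamic_next -- assuming the contiguous layout of
   the GOMP_loop_* builtins that this arithmetic relies on.  */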
4546 if (fd.iter_type == long_long_unsigned_type_node)
4548 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4549 - BUILT_IN_GOMP_LOOP_STATIC_START;
4550 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4551 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4553 expand_omp_for_generic (region, &fd, start_ix, next_ix);
4556 update_ssa (TODO_update_ssa_only_virtuals);
4560 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4562 v = GOMP_sections_start (n);
4563 L0:
4564 switch (v)
4565 {
4566 case 0:
4567 goto L2;
4568 case 1:
4569 section 1;
4570 goto L1;
4571 case 2:
4572 ...
4573 case n:
4574 ...
4575 default:
4576 abort ();
4577 }
4578 L1:
4579 v = GOMP_sections_next ();
4580 goto L0;
4581 L2:
4582 reduction;
4584 If this is a combined parallel sections, replace the call to
4585 GOMP_sections_start with a call to GOMP_sections_next. */
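/* For instance,

       #pragma omp sections
       {
         #pragma omp section
           a ();
         #pragma omp section
           b ();
       }

   yields a switch in which case 1 jumps to the block of a (), case 2
   to the block of b (), case 0 exits to L2 and the default case traps;
   each thread keeps calling GOMP_sections_next () until it returns 0.
   Illustrative example only.  */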
4587 static void
4588 expand_omp_sections (struct omp_region *region)
4590 tree t, u, vin = NULL, vmain, vnext, l1, l2;
4591 VEC (tree,heap) *label_vec;
4592 unsigned len;
4593 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4594 gimple_stmt_iterator si, switch_si;
4595 gimple sections_stmt, stmt, cont;
4596 edge_iterator ei;
4597 edge e;
4598 struct omp_region *inner;
4599 unsigned i, casei;
4600 bool exit_reachable = region->cont != NULL;
4602 gcc_assert (exit_reachable == (region->exit != NULL));
4603 entry_bb = region->entry;
4604 l0_bb = single_succ (entry_bb);
4605 l1_bb = region->cont;
4606 l2_bb = region->exit;
4607 if (exit_reachable)
4609 if (single_pred (l2_bb) == l0_bb)
4610 l2 = gimple_block_label (l2_bb);
4611 else
4613 /* This can happen if there are reductions. */
4614 len = EDGE_COUNT (l0_bb->succs);
4615 gcc_assert (len > 0);
4616 e = EDGE_SUCC (l0_bb, len - 1);
4617 si = gsi_last_bb (e->dest);
4618 l2 = NULL_TREE;
4619 if (gsi_end_p (si)
4620 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4621 l2 = gimple_block_label (e->dest);
4622 else
4623 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4625 si = gsi_last_bb (e->dest);
4626 if (gsi_end_p (si)
4627 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4629 l2 = gimple_block_label (e->dest);
4630 break;
4634 default_bb = create_empty_bb (l1_bb->prev_bb);
4635 l1 = gimple_block_label (l1_bb);
4637 else
4639 default_bb = create_empty_bb (l0_bb);
4640 l1 = NULL_TREE;
4641 l2 = gimple_block_label (default_bb);
4644 /* We will build a switch() with enough cases for all the
4645 GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
4646 and a default case to abort if something goes wrong. */
4647 len = EDGE_COUNT (l0_bb->succs);
4649 /* Use VEC_quick_push on label_vec throughout, since we know the size
4650 in advance. */
4651 label_vec = VEC_alloc (tree, heap, len);
4653 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4654 GIMPLE_OMP_SECTIONS statement. */
4655 si = gsi_last_bb (entry_bb);
4656 sections_stmt = gsi_stmt (si);
4657 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4658 vin = gimple_omp_sections_control (sections_stmt);
4659 if (!is_combined_parallel (region))
4661 /* If we are not inside a combined parallel+sections region,
4662 call GOMP_sections_start. */
4663 t = build_int_cst (unsigned_type_node,
4664 exit_reachable ? len - 1 : len);
4665 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4666 stmt = gimple_build_call (u, 1, t);
4668 else
4670 /* Otherwise, call GOMP_sections_next. */
4671 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4672 stmt = gimple_build_call (u, 0);
4674 gimple_call_set_lhs (stmt, vin);
4675 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4676 gsi_remove (&si, true);
4678 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4679 L0_BB. */
4680 switch_si = gsi_last_bb (l0_bb);
4681 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4682 if (exit_reachable)
4684 cont = last_stmt (l1_bb);
4685 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4686 vmain = gimple_omp_continue_control_use (cont);
4687 vnext = gimple_omp_continue_control_def (cont);
4689 else
4691 vmain = vin;
4692 vnext = NULL_TREE;
4695 i = 0;
4696 if (exit_reachable)
4698 t = build3 (CASE_LABEL_EXPR, void_type_node,
4699 build_int_cst (unsigned_type_node, 0), NULL, l2);
4700 VEC_quick_push (tree, label_vec, t);
4701 i++;
4704 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4705 for (inner = region->inner, casei = 1;
4706 inner;
4707 inner = inner->next, i++, casei++)
4709 basic_block s_entry_bb, s_exit_bb;
4711 /* Skip optional reduction region. */
4712 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4714 --i;
4715 --casei;
4716 continue;
4719 s_entry_bb = inner->entry;
4720 s_exit_bb = inner->exit;
4722 t = gimple_block_label (s_entry_bb);
4723 u = build_int_cst (unsigned_type_node, casei);
4724 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
4725 VEC_quick_push (tree, label_vec, u);
4727 si = gsi_last_bb (s_entry_bb);
4728 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4729 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4730 gsi_remove (&si, true);
4731 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4733 if (s_exit_bb == NULL)
4734 continue;
4736 si = gsi_last_bb (s_exit_bb);
4737 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4738 gsi_remove (&si, true);
4740 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4743 /* Error handling code goes in DEFAULT_BB. */
4744 t = gimple_block_label (default_bb);
4745 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
4746 make_edge (l0_bb, default_bb, 0);
4748 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4749 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4750 gsi_remove (&switch_si, true);
4751 VEC_free (tree, heap, label_vec);
4753 si = gsi_start_bb (default_bb);
4754 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4755 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4757 if (exit_reachable)
4759 /* Code to get the next section goes in L1_BB. */
4760 si = gsi_last_bb (l1_bb);
4761 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4763 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4764 gimple_call_set_lhs (stmt, vnext);
4765 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4766 gsi_remove (&si, true);
4768 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4770 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4771 si = gsi_last_bb (l2_bb);
4772 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4773 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4774 else
4775 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4776 stmt = gimple_build_call (t, 0);
4777 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4778 gsi_remove (&si, true);
4781 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4785 /* Expand code for an OpenMP single directive. We've already expanded
4786 much of the code; here we simply place the GOMP_barrier call. */
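/* For instance, for

       #pragma omp single
         body ();

   the entry and exit markers are removed here and, unless the
   directive carried nowait (and no copyprivate clause forces the
   wait), a GOMP_barrier () call is placed at the exit block so all
   threads synchronize before leaving the region.  Illustrative
   example only.  */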
4788 static void
4789 expand_omp_single (struct omp_region *region)
4791 basic_block entry_bb, exit_bb;
4792 gimple_stmt_iterator si;
4793 bool need_barrier = false;
4795 entry_bb = region->entry;
4796 exit_bb = region->exit;
4798 si = gsi_last_bb (entry_bb);
4799 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4800 be removed. We need to ensure that the thread that entered the single
4801 does not exit before the data is copied out by the other threads. */
4802 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4803 OMP_CLAUSE_COPYPRIVATE))
4804 need_barrier = true;
4805 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4806 gsi_remove (&si, true);
4807 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4809 si = gsi_last_bb (exit_bb);
4810 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4811 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4812 false, GSI_SAME_STMT);
4813 gsi_remove (&si, true);
4814 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4818 /* Generic expansion for OpenMP synchronization directives: master,
4819 ordered and critical. All we need to do here is remove the entry
4820 and exit markers for REGION. */
4822 static void
4823 expand_omp_synch (struct omp_region *region)
4825 basic_block entry_bb, exit_bb;
4826 gimple_stmt_iterator si;
4828 entry_bb = region->entry;
4829 exit_bb = region->exit;
4831 si = gsi_last_bb (entry_bb);
4832 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4833 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4834 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4835 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4836 gsi_remove (&si, true);
4837 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4839 if (exit_bb)
4841 si = gsi_last_bb (exit_bb);
4842 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4843 gsi_remove (&si, true);
4844 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4848 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4849 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4850 size of the data type, and thus usable to find the index of the builtin
4851 decl. Returns false if the expression is not of the proper form. */
4853 static bool
4854 expand_omp_atomic_fetch_op (basic_block load_bb,
4855 tree addr, tree loaded_val,
4856 tree stored_val, int index)
4858 enum built_in_function base;
4859 tree decl, itype, call;
4860 enum insn_code *optab;
4861 tree rhs;
4862 basic_block store_bb = single_succ (load_bb);
4863 gimple_stmt_iterator gsi;
4864 gimple stmt;
4866 /* We expect to find the following sequences:
4868 load_bb:
4869 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
4871 store_bb:
4872 val = tmp OP something; (or: something OP tmp)
4873 GIMPLE_OMP_STORE (val)
4875 ???FIXME: Allow a more flexible sequence.
4876 Perhaps use data flow to pick the statements. */
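/* For example, with a 4-byte int x, a directive such as

       #pragma omp atomic
       x += 1;

   matches the PLUS_EXPR case below and is collapsed into a single
   __sync_fetch_and_add (&x, 1) call, with the separate load and store
   statements removed.  Illustrative example only.  */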
4880 gsi = gsi_after_labels (store_bb);
4881 stmt = gsi_stmt (gsi);
4882 if (!is_gimple_assign (stmt))
4883 return false;
4884 gsi_next (&gsi);
4885 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
4886 return false;
4888 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
4889 return false;
4891 /* Check for one of the supported fetch-op operations. */
4892 switch (gimple_assign_rhs_code (stmt))
4894 case PLUS_EXPR:
4895 case POINTER_PLUS_EXPR:
4896 base = BUILT_IN_FETCH_AND_ADD_N;
4897 optab = sync_add_optab;
4898 break;
4899 case MINUS_EXPR:
4900 base = BUILT_IN_FETCH_AND_SUB_N;
4901 optab = sync_add_optab;
4902 break;
4903 case BIT_AND_EXPR:
4904 base = BUILT_IN_FETCH_AND_AND_N;
4905 optab = sync_and_optab;
4906 break;
4907 case BIT_IOR_EXPR:
4908 base = BUILT_IN_FETCH_AND_OR_N;
4909 optab = sync_ior_optab;
4910 break;
4911 case BIT_XOR_EXPR:
4912 base = BUILT_IN_FETCH_AND_XOR_N;
4913 optab = sync_xor_optab;
4914 break;
4915 default:
4916 return false;
4918 /* Make sure the expression is of the proper form. */
4919 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
4920 rhs = gimple_assign_rhs2 (stmt);
4921 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
4922 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
4923 rhs = gimple_assign_rhs1 (stmt);
4924 else
4925 return false;
4927 decl = built_in_decls[base + index + 1];
4928 itype = TREE_TYPE (TREE_TYPE (decl));
4930 if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
4931 return false;
4933 gsi = gsi_last_bb (load_bb);
4934 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
4935 call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
4936 call = fold_convert (void_type_node, call);
4937 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
4938 gsi_remove (&gsi, true);
4940 gsi = gsi_last_bb (store_bb);
4941 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
4942 gsi_remove (&gsi, true);
4943 gsi = gsi_last_bb (store_bb);
4944 gsi_remove (&gsi, true);
4946 if (gimple_in_ssa_p (cfun))
4947 update_ssa (TODO_update_ssa_no_phi);
4949 return true;
4952 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
4954 oldval = *addr;
4955 repeat:
4956 newval = rhs; // with oldval replacing *addr in rhs
4957 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
4958 if (oldval != newval)
4959 goto repeat;
4961 INDEX is log2 of the size of the data type, and thus usable to find the
4962 index of the builtin decl. */
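/* For example, with a float x, a directive such as

       #pragma omp atomic
       x += 1.0f;

   cannot use a __sync fetch-and-op builtin, so the loop above
   view-converts the float to a same-sized integer and retries
   __sync_val_compare_and_swap until the value stored matches the
   value last loaded.  Illustrative example only.  */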
4964 static bool
4965 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
4966 tree addr, tree loaded_val, tree stored_val,
4967 int index)
4969 tree loadedi, storedi, initial, new_storedi, old_vali;
4970 tree type, itype, cmpxchg, iaddr;
4971 gimple_stmt_iterator si;
4972 basic_block loop_header = single_succ (load_bb);
4973 gimple phi, stmt;
4974 edge e;
4976 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
4977 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
4978 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
4980 if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
4981 return false;
4983 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
4984 si = gsi_last_bb (load_bb);
4985 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
4987 /* For floating-point values, we'll need to view-convert them to integers
4988 so that we can perform the atomic compare and swap. Simplify the
4989 following code by always setting up the "i"ntegral variables. */
4990 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
4992 tree iaddr_val;
4994 iaddr = create_tmp_var (build_pointer_type (itype), NULL);
4995 iaddr_val
4996 = force_gimple_operand_gsi (&si,
4997 fold_convert (TREE_TYPE (iaddr), addr),
4998 false, NULL_TREE, true, GSI_SAME_STMT);
4999 stmt = gimple_build_assign (iaddr, iaddr_val);
5000 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5001 DECL_NO_TBAA_P (iaddr) = 1;
5002 DECL_POINTER_ALIAS_SET (iaddr) = 0;
5003 loadedi = create_tmp_var (itype, NULL);
5004 if (gimple_in_ssa_p (cfun))
5006 add_referenced_var (iaddr);
5007 add_referenced_var (loadedi);
5008 loadedi = make_ssa_name (loadedi, NULL);
5011 else
5013 iaddr = addr;
5014 loadedi = loaded_val;
5017 initial = force_gimple_operand_gsi (&si, build_fold_indirect_ref (iaddr),
5018 true, NULL_TREE, true, GSI_SAME_STMT);
5020 /* Move the value to the LOADEDI temporary. */
5021 if (gimple_in_ssa_p (cfun))
5023 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5024 phi = create_phi_node (loadedi, loop_header);
5025 SSA_NAME_DEF_STMT (loadedi) = phi;
5026 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5027 initial);
5029 else
5030 gsi_insert_before (&si,
5031 gimple_build_assign (loadedi, initial),
5032 GSI_SAME_STMT);
5033 if (loadedi != loaded_val)
5035 gimple_stmt_iterator gsi2;
5036 tree x;
5038 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5039 gsi2 = gsi_start_bb (loop_header);
5040 if (gimple_in_ssa_p (cfun))
5042 gimple stmt;
5043 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5044 true, GSI_SAME_STMT);
5045 stmt = gimple_build_assign (loaded_val, x);
5046 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5048 else
5050 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5051 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5052 true, GSI_SAME_STMT);
5055 gsi_remove (&si, true);
5057 si = gsi_last_bb (store_bb);
5058 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5060 if (iaddr == addr)
5061 storedi = stored_val;
5062 else
5063 storedi =
5064 force_gimple_operand_gsi (&si,
5065 build1 (VIEW_CONVERT_EXPR, itype,
5066 stored_val), true, NULL_TREE, true,
5067 GSI_SAME_STMT);
5069 /* Build the compare&swap statement. */
5070 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5071 new_storedi = force_gimple_operand_gsi (&si,
5072 fold_convert (itype, new_storedi),
5073 true, NULL_TREE,
5074 true, GSI_SAME_STMT);
5076 if (gimple_in_ssa_p (cfun))
5077 old_vali = loadedi;
5078 else
5080 old_vali = create_tmp_var (itype, NULL);
5081 if (gimple_in_ssa_p (cfun))
5082 add_referenced_var (old_vali);
5083 stmt = gimple_build_assign (old_vali, loadedi);
5084 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5086 stmt = gimple_build_assign (loadedi, new_storedi);
5087 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5090 /* Note that we always perform the comparison as an integer, even for
5091 floating point. This allows the atomic operation to properly
5092 succeed even with NaNs and -0.0. */
5093 stmt = gimple_build_cond_empty
5094 (build2 (NE_EXPR, boolean_type_node,
5095 new_storedi, old_vali));
5096 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5098 /* Update cfg. */
5099 e = single_succ_edge (store_bb);
5100 e->flags &= ~EDGE_FALLTHRU;
5101 e->flags |= EDGE_FALSE_VALUE;
5103 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5105 /* Copy the new value to loadedi (we already did that before the condition
5106 if we are not in SSA). */
5107 if (gimple_in_ssa_p (cfun))
5109 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5110 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5113 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5114 gsi_remove (&si, true);
5116 if (gimple_in_ssa_p (cfun))
5117 update_ssa (TODO_update_ssa_no_phi);
5119 return true;
5122 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5124 GOMP_atomic_start ();
5125 *addr = rhs;
5126 GOMP_atomic_end ();
5128 The result is not globally atomic, but works so long as all parallel
5129 references are within #pragma omp atomic directives. According to
5130 responses received from omp@openmp.org, this appears to be within spec,
5131 which makes sense, since that's how several other compilers handle
5132 this situation as well.
5133 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5134 expanding. STORED_VAL is the operand of the matching
5135 GIMPLE_OMP_ATOMIC_STORE.
5137 We replace
5138 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5139 loaded_val = *addr;
5141 and replace
5142 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5143 *addr = stored_val;
5146 static bool
5147 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5148 tree addr, tree loaded_val, tree stored_val)
5150 gimple_stmt_iterator si;
5151 gimple stmt;
5152 tree t;
5154 si = gsi_last_bb (load_bb);
5155 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5157 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5158 t = build_function_call_expr (t, 0);
5159 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5161 stmt = gimple_build_assign (loaded_val, build_fold_indirect_ref (addr));
5162 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5163 gsi_remove (&si, true);
5165 si = gsi_last_bb (store_bb);
5166 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5168 stmt = gimple_build_assign (build_fold_indirect_ref (unshare_expr (addr)),
5169 stored_val);
5170 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5172 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5173 t = build_function_call_expr (t, 0);
5174 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5175 gsi_remove (&si, true);
5177 if (gimple_in_ssa_p (cfun))
5178 update_ssa (TODO_update_ssa_no_phi);
5179 return true;
5182 /* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand
5183 using expand_omp_atomic_fetch_op. If that fails, we try to
5184 call expand_omp_atomic_pipeline, and if it fails too, the
5185 ultimate fallback is wrapping the operation in a mutex
5186 (expand_omp_atomic_mutex). REGION is the atomic region built
5187 by build_omp_regions_1(). */
5189 static void
5190 expand_omp_atomic (struct omp_region *region)
5192 basic_block load_bb = region->entry, store_bb = region->exit;
5193 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5194 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5195 tree addr = gimple_omp_atomic_load_rhs (load);
5196 tree stored_val = gimple_omp_atomic_store_val (store);
5197 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5198 HOST_WIDE_INT index;
5200 /* Make sure the type is one of the supported sizes. */
5201 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5202 index = exact_log2 (index);
5203 if (index >= 0 && index <= 4)
5205 unsigned int align = TYPE_ALIGN_UNIT (type);
5207 /* __sync builtins require strict data alignment. */
5208 if (exact_log2 (align) >= index)
5210 /* When possible, use specialized atomic update functions. */
5211 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5212 && store_bb == single_succ (load_bb))
5214 if (expand_omp_atomic_fetch_op (load_bb, addr,
5215 loaded_val, stored_val, index))
5216 return;
5219 /* If we don't have specialized __sync builtins, try to implement it
5220 as a compare and swap loop. */
5221 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5222 loaded_val, stored_val, index))
5223 return;
5227 /* The ultimate fallback is wrapping the operation in a mutex. */
5228 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5232 /* Expand the parallel region tree rooted at REGION. Expansion
5233 proceeds in depth-first order. Innermost regions are expanded
5234 first. This way, parallel regions that require a new function to
5235 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5236 internal dependencies in their body. */
5238 static void
5239 expand_omp (struct omp_region *region)
5241 while (region)
5243 location_t saved_location;
5245 /* First, determine whether this is a combined parallel+workshare
5246 region. */
5247 if (region->type == GIMPLE_OMP_PARALLEL)
5248 determine_parallel_type (region);
5250 if (region->inner)
5251 expand_omp (region->inner);
5253 saved_location = input_location;
5254 if (gimple_has_location (last_stmt (region->entry)))
5255 input_location = gimple_location (last_stmt (region->entry));
5257 switch (region->type)
5259 case GIMPLE_OMP_PARALLEL:
5260 case GIMPLE_OMP_TASK:
5261 expand_omp_taskreg (region);
5262 break;
5264 case GIMPLE_OMP_FOR:
5265 expand_omp_for (region);
5266 break;
5268 case GIMPLE_OMP_SECTIONS:
5269 expand_omp_sections (region);
5270 break;
5272 case GIMPLE_OMP_SECTION:
5273 /* Individual omp sections are handled together with their
5274 parent GIMPLE_OMP_SECTIONS region. */
5275 break;
5277 case GIMPLE_OMP_SINGLE:
5278 expand_omp_single (region);
5279 break;
5281 case GIMPLE_OMP_MASTER:
5282 case GIMPLE_OMP_ORDERED:
5283 case GIMPLE_OMP_CRITICAL:
5284 expand_omp_synch (region);
5285 break;
5287 case GIMPLE_OMP_ATOMIC_LOAD:
5288 expand_omp_atomic (region);
5289 break;
5291 default:
5292 gcc_unreachable ();
5295 input_location = saved_location;
5296 region = region->next;
5301 /* Helper for build_omp_regions. Scan the dominator tree starting at
5302 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5303 true, the function ends once a single tree is built (otherwise, the
5304 whole forest of OMP constructs may be built). */
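/* For instance, for

       #pragma omp parallel
       {
         #pragma omp for
         for (...) ...
       }

   the scan creates a GIMPLE_OMP_PARALLEL region whose inner region is
   the GIMPLE_OMP_FOR; each GIMPLE_OMP_RETURN encountered closes the
   current PARENT by recording its exit block.  Illustrative sketch
   only.  */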
5306 static void
5307 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5308 bool single_tree)
5310 gimple_stmt_iterator gsi;
5311 gimple stmt;
5312 basic_block son;
5314 gsi = gsi_last_bb (bb);
5315 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5317 struct omp_region *region;
5318 enum gimple_code code;
5320 stmt = gsi_stmt (gsi);
5321 code = gimple_code (stmt);
5322 if (code == GIMPLE_OMP_RETURN)
5324 /* STMT is the return point out of region PARENT. Mark it
5325 as the exit point and make PARENT the immediately
5326 enclosing region. */
5327 gcc_assert (parent);
5328 region = parent;
5329 region->exit = bb;
5330 parent = parent->outer;
5332 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5334 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5335 GIMPLE_OMP_RETURN, but matches with
5336 GIMPLE_OMP_ATOMIC_LOAD. */
5337 gcc_assert (parent);
5338 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5339 region = parent;
5340 region->exit = bb;
5341 parent = parent->outer;
5344 else if (code == GIMPLE_OMP_CONTINUE)
5346 gcc_assert (parent);
5347 parent->cont = bb;
5349 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5351 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5352 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5355 else
5357 /* Otherwise, this directive becomes the parent for a new
5358 region. */
5359 region = new_omp_region (bb, code, parent);
5360 parent = region;
5364 if (single_tree && !parent)
5365 return;
5367 for (son = first_dom_son (CDI_DOMINATORS, bb);
5368 son;
5369 son = next_dom_son (CDI_DOMINATORS, son))
5370 build_omp_regions_1 (son, parent, single_tree);
5373 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5374 root_omp_region. */
5376 static void
5377 build_omp_regions_root (basic_block root)
5379 gcc_assert (root_omp_region == NULL);
5380 build_omp_regions_1 (root, NULL, true);
5381 gcc_assert (root_omp_region != NULL);
5384 /* Expands the omp construct (and its subconstructs) starting in HEAD. */
5386 void
5387 omp_expand_local (basic_block head)
5389 build_omp_regions_root (head);
5390 if (dump_file && (dump_flags & TDF_DETAILS))
5392 fprintf (dump_file, "\nOMP region tree\n\n");
5393 dump_omp_region (dump_file, root_omp_region, 0);
5394 fprintf (dump_file, "\n");
5397 remove_exit_barriers (root_omp_region);
5398 expand_omp (root_omp_region);
5400 free_omp_regions ();
5403 /* Scan the CFG and build a tree of OMP regions, storing the root of
5404 the OMP region tree in root_omp_region. */
5406 static void
5407 build_omp_regions (void)
5409 gcc_assert (root_omp_region == NULL);
5410 calculate_dominance_info (CDI_DOMINATORS);
5411 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5414 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5416 static unsigned int
5417 execute_expand_omp (void)
5419 build_omp_regions ();
5421 if (!root_omp_region)
5422 return 0;
5424 if (dump_file)
5426 fprintf (dump_file, "\nOMP region tree\n\n");
5427 dump_omp_region (dump_file, root_omp_region, 0);
5428 fprintf (dump_file, "\n");
5431 remove_exit_barriers (root_omp_region);
5433 expand_omp (root_omp_region);
5435 cleanup_tree_cfg ();
5437 free_omp_regions ();
5439 return 0;
5442 /* OMP expansion -- the default pass, run before creation of SSA form. */
5444 static bool
5445 gate_expand_omp (void)
5447 return (flag_openmp != 0 && errorcount == 0);
5450 struct gimple_opt_pass pass_expand_omp =
5453 GIMPLE_PASS,
5454 "ompexp", /* name */
5455 gate_expand_omp, /* gate */
5456 execute_expand_omp, /* execute */
5457 NULL, /* sub */
5458 NULL, /* next */
5459 0, /* static_pass_number */
5460 0, /* tv_id */
5461 PROP_gimple_any, /* properties_required */
5462 PROP_gimple_lomp, /* properties_provided */
5463 0, /* properties_destroyed */
5464 0, /* todo_flags_start */
5465 TODO_dump_func /* todo_flags_finish */
5469 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5471 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5472 CTX is the enclosing OMP context for the current statement. */
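/* For instance, the body of

       #pragma omp sections
       {
         #pragma omp section
           a ();
       }

   is rewritten here into the GIMPLE_OMP_SECTIONS statement, a
   GIMPLE_OMP_SECTIONS_SWITCH, the lowered section bodies and a
   GIMPLE_OMP_CONTINUE on the new .section control variable; the
   actual switch statement is only built later by expand_omp_sections.
   Illustrative sketch only.  */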
5474 static void
5475 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5477 tree block, control;
5478 gimple_stmt_iterator tgsi;
5479 unsigned i, len;
5480 gimple stmt, new_stmt, bind, t;
5481 gimple_seq ilist, dlist, olist, new_body, body;
5482 struct gimplify_ctx gctx;
5484 stmt = gsi_stmt (*gsi_p);
5486 push_gimplify_context (&gctx);
5488 dlist = NULL;
5489 ilist = NULL;
5490 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5491 &ilist, &dlist, ctx);
5493 tgsi = gsi_start (gimple_omp_body (stmt));
5494 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5495 continue;
5497 tgsi = gsi_start (gimple_omp_body (stmt));
5498 body = NULL;
5499 for (i = 0; i < len; i++, gsi_next (&tgsi))
5501 omp_context *sctx;
5502 gimple sec_start;
5504 sec_start = gsi_stmt (tgsi);
5505 sctx = maybe_lookup_ctx (sec_start);
5506 gcc_assert (sctx);
5508 gimple_seq_add_stmt (&body, sec_start);
5510 lower_omp (gimple_omp_body (sec_start), sctx);
5511 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5512 gimple_omp_set_body (sec_start, NULL);
5514 if (i == len - 1)
5516 gimple_seq l = NULL;
5517 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5518 &l, ctx);
5519 gimple_seq_add_seq (&body, l);
5520 gimple_omp_section_set_last (sec_start);
5523 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5526 block = make_node (BLOCK);
5527 bind = gimple_build_bind (NULL, body, block);
5529 olist = NULL;
5530 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5532 block = make_node (BLOCK);
5533 new_stmt = gimple_build_bind (NULL, NULL, block);
5535 pop_gimplify_context (new_stmt);
5536 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5537 BLOCK_VARS (block) = gimple_bind_vars (bind);
5538 if (BLOCK_VARS (block))
5539 TREE_USED (block) = 1;
5541 new_body = NULL;
5542 gimple_seq_add_seq (&new_body, ilist);
5543 gimple_seq_add_stmt (&new_body, stmt);
5544 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5545 gimple_seq_add_stmt (&new_body, bind);
5547 control = create_tmp_var (unsigned_type_node, ".section");
5548 t = gimple_build_omp_continue (control, control);
5549 gimple_omp_sections_set_control (stmt, control);
5550 gimple_seq_add_stmt (&new_body, t);
5552 gimple_seq_add_seq (&new_body, olist);
5553 gimple_seq_add_seq (&new_body, dlist);
5555 new_body = maybe_catch_exception (new_body);
5557 t = gimple_build_omp_return
5558 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5559 OMP_CLAUSE_NOWAIT));
5560 gimple_seq_add_stmt (&new_body, t);
5562 gimple_bind_set_body (new_stmt, new_body);
5563 gimple_omp_set_body (stmt, NULL);
5565 gsi_replace (gsi_p, new_stmt, true);
5569 /* A subroutine of lower_omp_single. Expand the simple form of
5570 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5572 if (GOMP_single_start ())
5573 BODY;
5574 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5576 FIXME. It may be better to delay expanding the logic of this until
5577 pass_expand_omp. The expanded logic may make the job more difficult
5578 for a synchronization analysis pass. */
5580 static void
5581 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5583 tree tlabel = create_artificial_label ();
5584 tree flabel = create_artificial_label ();
5585 gimple call, cond;
5586 tree lhs, decl;
5588 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5589 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5590 call = gimple_build_call (decl, 0);
5591 gimple_call_set_lhs (call, lhs);
5592 gimple_seq_add_stmt (pre_p, call);
5594 cond = gimple_build_cond (EQ_EXPR, lhs,
5595 fold_convert (TREE_TYPE (lhs), boolean_true_node),
5596 tlabel, flabel);
5597 gimple_seq_add_stmt (pre_p, cond);
5598 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5599 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5600 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5604 /* A subroutine of lower_omp_single. Expand the simple form of
5605 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
5607 #pragma omp single copyprivate (a, b, c)
5609 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5612 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5613 {
5614 BODY;
5615 copyout.a = a;
5616 copyout.b = b;
5617 copyout.c = c;
5618 GOMP_single_copy_end (&copyout);
5619 }
5620 else
5621 {
5622 a = copyout_p->a;
5623 b = copyout_p->b;
5624 c = copyout_p->c;
5625 }
5626 GOMP_barrier ();
5629 FIXME. It may be better to delay expanding the logic of this until
5630 pass_expand_omp. The expanded logic may make the job more difficult
5631 for a synchronization analysis pass. */
5633 static void
5634 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5636 tree ptr_type, t, l0, l1, l2;
5637 gimple_seq copyin_seq;
5639 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5641 ptr_type = build_pointer_type (ctx->record_type);
5642 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5644 l0 = create_artificial_label ();
5645 l1 = create_artificial_label ();
5646 l2 = create_artificial_label ();
5648 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5649 t = fold_convert (ptr_type, t);
5650 gimplify_assign (ctx->receiver_decl, t, pre_p);
5652 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5653 build_int_cst (ptr_type, 0));
5654 t = build3 (COND_EXPR, void_type_node, t,
5655 build_and_jump (&l0), build_and_jump (&l1));
5656 gimplify_and_add (t, pre_p);
5658 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5660 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5662 copyin_seq = NULL;
5663 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5664 &copyin_seq, ctx);
5666 t = build_fold_addr_expr (ctx->sender_decl);
5667 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
5668 gimplify_and_add (t, pre_p);
5670 t = build_and_jump (&l2);
5671 gimplify_and_add (t, pre_p);
5673 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5675 gimple_seq_add_seq (pre_p, copyin_seq);
5677 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5681 /* Expand code for an OpenMP single directive. */
5683 static void
5684 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5686 tree block;
5687 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5688 gimple_seq bind_body, dlist;
5689 struct gimplify_ctx gctx;
5691 push_gimplify_context (&gctx);
5693 bind_body = NULL;
5694 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5695 &bind_body, &dlist, ctx);
5696 lower_omp (gimple_omp_body (single_stmt), ctx);
5698 gimple_seq_add_stmt (&bind_body, single_stmt);
5700 if (ctx->record_type)
5701 lower_omp_single_copy (single_stmt, &bind_body, ctx);
5702 else
5703 lower_omp_single_simple (single_stmt, &bind_body);
5705 gimple_omp_set_body (single_stmt, NULL);
5707 gimple_seq_add_seq (&bind_body, dlist);
5709 bind_body = maybe_catch_exception (bind_body);
5711 t = gimple_build_omp_return
5712 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5713 OMP_CLAUSE_NOWAIT));
5714 gimple_seq_add_stmt (&bind_body, t);
5716 block = make_node (BLOCK);
5717 bind = gimple_build_bind (NULL, bind_body, block);
5719 pop_gimplify_context (bind);
5721 gimple_bind_append_vars (bind, ctx->block_vars);
5722 BLOCK_VARS (block) = ctx->block_vars;
5723 gsi_replace (gsi_p, bind, true);
5724 if (BLOCK_VARS (block))
5725 TREE_USED (block) = 1;
5729 /* Expand code for an OpenMP master directive. */
5731 static void
5732 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5734 tree block, lab = NULL, x;
5735 gimple stmt = gsi_stmt (*gsi_p), bind;
5736 gimple_seq tseq;
5737 struct gimplify_ctx gctx;
5739 push_gimplify_context (&gctx);
5741 block = make_node (BLOCK);
5742 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5743 block);
5745 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5746 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5747 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
5748 tseq = NULL;
5749 gimplify_and_add (x, &tseq);
5750 gimple_bind_add_seq (bind, tseq);
5752 lower_omp (gimple_omp_body (stmt), ctx);
5753 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5754 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5755 gimple_omp_set_body (stmt, NULL);
5757 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5759 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5761 pop_gimplify_context (bind);
5763 gimple_bind_append_vars (bind, ctx->block_vars);
5764 BLOCK_VARS (block) = ctx->block_vars;
5765 gsi_replace (gsi_p, bind, true);
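/* For illustration: the master construct lowers to a plain thread-id
   check around the body, roughly

	if (omp_get_thread_num () == 0)
	  body;
	lab:;

   followed by a GIMPLE_OMP_RETURN with the nowait flag set, since
   master carries no implied barrier.  Sketch only; the real output is
   GIMPLE, not C.  */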
5769 /* Expand code for an OpenMP ordered directive. */
5771 static void
5772 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5774 tree block;
5775 gimple stmt = gsi_stmt (*gsi_p), bind, x;
5776 struct gimplify_ctx gctx;
5778 push_gimplify_context (&gctx);
5780 block = make_node (BLOCK);
5781 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5782 block);
5784 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5785 gimple_bind_add_stmt (bind, x);
5787 lower_omp (gimple_omp_body (stmt), ctx);
5788 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5789 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5790 gimple_omp_set_body (stmt, NULL);
5792 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5793 gimple_bind_add_stmt (bind, x);
5795 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5797 pop_gimplify_context (bind);
5799 gimple_bind_append_vars (bind, ctx->block_vars);
5800 BLOCK_VARS (block) = gimple_bind_vars (bind);
5801 gsi_replace (gsi_p, bind, true);
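/* For illustration: the ordered construct simply brackets its body with
   the libgomp ordering primitives, roughly

	GOMP_ordered_start ();
	body;
	GOMP_ordered_end ();

   again followed by a no-wait GIMPLE_OMP_RETURN.  Sketch only.  */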
5805 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
5806 substitution of a couple of function calls. But in the NAMED case,
5807 it requires that languages coordinate a symbol name. It is therefore
5808 best put here in common code. */
5810 static GTY((param1_is (tree), param2_is (tree)))
5811 splay_tree critical_name_mutexes;
5813 static void
5814 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5816 tree block;
5817 tree name, lock, unlock;
5818 gimple stmt = gsi_stmt (*gsi_p), bind;
5819 gimple_seq tbody;
5820 struct gimplify_ctx gctx;
5822 name = gimple_omp_critical_name (stmt);
5823 if (name)
5825 tree decl;
5826 splay_tree_node n;
5828 if (!critical_name_mutexes)
5829 critical_name_mutexes
5830 = splay_tree_new_ggc (splay_tree_compare_pointers);
5832 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5833 if (n == NULL)
5835 char *new_str;
5837 decl = create_tmp_var_raw (ptr_type_node, NULL);
5839 new_str = ACONCAT ((".gomp_critical_user_",
5840 IDENTIFIER_POINTER (name), NULL));
5841 DECL_NAME (decl) = get_identifier (new_str);
5842 TREE_PUBLIC (decl) = 1;
5843 TREE_STATIC (decl) = 1;
5844 DECL_COMMON (decl) = 1;
5845 DECL_ARTIFICIAL (decl) = 1;
5846 DECL_IGNORED_P (decl) = 1;
5847 varpool_finalize_decl (decl);
5849 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5850 (splay_tree_value) decl);
5852 else
5853 decl = (tree) n->value;
5855 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5856 lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));
5858 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5859 unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
5861 else
5863 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5864 lock = build_call_expr (lock, 0);
5866 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
5867 unlock = build_call_expr (unlock, 0);
5870 push_gimplify_context (&gctx);
5872 block = make_node (BLOCK);
5873 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
5875 tbody = gimple_bind_body (bind);
5876 gimplify_and_add (lock, &tbody);
5877 gimple_bind_set_body (bind, tbody);
5879 lower_omp (gimple_omp_body (stmt), ctx);
5880 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5881 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5882 gimple_omp_set_body (stmt, NULL);
5884 tbody = gimple_bind_body (bind);
5885 gimplify_and_add (unlock, &tbody);
5886 gimple_bind_set_body (bind, tbody);
5888 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5890 pop_gimplify_context (bind);
5891 gimple_bind_append_vars (bind, ctx->block_vars);
5892 BLOCK_VARS (block) = gimple_bind_vars (bind);
5893 gsi_replace (gsi_p, bind, true);
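/* For illustration: an unnamed critical region lowers, roughly, to

	GOMP_critical_start ();
	body;
	GOMP_critical_end ();

   while a named one such as "#pragma omp critical (foo)" passes the
   address of a common-linkage mutex pointer named
   .gomp_critical_user_foo:

	GOMP_critical_name_start (&.gomp_critical_user_foo);
	body;
	GOMP_critical_name_end (&.gomp_critical_user_foo);

   The TREE_PUBLIC/TREE_STATIC/DECL_COMMON settings above make every
   translation unit that uses the same name share one mutex.  Sketch
   only; the real output is GIMPLE.  */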
5897 /* A subroutine of lower_omp_for. Generate code to emit the predicate
5898 for a lastprivate clause. Given a loop control predicate of (V
5899 cond N2), we gate the clause on (!(V cond N2)). The lowered form
5900 is appended to *DLIST, iterator initialization is appended to
5901 *BODY_P. */
5903 static void
5904 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
5905 gimple_seq *dlist, struct omp_context *ctx)
5907 tree clauses, cond, vinit;
5908 enum tree_code cond_code;
5909 gimple_seq stmts;
5911 cond_code = fd->loop.cond_code;
5912 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
5914 /* When possible, use a strict equality expression. This can let VRP
5915 type optimizations deduce the value and remove a copy. */
5916 if (host_integerp (fd->loop.step, 0))
5918 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
5919 if (step == 1 || step == -1)
5920 cond_code = EQ_EXPR;
5923 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
5925 clauses = gimple_omp_for_clauses (fd->for_stmt);
5926 stmts = NULL;
5927 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
5928 if (!gimple_seq_empty_p (stmts))
5930 gimple_seq_add_seq (&stmts, *dlist);
5931 *dlist = stmts;
5933 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
5934 vinit = fd->loop.n1;
5935 if (cond_code == EQ_EXPR
5936 && host_integerp (fd->loop.n2, 0)
5937 && ! integer_zerop (fd->loop.n2))
5938 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
5940 /* Initialize the iterator variable, so that threads that don't execute
5941 any iterations don't execute the lastprivate clauses by accident. */
5942 gimplify_assign (fd->loop.v, vinit, body_p);
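/* For illustration: for a loop such as

	#pragma omp for lastprivate (x)
	for (i = 0; i < n; i++)
	  x = a[i];

   the code above arranges, roughly,

	i = i_init;
	... worksharing loop ...
	if (i >= n)
	  x_orig = x_priv;

   so that only the thread whose iteration variable reached the global
   end performs the copy-out.  With a unit step the guard becomes the
   cheaper i == n, and the iteration variable is pre-initialized (to the
   loop start, or to 0 when the guard is an equality test against a
   nonzero constant) so that threads which execute no iterations never
   satisfy the guard.  Names are placeholders; sketch only.  */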
5947 /* Lower code for an OpenMP loop directive. */
5949 static void
5950 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5952 tree *rhs_p, block;
5953 struct omp_for_data fd;
5954 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
5955 gimple_seq omp_for_body, body, dlist, ilist;
5956 size_t i;
5957 struct gimplify_ctx gctx;
5959 push_gimplify_context (&gctx);
5961 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
5962 lower_omp (gimple_omp_body (stmt), ctx);
5964 block = make_node (BLOCK);
5965 new_stmt = gimple_build_bind (NULL, NULL, block);
5967 /* Move the declarations of temporaries in the loop body out of it
5968 before we make the body go away. */
5969 omp_for_body = gimple_omp_body (stmt);
5970 if (!gimple_seq_empty_p (omp_for_body)
5971 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
5973 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
5974 gimple_bind_append_vars (new_stmt, vars);
5977 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
5978 ilist = NULL;
5979 dlist = NULL;
5980 body = NULL;
5981 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
5982 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
5984 /* Lower the header expressions. At this point, we can assume that
5985 the header is of the form:
5987 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
5989 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
5990 using the .omp_data_s mapping, if needed. */
5991 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
5993 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
5994 if (!is_gimple_min_invariant (*rhs_p))
5995 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
5997 rhs_p = gimple_omp_for_final_ptr (stmt, i);
5998 if (!is_gimple_min_invariant (*rhs_p))
5999 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6001 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6002 if (!is_gimple_min_invariant (*rhs_p))
6003 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6006 /* Once lowered, extract the bounds and clauses. */
6007 extract_omp_for_data (stmt, &fd, NULL);
6009 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6011 gimple_seq_add_stmt (&body, stmt);
6012 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6014 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6015 fd.loop.v));
6017 /* After the loop, add exit clauses. */
6018 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6019 gimple_seq_add_seq (&body, dlist);
6021 body = maybe_catch_exception (body);
6023 /* Region exit marker goes at the end of the loop body. */
6024 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6026 pop_gimplify_context (new_stmt);
6028 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6029 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6030 if (BLOCK_VARS (block))
6031 TREE_USED (block) = 1;
6033 gimple_bind_set_body (new_stmt, body);
6034 gimple_omp_set_body (stmt, NULL);
6035 gimple_omp_for_set_pre_body (stmt, NULL);
6036 gsi_replace (gsi_p, new_stmt, true);
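/* For illustration: header lowering pulls any non-invariant bound or
   step expression into a temporary so that later expansion sees simple
   operands, e.g.

	#pragma omp for
	for (i = 0; i < p->len; i++)
	  a[i] = i;

   becomes, roughly,

	.t = p->len;
	#pragma omp for
	for (i = 0; i < .t; i++)
	  a[i] = i;

   after which the GIMPLE_OMP_FOR, its body, a GIMPLE_OMP_CONTINUE and
   the final GIMPLE_OMP_RETURN are sequenced into a single bind.  .t
   stands for the formal temporary created by get_formal_tmp_var;
   sketch only.  */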
6039 /* Callback for walk_stmts. Check if the current statement only contains
6040 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
6042 static tree
6043 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6044 bool *handled_ops_p,
6045 struct walk_stmt_info *wi)
6047 int *info = (int *) wi->info;
6048 gimple stmt = gsi_stmt (*gsi_p);
6050 *handled_ops_p = true;
6051 switch (gimple_code (stmt))
6053 WALK_SUBSTMTS;
6055 case GIMPLE_OMP_FOR:
6056 case GIMPLE_OMP_SECTIONS:
6057 *info = *info == 0 ? 1 : -1;
6058 break;
6059 default:
6060 *info = -1;
6061 break;
6063 return NULL;
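/* For illustration: the callback above feeds the combined-parallel
   detection in lower_omp_taskreg.  A body such as

	#pragma omp parallel
	#pragma omp for
	for (i = 0; i < n; i++)
	  a[i] = i;

   contains exactly one worksharing construct and nothing else, so
   ws_num ends up 1 and the parallel is marked combined, allowing the
   expander to use the combined libgomp entry points
   (e.g. GOMP_parallel_loop_static_start).  Any other statement in the
   body forces ws_num to -1 and disables this.  */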
6066 struct omp_taskcopy_context
6068 /* This field must be at the beginning, as we do "inheritance": Some
6069 callback functions for tree-inline.c (e.g., omp_copy_decl)
6070 receive a copy_body_data pointer that is up-casted to an
6071 omp_context pointer. */
6072 copy_body_data cb;
6073 omp_context *ctx;
6076 static tree
6077 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6079 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6081 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6082 return create_tmp_var (TREE_TYPE (var), NULL);
6084 return var;
6087 static tree
6088 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6090 tree name, new_fields = NULL, type, f;
6092 type = lang_hooks.types.make_type (RECORD_TYPE);
6093 name = DECL_NAME (TYPE_NAME (orig_type));
6094 name = build_decl (TYPE_DECL, name, type);
6095 TYPE_NAME (type) = name;
6097 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6099 tree new_f = copy_node (f);
6100 DECL_CONTEXT (new_f) = type;
6101 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6102 TREE_CHAIN (new_f) = new_fields;
6103 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6104 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6105 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6106 &tcctx->cb, NULL);
6107 new_fields = new_f;
6108 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6110 TYPE_FIELDS (type) = nreverse (new_fields);
6111 layout_type (type);
6112 return type;
6115 /* Create task copyfn. */
6117 static void
6118 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6120 struct function *child_cfun;
6121 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6122 tree record_type, srecord_type, bind, list;
6123 bool record_needs_remap = false, srecord_needs_remap = false;
6124 splay_tree_node n;
6125 struct omp_taskcopy_context tcctx;
6126 struct gimplify_ctx gctx;
6128 child_fn = gimple_omp_task_copy_fn (task_stmt);
6129 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6130 gcc_assert (child_cfun->cfg == NULL);
6131 child_cfun->dont_save_pending_sizes_p = 1;
6132 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6134 /* Reset DECL_CONTEXT on function arguments. */
6135 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
6136 DECL_CONTEXT (t) = child_fn;
6138 /* Populate the function. */
6139 push_gimplify_context (&gctx);
6140 current_function_decl = child_fn;
6142 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6143 TREE_SIDE_EFFECTS (bind) = 1;
6144 list = NULL;
6145 DECL_SAVED_TREE (child_fn) = bind;
6146 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6148 /* Remap src and dst argument types if needed. */
6149 record_type = ctx->record_type;
6150 srecord_type = ctx->srecord_type;
6151 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
6152 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6154 record_needs_remap = true;
6155 break;
6157 for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
6158 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6160 srecord_needs_remap = true;
6161 break;
6164 if (record_needs_remap || srecord_needs_remap)
6166 memset (&tcctx, '\0', sizeof (tcctx));
6167 tcctx.cb.src_fn = ctx->cb.src_fn;
6168 tcctx.cb.dst_fn = child_fn;
6169 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6170 tcctx.cb.dst_node = tcctx.cb.src_node;
6171 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6172 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6173 tcctx.cb.eh_region = -1;
6174 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6175 tcctx.cb.decl_map = pointer_map_create ();
6176 tcctx.ctx = ctx;
6178 if (record_needs_remap)
6179 record_type = task_copyfn_remap_type (&tcctx, record_type);
6180 if (srecord_needs_remap)
6181 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6183 else
6184 tcctx.cb.decl_map = NULL;
6186 push_cfun (child_cfun);
6188 arg = DECL_ARGUMENTS (child_fn);
6189 TREE_TYPE (arg) = build_pointer_type (record_type);
6190 sarg = TREE_CHAIN (arg);
6191 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6193 /* First pass: initialize temporaries used in record_type and srecord_type
6194 sizes and field offsets. */
6195 if (tcctx.cb.decl_map)
6196 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6197 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6199 tree *p;
6201 decl = OMP_CLAUSE_DECL (c);
6202 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6203 if (p == NULL)
6204 continue;
6205 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6206 sf = (tree) n->value;
6207 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6208 src = build_fold_indirect_ref (sarg);
6209 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6210 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6211 append_to_statement_list (t, &list);
6214 /* Second pass: copy shared var pointers and copy construct non-VLA
6215 firstprivate vars. */
6216 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6217 switch (OMP_CLAUSE_CODE (c))
6219 case OMP_CLAUSE_SHARED:
6220 decl = OMP_CLAUSE_DECL (c);
6221 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6222 if (n == NULL)
6223 break;
6224 f = (tree) n->value;
6225 if (tcctx.cb.decl_map)
6226 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6227 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6228 sf = (tree) n->value;
6229 if (tcctx.cb.decl_map)
6230 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6231 src = build_fold_indirect_ref (sarg);
6232 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6233 dst = build_fold_indirect_ref (arg);
6234 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6235 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6236 append_to_statement_list (t, &list);
6237 break;
6238 case OMP_CLAUSE_FIRSTPRIVATE:
6239 decl = OMP_CLAUSE_DECL (c);
6240 if (is_variable_sized (decl))
6241 break;
6242 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6243 if (n == NULL)
6244 break;
6245 f = (tree) n->value;
6246 if (tcctx.cb.decl_map)
6247 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6248 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6249 if (n != NULL)
6251 sf = (tree) n->value;
6252 if (tcctx.cb.decl_map)
6253 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6254 src = build_fold_indirect_ref (sarg);
6255 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6256 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6257 src = build_fold_indirect_ref (src);
6259 else
6260 src = decl;
6261 dst = build_fold_indirect_ref (arg);
6262 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6263 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6264 append_to_statement_list (t, &list);
6265 break;
6266 case OMP_CLAUSE_PRIVATE:
6267 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6268 break;
6269 decl = OMP_CLAUSE_DECL (c);
6270 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6271 f = (tree) n->value;
6272 if (tcctx.cb.decl_map)
6273 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6274 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6275 if (n != NULL)
6277 sf = (tree) n->value;
6278 if (tcctx.cb.decl_map)
6279 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6280 src = build_fold_indirect_ref (sarg);
6281 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6282 if (use_pointer_for_field (decl, NULL))
6283 src = build_fold_indirect_ref (src);
6285 else
6286 src = decl;
6287 dst = build_fold_indirect_ref (arg);
6288 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6289 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6290 append_to_statement_list (t, &list);
6291 break;
6292 default:
6293 break;
6296 /* Last pass: handle VLA firstprivates. */
6297 if (tcctx.cb.decl_map)
6298 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6299 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6301 tree ind, ptr, df;
6303 decl = OMP_CLAUSE_DECL (c);
6304 if (!is_variable_sized (decl))
6305 continue;
6306 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6307 if (n == NULL)
6308 continue;
6309 f = (tree) n->value;
6310 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6311 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6312 ind = DECL_VALUE_EXPR (decl);
6313 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6314 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6315 n = splay_tree_lookup (ctx->sfield_map,
6316 (splay_tree_key) TREE_OPERAND (ind, 0));
6317 sf = (tree) n->value;
6318 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6319 src = build_fold_indirect_ref (sarg);
6320 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6321 src = build_fold_indirect_ref (src);
6322 dst = build_fold_indirect_ref (arg);
6323 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6324 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6325 append_to_statement_list (t, &list);
6326 n = splay_tree_lookup (ctx->field_map,
6327 (splay_tree_key) TREE_OPERAND (ind, 0));
6328 df = (tree) n->value;
6329 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6330 ptr = build_fold_indirect_ref (arg);
6331 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6332 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6333 build_fold_addr_expr (dst));
6334 append_to_statement_list (t, &list);
6337 t = build1 (RETURN_EXPR, void_type_node, NULL);
6338 append_to_statement_list (t, &list);
6340 if (tcctx.cb.decl_map)
6341 pointer_map_destroy (tcctx.cb.decl_map);
6342 pop_gimplify_context (NULL);
6343 BIND_EXPR_BODY (bind) = list;
6344 pop_cfun ();
6345 current_function_decl = ctx->cb.src_fn;
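/* For illustration (a sketch, not the exact generated code): for

	void f (int n)
	{
	  int vla[n];
	  #pragma omp task firstprivate (vla)
	    use (vla);
	}

   GOMP_task may defer the task and hand it a freshly allocated
   argument block, so a flat copy of the sender record is not enough;
   variable-length and constructor-requiring firstprivates have to be
   rebuilt inside the task's own record.  The copyfn built above is,
   conceptually,

	void cpyfn (struct task_rec *dst, struct sender_rec *src)
	{
	  ... copy shared-variable pointers from *src to *dst ...
	  ... copy-construct each firstprivate field ...
	  ... point dst's VLA pointer fields at the copied data ...
	}

   where the record type names are placeholders for the compiler's
   generated .omp_data_* types.  */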
6348 /* Lower the OpenMP parallel or task directive in the current statement
6349 in GSI_P. CTX holds context information for the directive. */
6351 static void
6352 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6354 tree clauses;
6355 tree child_fn, t;
6356 gimple stmt = gsi_stmt (*gsi_p);
6357 gimple par_bind, bind;
6358 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6359 struct gimplify_ctx gctx;
6361 clauses = gimple_omp_taskreg_clauses (stmt);
6362 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6363 par_body = gimple_bind_body (par_bind);
6364 child_fn = ctx->cb.dst_fn;
6365 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6366 && !gimple_omp_parallel_combined_p (stmt))
6368 struct walk_stmt_info wi;
6369 int ws_num = 0;
6371 memset (&wi, 0, sizeof (wi));
6372 wi.info = &ws_num;
6373 wi.val_only = true;
6374 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6375 if (ws_num == 1)
6376 gimple_omp_parallel_set_combined_p (stmt, true);
6378 if (ctx->srecord_type)
6379 create_task_copyfn (stmt, ctx);
6381 push_gimplify_context (&gctx);
6383 par_olist = NULL;
6384 par_ilist = NULL;
6385 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6386 lower_omp (par_body, ctx);
6387 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6388 lower_reduction_clauses (clauses, &par_olist, ctx);
6390 /* Declare all the variables created by mapping and the variables
6391 declared in the scope of the parallel body. */
6392 record_vars_into (ctx->block_vars, child_fn);
6393 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6395 if (ctx->record_type)
6397 ctx->sender_decl
6398 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6399 : ctx->record_type, ".omp_data_o");
6400 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6403 olist = NULL;
6404 ilist = NULL;
6405 lower_send_clauses (clauses, &ilist, &olist, ctx);
6406 lower_send_shared_vars (&ilist, &olist, ctx);
6408 /* Once all the expansions are done, sequence all the different
6409 fragments inside gimple_omp_body. */
6411 new_body = NULL;
6413 if (ctx->record_type)
6415 t = build_fold_addr_expr (ctx->sender_decl);
6416 /* fixup_child_record_type might have changed receiver_decl's type. */
6417 t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
6418 gimple_seq_add_stmt (&new_body,
6419 gimple_build_assign (ctx->receiver_decl, t));
6422 gimple_seq_add_seq (&new_body, par_ilist);
6423 gimple_seq_add_seq (&new_body, par_body);
6424 gimple_seq_add_seq (&new_body, par_olist);
6425 new_body = maybe_catch_exception (new_body);
6426 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6427 gimple_omp_set_body (stmt, new_body);
6429 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6430 gimple_bind_add_stmt (bind, stmt);
6431 if (ilist || olist)
6433 gimple_seq_add_stmt (&ilist, bind);
6434 gimple_seq_add_seq (&ilist, olist);
6435 bind = gimple_build_bind (NULL, ilist, NULL);
6438 gsi_replace (gsi_p, bind, true);
6440 pop_gimplify_context (NULL);
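/* For illustration: once this function has run, a parallel region is
   shaped roughly like

	.omp_data_o.a = a;                        <- ilist (send clauses)
	GIMPLE_OMP_PARALLEL [child fn]
	  .omp_data_i = &.omp_data_o;             <- receiver setup
	  ... lowered body referencing .omp_data_i->a ...
	  GIMPLE_OMP_RETURN
	a = .omp_data_o.a;                        <- olist (copy-back)

   The actual outlining of the body into the child function and the
   calls to GOMP_parallel_start / GOMP_parallel_end are emitted later
   by pass_expand_omp.  Field names are illustrative.  */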
6443 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6444 regimplified. If DATA is non-NULL, lower_omp_1 is outside
6445 of OpenMP context, but with task_shared_vars set. */
6447 static tree
6448 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6449 void *data)
6451 tree t = *tp;
6453 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6454 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6455 return t;
6457 if (task_shared_vars
6458 && DECL_P (t)
6459 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6460 return t;
6462 /* If a global variable has been privatized, TREE_CONSTANT on
6463 ADDR_EXPR might be wrong. */
6464 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6465 recompute_tree_invariant_for_addr_expr (t);
6467 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6468 return NULL_TREE;
6471 static void
6472 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6474 gimple stmt = gsi_stmt (*gsi_p);
6475 struct walk_stmt_info wi;
6477 if (gimple_has_location (stmt))
6478 input_location = gimple_location (stmt);
6480 if (task_shared_vars)
6481 memset (&wi, '\0', sizeof (wi));
6483 /* If we have issued syntax errors, avoid doing any heavy lifting.
6484 Just replace the OpenMP directives with a NOP to avoid
6485 confusing RTL expansion. */
6486 if (errorcount && is_gimple_omp (stmt))
6488 gsi_replace (gsi_p, gimple_build_nop (), true);
6489 return;
6492 switch (gimple_code (stmt))
6494 case GIMPLE_COND:
6495 if ((ctx || task_shared_vars)
6496 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6497 ctx ? NULL : &wi, NULL)
6498 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6499 ctx ? NULL : &wi, NULL)))
6500 gimple_regimplify_operands (stmt, gsi_p);
6501 break;
6502 case GIMPLE_CATCH:
6503 lower_omp (gimple_catch_handler (stmt), ctx);
6504 break;
6505 case GIMPLE_EH_FILTER:
6506 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6507 break;
6508 case GIMPLE_TRY:
6509 lower_omp (gimple_try_eval (stmt), ctx);
6510 lower_omp (gimple_try_cleanup (stmt), ctx);
6511 break;
6512 case GIMPLE_BIND:
6513 lower_omp (gimple_bind_body (stmt), ctx);
6514 break;
6515 case GIMPLE_OMP_PARALLEL:
6516 case GIMPLE_OMP_TASK:
6517 ctx = maybe_lookup_ctx (stmt);
6518 lower_omp_taskreg (gsi_p, ctx);
6519 break;
6520 case GIMPLE_OMP_FOR:
6521 ctx = maybe_lookup_ctx (stmt);
6522 gcc_assert (ctx);
6523 lower_omp_for (gsi_p, ctx);
6524 break;
6525 case GIMPLE_OMP_SECTIONS:
6526 ctx = maybe_lookup_ctx (stmt);
6527 gcc_assert (ctx);
6528 lower_omp_sections (gsi_p, ctx);
6529 break;
6530 case GIMPLE_OMP_SINGLE:
6531 ctx = maybe_lookup_ctx (stmt);
6532 gcc_assert (ctx);
6533 lower_omp_single (gsi_p, ctx);
6534 break;
6535 case GIMPLE_OMP_MASTER:
6536 ctx = maybe_lookup_ctx (stmt);
6537 gcc_assert (ctx);
6538 lower_omp_master (gsi_p, ctx);
6539 break;
6540 case GIMPLE_OMP_ORDERED:
6541 ctx = maybe_lookup_ctx (stmt);
6542 gcc_assert (ctx);
6543 lower_omp_ordered (gsi_p, ctx);
6544 break;
6545 case GIMPLE_OMP_CRITICAL:
6546 ctx = maybe_lookup_ctx (stmt);
6547 gcc_assert (ctx);
6548 lower_omp_critical (gsi_p, ctx);
6549 break;
6550 case GIMPLE_OMP_ATOMIC_LOAD:
6551 if ((ctx || task_shared_vars)
6552 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6553 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6554 gimple_regimplify_operands (stmt, gsi_p);
6555 break;
6556 default:
6557 if ((ctx || task_shared_vars)
6558 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6559 ctx ? NULL : &wi))
6560 gimple_regimplify_operands (stmt, gsi_p);
6561 break;
6565 static void
6566 lower_omp (gimple_seq body, omp_context *ctx)
6568 location_t saved_location = input_location;
6569 gimple_stmt_iterator gsi = gsi_start (body);
6570 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6571 lower_omp_1 (&gsi, ctx);
6572 input_location = saved_location;
6575 /* Main entry point. */
6577 static unsigned int
6578 execute_lower_omp (void)
6580 gimple_seq body;
6582 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6583 delete_omp_context);
6585 body = gimple_body (current_function_decl);
6586 scan_omp (body, NULL);
6587 gcc_assert (taskreg_nesting_level == 0);
6589 if (all_contexts->root)
6591 struct gimplify_ctx gctx;
6593 if (task_shared_vars)
6594 push_gimplify_context (&gctx);
6595 lower_omp (body, NULL);
6596 if (task_shared_vars)
6597 pop_gimplify_context (NULL);
6600 if (all_contexts)
6602 splay_tree_delete (all_contexts);
6603 all_contexts = NULL;
6605 BITMAP_FREE (task_shared_vars);
6606 return 0;
6609 static bool
6610 gate_lower_omp (void)
6612 return flag_openmp != 0;
6615 struct gimple_opt_pass pass_lower_omp =
6618 GIMPLE_PASS,
6619 "omplower", /* name */
6620 gate_lower_omp, /* gate */
6621 execute_lower_omp, /* execute */
6622 NULL, /* sub */
6623 NULL, /* next */
6624 0, /* static_pass_number */
6625 0, /* tv_id */
6626 PROP_gimple_any, /* properties_required */
6627 PROP_gimple_lomp, /* properties_provided */
6628 0, /* properties_destroyed */
6629 0, /* todo_flags_start */
6630 TODO_dump_func /* todo_flags_finish */
6634 /* The following is a utility to diagnose OpenMP structured block violations.
6635 It is not part of the "omplower" pass, as that's invoked too late. It
6636 should be invoked by the respective front ends after gimplification. */
6638 static splay_tree all_labels;
6640 /* Check for mismatched contexts and generate an error if needed. Return
6641 true if an error is detected. */
6643 static bool
6644 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6645 gimple branch_ctx, gimple label_ctx)
6647 if (label_ctx == branch_ctx)
6648 return false;
6652 Previously we kept track of the label's entire context in diagnose_sb_[12]
6653 so we could traverse it and issue a correct "exit" or "enter" error
6654 message upon a structured block violation.
6656 We built the context by building a list with tree_cons'ing, but there is
6657 no easy counterpart in gimple tuples. It seems like far too much work
6658 for issuing exit/enter error messages. If someone really misses the
6659 distinct error message... patches welcome.
6662 #if 0
6663 /* Try to avoid confusing the user by producing an error message
6664 with correct "exit" or "enter" verbiage. We prefer "exit"
6665 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6666 if (branch_ctx == NULL)
6667 exit_p = false;
6668 else
6670 while (label_ctx)
6672 if (TREE_VALUE (label_ctx) == branch_ctx)
6674 exit_p = false;
6675 break;
6677 label_ctx = TREE_CHAIN (label_ctx);
6681 if (exit_p)
6682 error ("invalid exit from OpenMP structured block");
6683 else
6684 error ("invalid entry to OpenMP structured block");
6685 #endif
6687 /* If it's obvious we have an invalid entry, be specific about the error. */
6688 if (branch_ctx == NULL)
6689 error ("invalid entry to OpenMP structured block");
6690 else
6691 /* Otherwise, be vague and lazy, but efficient. */
6692 error ("invalid branch to/from an OpenMP structured block");
6694 gsi_replace (gsi_p, gimple_build_nop (), false);
6695 return true;
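/* For illustration: the two walks below reject control transfers that
   cross an OpenMP structured block boundary.  For example

	#pragma omp critical
	  {
	    if (err)
	      goto out;
	  }
	out:;

   is rejected: pass 1 records that "out" lies outside the critical
   construct, pass 2 sees a goto whose context (the critical construct)
   differs from the label's, and diagnose_sb_0 above reports the
   mismatch.  */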
6698 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
6699 where each label is found. */
6701 static tree
6702 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6703 struct walk_stmt_info *wi)
6705 gimple context = (gimple) wi->info;
6706 gimple inner_context;
6707 gimple stmt = gsi_stmt (*gsi_p);
6709 *handled_ops_p = true;
6711 switch (gimple_code (stmt))
6713 WALK_SUBSTMTS;
6715 case GIMPLE_OMP_PARALLEL:
6716 case GIMPLE_OMP_TASK:
6717 case GIMPLE_OMP_SECTIONS:
6718 case GIMPLE_OMP_SINGLE:
6719 case GIMPLE_OMP_SECTION:
6720 case GIMPLE_OMP_MASTER:
6721 case GIMPLE_OMP_ORDERED:
6722 case GIMPLE_OMP_CRITICAL:
6723 /* The minimal context here is just the current OMP construct. */
6724 inner_context = stmt;
6725 wi->info = inner_context;
6726 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6727 wi->info = context;
6728 break;
6730 case GIMPLE_OMP_FOR:
6731 inner_context = stmt;
6732 wi->info = inner_context;
6733 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6734 walk them. */
6735 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6736 diagnose_sb_1, NULL, wi);
6737 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6738 wi->info = context;
6739 break;
6741 case GIMPLE_LABEL:
6742 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
6743 (splay_tree_value) context);
6744 break;
6746 default:
6747 break;
6750 return NULL_TREE;
6753 /* Pass 2: Check each branch and see if its context differs from that of
6754 the destination label. */
6756 static tree
6757 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6758 struct walk_stmt_info *wi)
6760 gimple context = (gimple) wi->info;
6761 splay_tree_node n;
6762 gimple stmt = gsi_stmt (*gsi_p);
6764 *handled_ops_p = true;
6766 switch (gimple_code (stmt))
6768 WALK_SUBSTMTS;
6770 case GIMPLE_OMP_PARALLEL:
6771 case GIMPLE_OMP_TASK:
6772 case GIMPLE_OMP_SECTIONS:
6773 case GIMPLE_OMP_SINGLE:
6774 case GIMPLE_OMP_SECTION:
6775 case GIMPLE_OMP_MASTER:
6776 case GIMPLE_OMP_ORDERED:
6777 case GIMPLE_OMP_CRITICAL:
6778 wi->info = stmt;
6779 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6780 wi->info = context;
6781 break;
6783 case GIMPLE_OMP_FOR:
6784 wi->info = stmt;
6785 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6786 walk them. */
6787 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6788 diagnose_sb_2, NULL, wi);
6789 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6790 wi->info = context;
6791 break;
6793 case GIMPLE_GOTO:
6795 tree lab = gimple_goto_dest (stmt);
6796 if (TREE_CODE (lab) != LABEL_DECL)
6797 break;
6799 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6800 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
6802 break;
6804 case GIMPLE_SWITCH:
6806 unsigned int i;
6807 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
6809 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
6810 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6811 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
6812 break;
6815 break;
6817 case GIMPLE_RETURN:
6818 diagnose_sb_0 (gsi_p, context, NULL);
6819 break;
6821 default:
6822 break;
6825 return NULL_TREE;
6828 void
6829 diagnose_omp_structured_block_errors (tree fndecl)
6831 tree save_current = current_function_decl;
6832 struct walk_stmt_info wi;
6833 struct function *old_cfun = cfun;
6834 gimple_seq body = gimple_body (fndecl);
6836 current_function_decl = fndecl;
6837 set_cfun (DECL_STRUCT_FUNCTION (fndecl));
6839 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
6841 memset (&wi, 0, sizeof (wi));
6842 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
6844 memset (&wi, 0, sizeof (wi));
6845 wi.want_locations = true;
6846 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
6848 splay_tree_delete (all_labels);
6849 all_labels = NULL;
6851 set_cfun (old_cfun);
6852 current_function_decl = save_current;
6855 #include "gt-omp-low.h"