[official-gcc.git] / gcc / omp-low.c
1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
6 Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "gimple.h"
31 #include "tree-iterator.h"
32 #include "tree-inline.h"
33 #include "langhooks.h"
34 #include "diagnostic.h"
35 #include "tree-flow.h"
36 #include "timevar.h"
37 #include "flags.h"
38 #include "function.h"
39 #include "expr.h"
40 #include "toplev.h"
41 #include "tree-pass.h"
42 #include "ggc.h"
43 #include "except.h"
44 #include "splay-tree.h"
45 #include "optabs.h"
46 #include "cfgloop.h"
49 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
50 phases. The first phase scans the function looking for OMP statements
51 and then for variables that must be replaced to satisfy data sharing
52 clauses. The second phase expands code for the constructs, as well as
53 re-gimplifying things when variables have been replaced with complex
54 expressions.
56 Final code generation is done by pass_expand_omp. The flowgraph is
57 scanned for parallel regions which are then moved to a new
58 function, to be invoked by the thread library. */
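/* For illustration only (not emitted verbatim by this pass), a sketch of
   the overall transformation under the usual libgomp calling convention:
   a region such as

       #pragma omp parallel shared (n)
         body (n);

   ends up outlined into a child function taking a pointer to a generated
   .omp_data_s record that implements the data sharing clauses, roughly

       struct .omp_data_s { int n; } .omp_data_o;
       .omp_data_o.n = n;
       GOMP_parallel_start (foo.omp_fn.0, &.omp_data_o, 0);
       foo.omp_fn.0 (&.omp_data_o);
       GOMP_parallel_end ();

   The record layout and the exact runtime calls are decided later, in
   pass_expand_omp; the names above are illustrative.  */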
60 /* Context structure. Used to store information about each parallel
61 directive in the code. */
63 typedef struct omp_context
65 /* This field must be at the beginning, as we do "inheritance": Some
66 callback functions for tree-inline.c (e.g., omp_copy_decl)
67 receive a copy_body_data pointer that is up-casted to an
68 omp_context pointer. */
69 copy_body_data cb;
71 /* The tree of contexts corresponding to the encountered constructs. */
72 struct omp_context *outer;
73 gimple stmt;
75 /* Map variables to fields in a structure that allows communication
76 between sending and receiving threads. */
77 splay_tree field_map;
78 tree record_type;
79 tree sender_decl;
80 tree receiver_decl;
82 /* These are used just by task contexts, if task firstprivate fn is
83 needed. srecord_type is used to communicate from the thread
 84 that encountered the task construct to task firstprivate fn;
 85 record_type is allocated by GOMP_task, initialized by task firstprivate
86 fn and passed to the task body fn. */
87 splay_tree sfield_map;
88 tree srecord_type;
90 /* A chain of variables to add to the top-level block surrounding the
91 construct. In the case of a parallel, this is in the child function. */
92 tree block_vars;
94 /* What to do with variables with implicitly determined sharing
95 attributes. */
96 enum omp_clause_default_kind default_kind;
 98 /* Nesting depth of this context. Used to beautify error messages regarding
99 invalid gotos. The outermost ctx is depth 1, with depth 0 being
100 reserved for the main body of the function. */
101 int depth;
103 /* True if this parallel directive is nested within another. */
104 bool is_nested;
105 } omp_context;
108 struct omp_for_data_loop
110 tree v, n1, n2, step;
111 enum tree_code cond_code;
114 /* A structure describing the main elements of a parallel loop. */
116 struct omp_for_data
118 struct omp_for_data_loop loop;
119 tree chunk_size;
120 gimple for_stmt;
121 tree pre, iter_type;
122 int collapse;
123 bool have_nowait, have_ordered;
124 enum omp_clause_schedule_kind sched_kind;
125 struct omp_for_data_loop *loops;
129 static splay_tree all_contexts;
130 static int taskreg_nesting_level;
131 struct omp_region *root_omp_region;
132 static bitmap task_shared_vars;
134 static void scan_omp (gimple_seq, omp_context *);
135 static tree scan_omp_1_op (tree *, int *, void *);
137 #define WALK_SUBSTMTS \
138 case GIMPLE_BIND: \
139 case GIMPLE_TRY: \
140 case GIMPLE_CATCH: \
141 case GIMPLE_EH_FILTER: \
142 /* The sub-statements for these should be walked. */ \
143 *handled_ops_p = false; \
144 break;
146 /* Convenience function for calling scan_omp_1_op on tree operands. */
148 static inline tree
149 scan_omp_op (tree *tp, omp_context *ctx)
151 struct walk_stmt_info wi;
153 memset (&wi, 0, sizeof (wi));
154 wi.info = ctx;
155 wi.want_locations = true;
157 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
160 static void lower_omp (gimple_seq, omp_context *);
161 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
162 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
164 /* Find an OpenMP clause of type KIND within CLAUSES. */
166 tree
167 find_omp_clause (tree clauses, enum omp_clause_code kind)
169 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
170 if (OMP_CLAUSE_CODE (clauses) == kind)
171 return clauses;
173 return NULL_TREE;
176 /* Return true if CTX is for an omp parallel. */
178 static inline bool
179 is_parallel_ctx (omp_context *ctx)
181 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
185 /* Return true if CTX is for an omp task. */
187 static inline bool
188 is_task_ctx (omp_context *ctx)
190 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
194 /* Return true if CTX is for an omp parallel or omp task. */
196 static inline bool
197 is_taskreg_ctx (omp_context *ctx)
199 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
200 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
204 /* Return true if REGION is a combined parallel+workshare region. */
206 static inline bool
207 is_combined_parallel (struct omp_region *region)
209 return region->is_combined_parallel;
213 /* Extract the header elements of parallel loop FOR_STMT and store
214 them into *FD. */
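/* Illustrative example of the normalization done here (the authoritative
   logic is the cond_code and increment handling below): a loop written as

       #pragma omp for
       for (i = 0; i <= n; i++)

   is recorded with loop->n1 = 0, loop->n2 = n + 1, cond_code LT_EXPR and
   step 1, and a decrementing loop using >= is similarly canonicalized to
   > with n2 adjusted by -1.  */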
216 static void
217 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
218 struct omp_for_data_loop *loops)
220 tree t, var, *collapse_iter, *collapse_count;
221 tree count = NULL_TREE, iter_type = long_integer_type_node;
222 struct omp_for_data_loop *loop;
223 int i;
224 struct omp_for_data_loop dummy_loop;
226 fd->for_stmt = for_stmt;
227 fd->pre = NULL;
228 fd->collapse = gimple_omp_for_collapse (for_stmt);
229 if (fd->collapse > 1)
230 fd->loops = loops;
231 else
232 fd->loops = &fd->loop;
234 fd->have_nowait = fd->have_ordered = false;
235 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
236 fd->chunk_size = NULL_TREE;
237 collapse_iter = NULL;
238 collapse_count = NULL;
240 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
241 switch (OMP_CLAUSE_CODE (t))
243 case OMP_CLAUSE_NOWAIT:
244 fd->have_nowait = true;
245 break;
246 case OMP_CLAUSE_ORDERED:
247 fd->have_ordered = true;
248 break;
249 case OMP_CLAUSE_SCHEDULE:
250 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
251 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
252 break;
253 case OMP_CLAUSE_COLLAPSE:
254 if (fd->collapse > 1)
256 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
257 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
259 default:
260 break;
263 /* FIXME: for now map schedule(auto) to schedule(static).
264 There should be analysis to determine whether all iterations
265 are approximately the same amount of work (then schedule(static)
266 is best) or if it varies (then schedule(dynamic,N) is better). */
267 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
269 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
270 gcc_assert (fd->chunk_size == NULL);
272 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
273 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
274 gcc_assert (fd->chunk_size == NULL);
275 else if (fd->chunk_size == NULL)
277 /* We only need to compute a default chunk size for ordered
278 static loops and dynamic loops. */
279 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
280 || fd->have_ordered
281 || fd->collapse > 1)
282 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
283 ? integer_zero_node : integer_one_node;
286 for (i = 0; i < fd->collapse; i++)
288 if (fd->collapse == 1)
289 loop = &fd->loop;
290 else if (loops != NULL)
291 loop = loops + i;
292 else
293 loop = &dummy_loop;
296 loop->v = gimple_omp_for_index (for_stmt, i);
297 gcc_assert (SSA_VAR_P (loop->v));
298 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
299 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
300 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
301 loop->n1 = gimple_omp_for_initial (for_stmt, i);
303 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
304 loop->n2 = gimple_omp_for_final (for_stmt, i);
305 switch (loop->cond_code)
307 case LT_EXPR:
308 case GT_EXPR:
309 break;
310 case LE_EXPR:
311 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
312 loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
313 loop->n2, size_one_node);
314 else
315 loop->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
316 build_int_cst (TREE_TYPE (loop->n2), 1));
317 loop->cond_code = LT_EXPR;
318 break;
319 case GE_EXPR:
320 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
321 loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
322 loop->n2, size_int (-1));
323 else
324 loop->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
325 build_int_cst (TREE_TYPE (loop->n2), 1));
326 loop->cond_code = GT_EXPR;
327 break;
328 default:
329 gcc_unreachable ();
332 t = gimple_omp_for_incr (for_stmt, i);
333 gcc_assert (TREE_OPERAND (t, 0) == var);
334 switch (TREE_CODE (t))
336 case PLUS_EXPR:
337 case POINTER_PLUS_EXPR:
338 loop->step = TREE_OPERAND (t, 1);
339 break;
340 case MINUS_EXPR:
341 loop->step = TREE_OPERAND (t, 1);
342 loop->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (loop->step),
343 loop->step);
344 break;
345 default:
346 gcc_unreachable ();
349 if (iter_type != long_long_unsigned_type_node)
351 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
352 iter_type = long_long_unsigned_type_node;
353 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
354 && TYPE_PRECISION (TREE_TYPE (loop->v))
355 >= TYPE_PRECISION (iter_type))
357 tree n;
359 if (loop->cond_code == LT_EXPR)
360 n = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
361 loop->n2, loop->step);
362 else
363 n = loop->n1;
364 if (TREE_CODE (n) != INTEGER_CST
365 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
366 iter_type = long_long_unsigned_type_node;
368 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
369 > TYPE_PRECISION (iter_type))
371 tree n1, n2;
373 if (loop->cond_code == LT_EXPR)
375 n1 = loop->n1;
376 n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
377 loop->n2, loop->step);
379 else
381 n1 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->v),
382 loop->n2, loop->step);
383 n2 = loop->n1;
385 if (TREE_CODE (n1) != INTEGER_CST
386 || TREE_CODE (n2) != INTEGER_CST
387 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
388 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
389 iter_type = long_long_unsigned_type_node;
393 if (collapse_count && *collapse_count == NULL)
395 if ((i == 0 || count != NULL_TREE)
396 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
397 && TREE_CONSTANT (loop->n1)
398 && TREE_CONSTANT (loop->n2)
399 && TREE_CODE (loop->step) == INTEGER_CST)
401 tree itype = TREE_TYPE (loop->v);
403 if (POINTER_TYPE_P (itype))
404 itype
405 = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
406 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
407 t = fold_build2 (PLUS_EXPR, itype,
408 fold_convert (itype, loop->step), t);
409 t = fold_build2 (PLUS_EXPR, itype, t,
410 fold_convert (itype, loop->n2));
411 t = fold_build2 (MINUS_EXPR, itype, t,
412 fold_convert (itype, loop->n1));
413 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
414 t = fold_build2 (TRUNC_DIV_EXPR, itype,
415 fold_build1 (NEGATE_EXPR, itype, t),
416 fold_build1 (NEGATE_EXPR, itype,
417 fold_convert (itype,
418 loop->step)));
419 else
420 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
421 fold_convert (itype, loop->step));
422 t = fold_convert (long_long_unsigned_type_node, t);
423 if (count != NULL_TREE)
424 count = fold_build2 (MULT_EXPR, long_long_unsigned_type_node,
425 count, t);
426 else
427 count = t;
428 if (TREE_CODE (count) != INTEGER_CST)
429 count = NULL_TREE;
431 else
432 count = NULL_TREE;
436 if (count)
438 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
439 iter_type = long_long_unsigned_type_node;
440 else
441 iter_type = long_integer_type_node;
443 else if (collapse_iter && *collapse_iter != NULL)
444 iter_type = TREE_TYPE (*collapse_iter);
445 fd->iter_type = iter_type;
446 if (collapse_iter && *collapse_iter == NULL)
447 *collapse_iter = create_tmp_var (iter_type, ".iter");
448 if (collapse_count && *collapse_count == NULL)
450 if (count)
451 *collapse_count = fold_convert (iter_type, count);
452 else
453 *collapse_count = create_tmp_var (iter_type, ".count");
456 if (fd->collapse > 1)
458 fd->loop.v = *collapse_iter;
459 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
460 fd->loop.n2 = *collapse_count;
461 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
462 fd->loop.cond_code = LT_EXPR;
467 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
468 is the immediate dominator of PAR_ENTRY_BB, return true if there
469 are no data dependencies that would prevent expanding the parallel
470 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
472 When expanding a combined parallel+workshare region, the call to
473 the child function may need additional arguments in the case of
474 GIMPLE_OMP_FOR regions. In some cases, these arguments are
475 computed out of variables passed in from the parent to the child
476 via 'struct .omp_data_s'. For instance:
478 #pragma omp parallel for schedule (guided, i * 4)
479 for (j ...)
481 Is lowered into:
483 # BLOCK 2 (PAR_ENTRY_BB)
484 .omp_data_o.i = i;
 485 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]
487 # BLOCK 3 (WS_ENTRY_BB)
488 .omp_data_i = &.omp_data_o;
489 D.1667 = .omp_data_i->i;
490 D.1598 = D.1667 * 4;
491 #pragma omp for schedule (guided, D.1598)
493 When we outline the parallel region, the call to the child function
494 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
495 that value is computed *after* the call site. So, in principle we
496 cannot do the transformation.
498 To see whether the code in WS_ENTRY_BB blocks the combined
499 parallel+workshare call, we collect all the variables used in the
 500 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
501 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
502 call.
504 FIXME. If we had the SSA form built at this point, we could merely
505 hoist the code in block 3 into block 2 and be done with it. But at
506 this point we don't have dataflow information and though we could
507 hack something up here, it is really not worth the aggravation. */
509 static bool
510 workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
512 struct omp_for_data fd;
513 gimple par_stmt, ws_stmt;
515 par_stmt = last_stmt (par_entry_bb);
516 ws_stmt = last_stmt (ws_entry_bb);
518 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
519 return true;
521 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
523 extract_omp_for_data (ws_stmt, &fd, NULL);
525 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
526 return false;
527 if (fd.iter_type != long_integer_type_node)
528 return false;
530 /* FIXME. We give up too easily here. If any of these arguments
531 are not constants, they will likely involve variables that have
532 been mapped into fields of .omp_data_s for sharing with the child
533 function. With appropriate data flow, it would be possible to
534 see through this. */
535 if (!is_gimple_min_invariant (fd.loop.n1)
536 || !is_gimple_min_invariant (fd.loop.n2)
537 || !is_gimple_min_invariant (fd.loop.step)
538 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
539 return false;
541 return true;
545 /* Collect additional arguments needed to emit a combined
546 parallel+workshare call. WS_STMT is the workshare directive being
547 expanded. */
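/* Illustrative note: for a GIMPLE_OMP_FOR the list is built by consing
   chunk_size (if any), then step, n2 and n1, so the resulting ws_args
   chain reads, in order,

       n1, n2, step [, chunk_size]

   which is the order of the extra trailing arguments expected by the
   combined GOMP_parallel_loop_*_start entry points.  For
   GIMPLE_OMP_SECTIONS it is just the section count.  */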
549 static tree
550 get_ws_args_for (gimple ws_stmt)
552 tree t;
554 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
556 struct omp_for_data fd;
557 tree ws_args;
559 extract_omp_for_data (ws_stmt, &fd, NULL);
561 ws_args = NULL_TREE;
562 if (fd.chunk_size)
564 t = fold_convert (long_integer_type_node, fd.chunk_size);
565 ws_args = tree_cons (NULL, t, ws_args);
568 t = fold_convert (long_integer_type_node, fd.loop.step);
569 ws_args = tree_cons (NULL, t, ws_args);
571 t = fold_convert (long_integer_type_node, fd.loop.n2);
572 ws_args = tree_cons (NULL, t, ws_args);
574 t = fold_convert (long_integer_type_node, fd.loop.n1);
575 ws_args = tree_cons (NULL, t, ws_args);
577 return ws_args;
579 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
581 /* Number of sections is equal to the number of edges from the
582 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
583 the exit of the sections region. */
584 basic_block bb = single_succ (gimple_bb (ws_stmt));
585 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
586 t = tree_cons (NULL, t, NULL);
587 return t;
590 gcc_unreachable ();
594 /* Discover whether REGION is a combined parallel+workshare region. */
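/* A sketch of the distinction made below (illustrative only):

       #pragma omp parallel
       #pragma omp for schedule (dynamic)
       for (i = 0; i < n; i++) ...

   may be emitted as a single combined library call such as
   GOMP_parallel_loop_dynamic_start, whereas schedule(static) and ordered
   loops are kept as separate parallel and workshare regions, for the
   reasons given in the comment inside this function.  */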
596 static void
597 determine_parallel_type (struct omp_region *region)
599 basic_block par_entry_bb, par_exit_bb;
600 basic_block ws_entry_bb, ws_exit_bb;
602 if (region == NULL || region->inner == NULL
603 || region->exit == NULL || region->inner->exit == NULL
604 || region->inner->cont == NULL)
605 return;
607 /* We only support parallel+for and parallel+sections. */
608 if (region->type != GIMPLE_OMP_PARALLEL
609 || (region->inner->type != GIMPLE_OMP_FOR
610 && region->inner->type != GIMPLE_OMP_SECTIONS))
611 return;
613 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
614 WS_EXIT_BB -> PAR_EXIT_BB. */
615 par_entry_bb = region->entry;
616 par_exit_bb = region->exit;
617 ws_entry_bb = region->inner->entry;
618 ws_exit_bb = region->inner->exit;
620 if (single_succ (par_entry_bb) == ws_entry_bb
621 && single_succ (ws_exit_bb) == par_exit_bb
622 && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
623 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
624 || (last_and_only_stmt (ws_entry_bb)
625 && last_and_only_stmt (par_exit_bb))))
627 gimple ws_stmt = last_stmt (ws_entry_bb);
629 if (region->inner->type == GIMPLE_OMP_FOR)
631 /* If this is a combined parallel loop, we need to determine
632 whether or not to use the combined library calls. There
633 are two cases where we do not apply the transformation:
634 static loops and any kind of ordered loop. In the first
635 case, we already open code the loop so there is no need
636 to do anything else. In the latter case, the combined
637 parallel loop call would still need extra synchronization
638 to implement ordered semantics, so there would not be any
639 gain in using the combined call. */
640 tree clauses = gimple_omp_for_clauses (ws_stmt);
641 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
642 if (c == NULL
643 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
644 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
646 region->is_combined_parallel = false;
647 region->inner->is_combined_parallel = false;
648 return;
652 region->is_combined_parallel = true;
653 region->inner->is_combined_parallel = true;
654 region->ws_args = get_ws_args_for (ws_stmt);
659 /* Return true if EXPR is variable sized. */
661 static inline bool
662 is_variable_sized (const_tree expr)
664 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
667 /* Return true if DECL is a reference type. */
669 static inline bool
670 is_reference (tree decl)
672 return lang_hooks.decls.omp_privatize_by_reference (decl);
675 /* Lookup variables in the decl or field splay trees. The "maybe" form
 676 allows the variable to not have been entered; otherwise we
 677 assert that it must have been entered. */
679 static inline tree
680 lookup_decl (tree var, omp_context *ctx)
682 tree *n;
683 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
684 return *n;
687 static inline tree
688 maybe_lookup_decl (const_tree var, omp_context *ctx)
690 tree *n;
691 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
692 return n ? *n : NULL_TREE;
695 static inline tree
696 lookup_field (tree var, omp_context *ctx)
698 splay_tree_node n;
699 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
700 return (tree) n->value;
703 static inline tree
704 lookup_sfield (tree var, omp_context *ctx)
706 splay_tree_node n;
707 n = splay_tree_lookup (ctx->sfield_map
708 ? ctx->sfield_map : ctx->field_map,
709 (splay_tree_key) var);
710 return (tree) n->value;
713 static inline tree
714 maybe_lookup_field (tree var, omp_context *ctx)
716 splay_tree_node n;
717 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
718 return n ? (tree) n->value : NULL_TREE;
721 /* Return true if DECL should be copied by pointer. SHARED_CTX is
722 the parallel context if DECL is to be shared. */
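/* Rough intuition (illustrative, derived from the checks below): a plain
   local scalar with no address taken can use copy-in/copy-out, while
   aggregates, addressable variables, variables already shared by an
   enclosing parallel, and most task-shared variables are passed by
   pointer so that every thread sees the one original object.  */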
724 static bool
725 use_pointer_for_field (tree decl, omp_context *shared_ctx)
727 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
728 return true;
730 /* We can only use copy-in/copy-out semantics for shared variables
731 when we know the value is not accessible from an outer scope. */
732 if (shared_ctx)
734 /* ??? Trivially accessible from anywhere. But why would we even
735 be passing an address in this case? Should we simply assert
736 this to be false, or should we have a cleanup pass that removes
737 these from the list of mappings? */
738 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
739 return true;
741 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
742 without analyzing the expression whether or not its location
743 is accessible to anyone else. In the case of nested parallel
744 regions it certainly may be. */
745 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
746 return true;
748 /* Do not use copy-in/copy-out for variables that have their
749 address taken. */
750 if (TREE_ADDRESSABLE (decl))
751 return true;
753 /* Disallow copy-in/out in nested parallel if
754 decl is shared in outer parallel, otherwise
755 each thread could store the shared variable
756 in its own copy-in location, making the
757 variable no longer really shared. */
758 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
760 omp_context *up;
762 for (up = shared_ctx->outer; up; up = up->outer)
763 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
764 break;
766 if (up)
768 tree c;
770 for (c = gimple_omp_taskreg_clauses (up->stmt);
771 c; c = OMP_CLAUSE_CHAIN (c))
772 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
773 && OMP_CLAUSE_DECL (c) == decl)
774 break;
776 if (c)
777 return true;
781 /* For tasks avoid using copy-in/out, unless they are readonly
782 (in which case just copy-in is used). As tasks can be
 783 deferred or executed in a different thread, when GOMP_task
784 returns, the task hasn't necessarily terminated. */
785 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
787 tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
788 if (is_gimple_reg (outer))
790 /* Taking address of OUTER in lower_send_shared_vars
791 might need regimplification of everything that uses the
792 variable. */
793 if (!task_shared_vars)
794 task_shared_vars = BITMAP_ALLOC (NULL);
795 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
796 TREE_ADDRESSABLE (outer) = 1;
798 return true;
802 return false;
805 /* Create a new VAR_DECL and copy information from VAR to it. */
807 tree
808 copy_var_decl (tree var, tree name, tree type)
810 tree copy = build_decl (VAR_DECL, name, type);
812 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
813 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
814 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
815 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
816 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
817 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
818 DECL_SOURCE_LOCATION (copy) = DECL_SOURCE_LOCATION (var);
819 TREE_USED (copy) = 1;
820 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
822 return copy;
825 /* Construct a new automatic decl similar to VAR. */
827 static tree
828 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
830 tree copy = copy_var_decl (var, name, type);
832 DECL_CONTEXT (copy) = current_function_decl;
833 TREE_CHAIN (copy) = ctx->block_vars;
834 ctx->block_vars = copy;
836 return copy;
839 static tree
840 omp_copy_decl_1 (tree var, omp_context *ctx)
842 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
845 /* Build tree nodes to access the field for VAR on the receiver side. */
847 static tree
848 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
850 tree x, field = lookup_field (var, ctx);
852 /* If the receiver record type was remapped in the child function,
853 remap the field into the new record type. */
854 x = maybe_lookup_field (field, ctx);
855 if (x != NULL)
856 field = x;
858 x = build_fold_indirect_ref (ctx->receiver_decl);
859 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
860 if (by_ref)
861 x = build_fold_indirect_ref (x);
863 return x;
866 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
867 of a parallel, this is a component reference; for workshare constructs
868 this is some variable. */
870 static tree
871 build_outer_var_ref (tree var, omp_context *ctx)
873 tree x;
875 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
876 x = var;
877 else if (is_variable_sized (var))
879 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
880 x = build_outer_var_ref (x, ctx);
881 x = build_fold_indirect_ref (x);
883 else if (is_taskreg_ctx (ctx))
885 bool by_ref = use_pointer_for_field (var, NULL);
886 x = build_receiver_ref (var, by_ref, ctx);
888 else if (ctx->outer)
889 x = lookup_decl (var, ctx->outer);
890 else if (is_reference (var))
891 /* This can happen with orphaned constructs. If var is reference, it is
892 possible it is shared and as such valid. */
893 x = var;
894 else
895 gcc_unreachable ();
897 if (is_reference (var))
898 x = build_fold_indirect_ref (x);
900 return x;
903 /* Build tree nodes to access the field for VAR on the sender side. */
905 static tree
906 build_sender_ref (tree var, omp_context *ctx)
908 tree field = lookup_sfield (var, ctx);
909 return build3 (COMPONENT_REF, TREE_TYPE (field),
910 ctx->sender_decl, field, NULL);
913 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
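/* Note on MASK (derived from the uses below): bit 0 requests a field in
   ctx->record_type recorded in field_map, bit 1 requests one in
   ctx->srecord_type recorded in sfield_map, and MASK == 3 installs the
   field in both.  For example the shared clause uses
   install_var_field (decl, by_ref, 3, ctx).  */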
915 static void
916 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
918 tree field, type, sfield = NULL_TREE;
920 gcc_assert ((mask & 1) == 0
921 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
922 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
923 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
925 type = TREE_TYPE (var);
926 if (by_ref)
927 type = build_pointer_type (type);
928 else if ((mask & 3) == 1 && is_reference (var))
929 type = TREE_TYPE (type);
931 field = build_decl (FIELD_DECL, DECL_NAME (var), type);
933 /* Remember what variable this field was created for. This does have a
934 side effect of making dwarf2out ignore this member, so for helpful
935 debugging we clear it later in delete_omp_context. */
936 DECL_ABSTRACT_ORIGIN (field) = var;
937 if (type == TREE_TYPE (var))
939 DECL_ALIGN (field) = DECL_ALIGN (var);
940 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
941 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
943 else
944 DECL_ALIGN (field) = TYPE_ALIGN (type);
946 if ((mask & 3) == 3)
948 insert_field_into_struct (ctx->record_type, field);
949 if (ctx->srecord_type)
951 sfield = build_decl (FIELD_DECL, DECL_NAME (var), type);
952 DECL_ABSTRACT_ORIGIN (sfield) = var;
953 DECL_ALIGN (sfield) = DECL_ALIGN (field);
954 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
955 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
956 insert_field_into_struct (ctx->srecord_type, sfield);
959 else
961 if (ctx->srecord_type == NULL_TREE)
963 tree t;
965 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
966 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
967 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
969 sfield = build_decl (FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
970 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
971 insert_field_into_struct (ctx->srecord_type, sfield);
972 splay_tree_insert (ctx->sfield_map,
973 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
974 (splay_tree_value) sfield);
977 sfield = field;
978 insert_field_into_struct ((mask & 1) ? ctx->record_type
979 : ctx->srecord_type, field);
982 if (mask & 1)
983 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
984 (splay_tree_value) field);
985 if ((mask & 2) && ctx->sfield_map)
986 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
987 (splay_tree_value) sfield);
990 static tree
991 install_var_local (tree var, omp_context *ctx)
993 tree new_var = omp_copy_decl_1 (var, ctx);
994 insert_decl_map (&ctx->cb, var, new_var);
995 return new_var;
998 /* Adjust the replacement for DECL in CTX for the new context. This means
999 copying the DECL_VALUE_EXPR, and fixing up the type. */
1001 static void
1002 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1004 tree new_decl, size;
1006 new_decl = lookup_decl (decl, ctx);
1008 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1010 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1011 && DECL_HAS_VALUE_EXPR_P (decl))
1013 tree ve = DECL_VALUE_EXPR (decl);
1014 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1015 SET_DECL_VALUE_EXPR (new_decl, ve);
1016 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1019 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1021 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1022 if (size == error_mark_node)
1023 size = TYPE_SIZE (TREE_TYPE (new_decl));
1024 DECL_SIZE (new_decl) = size;
1026 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1027 if (size == error_mark_node)
1028 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1029 DECL_SIZE_UNIT (new_decl) = size;
1033 /* The callback for remap_decl. Search all containing contexts for a
1034 mapping of the variable; this avoids having to duplicate the splay
1035 tree ahead of time. We know a mapping doesn't already exist in the
1036 given context. Create new mappings to implement default semantics. */
1038 static tree
1039 omp_copy_decl (tree var, copy_body_data *cb)
1041 omp_context *ctx = (omp_context *) cb;
1042 tree new_var;
1044 if (TREE_CODE (var) == LABEL_DECL)
1046 new_var = create_artificial_label ();
1047 DECL_CONTEXT (new_var) = current_function_decl;
1048 insert_decl_map (&ctx->cb, var, new_var);
1049 return new_var;
1052 while (!is_taskreg_ctx (ctx))
1054 ctx = ctx->outer;
1055 if (ctx == NULL)
1056 return var;
1057 new_var = maybe_lookup_decl (var, ctx);
1058 if (new_var)
1059 return new_var;
1062 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1063 return var;
1065 return error_mark_node;
1069 /* Return the parallel region associated with STMT. */
1071 /* Debugging dumps for parallel regions. */
1072 void dump_omp_region (FILE *, struct omp_region *, int);
1073 void debug_omp_region (struct omp_region *);
1074 void debug_all_omp_regions (void);
1076 /* Dump the parallel region tree rooted at REGION. */
1078 void
1079 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1081 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1082 gimple_code_name[region->type]);
1084 if (region->inner)
1085 dump_omp_region (file, region->inner, indent + 4);
1087 if (region->cont)
1089 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1090 region->cont->index);
1093 if (region->exit)
1094 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1095 region->exit->index);
1096 else
1097 fprintf (file, "%*s[no exit marker]\n", indent, "");
1099 if (region->next)
1100 dump_omp_region (file, region->next, indent);
1103 void
1104 debug_omp_region (struct omp_region *region)
1106 dump_omp_region (stderr, region, 0);
1109 void
1110 debug_all_omp_regions (void)
1112 dump_omp_region (stderr, root_omp_region, 0);
1116 /* Create a new parallel region starting at STMT inside region PARENT. */
1118 struct omp_region *
1119 new_omp_region (basic_block bb, enum gimple_code type,
1120 struct omp_region *parent)
1122 struct omp_region *region = XCNEW (struct omp_region);
1124 region->outer = parent;
1125 region->entry = bb;
1126 region->type = type;
1128 if (parent)
1130 /* This is a nested region. Add it to the list of inner
1131 regions in PARENT. */
1132 region->next = parent->inner;
1133 parent->inner = region;
1135 else
1137 /* This is a toplevel region. Add it to the list of toplevel
1138 regions in ROOT_OMP_REGION. */
1139 region->next = root_omp_region;
1140 root_omp_region = region;
1143 return region;
1146 /* Release the memory associated with the region tree rooted at REGION. */
1148 static void
1149 free_omp_region_1 (struct omp_region *region)
1151 struct omp_region *i, *n;
1153 for (i = region->inner; i ; i = n)
1155 n = i->next;
1156 free_omp_region_1 (i);
1159 free (region);
1162 /* Release the memory for the entire omp region tree. */
1164 void
1165 free_omp_regions (void)
1167 struct omp_region *r, *n;
1168 for (r = root_omp_region; r ; r = n)
1170 n = r->next;
1171 free_omp_region_1 (r);
1173 root_omp_region = NULL;
1177 /* Create a new context, with OUTER_CTX being the surrounding context. */
1179 static omp_context *
1180 new_omp_context (gimple stmt, omp_context *outer_ctx)
1182 omp_context *ctx = XCNEW (omp_context);
1184 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1185 (splay_tree_value) ctx);
1186 ctx->stmt = stmt;
1188 if (outer_ctx)
1190 ctx->outer = outer_ctx;
1191 ctx->cb = outer_ctx->cb;
1192 ctx->cb.block = NULL;
1193 ctx->depth = outer_ctx->depth + 1;
1195 else
1197 ctx->cb.src_fn = current_function_decl;
1198 ctx->cb.dst_fn = current_function_decl;
1199 ctx->cb.src_node = cgraph_node (current_function_decl);
1200 ctx->cb.dst_node = ctx->cb.src_node;
1201 ctx->cb.src_cfun = cfun;
1202 ctx->cb.copy_decl = omp_copy_decl;
1203 ctx->cb.eh_region = -1;
1204 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1205 ctx->depth = 1;
1208 ctx->cb.decl_map = pointer_map_create ();
1210 return ctx;
1213 static gimple_seq maybe_catch_exception (gimple_seq);
1215 /* Finalize task copyfn. */
1217 static void
1218 finalize_task_copyfn (gimple task_stmt)
1220 struct function *child_cfun;
1221 tree child_fn, old_fn;
1222 gimple_seq seq, new_seq;
1223 gimple bind;
1225 child_fn = gimple_omp_task_copy_fn (task_stmt);
1226 if (child_fn == NULL_TREE)
1227 return;
1229 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1231 /* Inform the callgraph about the new function. */
1232 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1233 = cfun->curr_properties;
1235 old_fn = current_function_decl;
1236 push_cfun (child_cfun);
1237 current_function_decl = child_fn;
1238 bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
1239 seq = gimple_seq_alloc ();
1240 gimple_seq_add_stmt (&seq, bind);
1241 new_seq = maybe_catch_exception (seq);
1242 if (new_seq != seq)
1244 bind = gimple_build_bind (NULL, new_seq, NULL);
1245 seq = gimple_seq_alloc ();
1246 gimple_seq_add_stmt (&seq, bind);
1248 gimple_set_body (child_fn, seq);
1249 pop_cfun ();
1250 current_function_decl = old_fn;
1252 cgraph_add_new_function (child_fn, false);
 1255 /* Destroy an omp_context data structure. Called through the splay tree
1256 value delete callback. */
1258 static void
1259 delete_omp_context (splay_tree_value value)
1261 omp_context *ctx = (omp_context *) value;
1263 pointer_map_destroy (ctx->cb.decl_map);
1265 if (ctx->field_map)
1266 splay_tree_delete (ctx->field_map);
1267 if (ctx->sfield_map)
1268 splay_tree_delete (ctx->sfield_map);
1270 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1271 it produces corrupt debug information. */
1272 if (ctx->record_type)
1274 tree t;
1275 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1276 DECL_ABSTRACT_ORIGIN (t) = NULL;
1278 if (ctx->srecord_type)
1280 tree t;
1281 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
1282 DECL_ABSTRACT_ORIGIN (t) = NULL;
1285 if (is_task_ctx (ctx))
1286 finalize_task_copyfn (ctx->stmt);
1288 XDELETE (ctx);
1291 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1292 context. */
1294 static void
1295 fixup_child_record_type (omp_context *ctx)
1297 tree f, type = ctx->record_type;
1299 /* ??? It isn't sufficient to just call remap_type here, because
1300 variably_modified_type_p doesn't work the way we expect for
1301 record types. Testing each field for whether it needs remapping
1302 and creating a new record by hand works, however. */
1303 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
1304 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1305 break;
1306 if (f)
1308 tree name, new_fields = NULL;
1310 type = lang_hooks.types.make_type (RECORD_TYPE);
1311 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1312 name = build_decl (TYPE_DECL, name, type);
1313 TYPE_NAME (type) = name;
1315 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
1317 tree new_f = copy_node (f);
1318 DECL_CONTEXT (new_f) = type;
1319 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1320 TREE_CHAIN (new_f) = new_fields;
1321 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1322 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1323 &ctx->cb, NULL);
1324 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1325 &ctx->cb, NULL);
1326 new_fields = new_f;
1328 /* Arrange to be able to look up the receiver field
1329 given the sender field. */
1330 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1331 (splay_tree_value) new_f);
1333 TYPE_FIELDS (type) = nreverse (new_fields);
1334 layout_type (type);
1337 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1340 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1341 specified by CLAUSES. */
1343 static void
1344 scan_sharing_clauses (tree clauses, omp_context *ctx)
1346 tree c, decl;
1347 bool scan_array_reductions = false;
1349 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1351 bool by_ref;
1353 switch (OMP_CLAUSE_CODE (c))
1355 case OMP_CLAUSE_PRIVATE:
1356 decl = OMP_CLAUSE_DECL (c);
1357 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1358 goto do_private;
1359 else if (!is_variable_sized (decl))
1360 install_var_local (decl, ctx);
1361 break;
1363 case OMP_CLAUSE_SHARED:
1364 gcc_assert (is_taskreg_ctx (ctx));
1365 decl = OMP_CLAUSE_DECL (c);
1366 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1367 || !is_variable_sized (decl));
1368 /* Global variables don't need to be copied,
1369 the receiver side will use them directly. */
1370 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1371 break;
1372 by_ref = use_pointer_for_field (decl, ctx);
1373 if (! TREE_READONLY (decl)
1374 || TREE_ADDRESSABLE (decl)
1375 || by_ref
1376 || is_reference (decl))
1378 install_var_field (decl, by_ref, 3, ctx);
1379 install_var_local (decl, ctx);
1380 break;
1382 /* We don't need to copy const scalar vars back. */
1383 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1384 goto do_private;
1386 case OMP_CLAUSE_LASTPRIVATE:
1387 /* Let the corresponding firstprivate clause create
1388 the variable. */
1389 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1390 break;
1391 /* FALLTHRU */
1393 case OMP_CLAUSE_FIRSTPRIVATE:
1394 case OMP_CLAUSE_REDUCTION:
1395 decl = OMP_CLAUSE_DECL (c);
1396 do_private:
1397 if (is_variable_sized (decl))
1399 if (is_task_ctx (ctx))
1400 install_var_field (decl, false, 1, ctx);
1401 break;
1403 else if (is_taskreg_ctx (ctx))
1405 bool global
1406 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1407 by_ref = use_pointer_for_field (decl, NULL);
1409 if (is_task_ctx (ctx)
1410 && (global || by_ref || is_reference (decl)))
1412 install_var_field (decl, false, 1, ctx);
1413 if (!global)
1414 install_var_field (decl, by_ref, 2, ctx);
1416 else if (!global)
1417 install_var_field (decl, by_ref, 3, ctx);
1419 install_var_local (decl, ctx);
1420 break;
1422 case OMP_CLAUSE_COPYPRIVATE:
1423 if (ctx->outer)
1424 scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
1425 /* FALLTHRU */
1427 case OMP_CLAUSE_COPYIN:
1428 decl = OMP_CLAUSE_DECL (c);
1429 by_ref = use_pointer_for_field (decl, NULL);
1430 install_var_field (decl, by_ref, 3, ctx);
1431 break;
1433 case OMP_CLAUSE_DEFAULT:
1434 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1435 break;
1437 case OMP_CLAUSE_IF:
1438 case OMP_CLAUSE_NUM_THREADS:
1439 case OMP_CLAUSE_SCHEDULE:
1440 if (ctx->outer)
1441 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1442 break;
1444 case OMP_CLAUSE_NOWAIT:
1445 case OMP_CLAUSE_ORDERED:
1446 case OMP_CLAUSE_COLLAPSE:
1447 case OMP_CLAUSE_UNTIED:
1448 break;
1450 default:
1451 gcc_unreachable ();
1455 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1457 switch (OMP_CLAUSE_CODE (c))
1459 case OMP_CLAUSE_LASTPRIVATE:
1460 /* Let the corresponding firstprivate clause create
1461 the variable. */
1462 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1463 scan_array_reductions = true;
1464 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1465 break;
1466 /* FALLTHRU */
1468 case OMP_CLAUSE_PRIVATE:
1469 case OMP_CLAUSE_FIRSTPRIVATE:
1470 case OMP_CLAUSE_REDUCTION:
1471 decl = OMP_CLAUSE_DECL (c);
1472 if (is_variable_sized (decl))
1473 install_var_local (decl, ctx);
1474 fixup_remapped_decl (decl, ctx,
1475 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1476 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1477 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1478 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1479 scan_array_reductions = true;
1480 break;
1482 case OMP_CLAUSE_SHARED:
1483 decl = OMP_CLAUSE_DECL (c);
1484 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1485 fixup_remapped_decl (decl, ctx, false);
1486 break;
1488 case OMP_CLAUSE_COPYPRIVATE:
1489 case OMP_CLAUSE_COPYIN:
1490 case OMP_CLAUSE_DEFAULT:
1491 case OMP_CLAUSE_IF:
1492 case OMP_CLAUSE_NUM_THREADS:
1493 case OMP_CLAUSE_SCHEDULE:
1494 case OMP_CLAUSE_NOWAIT:
1495 case OMP_CLAUSE_ORDERED:
1496 case OMP_CLAUSE_COLLAPSE:
1497 case OMP_CLAUSE_UNTIED:
1498 break;
1500 default:
1501 gcc_unreachable ();
1505 if (scan_array_reductions)
1506 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1507 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1508 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1510 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1511 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1513 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1514 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1515 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
 1518 /* Create a new name for the omp child function. Returns an identifier. */
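/* For example, inside a function bar the first outlined body typically
   gets the assembler name bar.omp_fn.0 (compare the bar.omp_fn.0
   reference in the combined parallel+workshare comment above); on
   targets without '.' in labels a '$' or '_' separator is used.  */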
1520 static GTY(()) unsigned int tmp_ompfn_id_num;
1522 static tree
1523 create_omp_child_function_name (bool task_copy)
1525 tree name = DECL_ASSEMBLER_NAME (current_function_decl);
1526 size_t len = IDENTIFIER_LENGTH (name);
1527 char *tmp_name, *prefix;
1528 const char *suffix;
1530 suffix = task_copy ? "_omp_cpyfn" : "_omp_fn";
1531 prefix = XALLOCAVEC (char, len + strlen (suffix) + 1);
1532 memcpy (prefix, IDENTIFIER_POINTER (name), len);
1533 strcpy (prefix + len, suffix);
1534 #ifndef NO_DOT_IN_LABEL
1535 prefix[len] = '.';
1536 #elif !defined NO_DOLLAR_IN_LABEL
1537 prefix[len] = '$';
1538 #endif
1539 ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
1540 return get_identifier (tmp_name);
1543 /* Build a decl for the omp child function. It'll not contain a body
1544 yet, just the bare decl. */
1546 static void
1547 create_omp_child_function (omp_context *ctx, bool task_copy)
1549 tree decl, type, name, t;
1551 name = create_omp_child_function_name (task_copy);
1552 if (task_copy)
1553 type = build_function_type_list (void_type_node, ptr_type_node,
1554 ptr_type_node, NULL_TREE);
1555 else
1556 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1558 decl = build_decl (FUNCTION_DECL, name, type);
1559 decl = lang_hooks.decls.pushdecl (decl);
1561 if (!task_copy)
1562 ctx->cb.dst_fn = decl;
1563 else
1564 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1566 TREE_STATIC (decl) = 1;
1567 TREE_USED (decl) = 1;
1568 DECL_ARTIFICIAL (decl) = 1;
1569 DECL_IGNORED_P (decl) = 0;
1570 TREE_PUBLIC (decl) = 0;
1571 DECL_UNINLINABLE (decl) = 1;
1572 DECL_EXTERNAL (decl) = 0;
1573 DECL_CONTEXT (decl) = NULL_TREE;
1574 DECL_INITIAL (decl) = make_node (BLOCK);
1576 t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
1577 DECL_ARTIFICIAL (t) = 1;
1578 DECL_IGNORED_P (t) = 1;
1579 DECL_CONTEXT (t) = decl;
1580 DECL_RESULT (decl) = t;
1582 t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1583 DECL_ARTIFICIAL (t) = 1;
1584 DECL_ARG_TYPE (t) = ptr_type_node;
1585 DECL_CONTEXT (t) = current_function_decl;
1586 TREE_USED (t) = 1;
1587 DECL_ARGUMENTS (decl) = t;
1588 if (!task_copy)
1589 ctx->receiver_decl = t;
1590 else
1592 t = build_decl (PARM_DECL, get_identifier (".omp_data_o"),
1593 ptr_type_node);
1594 DECL_ARTIFICIAL (t) = 1;
1595 DECL_ARG_TYPE (t) = ptr_type_node;
1596 DECL_CONTEXT (t) = current_function_decl;
1597 TREE_USED (t) = 1;
1598 TREE_ADDRESSABLE (t) = 1;
1599 TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
1600 DECL_ARGUMENTS (decl) = t;
1603 /* Allocate memory for the function structure. The call to
1604 allocate_struct_function clobbers CFUN, so we need to restore
1605 it afterward. */
1606 push_struct_function (decl);
1607 DECL_SOURCE_LOCATION (decl) = gimple_location (ctx->stmt);
1608 cfun->function_end_locus = gimple_location (ctx->stmt);
1609 pop_cfun ();
1613 /* Scan an OpenMP parallel directive. */
1615 static void
1616 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1618 omp_context *ctx;
1619 tree name;
1620 gimple stmt = gsi_stmt (*gsi);
1622 /* Ignore parallel directives with empty bodies, unless there
1623 are copyin clauses. */
1624 if (optimize > 0
1625 && empty_body_p (gimple_omp_body (stmt))
1626 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1627 OMP_CLAUSE_COPYIN) == NULL)
1629 gsi_replace (gsi, gimple_build_nop (), false);
1630 return;
1633 ctx = new_omp_context (stmt, outer_ctx);
1634 if (taskreg_nesting_level > 1)
1635 ctx->is_nested = true;
1636 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1637 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1638 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1639 name = create_tmp_var_name (".omp_data_s");
1640 name = build_decl (TYPE_DECL, name, ctx->record_type);
1641 TYPE_NAME (ctx->record_type) = name;
1642 create_omp_child_function (ctx, false);
1643 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1645 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1646 scan_omp (gimple_omp_body (stmt), ctx);
1648 if (TYPE_FIELDS (ctx->record_type) == NULL)
1649 ctx->record_type = ctx->receiver_decl = NULL;
1650 else
1652 layout_type (ctx->record_type);
1653 fixup_child_record_type (ctx);
1657 /* Scan an OpenMP task directive. */
1659 static void
1660 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1662 omp_context *ctx;
1663 tree name, t;
1664 gimple stmt = gsi_stmt (*gsi);
1666 /* Ignore task directives with empty bodies. */
1667 if (optimize > 0
1668 && empty_body_p (gimple_omp_body (stmt)))
1670 gsi_replace (gsi, gimple_build_nop (), false);
1671 return;
1674 ctx = new_omp_context (stmt, outer_ctx);
1675 if (taskreg_nesting_level > 1)
1676 ctx->is_nested = true;
1677 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1678 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1679 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1680 name = create_tmp_var_name (".omp_data_s");
1681 name = build_decl (TYPE_DECL, name, ctx->record_type);
1682 TYPE_NAME (ctx->record_type) = name;
1683 create_omp_child_function (ctx, false);
1684 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1686 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1688 if (ctx->srecord_type)
1690 name = create_tmp_var_name (".omp_data_a");
1691 name = build_decl (TYPE_DECL, name, ctx->srecord_type);
1692 TYPE_NAME (ctx->srecord_type) = name;
1693 create_omp_child_function (ctx, true);
1696 scan_omp (gimple_omp_body (stmt), ctx);
1698 if (TYPE_FIELDS (ctx->record_type) == NULL)
1700 ctx->record_type = ctx->receiver_decl = NULL;
1701 t = build_int_cst (long_integer_type_node, 0);
1702 gimple_omp_task_set_arg_size (stmt, t);
1703 t = build_int_cst (long_integer_type_node, 1);
1704 gimple_omp_task_set_arg_align (stmt, t);
1706 else
1708 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1709 /* Move VLA fields to the end. */
1710 p = &TYPE_FIELDS (ctx->record_type);
1711 while (*p)
1712 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1713 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1715 *q = *p;
1716 *p = TREE_CHAIN (*p);
1717 TREE_CHAIN (*q) = NULL_TREE;
1718 q = &TREE_CHAIN (*q);
1720 else
1721 p = &TREE_CHAIN (*p);
1722 *p = vla_fields;
1723 layout_type (ctx->record_type);
1724 fixup_child_record_type (ctx);
1725 if (ctx->srecord_type)
1726 layout_type (ctx->srecord_type);
1727 t = fold_convert (long_integer_type_node,
1728 TYPE_SIZE_UNIT (ctx->record_type));
1729 gimple_omp_task_set_arg_size (stmt, t);
1730 t = build_int_cst (long_integer_type_node,
1731 TYPE_ALIGN_UNIT (ctx->record_type));
1732 gimple_omp_task_set_arg_align (stmt, t);
1737 /* Scan an OpenMP loop directive. */
1739 static void
1740 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1742 omp_context *ctx;
1743 size_t i;
1745 ctx = new_omp_context (stmt, outer_ctx);
1747 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1749 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1750 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1752 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1753 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1754 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1755 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1757 scan_omp (gimple_omp_body (stmt), ctx);
1760 /* Scan an OpenMP sections directive. */
1762 static void
1763 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1765 omp_context *ctx;
1767 ctx = new_omp_context (stmt, outer_ctx);
1768 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1769 scan_omp (gimple_omp_body (stmt), ctx);
1772 /* Scan an OpenMP single directive. */
1774 static void
1775 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1777 omp_context *ctx;
1778 tree name;
1780 ctx = new_omp_context (stmt, outer_ctx);
1781 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1782 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1783 name = create_tmp_var_name (".omp_copy_s");
1784 name = build_decl (TYPE_DECL, name, ctx->record_type);
1785 TYPE_NAME (ctx->record_type) = name;
1787 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1788 scan_omp (gimple_omp_body (stmt), ctx);
1790 if (TYPE_FIELDS (ctx->record_type) == NULL)
1791 ctx->record_type = NULL;
1792 else
1793 layout_type (ctx->record_type);
1797 /* Check OpenMP nesting restrictions. */
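/* Illustrative example of a violation diagnosed below: a work-sharing
   construct closely nested inside another one without an intervening
   parallel region, e.g.

       #pragma omp for
       for (i = 0; i < n; i++)
         {
           #pragma omp single   // warned: work-sharing region may not be
                                // closely nested inside of work-sharing,
                                // critical, ordered, master or explicit
                                // task region
           f (i);
         }

   A GOMP_barrier call (from "#pragma omp barrier") in the same position
   is caught via the GIMPLE_CALL case.  */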
1798 static void
1799 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1801 switch (gimple_code (stmt))
1803 case GIMPLE_OMP_FOR:
1804 case GIMPLE_OMP_SECTIONS:
1805 case GIMPLE_OMP_SINGLE:
1806 case GIMPLE_CALL:
1807 for (; ctx != NULL; ctx = ctx->outer)
1808 switch (gimple_code (ctx->stmt))
1810 case GIMPLE_OMP_FOR:
1811 case GIMPLE_OMP_SECTIONS:
1812 case GIMPLE_OMP_SINGLE:
1813 case GIMPLE_OMP_ORDERED:
1814 case GIMPLE_OMP_MASTER:
1815 case GIMPLE_OMP_TASK:
1816 if (is_gimple_call (stmt))
1818 warning (0, "barrier region may not be closely nested inside "
1819 "of work-sharing, critical, ordered, master or "
1820 "explicit task region");
1821 return;
1823 warning (0, "work-sharing region may not be closely nested inside "
1824 "of work-sharing, critical, ordered, master or explicit "
1825 "task region");
1826 return;
1827 case GIMPLE_OMP_PARALLEL:
1828 return;
1829 default:
1830 break;
1832 break;
1833 case GIMPLE_OMP_MASTER:
1834 for (; ctx != NULL; ctx = ctx->outer)
1835 switch (gimple_code (ctx->stmt))
1837 case GIMPLE_OMP_FOR:
1838 case GIMPLE_OMP_SECTIONS:
1839 case GIMPLE_OMP_SINGLE:
1840 case GIMPLE_OMP_TASK:
1841 warning (0, "master region may not be closely nested inside "
1842 "of work-sharing or explicit task region");
1843 return;
1844 case GIMPLE_OMP_PARALLEL:
1845 return;
1846 default:
1847 break;
1849 break;
1850 case GIMPLE_OMP_ORDERED:
1851 for (; ctx != NULL; ctx = ctx->outer)
1852 switch (gimple_code (ctx->stmt))
1854 case GIMPLE_OMP_CRITICAL:
1855 case GIMPLE_OMP_TASK:
1856 warning (0, "ordered region may not be closely nested inside "
1857 "of critical or explicit task region");
1858 return;
1859 case GIMPLE_OMP_FOR:
1860 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1861 OMP_CLAUSE_ORDERED) == NULL)
1862 warning (0, "ordered region must be closely nested inside "
1863 "a loop region with an ordered clause");
1864 return;
1865 case GIMPLE_OMP_PARALLEL:
1866 return;
1867 default:
1868 break;
1870 break;
1871 case GIMPLE_OMP_CRITICAL:
1872 for (; ctx != NULL; ctx = ctx->outer)
1873 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1874 && (gimple_omp_critical_name (stmt)
1875 == gimple_omp_critical_name (ctx->stmt)))
1877 warning (0, "critical region may not be nested inside a critical "
1878 "region with the same name");
1879 return;
1881 break;
1882 default:
1883 break;
 1888 /* Helper function for scan_omp.
1890 Callback for walk_tree or operators in walk_gimple_stmt used to
1891 scan for OpenMP directives in TP. */
1893 static tree
1894 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1896 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1897 omp_context *ctx = (omp_context *) wi->info;
1898 tree t = *tp;
1900 switch (TREE_CODE (t))
1902 case VAR_DECL:
1903 case PARM_DECL:
1904 case LABEL_DECL:
1905 case RESULT_DECL:
1906 if (ctx)
1907 *tp = remap_decl (t, &ctx->cb);
1908 break;
1910 default:
1911 if (ctx && TYPE_P (t))
1912 *tp = remap_type (t, &ctx->cb);
1913 else if (!DECL_P (t))
1914 *walk_subtrees = 1;
1915 break;
1918 return NULL_TREE;
1922 /* Helper function for scan_omp.
1924 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1925 the current statement in GSI. */
1927 static tree
1928 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1929 struct walk_stmt_info *wi)
1931 gimple stmt = gsi_stmt (*gsi);
1932 omp_context *ctx = (omp_context *) wi->info;
1934 if (gimple_has_location (stmt))
1935 input_location = gimple_location (stmt);
1937 /* Check the OpenMP nesting restrictions. */
1938 if (ctx != NULL)
1940 if (is_gimple_omp (stmt))
1941 check_omp_nesting_restrictions (stmt, ctx);
1942 else if (is_gimple_call (stmt))
1944 tree fndecl = gimple_call_fndecl (stmt);
1945 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1946 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1947 check_omp_nesting_restrictions (stmt, ctx);
1951 *handled_ops_p = true;
1953 switch (gimple_code (stmt))
1955 case GIMPLE_OMP_PARALLEL:
1956 taskreg_nesting_level++;
1957 scan_omp_parallel (gsi, ctx);
1958 taskreg_nesting_level--;
1959 break;
1961 case GIMPLE_OMP_TASK:
1962 taskreg_nesting_level++;
1963 scan_omp_task (gsi, ctx);
1964 taskreg_nesting_level--;
1965 break;
1967 case GIMPLE_OMP_FOR:
1968 scan_omp_for (stmt, ctx);
1969 break;
1971 case GIMPLE_OMP_SECTIONS:
1972 scan_omp_sections (stmt, ctx);
1973 break;
1975 case GIMPLE_OMP_SINGLE:
1976 scan_omp_single (stmt, ctx);
1977 break;
1979 case GIMPLE_OMP_SECTION:
1980 case GIMPLE_OMP_MASTER:
1981 case GIMPLE_OMP_ORDERED:
1982 case GIMPLE_OMP_CRITICAL:
1983 ctx = new_omp_context (stmt, ctx);
1984 scan_omp (gimple_omp_body (stmt), ctx);
1985 break;
1987 case GIMPLE_BIND:
1989 tree var;
1991 *handled_ops_p = false;
1992 if (ctx)
1993 for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
1994 insert_decl_map (&ctx->cb, var, var);
1996 break;
1997 default:
1998 *handled_ops_p = false;
1999 break;
2002 return NULL_TREE;
2006 /* Scan all the statements starting at the current statement. CTX
2007 contains context information about the OpenMP directives and
2008 clauses found during the scan. */
2010 static void
2011 scan_omp (gimple_seq body, omp_context *ctx)
2013 location_t saved_location;
2014 struct walk_stmt_info wi;
2016 memset (&wi, 0, sizeof (wi));
2017 wi.info = ctx;
2018 wi.want_locations = true;
2020 saved_location = input_location;
2021 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2022 input_location = saved_location;
2025 /* Re-gimplification and code generation routines. */
2027 /* Build a call to GOMP_barrier. */
2029 static tree
2030 build_omp_barrier (void)
2032 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
2035 /* If a context was created for STMT when it was scanned, return it. */
2037 static omp_context *
2038 maybe_lookup_ctx (gimple stmt)
2040 splay_tree_node n;
2041 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2042 return n ? (omp_context *) n->value : NULL;
2046 /* Find the mapping for DECL in CTX or the immediately enclosing
2047 context that has a mapping for DECL.
2049 If CTX is a nested parallel directive, we may have to use the decl
2050 mappings created in CTX's parent context. Suppose that we have the
2051 following parallel nesting (variable UIDs shown for clarity):
2053 iD.1562 = 0;
2054 #omp parallel shared(iD.1562) -> outer parallel
2055 iD.1562 = iD.1562 + 1;
2057 #omp parallel shared (iD.1562) -> inner parallel
2058 iD.1562 = iD.1562 - 1;
2060 Each parallel structure will create a distinct .omp_data_s structure
2061 for copying iD.1562 in/out of the directive:
2063 outer parallel .omp_data_s.1.i -> iD.1562
2064 inner parallel .omp_data_s.2.i -> iD.1562
2066 A shared variable mapping will produce a copy-out operation before
2067 the parallel directive and a copy-in operation after it. So, in
2068 this case we would have:
2070 iD.1562 = 0;
2071 .omp_data_o.1.i = iD.1562;
2072 #omp parallel shared(iD.1562) -> outer parallel
2073 .omp_data_i.1 = &.omp_data_o.1
2074 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2076 .omp_data_o.2.i = iD.1562; -> **
2077 #omp parallel shared(iD.1562) -> inner parallel
2078 .omp_data_i.2 = &.omp_data_o.2
2079 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2082 ** This is a problem. The symbol iD.1562 cannot be referenced
2083 inside the body of the outer parallel region. But since we are
2084 emitting this copy operation while expanding the inner parallel
2085 directive, we need to access the CTX structure of the outer
2086 parallel directive to get the correct mapping:
2088 .omp_data_o.2.i = .omp_data_i.1->i
2090 Since there may be other workshare or parallel directives enclosing
2091 the parallel directive, it may be necessary to walk up the context
2092 parent chain. This is not a problem in general because nested
2093 parallelism happens only rarely. */
2095 static tree
2096 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2098 tree t;
2099 omp_context *up;
2101 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2102 t = maybe_lookup_decl (decl, up);
2104 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2106 return t ? t : decl;
2110 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2111 in outer contexts. */
2113 static tree
2114 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2116 tree t = NULL;
2117 omp_context *up;
2119 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2120 t = maybe_lookup_decl (decl, up);
2122 return t ? t : decl;
2126 /* Construct the initialization value for reduction CLAUSE. */
2128 tree
2129 omp_reduction_init (tree clause, tree type)
2131 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2133 case PLUS_EXPR:
2134 case MINUS_EXPR:
2135 case BIT_IOR_EXPR:
2136 case BIT_XOR_EXPR:
2137 case TRUTH_OR_EXPR:
2138 case TRUTH_ORIF_EXPR:
2139 case TRUTH_XOR_EXPR:
2140 case NE_EXPR:
2141 return fold_convert (type, integer_zero_node);
2143 case MULT_EXPR:
2144 case TRUTH_AND_EXPR:
2145 case TRUTH_ANDIF_EXPR:
2146 case EQ_EXPR:
2147 return fold_convert (type, integer_one_node);
2149 case BIT_AND_EXPR:
2150 return fold_convert (type, integer_minus_one_node);
2152 case MAX_EXPR:
2153 if (SCALAR_FLOAT_TYPE_P (type))
2155 REAL_VALUE_TYPE max, min;
2156 if (HONOR_INFINITIES (TYPE_MODE (type)))
2158 real_inf (&max);
2159 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2161 else
2162 real_maxval (&min, 1, TYPE_MODE (type));
2163 return build_real (type, min);
2165 else
2167 gcc_assert (INTEGRAL_TYPE_P (type));
2168 return TYPE_MIN_VALUE (type);
2171 case MIN_EXPR:
2172 if (SCALAR_FLOAT_TYPE_P (type))
2174 REAL_VALUE_TYPE max;
2175 if (HONOR_INFINITIES (TYPE_MODE (type)))
2176 real_inf (&max);
2177 else
2178 real_maxval (&max, 0, TYPE_MODE (type));
2179 return build_real (type, max);
2181 else
2183 gcc_assert (INTEGRAL_TYPE_P (type));
2184 return TYPE_MAX_VALUE (type);
2187 default:
2188 gcc_unreachable ();
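/* For example, given user code such as

      int
      sum_array (const int *a, int n)
      {
        int i, sum = 0;
      #pragma omp parallel for reduction (+:sum)
        for (i = 0; i < n; i++)
          sum += a[i];              // each thread's private SUM starts at 0
        return sum;
      }

   every thread receives a private copy of SUM initialized to the value
   built above: 0 for +, 1 for *, all-ones for &, and the type's smallest
   value for the max reductions (largest for min) that the Fortran front
   end can produce.  */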
2192 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2193 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2194 private variables. Initialization statements go in ILIST, while calls
2195 to destructors go in DLIST. */
2197 static void
2198 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2199 omp_context *ctx)
2201 gimple_stmt_iterator diter;
2202 tree c, dtor, copyin_seq, x, ptr;
2203 bool copyin_by_ref = false;
2204 bool lastprivate_firstprivate = false;
2205 int pass;
2207 *dlist = gimple_seq_alloc ();
2208 diter = gsi_start (*dlist);
2209 copyin_seq = NULL;
2211 /* Do all the fixed sized types in the first pass, and the variable sized
2212 types in the second pass. This makes sure that the scalar arguments to
2213 the variable sized types are processed before we use them in the
2214 variable sized operations. */
2215 for (pass = 0; pass < 2; ++pass)
2217 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2219 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2220 tree var, new_var;
2221 bool by_ref;
2223 switch (c_kind)
2225 case OMP_CLAUSE_PRIVATE:
2226 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2227 continue;
2228 break;
2229 case OMP_CLAUSE_SHARED:
2230 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2232 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2233 continue;
2235 case OMP_CLAUSE_FIRSTPRIVATE:
2236 case OMP_CLAUSE_COPYIN:
2237 case OMP_CLAUSE_REDUCTION:
2238 break;
2239 case OMP_CLAUSE_LASTPRIVATE:
2240 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2242 lastprivate_firstprivate = true;
2243 if (pass != 0)
2244 continue;
2246 break;
2247 default:
2248 continue;
2251 new_var = var = OMP_CLAUSE_DECL (c);
2252 if (c_kind != OMP_CLAUSE_COPYIN)
2253 new_var = lookup_decl (var, ctx);
2255 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2257 if (pass != 0)
2258 continue;
2260 else if (is_variable_sized (var))
2262 /* For variable sized types, we need to allocate the
2263 actual storage here. Call alloca and store the
2264 result in the pointer decl that we created elsewhere. */
2265 if (pass == 0)
2266 continue;
2268 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2270 gimple stmt;
2271 tree tmp;
2273 ptr = DECL_VALUE_EXPR (new_var);
2274 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2275 ptr = TREE_OPERAND (ptr, 0);
2276 gcc_assert (DECL_P (ptr));
2277 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2279 /* void *tmp = __builtin_alloca */
2280 stmt
2281 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2282 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2283 gimple_add_tmp_var (tmp);
2284 gimple_call_set_lhs (stmt, tmp);
2286 gimple_seq_add_stmt (ilist, stmt);
2288 x = fold_convert (TREE_TYPE (ptr), tmp);
2289 gimplify_assign (ptr, x, ilist);
2292 else if (is_reference (var))
2294 /* For references that are being privatized for Fortran,
2295 allocate new backing storage for the new pointer
2296 variable. This allows us to avoid changing all the
2297 code that expects a pointer to something that expects
2298 a direct variable. Note that this doesn't apply to
2299 C++, since reference types are disallowed in data
2300 sharing clauses there, except for NRV optimized
2301 return values. */
2302 if (pass == 0)
2303 continue;
2305 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2306 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2308 x = build_receiver_ref (var, false, ctx);
2309 x = build_fold_addr_expr (x);
2311 else if (TREE_CONSTANT (x))
2313 const char *name = NULL;
2314 if (DECL_NAME (var))
2315 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2317 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2318 name);
2319 gimple_add_tmp_var (x);
2320 TREE_ADDRESSABLE (x) = 1;
2321 x = build_fold_addr_expr (x);
2323 else
2325 x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2328 x = fold_convert (TREE_TYPE (new_var), x);
2329 gimplify_assign (new_var, x, ilist);
2331 new_var = build_fold_indirect_ref (new_var);
2333 else if (c_kind == OMP_CLAUSE_REDUCTION
2334 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2336 if (pass == 0)
2337 continue;
2339 else if (pass != 0)
2340 continue;
2342 switch (OMP_CLAUSE_CODE (c))
2344 case OMP_CLAUSE_SHARED:
2345 /* Shared global vars are just accessed directly. */
2346 if (is_global_var (new_var))
2347 break;
2348 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2349 needs to be delayed until after fixup_child_record_type so
2350 that we get the correct type during the dereference. */
2351 by_ref = use_pointer_for_field (var, ctx);
2352 x = build_receiver_ref (var, by_ref, ctx);
2353 SET_DECL_VALUE_EXPR (new_var, x);
2354 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2356 /* ??? If VAR is not passed by reference, and the variable
2357 hasn't been initialized yet, then we'll get a warning for
2358 the store into the omp_data_s structure. Ideally, we'd be
2359 able to notice this and not store anything at all, but
2360 we're generating code too early. Suppress the warning. */
2361 if (!by_ref)
2362 TREE_NO_WARNING (var) = 1;
2363 break;
2365 case OMP_CLAUSE_LASTPRIVATE:
2366 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2367 break;
2368 /* FALLTHRU */
2370 case OMP_CLAUSE_PRIVATE:
2371 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2372 x = build_outer_var_ref (var, ctx);
2373 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2375 if (is_task_ctx (ctx))
2376 x = build_receiver_ref (var, false, ctx);
2377 else
2378 x = build_outer_var_ref (var, ctx);
2380 else
2381 x = NULL;
2382 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2383 if (x)
2384 gimplify_and_add (x, ilist);
2385 /* FALLTHRU */
2387 do_dtor:
2388 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2389 if (x)
2391 gimple_seq tseq = NULL;
2393 dtor = x;
2394 gimplify_stmt (&dtor, &tseq);
2395 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2397 break;
2399 case OMP_CLAUSE_FIRSTPRIVATE:
2400 if (is_task_ctx (ctx))
2402 if (is_reference (var) || is_variable_sized (var))
2403 goto do_dtor;
2404 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2405 ctx))
2406 || use_pointer_for_field (var, NULL))
2408 x = build_receiver_ref (var, false, ctx);
2409 SET_DECL_VALUE_EXPR (new_var, x);
2410 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2411 goto do_dtor;
2414 x = build_outer_var_ref (var, ctx);
2415 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2416 gimplify_and_add (x, ilist);
2417 goto do_dtor;
2418 break;
2420 case OMP_CLAUSE_COPYIN:
2421 by_ref = use_pointer_for_field (var, NULL);
2422 x = build_receiver_ref (var, by_ref, ctx);
2423 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2424 append_to_statement_list (x, &copyin_seq);
2425 copyin_by_ref |= by_ref;
2426 break;
2428 case OMP_CLAUSE_REDUCTION:
2429 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2431 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2432 x = build_outer_var_ref (var, ctx);
2434 if (is_reference (var))
2435 x = build_fold_addr_expr (x);
2436 SET_DECL_VALUE_EXPR (placeholder, x);
2437 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2438 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2439 gimple_seq_add_seq (ilist,
2440 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2441 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2442 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2444 else
2446 x = omp_reduction_init (c, TREE_TYPE (new_var));
2447 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2448 gimplify_assign (new_var, x, ilist);
2450 break;
2452 default:
2453 gcc_unreachable ();
2458 /* The copyin sequence is not to be executed by the main thread, since
2459 that would result in self-copies. That may not matter for scalars,
2460 but it certainly does for C++ operator=. */
2461 if (copyin_seq)
2463 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2464 x = build2 (NE_EXPR, boolean_type_node, x,
2465 build_int_cst (TREE_TYPE (x), 0));
2466 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2467 gimplify_and_add (x, ilist);
2470 /* If any copyin variable is passed by reference, we must ensure the
2471 master thread doesn't modify it before it is copied over in all
2472 threads. Similarly for variables in both firstprivate and
2473 lastprivate clauses we need to ensure the lastprivate copying
2474 happens after firstprivate copying in all threads. */
2475 if (copyin_by_ref || lastprivate_firstprivate)
2476 gimplify_and_add (build_omp_barrier (), ilist);
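/* For example, the barrier above matters for user code such as

      #include <omp.h>

      int counter;
      #pragma omp threadprivate (counter)

      void
      init_counters (int start)
      {
        counter = start;
      #pragma omp parallel copyin (counter)
        {
          // every thread starts from the master's value of COUNTER
          counter += omp_get_thread_num ();
        }
      }

   If COUNTER is communicated by reference, the master thread must not
   modify its copy until all other threads have copied the value in,
   hence the barrier.  */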
2480 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2481 both parallel and workshare constructs. PREDICATE may be NULL if it's
2482 always true. */
2484 static void
2485 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2486 omp_context *ctx)
2488 tree x, c, label = NULL;
2489 bool par_clauses = false;
2491 /* Early exit if there are no lastprivate clauses. */
2492 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2493 if (clauses == NULL)
2495 /* If this was a workshare clause, see if it had been combined
2496 with its parallel. In that case, look for the clauses on the
2497 parallel statement itself. */
2498 if (is_parallel_ctx (ctx))
2499 return;
2501 ctx = ctx->outer;
2502 if (ctx == NULL || !is_parallel_ctx (ctx))
2503 return;
2505 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2506 OMP_CLAUSE_LASTPRIVATE);
2507 if (clauses == NULL)
2508 return;
2509 par_clauses = true;
2512 if (predicate)
2514 gimple stmt;
2515 tree label_true, arm1, arm2;
2517 label = create_artificial_label ();
2518 label_true = create_artificial_label ();
2519 arm1 = TREE_OPERAND (predicate, 0);
2520 arm2 = TREE_OPERAND (predicate, 1);
2521 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2522 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2523 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2524 label_true, label);
2525 gimple_seq_add_stmt (stmt_list, stmt);
2526 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2529 for (c = clauses; c ;)
2531 tree var, new_var;
2533 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2535 var = OMP_CLAUSE_DECL (c);
2536 new_var = lookup_decl (var, ctx);
2538 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2540 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2541 gimple_seq_add_seq (stmt_list,
2542 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2544 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2546 x = build_outer_var_ref (var, ctx);
2547 if (is_reference (var))
2548 new_var = build_fold_indirect_ref (new_var);
2549 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2550 gimplify_and_add (x, stmt_list);
2552 c = OMP_CLAUSE_CHAIN (c);
2553 if (c == NULL && !par_clauses)
2555 /* If this was a workshare clause, see if it had been combined
2556 with its parallel. In that case, continue looking for the
2557 clauses also on the parallel statement itself. */
2558 if (is_parallel_ctx (ctx))
2559 break;
2561 ctx = ctx->outer;
2562 if (ctx == NULL || !is_parallel_ctx (ctx))
2563 break;
2565 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2566 OMP_CLAUSE_LASTPRIVATE);
2567 par_clauses = true;
2571 if (label)
2572 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
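/* For example, in

      int
      last_value (int n)
      {
        int i, last = -1;
      #pragma omp parallel for lastprivate (last)
        for (i = 0; i < n; i++)
          last = i;
        return last;               // returns n - 1 for n > 0
      }

   PREDICATE is the "this thread executed the sequentially last
   iteration" test, so only one thread performs the copy-out of LAST
   emitted above.  */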
2576 /* Generate code to implement the REDUCTION clauses. */
2578 static void
2579 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2581 gimple_seq sub_seq = NULL;
2582 gimple stmt;
2583 tree x, c;
2584 int count = 0;
2586 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2587 update in that case, otherwise use a lock. */
2588 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2589 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2591 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2593 /* Never use OMP_ATOMIC for array reductions. */
2594 count = -1;
2595 break;
2597 count++;
2600 if (count == 0)
2601 return;
2603 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2605 tree var, ref, new_var;
2606 enum tree_code code;
2608 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2609 continue;
2611 var = OMP_CLAUSE_DECL (c);
2612 new_var = lookup_decl (var, ctx);
2613 if (is_reference (var))
2614 new_var = build_fold_indirect_ref (new_var);
2615 ref = build_outer_var_ref (var, ctx);
2616 code = OMP_CLAUSE_REDUCTION_CODE (c);
2618 /* reduction(-:var) sums up the partial results, so it acts
2619 identically to reduction(+:var). */
2620 if (code == MINUS_EXPR)
2621 code = PLUS_EXPR;
2623 if (count == 1)
2625 tree addr = build_fold_addr_expr (ref);
2627 addr = save_expr (addr);
2628 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2629 x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
2630 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2631 gimplify_and_add (x, stmt_seqp);
2632 return;
2635 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2637 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2639 if (is_reference (var))
2640 ref = build_fold_addr_expr (ref);
2641 SET_DECL_VALUE_EXPR (placeholder, ref);
2642 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2643 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2644 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2645 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2646 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2648 else
2650 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2651 ref = build_outer_var_ref (var, ctx);
2652 gimplify_assign (ref, x, &sub_seq);
2656 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2657 gimple_seq_add_stmt (stmt_seqp, stmt);
2659 gimple_seq_add_seq (stmt_seqp, sub_seq);
2661 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2662 gimple_seq_add_stmt (stmt_seqp, stmt);
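/* For example, with a single reduction (+:sum) the merge emitted above
   is a lone atomic update, roughly

      #pragma omp atomic
      sum = sum + sum_priv;        // SUM_PRIV stands for this thread's copy

   whereas with two or more reduction clauses (or an array reduction)
   the per-thread merges are bracketed by GOMP_atomic_start () and
   GOMP_atomic_end (), i.e. serialized by a single runtime lock.  */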
2666 /* Generate code to implement the COPYPRIVATE clauses. */
2668 static void
2669 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2670 omp_context *ctx)
2672 tree c;
2674 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2676 tree var, ref, x;
2677 bool by_ref;
2679 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2680 continue;
2682 var = OMP_CLAUSE_DECL (c);
2683 by_ref = use_pointer_for_field (var, NULL);
2685 ref = build_sender_ref (var, ctx);
2686 x = lookup_decl_in_outer_ctx (var, ctx);
2687 x = by_ref ? build_fold_addr_expr (x) : x;
2688 gimplify_assign (ref, x, slist);
2690 ref = build_receiver_ref (var, by_ref, ctx);
2691 if (is_reference (var))
2693 ref = build_fold_indirect_ref (ref);
2694 var = build_fold_indirect_ref (var);
2696 x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
2697 gimplify_and_add (x, rlist);
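/* For example, copyprivate broadcasts the value produced by the single
   thread to all the others:

      #include <stdio.h>

      void
      share_one_value (void)
      {
        int x;
      #pragma omp parallel private (x)
        {
      #pragma omp single copyprivate (x)
          x = 42;                  // one thread produces the value
          printf ("%d\n", x);      // every thread prints 42
        }
      }

   The sender side (SLIST) stores the executing thread's X into the
   communication record; the receiver side (RLIST) copies it back out
   into every other thread's private X.  */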
2702 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2703 and REDUCTION from the sender (aka parent) side. */
2705 static void
2706 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2707 omp_context *ctx)
2709 tree c;
2711 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2713 tree val, ref, x, var;
2714 bool by_ref, do_in = false, do_out = false;
2716 switch (OMP_CLAUSE_CODE (c))
2718 case OMP_CLAUSE_PRIVATE:
2719 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2720 break;
2721 continue;
2722 case OMP_CLAUSE_FIRSTPRIVATE:
2723 case OMP_CLAUSE_COPYIN:
2724 case OMP_CLAUSE_LASTPRIVATE:
2725 case OMP_CLAUSE_REDUCTION:
2726 break;
2727 default:
2728 continue;
2731 val = OMP_CLAUSE_DECL (c);
2732 var = lookup_decl_in_outer_ctx (val, ctx);
2734 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2735 && is_global_var (var))
2736 continue;
2737 if (is_variable_sized (val))
2738 continue;
2739 by_ref = use_pointer_for_field (val, NULL);
2741 switch (OMP_CLAUSE_CODE (c))
2743 case OMP_CLAUSE_PRIVATE:
2744 case OMP_CLAUSE_FIRSTPRIVATE:
2745 case OMP_CLAUSE_COPYIN:
2746 do_in = true;
2747 break;
2749 case OMP_CLAUSE_LASTPRIVATE:
2750 if (by_ref || is_reference (val))
2752 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2753 continue;
2754 do_in = true;
2756 else
2758 do_out = true;
2759 if (lang_hooks.decls.omp_private_outer_ref (val))
2760 do_in = true;
2762 break;
2764 case OMP_CLAUSE_REDUCTION:
2765 do_in = true;
2766 do_out = !(by_ref || is_reference (val));
2767 break;
2769 default:
2770 gcc_unreachable ();
2773 if (do_in)
2775 ref = build_sender_ref (val, ctx);
2776 x = by_ref ? build_fold_addr_expr (var) : var;
2777 gimplify_assign (ref, x, ilist);
2778 if (is_task_ctx (ctx))
2779 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2782 if (do_out)
2784 ref = build_sender_ref (val, ctx);
2785 gimplify_assign (var, ref, olist);
2790 /* Generate code to implement SHARED from the sender (aka parent)
2791 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2792 list things that got automatically shared. */
2794 static void
2795 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2797 tree var, ovar, nvar, f, x, record_type;
2799 if (ctx->record_type == NULL)
2800 return;
2802 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2803 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
2805 ovar = DECL_ABSTRACT_ORIGIN (f);
2806 nvar = maybe_lookup_decl (ovar, ctx);
2807 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2808 continue;
2810 /* If CTX is a nested parallel directive, find the immediately
2811 enclosing parallel or workshare construct that contains a
2812 mapping for OVAR. */
2813 var = lookup_decl_in_outer_ctx (ovar, ctx);
2815 if (use_pointer_for_field (ovar, ctx))
2817 x = build_sender_ref (ovar, ctx);
2818 var = build_fold_addr_expr (var);
2819 gimplify_assign (x, var, ilist);
2821 else
2823 x = build_sender_ref (ovar, ctx);
2824 gimplify_assign (x, var, ilist);
2826 if (!TREE_READONLY (var)
2827 /* We don't need to receive a new reference to a result
2828 or parm decl. In fact we may not store to it as we will
2829 invalidate any pending RSO and generate wrong gimple
2830 during inlining. */
2831 && !((TREE_CODE (var) == RESULT_DECL
2832 || TREE_CODE (var) == PARM_DECL)
2833 && DECL_BY_REFERENCE (var)))
2835 x = build_sender_ref (ovar, ctx);
2836 gimplify_assign (var, x, olist);
2843 /* A convenience function to build an empty GIMPLE_COND with just the
2844 condition. */
2846 static gimple
2847 gimple_build_cond_empty (tree cond)
2849 enum tree_code pred_code;
2850 tree lhs, rhs;
2852 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2853 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2857 /* Build the function calls to GOMP_parallel_start etc to actually
2858 generate the parallel operation. REGION is the parallel region
2859 being expanded. BB is the block where to insert the code. WS_ARGS
2860 will be set if this is a call to a combined parallel+workshare
2861 construct, it contains the list of additional arguments needed by
2862 the workshare construct. */
2864 static void
2865 expand_parallel_call (struct omp_region *region, basic_block bb,
2866 gimple entry_stmt, tree ws_args)
2868 tree t, t1, t2, val, cond, c, clauses;
2869 gimple_stmt_iterator gsi;
2870 gimple stmt;
2871 int start_ix;
2873 clauses = gimple_omp_parallel_clauses (entry_stmt);
2875 /* Determine what flavor of GOMP_parallel_start we will be
2876 emitting. */
2877 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2878 if (is_combined_parallel (region))
2880 switch (region->inner->type)
2882 case GIMPLE_OMP_FOR:
2883 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2884 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2885 + (region->inner->sched_kind
2886 == OMP_CLAUSE_SCHEDULE_RUNTIME
2887 ? 3 : region->inner->sched_kind);
2888 break;
2889 case GIMPLE_OMP_SECTIONS:
2890 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2891 break;
2892 default:
2893 gcc_unreachable ();
2897 /* By default, the value of NUM_THREADS is zero (selected at run time)
2898 and there is no conditional. */
2899 cond = NULL_TREE;
2900 val = build_int_cst (unsigned_type_node, 0);
2902 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2903 if (c)
2904 cond = OMP_CLAUSE_IF_EXPR (c);
2906 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2907 if (c)
2908 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2910 /* Ensure 'val' is of the correct type. */
2911 val = fold_convert (unsigned_type_node, val);
2913 /* If we found the clause 'if (cond)', build either
2914 (cond != 0) or (cond ? val : 1u). */
2915 if (cond)
2917 gimple_stmt_iterator gsi;
2919 cond = gimple_boolify (cond);
2921 if (integer_zerop (val))
2922 val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
2923 build_int_cst (TREE_TYPE (cond), 0));
2924 else
2926 basic_block cond_bb, then_bb, else_bb;
2927 edge e, e_then, e_else;
2928 tree tmp_then, tmp_else, tmp_join, tmp_var;
2930 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2931 if (gimple_in_ssa_p (cfun))
2933 tmp_then = make_ssa_name (tmp_var, NULL);
2934 tmp_else = make_ssa_name (tmp_var, NULL);
2935 tmp_join = make_ssa_name (tmp_var, NULL);
2937 else
2939 tmp_then = tmp_var;
2940 tmp_else = tmp_var;
2941 tmp_join = tmp_var;
2944 e = split_block (bb, NULL);
2945 cond_bb = e->src;
2946 bb = e->dest;
2947 remove_edge (e);
2949 then_bb = create_empty_bb (cond_bb);
2950 else_bb = create_empty_bb (then_bb);
2951 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2952 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
2954 stmt = gimple_build_cond_empty (cond);
2955 gsi = gsi_start_bb (cond_bb);
2956 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2958 gsi = gsi_start_bb (then_bb);
2959 stmt = gimple_build_assign (tmp_then, val);
2960 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2962 gsi = gsi_start_bb (else_bb);
2963 stmt = gimple_build_assign
2964 (tmp_else, build_int_cst (unsigned_type_node, 1));
2965 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2967 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
2968 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
2969 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
2970 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
2972 if (gimple_in_ssa_p (cfun))
2974 gimple phi = create_phi_node (tmp_join, bb);
2975 SSA_NAME_DEF_STMT (tmp_join) = phi;
2976 add_phi_arg (phi, tmp_then, e_then);
2977 add_phi_arg (phi, tmp_else, e_else);
2980 val = tmp_join;
2983 gsi = gsi_start_bb (bb);
2984 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
2985 false, GSI_CONTINUE_LINKING);
2988 gsi = gsi_last_bb (bb);
2989 t = gimple_omp_parallel_data_arg (entry_stmt);
2990 if (t == NULL)
2991 t1 = null_pointer_node;
2992 else
2993 t1 = build_fold_addr_expr (t);
2994 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
2996 if (ws_args)
2998 tree args = tree_cons (NULL, t2,
2999 tree_cons (NULL, t1,
3000 tree_cons (NULL, val, ws_args)));
3001 t = build_function_call_expr (built_in_decls[start_ix], args);
3003 else
3004 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
3006 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3007 false, GSI_CONTINUE_LINKING);
3009 t = gimple_omp_parallel_data_arg (entry_stmt);
3010 if (t == NULL)
3011 t = null_pointer_node;
3012 else
3013 t = build_fold_addr_expr (t);
3014 t = build_call_expr (gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3015 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3016 false, GSI_CONTINUE_LINKING);
3018 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3019 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3020 false, GSI_CONTINUE_LINKING);
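/* For example, a plain #pragma omp parallel around BODY ends up,
   roughly, as

      GOMP_parallel_start (body_fn, &.omp_data_o, num_threads);
      body_fn (&.omp_data_o);      // the encountering thread joins in
      GOMP_parallel_end ();

   where BODY_FN stands for the outlined child function and NUM_THREADS
   is 0 (runtime choice) unless an if or num_threads clause says
   otherwise.  A combined parallel loop or sections region calls the
   matching GOMP_parallel_loop_*_start or GOMP_parallel_sections_start
   entry point instead.  */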
3024 /* Build the function call to GOMP_task to actually
3025 generate the task operation. BB is the block where to insert the code. */
3027 static void
3028 expand_task_call (basic_block bb, gimple entry_stmt)
3030 tree t, t1, t2, t3, flags, cond, c, clauses;
3031 gimple_stmt_iterator gsi;
3033 clauses = gimple_omp_task_clauses (entry_stmt);
3035 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3036 if (c)
3037 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3038 else
3039 cond = boolean_true_node;
3041 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3042 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3044 gsi = gsi_last_bb (bb);
3045 t = gimple_omp_task_data_arg (entry_stmt);
3046 if (t == NULL)
3047 t2 = null_pointer_node;
3048 else
3049 t2 = build_fold_addr_expr (t);
3050 t1 = build_fold_addr_expr (gimple_omp_task_child_fn (entry_stmt));
3051 t = gimple_omp_task_copy_fn (entry_stmt);
3052 if (t == NULL)
3053 t3 = null_pointer_node;
3054 else
3055 t3 = build_fold_addr_expr (t);
3057 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3058 gimple_omp_task_arg_size (entry_stmt),
3059 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3061 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3062 false, GSI_CONTINUE_LINKING);
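/* For example, #pragma omp task if (cond) untied around BODY becomes,
   roughly,

      GOMP_task (body_fn, &.omp_data_o, copy_fn,
                 arg_size, arg_align, cond, flags);

   where BODY_FN stands for the outlined task body, COPY_FN is null
   unless a task firstprivate constructor is needed, COND is true when
   there is no if clause, and bit 0 of FLAGS is set for an untied
   task.  */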
3066 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3067 catch handler and return it. This prevents programs from violating the
3068 structured block semantics with throws. */
3070 static gimple_seq
3071 maybe_catch_exception (gimple_seq body)
3073 gimple f, t;
3075 if (!flag_exceptions)
3076 return body;
3078 if (lang_protect_cleanup_actions)
3079 t = lang_protect_cleanup_actions ();
3080 else
3081 t = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
3083 f = gimple_build_eh_filter (NULL, gimple_seq_alloc_with_stmt (t));
3084 gimple_eh_filter_set_must_not_throw (f, true);
3086 t = gimple_build_try (body, gimple_seq_alloc_with_stmt (f),
3087 GIMPLE_TRY_CATCH);
3089 return gimple_seq_alloc_with_stmt (t);
3092 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
3094 static tree
3095 list2chain (tree list)
3097 tree t;
3099 for (t = list; t; t = TREE_CHAIN (t))
3101 tree var = TREE_VALUE (t);
3102 if (TREE_CHAIN (t))
3103 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
3104 else
3105 TREE_CHAIN (var) = NULL_TREE;
3108 return list ? TREE_VALUE (list) : NULL_TREE;
3112 /* Remove barriers in REGION->EXIT's block. Note that this is only
3113 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3114 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3115 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
3116 removed. */
3118 static void
3119 remove_exit_barrier (struct omp_region *region)
3121 gimple_stmt_iterator gsi;
3122 basic_block exit_bb;
3123 edge_iterator ei;
3124 edge e;
3125 gimple stmt;
3126 int any_addressable_vars = -1;
3128 exit_bb = region->exit;
3130 /* If the parallel region doesn't return, we don't have REGION->EXIT
3131 block at all. */
3132 if (! exit_bb)
3133 return;
3135 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3136 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3137 statements that can appear in between are extremely limited -- no
3138 memory operations at all. Here, we allow nothing at all, so the
3139 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3140 gsi = gsi_last_bb (exit_bb);
3141 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3142 gsi_prev (&gsi);
3143 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3144 return;
3146 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3148 gsi = gsi_last_bb (e->src);
3149 if (gsi_end_p (gsi))
3150 continue;
3151 stmt = gsi_stmt (gsi);
3152 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3153 && !gimple_omp_return_nowait_p (stmt))
3155 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3156 in many cases. If there could be tasks queued, the barrier
3157 might be needed to let the tasks run before some local
3158 variable of the parallel that the task uses as shared
3159 runs out of scope. The task can be spawned either
3160 from within the current function (this would be easy to check)
3161 or from some function it calls and gets passed an address
3162 of such a variable. */
3163 if (any_addressable_vars < 0)
3165 gimple parallel_stmt = last_stmt (region->entry);
3166 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3167 tree local_decls = DECL_STRUCT_FUNCTION (child_fun)->local_decls;
3168 tree block;
3170 any_addressable_vars = 0;
3171 for (; local_decls; local_decls = TREE_CHAIN (local_decls))
3172 if (TREE_ADDRESSABLE (TREE_VALUE (local_decls)))
3174 any_addressable_vars = 1;
3175 break;
3177 for (block = gimple_block (stmt);
3178 !any_addressable_vars
3179 && block
3180 && TREE_CODE (block) == BLOCK;
3181 block = BLOCK_SUPERCONTEXT (block))
3183 for (local_decls = BLOCK_VARS (block);
3184 local_decls;
3185 local_decls = TREE_CHAIN (local_decls))
3186 if (TREE_ADDRESSABLE (local_decls))
3188 any_addressable_vars = 1;
3189 break;
3191 if (block == gimple_block (parallel_stmt))
3192 break;
3195 if (!any_addressable_vars)
3196 gimple_omp_return_set_nowait (stmt);
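/* For example, in

      #pragma omp parallel
      {
      #pragma omp for
        for (i = 0; i < n; i++)
          a[i] = f (i);
      }                            // implicit barrier at the parallel end

   the barrier implied by the end of the worksharing loop is immediately
   followed by the parallel region's own implicit barrier, so the former
   can be dropped -- unless a queued task might still reference an
   addressable local of the parallel, as checked above.  */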
3201 static void
3202 remove_exit_barriers (struct omp_region *region)
3204 if (region->type == GIMPLE_OMP_PARALLEL)
3205 remove_exit_barrier (region);
3207 if (region->inner)
3209 region = region->inner;
3210 remove_exit_barriers (region);
3211 while (region->next)
3213 region = region->next;
3214 remove_exit_barriers (region);
3219 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3220 calls. These can't be declared as const functions, but
3221 within one parallel body they are constant, so they can be
3222 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3223 which are declared const. Similarly for the task body, except
3224 that in an untied task omp_get_thread_num () can change at any task
3225 scheduling point. */
3227 static void
3228 optimize_omp_library_calls (gimple entry_stmt)
3230 basic_block bb;
3231 gimple_stmt_iterator gsi;
3232 tree thr_num_id
3233 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3234 tree num_thr_id
3235 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3236 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3237 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3238 OMP_CLAUSE_UNTIED) != NULL);
3240 FOR_EACH_BB (bb)
3241 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3243 gimple call = gsi_stmt (gsi);
3244 tree decl;
3246 if (is_gimple_call (call)
3247 && (decl = gimple_call_fndecl (call))
3248 && DECL_EXTERNAL (decl)
3249 && TREE_PUBLIC (decl)
3250 && DECL_INITIAL (decl) == NULL)
3252 tree built_in;
3254 if (DECL_NAME (decl) == thr_num_id)
3256 /* In #pragma omp task untied, omp_get_thread_num () can change
3257 during the execution of the task region. */
3258 if (untied_task)
3259 continue;
3260 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3262 else if (DECL_NAME (decl) == num_thr_id)
3263 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3264 else
3265 continue;
3267 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3268 || gimple_call_num_args (call) != 0)
3269 continue;
3271 if (flag_exceptions && !TREE_NOTHROW (decl))
3272 continue;
3274 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3275 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
3276 != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
3277 continue;
3279 gimple_call_set_fndecl (call, built_in);
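/* For example, inside one parallel body

      int tid = omp_get_thread_num ();
      int nth = omp_get_num_threads ();

   can be rewritten to call the const __builtin_omp_get_thread_num ()
   and __builtin_omp_get_num_threads (), letting later passes CSE
   repeated calls.  The exception is omp_get_thread_num () inside an
   untied task, where the executing thread may change at any task
   scheduling point.  */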
3284 /* Expand the OpenMP parallel or task directive starting at REGION. */
3286 static void
3287 expand_omp_taskreg (struct omp_region *region)
3289 basic_block entry_bb, exit_bb, new_bb;
3290 struct function *child_cfun;
3291 tree child_fn, block, t, ws_args, *tp;
3292 tree save_current;
3293 gimple_stmt_iterator gsi;
3294 gimple entry_stmt, stmt;
3295 edge e;
3297 entry_stmt = last_stmt (region->entry);
3298 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3299 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3300 /* If this function has been already instrumented, make sure
3301 the child function isn't instrumented again. */
3302 child_cfun->after_tree_profile = cfun->after_tree_profile;
3304 entry_bb = region->entry;
3305 exit_bb = region->exit;
3307 if (is_combined_parallel (region))
3308 ws_args = region->ws_args;
3309 else
3310 ws_args = NULL_TREE;
3312 if (child_cfun->cfg)
3314 /* Due to inlining, it may happen that we have already outlined
3315 the region, in which case all we need to do is make the
3316 sub-graph unreachable and emit the parallel call. */
3317 edge entry_succ_e, exit_succ_e;
3318 gimple_stmt_iterator gsi;
3320 entry_succ_e = single_succ_edge (entry_bb);
3322 gsi = gsi_last_bb (entry_bb);
3323 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3324 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3325 gsi_remove (&gsi, true);
3327 new_bb = entry_bb;
3328 if (exit_bb)
3330 exit_succ_e = single_succ_edge (exit_bb);
3331 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3333 remove_edge_and_dominated_blocks (entry_succ_e);
3335 else
3337 /* If the parallel region needs data sent from the parent
3338 function, then the very first statement (except possible
3339 tree profile counter updates) of the parallel body
3340 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3341 &.OMP_DATA_O is passed as an argument to the child function,
3342 we need to replace it with the argument as seen by the child
3343 function.
3345 In most cases, this will end up being the identity assignment
3346 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3347 a function call that has been inlined, the original PARM_DECL
3348 .OMP_DATA_I may have been converted into a different local
3349 variable, in which case we need to keep the assignment. */
3350 if (gimple_omp_taskreg_data_arg (entry_stmt))
3352 basic_block entry_succ_bb = single_succ (entry_bb);
3353 gimple_stmt_iterator gsi;
3354 tree arg, narg;
3355 gimple parcopy_stmt = NULL;
3357 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3359 gimple stmt;
3361 gcc_assert (!gsi_end_p (gsi));
3362 stmt = gsi_stmt (gsi);
3363 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3364 continue;
3366 if (gimple_num_ops (stmt) == 2)
3368 tree arg = gimple_assign_rhs1 (stmt);
3370 /* We ignore the subcode because we're
3371 effectively doing a STRIP_NOPS. */
3373 if (TREE_CODE (arg) == ADDR_EXPR
3374 && TREE_OPERAND (arg, 0)
3375 == gimple_omp_taskreg_data_arg (entry_stmt))
3377 parcopy_stmt = stmt;
3378 break;
3383 gcc_assert (parcopy_stmt != NULL);
3384 arg = DECL_ARGUMENTS (child_fn);
3386 if (!gimple_in_ssa_p (cfun))
3388 if (gimple_assign_lhs (parcopy_stmt) == arg)
3389 gsi_remove (&gsi, true);
3390 else
3392 /* ?? Is setting the subcode really necessary ?? */
3393 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3394 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3397 else
3399 /* If we are in ssa form, we must load the value from the default
3400 definition of the argument. That should not be defined now,
3401 since the argument is not used uninitialized. */
3402 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3403 narg = make_ssa_name (arg, gimple_build_nop ());
3404 set_default_def (arg, narg);
3405 /* ?? Is setting the subcode really necessary ?? */
3406 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3407 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3408 update_stmt (parcopy_stmt);
3412 /* Declare local variables needed in CHILD_CFUN. */
3413 block = DECL_INITIAL (child_fn);
3414 BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
3415 /* The gimplifier could record temporaries in parallel/task block
3416 rather than in containing function's local_decls chain,
3417 which would mean cgraph missed finalizing them. Do it now. */
3418 for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
3419 if (TREE_CODE (t) == VAR_DECL
3420 && TREE_STATIC (t)
3421 && !DECL_EXTERNAL (t))
3422 varpool_finalize_decl (t);
3423 DECL_SAVED_TREE (child_fn) = NULL;
3424 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3425 TREE_USED (block) = 1;
3427 /* Reset DECL_CONTEXT on function arguments. */
3428 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
3429 DECL_CONTEXT (t) = child_fn;
3431 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3432 so that it can be moved to the child function. */
3433 gsi = gsi_last_bb (entry_bb);
3434 stmt = gsi_stmt (gsi);
3435 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3436 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3437 gsi_remove (&gsi, true);
3438 e = split_block (entry_bb, stmt);
3439 entry_bb = e->dest;
3440 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3442 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3443 if (exit_bb)
3445 gsi = gsi_last_bb (exit_bb);
3446 gcc_assert (!gsi_end_p (gsi)
3447 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3448 stmt = gimple_build_return (NULL);
3449 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3450 gsi_remove (&gsi, true);
3453 /* Move the parallel region into CHILD_CFUN. */
3455 if (gimple_in_ssa_p (cfun))
3457 push_cfun (child_cfun);
3458 init_tree_ssa (child_cfun);
3459 init_ssa_operands ();
3460 cfun->gimple_df->in_ssa_p = true;
3461 pop_cfun ();
3462 block = NULL_TREE;
3464 else
3465 block = gimple_block (entry_stmt);
3467 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3468 if (exit_bb)
3469 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3471 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3472 for (tp = &child_cfun->local_decls; *tp; )
3473 if (DECL_CONTEXT (TREE_VALUE (*tp)) != cfun->decl)
3474 tp = &TREE_CHAIN (*tp);
3475 else
3476 *tp = TREE_CHAIN (*tp);
3478 /* Inform the callgraph about the new function. */
3479 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3480 = cfun->curr_properties;
3481 cgraph_add_new_function (child_fn, true);
3483 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3484 fixed in a following pass. */
3485 push_cfun (child_cfun);
3486 save_current = current_function_decl;
3487 current_function_decl = child_fn;
3488 if (optimize)
3489 optimize_omp_library_calls (entry_stmt);
3490 rebuild_cgraph_edges ();
3492 /* Some EH regions might become dead, see PR34608. If
3493 pass_cleanup_cfg isn't the first pass to happen with the
3494 new child, these dead EH edges might cause problems.
3495 Clean them up now. */
3496 if (flag_exceptions)
3498 basic_block bb;
3499 bool changed = false;
3501 FOR_EACH_BB (bb)
3502 changed |= gimple_purge_dead_eh_edges (bb);
3503 if (changed)
3504 cleanup_tree_cfg ();
3506 if (gimple_in_ssa_p (cfun))
3507 update_ssa (TODO_update_ssa);
3508 current_function_decl = save_current;
3509 pop_cfun ();
3512 /* Emit a library call to launch the children threads. */
3513 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3514 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3515 else
3516 expand_task_call (new_bb, entry_stmt);
3517 update_ssa (TODO_update_ssa_only_virtuals);
3521 /* A subroutine of expand_omp_for. Generate code for a parallel
3522 loop with any schedule. Given parameters:
3524 for (V = N1; V cond N2; V += STEP) BODY;
3526 where COND is "<" or ">", we generate pseudocode
3528 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3529 if (more) goto L0; else goto L3;
3530 L0:
3531 V = istart0;
3532 iend = iend0;
3533 L1:
3534 BODY;
3535 V += STEP;
3536 if (V cond iend) goto L1; else goto L2;
3537 L2:
3538 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3539 L3:
3541 If this is a combined omp parallel loop, instead of the call to
3542 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3544 For collapsed loops, given parameters:
3545 collapse(3)
3546 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3547 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3548 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3549 BODY;
3551 we generate pseudocode
3553 if (cond3 is <)
3554 adj = STEP3 - 1;
3555 else
3556 adj = STEP3 + 1;
3557 count3 = (adj + N32 - N31) / STEP3;
3558 if (cond2 is <)
3559 adj = STEP2 - 1;
3560 else
3561 adj = STEP2 + 1;
3562 count2 = (adj + N22 - N21) / STEP2;
3563 if (cond1 is <)
3564 adj = STEP1 - 1;
3565 else
3566 adj = STEP1 + 1;
3567 count1 = (adj + N12 - N11) / STEP1;
3568 count = count1 * count2 * count3;
3569 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3570 if (more) goto L0; else goto L3;
3571 L0:
3572 V = istart0;
3573 T = V;
3574 V3 = N31 + (T % count3) * STEP3;
3575 T = T / count3;
3576 V2 = N21 + (T % count2) * STEP2;
3577 T = T / count2;
3578 V1 = N11 + T * STEP1;
3579 iend = iend0;
3580 L1:
3581 BODY;
3582 V += 1;
3583 if (V < iend) goto L10; else goto L2;
3584 L10:
3585 V3 += STEP3;
3586 if (V3 cond3 N32) goto L1; else goto L11;
3587 L11:
3588 V3 = N31;
3589 V2 += STEP2;
3590 if (V2 cond2 N22) goto L1; else goto L12;
3591 L12:
3592 V2 = N21;
3593 V1 += STEP1;
3594 goto L1;
3595 L2:
3596 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3597 L3:
3599 */
3601 static void
3602 expand_omp_for_generic (struct omp_region *region,
3603 struct omp_for_data *fd,
3604 enum built_in_function start_fn,
3605 enum built_in_function next_fn)
3607 tree type, istart0, iend0, iend;
3608 tree t, vmain, vback, bias = NULL_TREE;
3609 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3610 basic_block l2_bb = NULL, l3_bb = NULL;
3611 gimple_stmt_iterator gsi;
3612 gimple stmt;
3613 bool in_combined_parallel = is_combined_parallel (region);
3614 bool broken_loop = region->cont == NULL;
3615 edge e, ne;
3616 tree *counts = NULL;
3617 int i;
3619 gcc_assert (!broken_loop || !in_combined_parallel);
3620 gcc_assert (fd->iter_type == long_integer_type_node
3621 || !in_combined_parallel);
3623 type = TREE_TYPE (fd->loop.v);
3624 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3625 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3626 TREE_ADDRESSABLE (istart0) = 1;
3627 TREE_ADDRESSABLE (iend0) = 1;
3628 if (gimple_in_ssa_p (cfun))
3630 add_referenced_var (istart0);
3631 add_referenced_var (iend0);
3634 /* See if we need to bias by LLONG_MIN. */
3635 if (fd->iter_type == long_long_unsigned_type_node
3636 && TREE_CODE (type) == INTEGER_TYPE
3637 && !TYPE_UNSIGNED (type))
3639 tree n1, n2;
3641 if (fd->loop.cond_code == LT_EXPR)
3643 n1 = fd->loop.n1;
3644 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3646 else
3648 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3649 n2 = fd->loop.n1;
3651 if (TREE_CODE (n1) != INTEGER_CST
3652 || TREE_CODE (n2) != INTEGER_CST
3653 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3654 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3657 entry_bb = region->entry;
3658 cont_bb = region->cont;
3659 collapse_bb = NULL;
3660 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3661 gcc_assert (broken_loop
3662 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3663 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3664 l1_bb = single_succ (l0_bb);
3665 if (!broken_loop)
3667 l2_bb = create_empty_bb (cont_bb);
3668 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3669 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3671 else
3672 l2_bb = NULL;
3673 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3674 exit_bb = region->exit;
3676 gsi = gsi_last_bb (entry_bb);
3678 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3679 if (fd->collapse > 1)
3681 /* collapsed loops need work for expansion in SSA form. */
3682 gcc_assert (!gimple_in_ssa_p (cfun));
3683 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3684 for (i = 0; i < fd->collapse; i++)
3686 tree itype = TREE_TYPE (fd->loops[i].v);
3688 if (POINTER_TYPE_P (itype))
3689 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3690 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3691 ? -1 : 1));
3692 t = fold_build2 (PLUS_EXPR, itype,
3693 fold_convert (itype, fd->loops[i].step), t);
3694 t = fold_build2 (PLUS_EXPR, itype, t,
3695 fold_convert (itype, fd->loops[i].n2));
3696 t = fold_build2 (MINUS_EXPR, itype, t,
3697 fold_convert (itype, fd->loops[i].n1));
3698 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3699 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3700 fold_build1 (NEGATE_EXPR, itype, t),
3701 fold_build1 (NEGATE_EXPR, itype,
3702 fold_convert (itype,
3703 fd->loops[i].step)));
3704 else
3705 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3706 fold_convert (itype, fd->loops[i].step));
3707 t = fold_convert (type, t);
3708 if (TREE_CODE (t) == INTEGER_CST)
3709 counts[i] = t;
3710 else
3712 counts[i] = create_tmp_var (type, ".count");
3713 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3714 true, GSI_SAME_STMT);
3715 stmt = gimple_build_assign (counts[i], t);
3716 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3718 if (SSA_VAR_P (fd->loop.n2))
3720 if (i == 0)
3721 t = counts[0];
3722 else
3724 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3725 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3726 true, GSI_SAME_STMT);
3728 stmt = gimple_build_assign (fd->loop.n2, t);
3729 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3733 if (in_combined_parallel)
3735 /* In a combined parallel loop, emit a call to
3736 GOMP_loop_foo_next. */
3737 t = build_call_expr (built_in_decls[next_fn], 2,
3738 build_fold_addr_expr (istart0),
3739 build_fold_addr_expr (iend0));
3741 else
3743 tree t0, t1, t2, t3, t4;
3744 /* If this is not a combined parallel loop, emit a call to
3745 GOMP_loop_foo_start in ENTRY_BB. */
3746 t4 = build_fold_addr_expr (iend0);
3747 t3 = build_fold_addr_expr (istart0);
3748 t2 = fold_convert (fd->iter_type, fd->loop.step);
3749 if (POINTER_TYPE_P (type)
3750 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3752 /* Avoid casting pointers to integer of a different size. */
3753 tree itype
3754 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3755 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3756 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3758 else
3760 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3761 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3763 if (bias)
3765 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3766 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3768 if (fd->iter_type == long_integer_type_node)
3770 if (fd->chunk_size)
3772 t = fold_convert (fd->iter_type, fd->chunk_size);
3773 t = build_call_expr (built_in_decls[start_fn], 6,
3774 t0, t1, t2, t, t3, t4);
3776 else
3777 t = build_call_expr (built_in_decls[start_fn], 5,
3778 t0, t1, t2, t3, t4);
3780 else
3782 tree t5;
3783 tree c_bool_type;
3785 /* The GOMP_loop_ull_*start functions have an additional boolean
3786 argument, true for < loops and false for > loops.
3787 In Fortran, the C bool type can be different from
3788 boolean_type_node. */
3789 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3790 t5 = build_int_cst (c_bool_type,
3791 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3792 if (fd->chunk_size)
3794 t = fold_convert (fd->iter_type, fd->chunk_size);
3795 t = build_call_expr (built_in_decls[start_fn], 7,
3796 t5, t0, t1, t2, t, t3, t4);
3798 else
3799 t = build_call_expr (built_in_decls[start_fn], 6,
3800 t5, t0, t1, t2, t3, t4);
3803 if (TREE_TYPE (t) != boolean_type_node)
3804 t = fold_build2 (NE_EXPR, boolean_type_node,
3805 t, build_int_cst (TREE_TYPE (t), 0));
3806 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3807 true, GSI_SAME_STMT);
3808 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3810 /* Remove the GIMPLE_OMP_FOR statement. */
3811 gsi_remove (&gsi, true);
3813 /* Iteration setup for sequential loop goes in L0_BB. */
3814 gsi = gsi_start_bb (l0_bb);
3815 t = istart0;
3816 if (bias)
3817 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3818 if (POINTER_TYPE_P (type))
3819 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3820 0), t);
3821 t = fold_convert (type, t);
3822 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3823 false, GSI_CONTINUE_LINKING);
3824 stmt = gimple_build_assign (fd->loop.v, t);
3825 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3827 t = iend0;
3828 if (bias)
3829 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3830 if (POINTER_TYPE_P (type))
3831 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3832 0), t);
3833 t = fold_convert (type, t);
3834 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3835 false, GSI_CONTINUE_LINKING);
3836 if (fd->collapse > 1)
3838 tree tem = create_tmp_var (type, ".tem");
3840 stmt = gimple_build_assign (tem, fd->loop.v);
3841 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3842 for (i = fd->collapse - 1; i >= 0; i--)
3844 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3845 itype = vtype;
3846 if (POINTER_TYPE_P (vtype))
3847 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3848 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3849 t = fold_convert (itype, t);
3850 t = fold_build2 (MULT_EXPR, itype, t,
3851 fold_convert (itype, fd->loops[i].step));
3852 if (POINTER_TYPE_P (vtype))
3853 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3854 fd->loops[i].n1, fold_convert (sizetype, t));
3855 else
3856 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3857 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3858 false, GSI_CONTINUE_LINKING);
3859 stmt = gimple_build_assign (fd->loops[i].v, t);
3860 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3861 if (i != 0)
3863 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3864 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3865 false, GSI_CONTINUE_LINKING);
3866 stmt = gimple_build_assign (tem, t);
3867 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3872 if (!broken_loop)
3874 /* Code to control the increment and predicate for the sequential
3875 loop goes in the CONT_BB. */
3876 gsi = gsi_last_bb (cont_bb);
3877 stmt = gsi_stmt (gsi);
3878 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3879 vmain = gimple_omp_continue_control_use (stmt);
3880 vback = gimple_omp_continue_control_def (stmt);
3882 if (POINTER_TYPE_P (type))
3883 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3884 fold_convert (sizetype, fd->loop.step));
3885 else
3886 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3887 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3888 true, GSI_SAME_STMT);
3889 stmt = gimple_build_assign (vback, t);
3890 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3892 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3893 stmt = gimple_build_cond_empty (t);
3894 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3896 /* Remove GIMPLE_OMP_CONTINUE. */
3897 gsi_remove (&gsi, true);
3899 if (fd->collapse > 1)
3901 basic_block last_bb, bb;
3903 last_bb = cont_bb;
3904 for (i = fd->collapse - 1; i >= 0; i--)
3906 tree vtype = TREE_TYPE (fd->loops[i].v);
3908 bb = create_empty_bb (last_bb);
3909 gsi = gsi_start_bb (bb);
3911 if (i < fd->collapse - 1)
3913 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3914 e->probability = REG_BR_PROB_BASE / 8;
3916 t = fd->loops[i + 1].n1;
3917 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3918 false, GSI_CONTINUE_LINKING);
3919 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3920 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3922 else
3923 collapse_bb = bb;
3925 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3927 if (POINTER_TYPE_P (vtype))
3928 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3929 fd->loops[i].v,
3930 fold_convert (sizetype, fd->loops[i].step));
3931 else
3932 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3933 fd->loops[i].step);
3934 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3935 false, GSI_CONTINUE_LINKING);
3936 stmt = gimple_build_assign (fd->loops[i].v, t);
3937 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3939 if (i > 0)
3941 t = fd->loops[i].n2;
3942 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3943 false, GSI_CONTINUE_LINKING);
3944 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
3945 fd->loops[i].v, t);
3946 stmt = gimple_build_cond_empty (t);
3947 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3948 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
3949 e->probability = REG_BR_PROB_BASE * 7 / 8;
3951 else
3952 make_edge (bb, l1_bb, EDGE_FALLTHRU);
3953 last_bb = bb;
3957 /* Emit code to get the next parallel iteration in L2_BB. */
3958 gsi = gsi_start_bb (l2_bb);
3960 t = build_call_expr (built_in_decls[next_fn], 2,
3961 build_fold_addr_expr (istart0),
3962 build_fold_addr_expr (iend0));
3963 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3964 false, GSI_CONTINUE_LINKING);
3965 if (TREE_TYPE (t) != boolean_type_node)
3966 t = fold_build2 (NE_EXPR, boolean_type_node,
3967 t, build_int_cst (TREE_TYPE (t), 0));
3968 stmt = gimple_build_cond_empty (t);
3969 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3972 /* Add the loop cleanup function. */
3973 gsi = gsi_last_bb (exit_bb);
3974 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
3975 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
3976 else
3977 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
3978 stmt = gimple_build_call (t, 0);
3979 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3980 gsi_remove (&gsi, true);
3982 /* Connect the new blocks. */
3983 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
3984 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
3986 if (!broken_loop)
3988 gimple_seq phis;
3990 e = find_edge (cont_bb, l3_bb);
3991 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
3993 phis = phi_nodes (l3_bb);
3994 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
3996 gimple phi = gsi_stmt (gsi);
3997 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
3998 PHI_ARG_DEF_FROM_EDGE (phi, e));
4000 remove_edge (e);
4002 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4003 if (fd->collapse > 1)
4005 e = find_edge (cont_bb, l1_bb);
4006 remove_edge (e);
4007 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4009 else
4011 e = find_edge (cont_bb, l1_bb);
4012 e->flags = EDGE_TRUE_VALUE;
4014 e->probability = REG_BR_PROB_BASE * 7 / 8;
4015 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4016 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4018 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4019 recompute_dominator (CDI_DOMINATORS, l2_bb));
4020 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4021 recompute_dominator (CDI_DOMINATORS, l3_bb));
4022 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4023 recompute_dominator (CDI_DOMINATORS, l0_bb));
4024 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4025 recompute_dominator (CDI_DOMINATORS, l1_bb));
4030 /* A subroutine of expand_omp_for. Generate code for a parallel
4031 loop with static schedule and no specified chunk size. Given
4032 parameters:
4034 for (V = N1; V cond N2; V += STEP) BODY;
4036 where COND is "<" or ">", we generate pseudocode
4038 if (cond is <)
4039 adj = STEP - 1;
4040 else
4041 adj = STEP + 1;
4042 if ((__typeof (V)) -1 > 0 && cond is >)
4043 n = -(adj + N2 - N1) / -STEP;
4044 else
4045 n = (adj + N2 - N1) / STEP;
4046 q = n / nthreads;
4047 q += (q * nthreads != n);
4048 s0 = q * threadid;
4049 e0 = min(s0 + q, n);
4050 V = s0 * STEP + N1;
4051 if (s0 >= e0) goto L2; else goto L0;
4053 e = e0 * STEP + N1;
4055 BODY;
4056 V += STEP;
4057 if (V cond e) goto L1;
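/* As an illustration only (not generated code): for a loop such as

     #pragma omp for schedule(static)
     for (i = 0; i < 10; i++)
       body (i);

   run by 4 threads, the pseudocode above computes n = 10 and q = 3
   (2 rounded up, because 2 * 4 != 10), and hands each thread the
   half-open iteration range [s0, e0):

     thread 0: [0, 3)    thread 1: [3, 6)
     thread 2: [6, 9)    thread 3: [9, 10)

   A thread whose s0 >= e0 skips the body entirely.  */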
4061 static void
4062 expand_omp_for_static_nochunk (struct omp_region *region,
4063 struct omp_for_data *fd)
4065 tree n, q, s0, e0, e, t, nthreads, threadid;
4066 tree type, itype, vmain, vback;
4067 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4068 basic_block fin_bb;
4069 gimple_stmt_iterator gsi;
4070 gimple stmt;
4072 itype = type = TREE_TYPE (fd->loop.v);
4073 if (POINTER_TYPE_P (type))
4074 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4076 entry_bb = region->entry;
4077 cont_bb = region->cont;
4078 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4079 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4080 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4081 body_bb = single_succ (seq_start_bb);
4082 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4083 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4084 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4085 exit_bb = region->exit;
4087 /* Iteration space partitioning goes in ENTRY_BB. */
4088 gsi = gsi_last_bb (entry_bb);
4089 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4091 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4092 t = fold_convert (itype, t);
4093 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4094 true, GSI_SAME_STMT);
4096 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4097 t = fold_convert (itype, t);
4098 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4099 true, GSI_SAME_STMT);
4101 fd->loop.n1
4102 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4103 true, NULL_TREE, true, GSI_SAME_STMT);
4104 fd->loop.n2
4105 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4106 true, NULL_TREE, true, GSI_SAME_STMT);
4107 fd->loop.step
4108 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4109 true, NULL_TREE, true, GSI_SAME_STMT);
4111 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4112 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4113 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4114 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4115 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4116 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4117 fold_build1 (NEGATE_EXPR, itype, t),
4118 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4119 else
4120 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4121 t = fold_convert (itype, t);
4122 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4124 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4125 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4127 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4128 t = fold_build2 (NE_EXPR, itype, t, n);
4129 t = fold_build2 (PLUS_EXPR, itype, q, t);
4130 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4132 t = build2 (MULT_EXPR, itype, q, threadid);
4133 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4135 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4136 t = fold_build2 (MIN_EXPR, itype, t, n);
4137 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4139 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4140 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4142 /* Remove the GIMPLE_OMP_FOR statement. */
4143 gsi_remove (&gsi, true);
4145 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4146 gsi = gsi_start_bb (seq_start_bb);
4148 t = fold_convert (itype, s0);
4149 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4150 if (POINTER_TYPE_P (type))
4151 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4152 fold_convert (sizetype, t));
4153 else
4154 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4155 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4156 false, GSI_CONTINUE_LINKING);
4157 stmt = gimple_build_assign (fd->loop.v, t);
4158 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4160 t = fold_convert (itype, e0);
4161 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4162 if (POINTER_TYPE_P (type))
4163 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4164 fold_convert (sizetype, t));
4165 else
4166 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4167 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4168 false, GSI_CONTINUE_LINKING);
4170 /* The code controlling the sequential loop replaces the
4171 GIMPLE_OMP_CONTINUE. */
4172 gsi = gsi_last_bb (cont_bb);
4173 stmt = gsi_stmt (gsi);
4174 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4175 vmain = gimple_omp_continue_control_use (stmt);
4176 vback = gimple_omp_continue_control_def (stmt);
4178 if (POINTER_TYPE_P (type))
4179 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4180 fold_convert (sizetype, fd->loop.step));
4181 else
4182 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4183 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4184 true, GSI_SAME_STMT);
4185 stmt = gimple_build_assign (vback, t);
4186 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4188 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4189 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4191 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4192 gsi_remove (&gsi, true);
4194 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4195 gsi = gsi_last_bb (exit_bb);
4196 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4197 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4198 false, GSI_SAME_STMT);
4199 gsi_remove (&gsi, true);
4201 /* Connect all the blocks. */
4202 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4203 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4205 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4206 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4208 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4209 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4210 recompute_dominator (CDI_DOMINATORS, body_bb));
4211 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4212 recompute_dominator (CDI_DOMINATORS, fin_bb));
4216 /* A subroutine of expand_omp_for. Generate code for a parallel
4217 loop with static schedule and a specified chunk size. Given
4218 parameters:
4220 for (V = N1; V cond N2; V += STEP) BODY;
4222 where COND is "<" or ">", we generate pseudocode
4224 if (cond is <)
4225 adj = STEP - 1;
4226 else
4227 adj = STEP + 1;
4228 if ((__typeof (V)) -1 > 0 && cond is >)
4229 n = -(adj + N2 - N1) / -STEP;
4230 else
4231 n = (adj + N2 - N1) / STEP;
4232 trip = 0;
4233 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4234 here so that V is defined
4235 if the loop is not entered
4237 s0 = (trip * nthreads + threadid) * CHUNK;
4238 e0 = min(s0 + CHUNK, n);
4239 if (s0 < n) goto L1; else goto L4;
4241 V = s0 * STEP + N1;
4242 e = e0 * STEP + N1;
4244 BODY;
4245 V += STEP;
4246 if (V cond e) goto L2; else goto L3;
4248 trip += 1;
4249 goto L0;
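/* As an illustration only (not generated code): for

     #pragma omp for schedule(static, 2)
     for (i = 0; i < 10; i++)
       body (i);

   run by 3 threads, n = 10 and CHUNK = 2, so each trip hands out
   nthreads * CHUNK = 6 consecutive iterations:

     trip 0: thread 0 -> [0, 2)   thread 1 -> [2, 4)    thread 2 -> [4, 6)
     trip 1: thread 0 -> [6, 8)   thread 1 -> [8, 10)   thread 2 -> s0 = 10 >= n, done
     trip 2: the remaining threads compute s0 >= n and leave the loop.  */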
4253 static void
4254 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4256 tree n, s0, e0, e, t;
4257 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4258 tree type, itype, v_main, v_back, v_extra;
4259 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4260 basic_block trip_update_bb, cont_bb, fin_bb;
4261 gimple_stmt_iterator si;
4262 gimple stmt;
4263 edge se;
4265 itype = type = TREE_TYPE (fd->loop.v);
4266 if (POINTER_TYPE_P (type))
4267 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4269 entry_bb = region->entry;
4270 se = split_block (entry_bb, last_stmt (entry_bb));
4271 entry_bb = se->src;
4272 iter_part_bb = se->dest;
4273 cont_bb = region->cont;
4274 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4275 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4276 == FALLTHRU_EDGE (cont_bb)->dest);
4277 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4278 body_bb = single_succ (seq_start_bb);
4279 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4280 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4281 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4282 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4283 exit_bb = region->exit;
4285 /* Trip and adjustment setup goes in ENTRY_BB. */
4286 si = gsi_last_bb (entry_bb);
4287 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4289 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4290 t = fold_convert (itype, t);
4291 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4292 true, GSI_SAME_STMT);
4294 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4295 t = fold_convert (itype, t);
4296 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4297 true, GSI_SAME_STMT);
4299 fd->loop.n1
4300 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4301 true, NULL_TREE, true, GSI_SAME_STMT);
4302 fd->loop.n2
4303 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4304 true, NULL_TREE, true, GSI_SAME_STMT);
4305 fd->loop.step
4306 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4307 true, NULL_TREE, true, GSI_SAME_STMT);
4308 fd->chunk_size
4309 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4310 true, NULL_TREE, true, GSI_SAME_STMT);
4312 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4313 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4314 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4315 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4316 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4317 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4318 fold_build1 (NEGATE_EXPR, itype, t),
4319 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4320 else
4321 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4322 t = fold_convert (itype, t);
4323 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4324 true, GSI_SAME_STMT);
4326 trip_var = create_tmp_var (itype, ".trip");
4327 if (gimple_in_ssa_p (cfun))
4329 add_referenced_var (trip_var);
4330 trip_init = make_ssa_name (trip_var, NULL);
4331 trip_main = make_ssa_name (trip_var, NULL);
4332 trip_back = make_ssa_name (trip_var, NULL);
4334 else
4336 trip_init = trip_var;
4337 trip_main = trip_var;
4338 trip_back = trip_var;
4341 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4342 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4344 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4345 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4346 if (POINTER_TYPE_P (type))
4347 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4348 fold_convert (sizetype, t));
4349 else
4350 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4351 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4352 true, GSI_SAME_STMT);
4354 /* Remove the GIMPLE_OMP_FOR. */
4355 gsi_remove (&si, true);
4357 /* Iteration space partitioning goes in ITER_PART_BB. */
4358 si = gsi_last_bb (iter_part_bb);
4360 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4361 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4362 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4363 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4364 false, GSI_CONTINUE_LINKING);
4366 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4367 t = fold_build2 (MIN_EXPR, itype, t, n);
4368 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4369 false, GSI_CONTINUE_LINKING);
4371 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4372 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4374 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4375 si = gsi_start_bb (seq_start_bb);
4377 t = fold_convert (itype, s0);
4378 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4379 if (POINTER_TYPE_P (type))
4380 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4381 fold_convert (sizetype, t));
4382 else
4383 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4384 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4385 false, GSI_CONTINUE_LINKING);
4386 stmt = gimple_build_assign (fd->loop.v, t);
4387 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4389 t = fold_convert (itype, e0);
4390 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4391 if (POINTER_TYPE_P (type))
4392 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4393 fold_convert (sizetype, t));
4394 else
4395 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4396 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4397 false, GSI_CONTINUE_LINKING);
4399 /* The code controlling the sequential loop goes in CONT_BB,
4400 replacing the GIMPLE_OMP_CONTINUE. */
4401 si = gsi_last_bb (cont_bb);
4402 stmt = gsi_stmt (si);
4403 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4404 v_main = gimple_omp_continue_control_use (stmt);
4405 v_back = gimple_omp_continue_control_def (stmt);
4407 if (POINTER_TYPE_P (type))
4408 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4409 fold_convert (sizetype, fd->loop.step));
4410 else
4411 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4412 stmt = gimple_build_assign (v_back, t);
4413 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4415 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4416 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4418 /* Remove GIMPLE_OMP_CONTINUE. */
4419 gsi_remove (&si, true);
4421 /* Trip update code goes into TRIP_UPDATE_BB. */
4422 si = gsi_start_bb (trip_update_bb);
4424 t = build_int_cst (itype, 1);
4425 t = build2 (PLUS_EXPR, itype, trip_main, t);
4426 stmt = gimple_build_assign (trip_back, t);
4427 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4429 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4430 si = gsi_last_bb (exit_bb);
4431 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4432 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4433 false, GSI_SAME_STMT);
4434 gsi_remove (&si, true);
4436 /* Connect the new blocks. */
4437 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4438 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4440 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4441 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4443 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4445 if (gimple_in_ssa_p (cfun))
4447 gimple_stmt_iterator psi;
4448 gimple phi;
4449 edge re, ene;
4450 edge_var_map_vector head;
4451 edge_var_map *vm;
4452 size_t i;
4454 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4455 remove arguments of the phi nodes in fin_bb. We need to create
4456 appropriate phi nodes in iter_part_bb instead. */
4457 se = single_pred_edge (fin_bb);
4458 re = single_succ_edge (trip_update_bb);
4459 head = redirect_edge_var_map_vector (re);
4460 ene = single_succ_edge (entry_bb);
4462 psi = gsi_start_phis (fin_bb);
4463 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4464 gsi_next (&psi), ++i)
4466 gimple nphi;
4468 phi = gsi_stmt (psi);
4469 t = gimple_phi_result (phi);
4470 gcc_assert (t == redirect_edge_var_map_result (vm));
4471 nphi = create_phi_node (t, iter_part_bb);
4472 SSA_NAME_DEF_STMT (t) = nphi;
4474 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4475 /* A special case -- fd->loop.v is not yet computed in
4476 iter_part_bb, so we need to use v_extra instead. */
4477 if (t == fd->loop.v)
4478 t = v_extra;
4479 add_phi_arg (nphi, t, ene);
4480 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re);
4482 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4483 redirect_edge_var_map_clear (re);
4484 while (1)
4486 psi = gsi_start_phis (fin_bb);
4487 if (gsi_end_p (psi))
4488 break;
4489 remove_phi_node (&psi, false);
4492 /* Make phi node for trip. */
4493 phi = create_phi_node (trip_main, iter_part_bb);
4494 SSA_NAME_DEF_STMT (trip_main) = phi;
4495 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
4496 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
4499 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4500 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4501 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4502 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4503 recompute_dominator (CDI_DOMINATORS, fin_bb));
4504 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4505 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4506 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4507 recompute_dominator (CDI_DOMINATORS, body_bb));
4511 /* Expand the OpenMP loop defined by REGION. */
4513 static void
4514 expand_omp_for (struct omp_region *region)
4516 struct omp_for_data fd;
4517 struct omp_for_data_loop *loops;
4519 loops
4520 = (struct omp_for_data_loop *)
4521 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4522 * sizeof (struct omp_for_data_loop));
4523 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4524 region->sched_kind = fd.sched_kind;
4526 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4527 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4528 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4529 if (region->cont)
4531 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4532 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4533 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4536 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4537 && !fd.have_ordered
4538 && fd.collapse == 1
4539 && region->cont != NULL)
4541 if (fd.chunk_size == NULL)
4542 expand_omp_for_static_nochunk (region, &fd);
4543 else
4544 expand_omp_for_static_chunk (region, &fd);
4546 else
4548 int fn_index, start_ix, next_ix;
4550 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4551 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4552 ? 3 : fd.sched_kind;
4553 fn_index += fd.have_ordered * 4;
4554 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4555 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
4556 if (fd.iter_type == long_long_unsigned_type_node)
4558 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4559 - BUILT_IN_GOMP_LOOP_STATIC_START;
4560 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4561 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4563 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4564 (enum built_in_function) next_ix);
4567 update_ssa (TODO_update_ssa_only_virtuals);
4571 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4573 v = GOMP_sections_start (n);
4575 switch (v)
4577 case 0:
4578 goto L2;
4579 case 1:
4580 section 1;
4581 goto L1;
4582 case 2:
4584 case n:
4586 default:
4587 abort ();
4590 v = GOMP_sections_next ();
4591 goto L0;
4593 reduction;
4595 If this is a combined parallel sections, replace the call to
4596 GOMP_sections_start with a call to GOMP_sections_next. */
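/* As an illustration only: a directive such as

     #pragma omp sections
     {
       #pragma omp section
         foo ();
       #pragma omp section
         bar ();
     }

   is expanded so that each thread repeatedly asks the runtime for work:
   GOMP_sections_start (2) (or GOMP_sections_next () on later iterations)
   returns 1 to run foo, 2 to run bar, and 0 once all sections have been
   handed out, at which point the thread branches to the exit block.  */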
4598 static void
4599 expand_omp_sections (struct omp_region *region)
4601 tree t, u, vin = NULL, vmain, vnext, l1, l2;
4602 VEC (tree,heap) *label_vec;
4603 unsigned len;
4604 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4605 gimple_stmt_iterator si, switch_si;
4606 gimple sections_stmt, stmt, cont;
4607 edge_iterator ei;
4608 edge e;
4609 struct omp_region *inner;
4610 unsigned i, casei;
4611 bool exit_reachable = region->cont != NULL;
4613 gcc_assert (exit_reachable == (region->exit != NULL));
4614 entry_bb = region->entry;
4615 l0_bb = single_succ (entry_bb);
4616 l1_bb = region->cont;
4617 l2_bb = region->exit;
4618 if (exit_reachable)
4620 if (single_pred (l2_bb) == l0_bb)
4621 l2 = gimple_block_label (l2_bb);
4622 else
4624 /* This can happen if there are reductions. */
4625 len = EDGE_COUNT (l0_bb->succs);
4626 gcc_assert (len > 0);
4627 e = EDGE_SUCC (l0_bb, len - 1);
4628 si = gsi_last_bb (e->dest);
4629 l2 = NULL_TREE;
4630 if (gsi_end_p (si)
4631 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4632 l2 = gimple_block_label (e->dest);
4633 else
4634 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4636 si = gsi_last_bb (e->dest);
4637 if (gsi_end_p (si)
4638 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4640 l2 = gimple_block_label (e->dest);
4641 break;
4645 default_bb = create_empty_bb (l1_bb->prev_bb);
4646 l1 = gimple_block_label (l1_bb);
4648 else
4650 default_bb = create_empty_bb (l0_bb);
4651 l1 = NULL_TREE;
4652 l2 = gimple_block_label (default_bb);
4655 /* We will build a switch() with enough cases for all the
4656 GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
4657 and a default case to abort if something goes wrong. */
4658 len = EDGE_COUNT (l0_bb->succs);
4660 /* Use VEC_quick_push on label_vec throughout, since we know the size
4661 in advance. */
4662 label_vec = VEC_alloc (tree, heap, len);
4664 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4665 GIMPLE_OMP_SECTIONS statement. */
4666 si = gsi_last_bb (entry_bb);
4667 sections_stmt = gsi_stmt (si);
4668 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4669 vin = gimple_omp_sections_control (sections_stmt);
4670 if (!is_combined_parallel (region))
4672 /* If we are not inside a combined parallel+sections region,
4673 call GOMP_sections_start. */
4674 t = build_int_cst (unsigned_type_node,
4675 exit_reachable ? len - 1 : len);
4676 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4677 stmt = gimple_build_call (u, 1, t);
4679 else
4681 /* Otherwise, call GOMP_sections_next. */
4682 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4683 stmt = gimple_build_call (u, 0);
4685 gimple_call_set_lhs (stmt, vin);
4686 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4687 gsi_remove (&si, true);
4689 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4690 L0_BB. */
4691 switch_si = gsi_last_bb (l0_bb);
4692 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4693 if (exit_reachable)
4695 cont = last_stmt (l1_bb);
4696 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4697 vmain = gimple_omp_continue_control_use (cont);
4698 vnext = gimple_omp_continue_control_def (cont);
4700 else
4702 vmain = vin;
4703 vnext = NULL_TREE;
4706 i = 0;
4707 if (exit_reachable)
4709 t = build3 (CASE_LABEL_EXPR, void_type_node,
4710 build_int_cst (unsigned_type_node, 0), NULL, l2);
4711 VEC_quick_push (tree, label_vec, t);
4712 i++;
4715 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4716 for (inner = region->inner, casei = 1;
4717 inner;
4718 inner = inner->next, i++, casei++)
4720 basic_block s_entry_bb, s_exit_bb;
4722 /* Skip optional reduction region. */
4723 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4725 --i;
4726 --casei;
4727 continue;
4730 s_entry_bb = inner->entry;
4731 s_exit_bb = inner->exit;
4733 t = gimple_block_label (s_entry_bb);
4734 u = build_int_cst (unsigned_type_node, casei);
4735 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
4736 VEC_quick_push (tree, label_vec, u);
4738 si = gsi_last_bb (s_entry_bb);
4739 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4740 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4741 gsi_remove (&si, true);
4742 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4744 if (s_exit_bb == NULL)
4745 continue;
4747 si = gsi_last_bb (s_exit_bb);
4748 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4749 gsi_remove (&si, true);
4751 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4754 /* Error handling code goes in DEFAULT_BB. */
4755 t = gimple_block_label (default_bb);
4756 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
4757 make_edge (l0_bb, default_bb, 0);
4759 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4760 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4761 gsi_remove (&switch_si, true);
4762 VEC_free (tree, heap, label_vec);
4764 si = gsi_start_bb (default_bb);
4765 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4766 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4768 if (exit_reachable)
4770 /* Code to get the next section goes in L1_BB. */
4771 si = gsi_last_bb (l1_bb);
4772 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4774 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4775 gimple_call_set_lhs (stmt, vnext);
4776 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4777 gsi_remove (&si, true);
4779 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4781 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4782 si = gsi_last_bb (l2_bb);
4783 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4784 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4785 else
4786 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4787 stmt = gimple_build_call (t, 0);
4788 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4789 gsi_remove (&si, true);
4792 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4796 /* Expand code for an OpenMP single directive. We've already expanded
4797 much of the code; here we simply place the GOMP_barrier call. */
4799 static void
4800 expand_omp_single (struct omp_region *region)
4802 basic_block entry_bb, exit_bb;
4803 gimple_stmt_iterator si;
4804 bool need_barrier = false;
4806 entry_bb = region->entry;
4807 exit_bb = region->exit;
4809 si = gsi_last_bb (entry_bb);
4810 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4811 be removed. We need to ensure that the thread that entered the single
4812 does not exit before the data is copied out by the other threads. */
4813 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4814 OMP_CLAUSE_COPYPRIVATE))
4815 need_barrier = true;
4816 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4817 gsi_remove (&si, true);
4818 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4820 si = gsi_last_bb (exit_bb);
4821 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4822 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4823 false, GSI_SAME_STMT);
4824 gsi_remove (&si, true);
4825 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4829 /* Generic expansion for OpenMP synchronization directives: master,
4830 ordered and critical. All we need to do here is remove the entry
4831 and exit markers for REGION. */
4833 static void
4834 expand_omp_synch (struct omp_region *region)
4836 basic_block entry_bb, exit_bb;
4837 gimple_stmt_iterator si;
4839 entry_bb = region->entry;
4840 exit_bb = region->exit;
4842 si = gsi_last_bb (entry_bb);
4843 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4844 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4845 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4846 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4847 gsi_remove (&si, true);
4848 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4850 if (exit_bb)
4852 si = gsi_last_bb (exit_bb);
4853 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4854 gsi_remove (&si, true);
4855 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4859 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4860 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4861 size of the data type, and thus usable to find the index of the builtin
4862 decl. Returns false if the expression is not of the proper form. */
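/* As an illustration only: for a 4-byte int x, the directive

     #pragma omp atomic
       x += 1;

   matches the PLUS_EXPR case below; index is log2 (4) = 2, so
   built_in_decls[BUILT_IN_FETCH_AND_ADD_N + 2 + 1] selects
   __sync_fetch_and_add_4, and the whole update is emitted as the
   equivalent of

     __sync_fetch_and_add (&x, 1);

   with the return value discarded.  */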
4864 static bool
4865 expand_omp_atomic_fetch_op (basic_block load_bb,
4866 tree addr, tree loaded_val,
4867 tree stored_val, int index)
4869 enum built_in_function base;
4870 tree decl, itype, call;
4871 enum insn_code *optab;
4872 tree rhs;
4873 basic_block store_bb = single_succ (load_bb);
4874 gimple_stmt_iterator gsi;
4875 gimple stmt;
4877 /* We expect to find the following sequences:
4879 load_bb:
4880 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
4882 store_bb:
4883 val = tmp OP something; (or: something OP tmp)
4884 GIMPLE_OMP_ATOMIC_STORE (val)
4886 ???FIXME: Allow a more flexible sequence.
4887 Perhaps use data flow to pick the statements.
4891 gsi = gsi_after_labels (store_bb);
4892 stmt = gsi_stmt (gsi);
4893 if (!is_gimple_assign (stmt))
4894 return false;
4895 gsi_next (&gsi);
4896 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
4897 return false;
4899 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
4900 return false;
4902 /* Check for one of the supported fetch-op operations. */
4903 switch (gimple_assign_rhs_code (stmt))
4905 case PLUS_EXPR:
4906 case POINTER_PLUS_EXPR:
4907 base = BUILT_IN_FETCH_AND_ADD_N;
4908 optab = sync_add_optab;
4909 break;
4910 case MINUS_EXPR:
4911 base = BUILT_IN_FETCH_AND_SUB_N;
4912 optab = sync_add_optab;
4913 break;
4914 case BIT_AND_EXPR:
4915 base = BUILT_IN_FETCH_AND_AND_N;
4916 optab = sync_and_optab;
4917 break;
4918 case BIT_IOR_EXPR:
4919 base = BUILT_IN_FETCH_AND_OR_N;
4920 optab = sync_ior_optab;
4921 break;
4922 case BIT_XOR_EXPR:
4923 base = BUILT_IN_FETCH_AND_XOR_N;
4924 optab = sync_xor_optab;
4925 break;
4926 default:
4927 return false;
4929 /* Make sure the expression is of the proper form. */
4930 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
4931 rhs = gimple_assign_rhs2 (stmt);
4932 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
4933 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
4934 rhs = gimple_assign_rhs1 (stmt);
4935 else
4936 return false;
4938 decl = built_in_decls[base + index + 1];
4939 itype = TREE_TYPE (TREE_TYPE (decl));
4941 if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
4942 return false;
4944 gsi = gsi_last_bb (load_bb);
4945 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
4946 call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
4947 call = fold_convert (void_type_node, call);
4948 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
4949 gsi_remove (&gsi, true);
4951 gsi = gsi_last_bb (store_bb);
4952 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
4953 gsi_remove (&gsi, true);
4954 gsi = gsi_last_bb (store_bb);
4955 gsi_remove (&gsi, true);
4957 if (gimple_in_ssa_p (cfun))
4958 update_ssa (TODO_update_ssa_no_phi);
4960 return true;
4963 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
4965 oldval = *addr;
4966 repeat:
4967 newval = rhs; // with oldval replacing *addr in rhs
4968 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
4969 if (oldval != newval)
4970 goto repeat;
4972 INDEX is log2 of the size of the data type, and thus usable to find the
4973 index of the builtin decl. */
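/* As an illustration only: there is no fetch-and-multiply builtin, so for
   a 4-byte float f (assuming the target provides a 4-byte
   compare-and-swap)

     #pragma omp atomic
       f *= 2.0f;

   is expanded along the lines of the pseudocode above, with the value
   view-converted to a 4-byte integer for the swap:

     oldi = VIEW_CONVERT (int, f);
   repeat:
     newi = VIEW_CONVERT (int, VIEW_CONVERT (float, oldi) * 2.0f);
     cur = __sync_val_compare_and_swap ((int *) &f, oldi, newi);
     if (cur != oldi) { oldi = cur; goto repeat; }  */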
4975 static bool
4976 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
4977 tree addr, tree loaded_val, tree stored_val,
4978 int index)
4980 tree loadedi, storedi, initial, new_storedi, old_vali;
4981 tree type, itype, cmpxchg, iaddr;
4982 gimple_stmt_iterator si;
4983 basic_block loop_header = single_succ (load_bb);
4984 gimple phi, stmt;
4985 edge e;
4987 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
4988 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
4989 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
4991 if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
4992 return false;
4994 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
4995 si = gsi_last_bb (load_bb);
4996 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
4998 /* For floating-point values, we'll need to view-convert them to integers
4999 so that we can perform the atomic compare and swap. Simplify the
5000 following code by always setting up the "i"ntegral variables. */
5001 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5003 tree iaddr_val;
5005 iaddr = create_tmp_var (build_pointer_type (itype), NULL);
5006 iaddr_val
5007 = force_gimple_operand_gsi (&si,
5008 fold_convert (TREE_TYPE (iaddr), addr),
5009 false, NULL_TREE, true, GSI_SAME_STMT);
5010 stmt = gimple_build_assign (iaddr, iaddr_val);
5011 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5012 DECL_POINTER_ALIAS_SET (iaddr) = 0;
5013 loadedi = create_tmp_var (itype, NULL);
5014 if (gimple_in_ssa_p (cfun))
5016 add_referenced_var (iaddr);
5017 add_referenced_var (loadedi);
5018 loadedi = make_ssa_name (loadedi, NULL);
5021 else
5023 iaddr = addr;
5024 loadedi = loaded_val;
5027 initial = force_gimple_operand_gsi (&si, build_fold_indirect_ref (iaddr),
5028 true, NULL_TREE, true, GSI_SAME_STMT);
5030 /* Move the value to the LOADEDI temporary. */
5031 if (gimple_in_ssa_p (cfun))
5033 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5034 phi = create_phi_node (loadedi, loop_header);
5035 SSA_NAME_DEF_STMT (loadedi) = phi;
5036 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5037 initial);
5039 else
5040 gsi_insert_before (&si,
5041 gimple_build_assign (loadedi, initial),
5042 GSI_SAME_STMT);
5043 if (loadedi != loaded_val)
5045 gimple_stmt_iterator gsi2;
5046 tree x;
5048 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5049 gsi2 = gsi_start_bb (loop_header);
5050 if (gimple_in_ssa_p (cfun))
5052 gimple stmt;
5053 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5054 true, GSI_SAME_STMT);
5055 stmt = gimple_build_assign (loaded_val, x);
5056 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5058 else
5060 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5061 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5062 true, GSI_SAME_STMT);
5065 gsi_remove (&si, true);
5067 si = gsi_last_bb (store_bb);
5068 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5070 if (iaddr == addr)
5071 storedi = stored_val;
5072 else
5073 storedi =
5074 force_gimple_operand_gsi (&si,
5075 build1 (VIEW_CONVERT_EXPR, itype,
5076 stored_val), true, NULL_TREE, true,
5077 GSI_SAME_STMT);
5079 /* Build the compare&swap statement. */
5080 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5081 new_storedi = force_gimple_operand_gsi (&si,
5082 fold_convert (TREE_TYPE (loadedi),
5083 new_storedi),
5084 true, NULL_TREE,
5085 true, GSI_SAME_STMT);
5087 if (gimple_in_ssa_p (cfun))
5088 old_vali = loadedi;
5089 else
5091 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5092 if (gimple_in_ssa_p (cfun))
5093 add_referenced_var (old_vali);
5094 stmt = gimple_build_assign (old_vali, loadedi);
5095 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5097 stmt = gimple_build_assign (loadedi, new_storedi);
5098 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5101 /* Note that we always perform the comparison as an integer, even for
5102 floating point. This allows the atomic operation to properly
5103 succeed even with NaNs and -0.0. */
5104 stmt = gimple_build_cond_empty
5105 (build2 (NE_EXPR, boolean_type_node,
5106 new_storedi, old_vali));
5107 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5109 /* Update cfg. */
5110 e = single_succ_edge (store_bb);
5111 e->flags &= ~EDGE_FALLTHRU;
5112 e->flags |= EDGE_FALSE_VALUE;
5114 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5116 /* Copy the new value to loadedi (we already did that before the condition
5117 if we are not in SSA). */
5118 if (gimple_in_ssa_p (cfun))
5120 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5121 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5124 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5125 gsi_remove (&si, true);
5127 if (gimple_in_ssa_p (cfun))
5128 update_ssa (TODO_update_ssa_no_phi);
5130 return true;
5133 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5135 GOMP_atomic_start ();
5136 *addr = rhs;
5137 GOMP_atomic_end ();
5139 The result is not globally atomic, but works so long as all parallel
5140 references are within #pragma omp atomic directives. According to
5141 responses received from omp@openmp.org, this appears to be within spec,
5142 which makes sense, since that's how several other compilers handle
5143 this situation as well.
5144 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5145 expanding. STORED_VAL is the operand of the matching
5146 GIMPLE_OMP_ATOMIC_STORE.
5148 We replace
5149 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5150 loaded_val = *addr;
5152 and replace
5153 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5154 *addr = stored_val;
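/* As an illustration only: if neither a fetch-and-op builtin nor a
   compare-and-swap of the right width is available (e.g. a long double
   whose size is not a supported power of two), the update

     #pragma omp atomic
       ld += 1.0L;

   simply becomes

     GOMP_atomic_start ();
     ld = ld + 1.0L;
     GOMP_atomic_end ();  */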
5157 static bool
5158 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5159 tree addr, tree loaded_val, tree stored_val)
5161 gimple_stmt_iterator si;
5162 gimple stmt;
5163 tree t;
5165 si = gsi_last_bb (load_bb);
5166 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5168 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5169 t = build_function_call_expr (t, 0);
5170 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5172 stmt = gimple_build_assign (loaded_val, build_fold_indirect_ref (addr));
5173 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5174 gsi_remove (&si, true);
5176 si = gsi_last_bb (store_bb);
5177 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5179 stmt = gimple_build_assign (build_fold_indirect_ref (unshare_expr (addr)),
5180 stored_val);
5181 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5183 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5184 t = build_function_call_expr (t, 0);
5185 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5186 gsi_remove (&si, true);
5188 if (gimple_in_ssa_p (cfun))
5189 update_ssa (TODO_update_ssa_no_phi);
5190 return true;
5193 /* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand it
5194 using expand_omp_atomic_fetch_op. If that fails, we try to
5195 call expand_omp_atomic_pipeline, and if it fails too, the
5196 ultimate fallback is wrapping the operation in a mutex
5197 (expand_omp_atomic_mutex). REGION is the atomic region built
5198 by build_omp_regions_1(). */
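/* As an illustration only, for "#pragma omp atomic  x += 1;":

     - int x on a target with 4-byte __sync support is emitted as
       __sync_fetch_and_add (&x, 1);
     - double x falls back to a compare-and-swap loop on the 8-byte
       bit pattern of x;
     - anything whose size or alignment the __sync builtins cannot
       handle ends up bracketed by GOMP_atomic_start ()/GOMP_atomic_end ().  */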
5200 static void
5201 expand_omp_atomic (struct omp_region *region)
5203 basic_block load_bb = region->entry, store_bb = region->exit;
5204 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5205 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5206 tree addr = gimple_omp_atomic_load_rhs (load);
5207 tree stored_val = gimple_omp_atomic_store_val (store);
5208 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5209 HOST_WIDE_INT index;
5211 /* Make sure the type is one of the supported sizes. */
5212 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5213 index = exact_log2 (index);
5214 if (index >= 0 && index <= 4)
5216 unsigned int align = TYPE_ALIGN_UNIT (type);
5218 /* __sync builtins require strict data alignment. */
5219 if (exact_log2 (align) >= index)
5221 /* When possible, use specialized atomic update functions. */
5222 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5223 && store_bb == single_succ (load_bb))
5225 if (expand_omp_atomic_fetch_op (load_bb, addr,
5226 loaded_val, stored_val, index))
5227 return;
5230 /* If we don't have specialized __sync builtins, try to implement
5231 it as a compare-and-swap loop. */
5232 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5233 loaded_val, stored_val, index))
5234 return;
5238 /* The ultimate fallback is wrapping the operation in a mutex. */
5239 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5243 /* Expand the parallel region tree rooted at REGION. Expansion
5244 proceeds in depth-first order. Innermost regions are expanded
5245 first. This way, parallel regions that require a new function to
5246 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5247 internal dependencies in their body. */
5249 static void
5250 expand_omp (struct omp_region *region)
5252 while (region)
5254 location_t saved_location;
5256 /* First, determine whether this is a combined parallel+workshare
5257 region. */
5258 if (region->type == GIMPLE_OMP_PARALLEL)
5259 determine_parallel_type (region);
5261 if (region->inner)
5262 expand_omp (region->inner);
5264 saved_location = input_location;
5265 if (gimple_has_location (last_stmt (region->entry)))
5266 input_location = gimple_location (last_stmt (region->entry));
5268 switch (region->type)
5270 case GIMPLE_OMP_PARALLEL:
5271 case GIMPLE_OMP_TASK:
5272 expand_omp_taskreg (region);
5273 break;
5275 case GIMPLE_OMP_FOR:
5276 expand_omp_for (region);
5277 break;
5279 case GIMPLE_OMP_SECTIONS:
5280 expand_omp_sections (region);
5281 break;
5283 case GIMPLE_OMP_SECTION:
5284 /* Individual omp sections are handled together with their
5285 parent GIMPLE_OMP_SECTIONS region. */
5286 break;
5288 case GIMPLE_OMP_SINGLE:
5289 expand_omp_single (region);
5290 break;
5292 case GIMPLE_OMP_MASTER:
5293 case GIMPLE_OMP_ORDERED:
5294 case GIMPLE_OMP_CRITICAL:
5295 expand_omp_synch (region);
5296 break;
5298 case GIMPLE_OMP_ATOMIC_LOAD:
5299 expand_omp_atomic (region);
5300 break;
5302 default:
5303 gcc_unreachable ();
5306 input_location = saved_location;
5307 region = region->next;
5312 /* Helper for build_omp_regions. Scan the dominator tree starting at
5313 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5314 true, the function ends once a single tree is built (otherwise, a whole
5315 forest of OMP constructs may be built). */
5317 static void
5318 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5319 bool single_tree)
5321 gimple_stmt_iterator gsi;
5322 gimple stmt;
5323 basic_block son;
5325 gsi = gsi_last_bb (bb);
5326 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5328 struct omp_region *region;
5329 enum gimple_code code;
5331 stmt = gsi_stmt (gsi);
5332 code = gimple_code (stmt);
5333 if (code == GIMPLE_OMP_RETURN)
5335 /* STMT is the return point out of region PARENT. Mark it
5336 as the exit point and make PARENT the immediately
5337 enclosing region. */
5338 gcc_assert (parent);
5339 region = parent;
5340 region->exit = bb;
5341 parent = parent->outer;
5343 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5345 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5346 GIMPLE_OMP_RETURN, but matches with
5347 GIMPLE_OMP_ATOMIC_LOAD. */
5348 gcc_assert (parent);
5349 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5350 region = parent;
5351 region->exit = bb;
5352 parent = parent->outer;
5355 else if (code == GIMPLE_OMP_CONTINUE)
5357 gcc_assert (parent);
5358 parent->cont = bb;
5360 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5362 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5363 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5366 else
5368 /* Otherwise, this directive becomes the parent for a new
5369 region. */
5370 region = new_omp_region (bb, code, parent);
5371 parent = region;
5375 if (single_tree && !parent)
5376 return;
5378 for (son = first_dom_son (CDI_DOMINATORS, bb);
5379 son;
5380 son = next_dom_son (CDI_DOMINATORS, son))
5381 build_omp_regions_1 (son, parent, single_tree);
5384 /* Build the tree of OMP regions rooted at ROOT, storing it in
5385 root_omp_region. */
5387 static void
5388 build_omp_regions_root (basic_block root)
5390 gcc_assert (root_omp_region == NULL);
5391 build_omp_regions_1 (root, NULL, true);
5392 gcc_assert (root_omp_region != NULL);
5395 /* Expand the OMP construct (and its subconstructs) starting in HEAD. */
5397 void
5398 omp_expand_local (basic_block head)
5400 build_omp_regions_root (head);
5401 if (dump_file && (dump_flags & TDF_DETAILS))
5403 fprintf (dump_file, "\nOMP region tree\n\n");
5404 dump_omp_region (dump_file, root_omp_region, 0);
5405 fprintf (dump_file, "\n");
5408 remove_exit_barriers (root_omp_region);
5409 expand_omp (root_omp_region);
5411 free_omp_regions ();
5414 /* Scan the CFG and build a tree of OMP regions, storing it in
5415 root_omp_region. */
5417 static void
5418 build_omp_regions (void)
5420 gcc_assert (root_omp_region == NULL);
5421 calculate_dominance_info (CDI_DOMINATORS);
5422 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5425 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5427 static unsigned int
5428 execute_expand_omp (void)
5430 build_omp_regions ();
5432 if (!root_omp_region)
5433 return 0;
5435 if (dump_file)
5437 fprintf (dump_file, "\nOMP region tree\n\n");
5438 dump_omp_region (dump_file, root_omp_region, 0);
5439 fprintf (dump_file, "\n");
5442 remove_exit_barriers (root_omp_region);
5444 expand_omp (root_omp_region);
5446 cleanup_tree_cfg ();
5448 free_omp_regions ();
5450 return 0;
5453 /* OMP expansion -- the default pass, run before creation of SSA form. */
5455 static bool
5456 gate_expand_omp (void)
5458 return (flag_openmp != 0 && errorcount == 0);
5461 struct gimple_opt_pass pass_expand_omp =
5464 GIMPLE_PASS,
5465 "ompexp", /* name */
5466 gate_expand_omp, /* gate */
5467 execute_expand_omp, /* execute */
5468 NULL, /* sub */
5469 NULL, /* next */
5470 0, /* static_pass_number */
5471 TV_NONE, /* tv_id */
5472 PROP_gimple_any, /* properties_required */
5473 0, /* properties_provided */
5474 0, /* properties_destroyed */
5475 0, /* todo_flags_start */
5476 TODO_dump_func /* todo_flags_finish */
5480 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5482 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5483 CTX is the enclosing OMP context for the current statement. */
5485 static void
5486 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5488 tree block, control;
5489 gimple_stmt_iterator tgsi;
5490 unsigned i, len;
5491 gimple stmt, new_stmt, bind, t;
5492 gimple_seq ilist, dlist, olist, new_body, body;
5493 struct gimplify_ctx gctx;
5495 stmt = gsi_stmt (*gsi_p);
5497 push_gimplify_context (&gctx);
5499 dlist = NULL;
5500 ilist = NULL;
5501 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5502 &ilist, &dlist, ctx);
5504 tgsi = gsi_start (gimple_omp_body (stmt));
5505 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5506 continue;
5508 tgsi = gsi_start (gimple_omp_body (stmt));
5509 body = NULL;
5510 for (i = 0; i < len; i++, gsi_next (&tgsi))
5512 omp_context *sctx;
5513 gimple sec_start;
5515 sec_start = gsi_stmt (tgsi);
5516 sctx = maybe_lookup_ctx (sec_start);
5517 gcc_assert (sctx);
5519 gimple_seq_add_stmt (&body, sec_start);
5521 lower_omp (gimple_omp_body (sec_start), sctx);
5522 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5523 gimple_omp_set_body (sec_start, NULL);
5525 if (i == len - 1)
5527 gimple_seq l = NULL;
5528 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5529 &l, ctx);
5530 gimple_seq_add_seq (&body, l);
5531 gimple_omp_section_set_last (sec_start);
5534 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5537 block = make_node (BLOCK);
5538 bind = gimple_build_bind (NULL, body, block);
5540 olist = NULL;
5541 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5543 block = make_node (BLOCK);
5544 new_stmt = gimple_build_bind (NULL, NULL, block);
5546 pop_gimplify_context (new_stmt);
5547 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5548 BLOCK_VARS (block) = gimple_bind_vars (bind);
5549 if (BLOCK_VARS (block))
5550 TREE_USED (block) = 1;
5552 new_body = NULL;
5553 gimple_seq_add_seq (&new_body, ilist);
5554 gimple_seq_add_stmt (&new_body, stmt);
5555 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5556 gimple_seq_add_stmt (&new_body, bind);
5558 control = create_tmp_var (unsigned_type_node, ".section");
5559 t = gimple_build_omp_continue (control, control);
5560 gimple_omp_sections_set_control (stmt, control);
5561 gimple_seq_add_stmt (&new_body, t);
5563 gimple_seq_add_seq (&new_body, olist);
5564 gimple_seq_add_seq (&new_body, dlist);
5566 new_body = maybe_catch_exception (new_body);
5568 t = gimple_build_omp_return
5569 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5570 OMP_CLAUSE_NOWAIT));
5571 gimple_seq_add_stmt (&new_body, t);
5573 gimple_bind_set_body (new_stmt, new_body);
5574 gimple_omp_set_body (stmt, NULL);
5576 gsi_replace (gsi_p, new_stmt, true);
5580 /* A subroutine of lower_omp_single. Expand the simple form of
5581 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5583 if (GOMP_single_start ())
5584 BODY;
5585 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5587 FIXME. It may be better to delay expanding the logic of this until
5588 pass_expand_omp. The expanded logic may make the job more difficult
5589 for a synchronization analysis pass. */
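/* As an illustration only (D.N is a made-up temporary name), the GIMPLE
   emitted below uses explicit labels rather than a structured if:

     D.N = GOMP_single_start ();
     if (D.N == 1) goto tlabel; else goto flabel;
   tlabel:
     BODY;
   flabel:;  */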
5591 static void
5592 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5594 tree tlabel = create_artificial_label ();
5595 tree flabel = create_artificial_label ();
5596 gimple call, cond;
5597 tree lhs, decl;
5599 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5600 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5601 call = gimple_build_call (decl, 0);
5602 gimple_call_set_lhs (call, lhs);
5603 gimple_seq_add_stmt (pre_p, call);
5605 cond = gimple_build_cond (EQ_EXPR, lhs,
5606 fold_convert (TREE_TYPE (lhs), boolean_true_node),
5607 tlabel, flabel);
5608 gimple_seq_add_stmt (pre_p, cond);
5609 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5610 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5611 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5615 /* A subroutine of lower_omp_single. Expand the simple form of
5616 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
5618 #pragma omp single copyprivate (a, b, c)
5620 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5623 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5625 BODY;
5626 copyout.a = a;
5627 copyout.b = b;
5628 copyout.c = c;
5629 GOMP_single_copy_end (&copyout);
5631 else
5633 a = copyout_p->a;
5634 b = copyout_p->b;
5635 c = copyout_p->c;
5637 GOMP_barrier ();
5640 FIXME. It may be better to delay expanding the logic of this until
5641 pass_expand_omp. The expanded logic may make the job more difficult
5642 for a synchronization analysis pass. */
5644 static void
5645 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5647 tree ptr_type, t, l0, l1, l2;
5648 gimple_seq copyin_seq;
5650 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5652 ptr_type = build_pointer_type (ctx->record_type);
5653 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5655 l0 = create_artificial_label ();
5656 l1 = create_artificial_label ();
5657 l2 = create_artificial_label ();
5659 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5660 t = fold_convert (ptr_type, t);
5661 gimplify_assign (ctx->receiver_decl, t, pre_p);
5663 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5664 build_int_cst (ptr_type, 0));
5665 t = build3 (COND_EXPR, void_type_node, t,
5666 build_and_jump (&l0), build_and_jump (&l1));
5667 gimplify_and_add (t, pre_p);
5669 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5671 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5673 copyin_seq = NULL;
5674 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5675 &copyin_seq, ctx);
5677 t = build_fold_addr_expr (ctx->sender_decl);
5678 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
5679 gimplify_and_add (t, pre_p);
5681 t = build_and_jump (&l2);
5682 gimplify_and_add (t, pre_p);
5684 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5686 gimple_seq_add_seq (pre_p, copyin_seq);
5688 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5692 /* Lower code for an OpenMP single directive. */
5694 static void
5695 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5697 tree block;
5698 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5699 gimple_seq bind_body, dlist;
5700 struct gimplify_ctx gctx;
5702 push_gimplify_context (&gctx);
5704 bind_body = NULL;
5705 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5706 &bind_body, &dlist, ctx);
5707 lower_omp (gimple_omp_body (single_stmt), ctx);
5709 gimple_seq_add_stmt (&bind_body, single_stmt);
5711 if (ctx->record_type)
5712 lower_omp_single_copy (single_stmt, &bind_body, ctx);
5713 else
5714 lower_omp_single_simple (single_stmt, &bind_body);
5716 gimple_omp_set_body (single_stmt, NULL);
5718 gimple_seq_add_seq (&bind_body, dlist);
5720 bind_body = maybe_catch_exception (bind_body);
5722 t = gimple_build_omp_return
5723 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5724 OMP_CLAUSE_NOWAIT));
5725 gimple_seq_add_stmt (&bind_body, t);
5727 block = make_node (BLOCK);
5728 bind = gimple_build_bind (NULL, bind_body, block);
5730 pop_gimplify_context (bind);
5732 gimple_bind_append_vars (bind, ctx->block_vars);
5733 BLOCK_VARS (block) = ctx->block_vars;
5734 gsi_replace (gsi_p, bind, true);
5735 if (BLOCK_VARS (block))
5736 TREE_USED (block) = 1;
5740 /* Lower code for an OpenMP master directive. */
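/* As an illustration only: "#pragma omp master { body; }" is lowered to
   roughly

     if (omp_get_thread_num () != 0) goto skip;
     body;
   skip:;

   No barrier is implied for master, so the GIMPLE_OMP_RETURN added
   below is built with its nowait flag set.  */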
5742 static void
5743 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5745 tree block, lab = NULL, x;
5746 gimple stmt = gsi_stmt (*gsi_p), bind;
5747 gimple_seq tseq;
5748 struct gimplify_ctx gctx;
5750 push_gimplify_context (&gctx);
5752 block = make_node (BLOCK);
5753 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5754 block);
5756 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5757 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5758 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
5759 tseq = NULL;
5760 gimplify_and_add (x, &tseq);
5761 gimple_bind_add_seq (bind, tseq);
5763 lower_omp (gimple_omp_body (stmt), ctx);
5764 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5765 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5766 gimple_omp_set_body (stmt, NULL);
5768 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5770 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5772 pop_gimplify_context (bind);
5774 gimple_bind_append_vars (bind, ctx->block_vars);
5775 BLOCK_VARS (block) = ctx->block_vars;
5776 gsi_replace (gsi_p, bind, true);
5780 /* Expand code for an OpenMP ordered directive. */
5782 static void
5783 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5785 tree block;
5786 gimple stmt = gsi_stmt (*gsi_p), bind, x;
5787 struct gimplify_ctx gctx;
5789 push_gimplify_context (&gctx);
5791 block = make_node (BLOCK);
5792 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5793 block);
5795 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5796 gimple_bind_add_stmt (bind, x);
5798 lower_omp (gimple_omp_body (stmt), ctx);
5799 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5800 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5801 gimple_omp_set_body (stmt, NULL);
5803 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5804 gimple_bind_add_stmt (bind, x);
5806 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5808 pop_gimplify_context (bind);
5810 gimple_bind_append_vars (bind, ctx->block_vars);
5811 BLOCK_VARS (block) = gimple_bind_vars (bind);
5812 gsi_replace (gsi_p, bind, true);
5816 /* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
5817 substitution of a couple of function calls.  But in the NAMED case, it
5818 requires that languages coordinate a symbol name.  It is therefore
5819 best put here in common code.  */
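/* Informally, an unnamed critical becomes

       GOMP_critical_start ();  BODY;  GOMP_critical_end ();

   and a named one becomes

       GOMP_critical_name_start (&.gomp_critical_user_NAME);
       BODY;
       GOMP_critical_name_end (&.gomp_critical_user_NAME);

   where .gomp_critical_user_NAME is a common symbol shared by every
   translation unit that uses that name (a sketch only).  */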
5821 static GTY((param1_is (tree), param2_is (tree)))
5822 splay_tree critical_name_mutexes;
5824 static void
5825 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5827 tree block;
5828 tree name, lock, unlock;
5829 gimple stmt = gsi_stmt (*gsi_p), bind;
5830 gimple_seq tbody;
5831 struct gimplify_ctx gctx;
5833 name = gimple_omp_critical_name (stmt);
5834 if (name)
5836 tree decl;
5837 splay_tree_node n;
5839 if (!critical_name_mutexes)
5840 critical_name_mutexes
5841 = splay_tree_new_ggc (splay_tree_compare_pointers);
5843 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5844 if (n == NULL)
5846 char *new_str;
5848 decl = create_tmp_var_raw (ptr_type_node, NULL);
5850 new_str = ACONCAT ((".gomp_critical_user_",
5851 IDENTIFIER_POINTER (name), NULL));
5852 DECL_NAME (decl) = get_identifier (new_str);
5853 TREE_PUBLIC (decl) = 1;
5854 TREE_STATIC (decl) = 1;
5855 DECL_COMMON (decl) = 1;
5856 DECL_ARTIFICIAL (decl) = 1;
5857 DECL_IGNORED_P (decl) = 1;
5858 varpool_finalize_decl (decl);
5860 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5861 (splay_tree_value) decl);
5863 else
5864 decl = (tree) n->value;
5866 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5867 lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));
5869 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5870 unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
5872 else
5874 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5875 lock = build_call_expr (lock, 0);
5877 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
5878 unlock = build_call_expr (unlock, 0);
5881 push_gimplify_context (&gctx);
5883 block = make_node (BLOCK);
5884 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
5886 tbody = gimple_bind_body (bind);
5887 gimplify_and_add (lock, &tbody);
5888 gimple_bind_set_body (bind, tbody);
5890 lower_omp (gimple_omp_body (stmt), ctx);
5891 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5892 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5893 gimple_omp_set_body (stmt, NULL);
5895 tbody = gimple_bind_body (bind);
5896 gimplify_and_add (unlock, &tbody);
5897 gimple_bind_set_body (bind, tbody);
5899 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5901 pop_gimplify_context (bind);
5902 gimple_bind_append_vars (bind, ctx->block_vars);
5903 BLOCK_VARS (block) = gimple_bind_vars (bind);
5904 gsi_replace (gsi_p, bind, true);
5908 /* A subroutine of lower_omp_for. Generate code to emit the predicate
5909 for a lastprivate clause. Given a loop control predicate of (V
5910 cond N2), we gate the clause on (!(V cond N2)).  The lowered form
5911 is appended to *DLIST; the initialization of the iteration variable
5912 is appended to *BODY_P.  */
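/* For instance (an informal sketch), for

       #pragma omp for lastprivate (x)
       for (i = 0; i < N; i++) ...

   the copy-back produced by lower_lastprivate_clauses is gated roughly as

       if (i >= N)          <- or i == N when the step is known to be +-1
         <copy the private x back to the original x>

   and an initialization of i is emitted into *BODY_P so that threads
   which execute no iterations do not trip the gate with an
   uninitialized iteration variable.  */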
5914 static void
5915 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
5916 gimple_seq *dlist, struct omp_context *ctx)
5918 tree clauses, cond, vinit;
5919 enum tree_code cond_code;
5920 gimple_seq stmts;
5922 cond_code = fd->loop.cond_code;
5923 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
5925 /* When possible, use a strict equality expression. This can let VRP
5926 type optimizations deduce the value and remove a copy. */
5927 if (host_integerp (fd->loop.step, 0))
5929 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
5930 if (step == 1 || step == -1)
5931 cond_code = EQ_EXPR;
5934 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
5936 clauses = gimple_omp_for_clauses (fd->for_stmt);
5937 stmts = NULL;
5938 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
5939 if (!gimple_seq_empty_p (stmts))
5941 gimple_seq_add_seq (&stmts, *dlist);
5942 *dlist = stmts;
5944 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
5945 vinit = fd->loop.n1;
5946 if (cond_code == EQ_EXPR
5947 && host_integerp (fd->loop.n2, 0)
5948 && ! integer_zerop (fd->loop.n2))
5949 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
5951 /* Initialize the iterator variable, so that threads that don't execute
5952 any iterations don't execute the lastprivate clauses by accident. */
5953 gimplify_assign (fd->loop.v, vinit, body_p);
5958 /* Lower code for an OpenMP loop directive. */
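/* Informally, the directive is rebuilt inside a new GIMPLE_BIND as

       <input clause code>
       <lowered pre-body and header-expression temporaries>
       <lastprivate gate initialization>
       GIMPLE_OMP_FOR               <- the original statement
       BODY
       GIMPLE_OMP_CONTINUE
       <reduction clause code>
       <copy-back/destruction list>
       GIMPLE_OMP_RETURN

   (a sketch of the sequencing performed below, not the exact GIMPLE).  */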
5960 static void
5961 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5963 tree *rhs_p, block;
5964 struct omp_for_data fd;
5965 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
5966 gimple_seq omp_for_body, body, dlist, ilist;
5967 size_t i;
5968 struct gimplify_ctx gctx;
5970 push_gimplify_context (&gctx);
5972 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
5973 lower_omp (gimple_omp_body (stmt), ctx);
5975 block = make_node (BLOCK);
5976 new_stmt = gimple_build_bind (NULL, NULL, block);
5978 /* Move the declarations of temporaries in the loop body to the new
5979 enclosing bind before we make the body go away.  */
5980 omp_for_body = gimple_omp_body (stmt);
5981 if (!gimple_seq_empty_p (omp_for_body)
5982 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
5984 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
5985 gimple_bind_append_vars (new_stmt, vars);
5988 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
5989 ilist = NULL;
5990 dlist = NULL;
5991 body = NULL;
5992 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
5993 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
5995 /* Lower the header expressions. At this point, we can assume that
5996 the header is of the form:
5998 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6000 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6001 using the .omp_data_s mapping, if needed. */
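/* E.g. (illustrative only): a bound such as "V < a + b", where a and b were
   remapped into the .omp_data_i structure, is rewritten roughly as

       D.tmp = <lowered a + b>;     <- appended to BODY above
       ... V < D.tmp ...

   with D.tmp standing in for whatever temporary get_formal_tmp_var
   creates.  */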
6002 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6004 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6005 if (!is_gimple_min_invariant (*rhs_p))
6006 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6008 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6009 if (!is_gimple_min_invariant (*rhs_p))
6010 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6012 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6013 if (!is_gimple_min_invariant (*rhs_p))
6014 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6017 /* Once lowered, extract the bounds and clauses. */
6018 extract_omp_for_data (stmt, &fd, NULL);
6020 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6022 gimple_seq_add_stmt (&body, stmt);
6023 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6025 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6026 fd.loop.v));
6028 /* After the loop, add exit clauses. */
6029 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6030 gimple_seq_add_seq (&body, dlist);
6032 body = maybe_catch_exception (body);
6034 /* Region exit marker goes at the end of the loop body. */
6035 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6037 pop_gimplify_context (new_stmt);
6039 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6040 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6041 if (BLOCK_VARS (block))
6042 TREE_USED (block) = 1;
6044 gimple_bind_set_body (new_stmt, body);
6045 gimple_omp_set_body (stmt, NULL);
6046 gimple_omp_for_set_pre_body (stmt, NULL);
6047 gsi_replace (gsi_p, new_stmt, true);
6050 /* Callback for walk_stmts.  Check whether the walked statements consist of
6051 exactly one GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS workshare statement.  */
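/* For example (informally),

       #pragma omp parallel
       #pragma omp for
       for (...) { ... }

   has a body consisting of exactly one workshare, so *info ends up 1 and
   the caller marks the parallel as combined, whereas a body that mixes a
   workshare with any other statement drives *info to -1.  */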
6053 static tree
6054 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6055 bool *handled_ops_p,
6056 struct walk_stmt_info *wi)
6058 int *info = (int *) wi->info;
6059 gimple stmt = gsi_stmt (*gsi_p);
6061 *handled_ops_p = true;
6062 switch (gimple_code (stmt))
6064 WALK_SUBSTMTS;
6066 case GIMPLE_OMP_FOR:
6067 case GIMPLE_OMP_SECTIONS:
6068 *info = *info == 0 ? 1 : -1;
6069 break;
6070 default:
6071 *info = -1;
6072 break;
6074 return NULL;
6077 struct omp_taskcopy_context
6079 /* This field must be at the beginning, as we do "inheritance": Some
6080 callback functions for tree-inline.c (e.g., omp_copy_decl)
6081 receive a copy_body_data pointer that is up-casted to an
6082 omp_context pointer. */
6083 copy_body_data cb;
6084 omp_context *ctx;
6087 static tree
6088 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6090 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6092 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6093 return create_tmp_var (TREE_TYPE (var), NULL);
6095 return var;
6098 static tree
6099 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6101 tree name, new_fields = NULL, type, f;
6103 type = lang_hooks.types.make_type (RECORD_TYPE);
6104 name = DECL_NAME (TYPE_NAME (orig_type));
6105 name = build_decl (TYPE_DECL, name, type);
6106 TYPE_NAME (type) = name;
6108 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6110 tree new_f = copy_node (f);
6111 DECL_CONTEXT (new_f) = type;
6112 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6113 TREE_CHAIN (new_f) = new_fields;
6114 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6115 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6116 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6117 &tcctx->cb, NULL);
6118 new_fields = new_f;
6119 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6121 TYPE_FIELDS (type) = nreverse (new_fields);
6122 layout_type (type);
6123 return type;
6126 /* Create task copyfn. */
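/* Informally, the generated copy function has the shape

       void copyfn (struct .omp_data_s *dst, struct .omp_data_t *src)
       {
         dst->shared_ptr = src->shared_ptr;            <- shared vars
         dst->fp_var = copy-construct (src->fp_var);   <- firstprivate vars
         ...
       }

   where the destination block is the one later handed to the task body
   and the source block is the one filled in by the encountering thread.
   The struct and field names above are purely illustrative.  */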
6128 static void
6129 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6131 struct function *child_cfun;
6132 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6133 tree record_type, srecord_type, bind, list;
6134 bool record_needs_remap = false, srecord_needs_remap = false;
6135 splay_tree_node n;
6136 struct omp_taskcopy_context tcctx;
6137 struct gimplify_ctx gctx;
6139 child_fn = gimple_omp_task_copy_fn (task_stmt);
6140 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6141 gcc_assert (child_cfun->cfg == NULL);
6142 child_cfun->dont_save_pending_sizes_p = 1;
6143 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6145 /* Reset DECL_CONTEXT on function arguments. */
6146 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
6147 DECL_CONTEXT (t) = child_fn;
6149 /* Populate the function. */
6150 push_gimplify_context (&gctx);
6151 current_function_decl = child_fn;
6153 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6154 TREE_SIDE_EFFECTS (bind) = 1;
6155 list = NULL;
6156 DECL_SAVED_TREE (child_fn) = bind;
6157 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6159 /* Remap src and dst argument types if needed. */
6160 record_type = ctx->record_type;
6161 srecord_type = ctx->srecord_type;
6162 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
6163 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6165 record_needs_remap = true;
6166 break;
6168 for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
6169 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6171 srecord_needs_remap = true;
6172 break;
6175 if (record_needs_remap || srecord_needs_remap)
6177 memset (&tcctx, '\0', sizeof (tcctx));
6178 tcctx.cb.src_fn = ctx->cb.src_fn;
6179 tcctx.cb.dst_fn = child_fn;
6180 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6181 tcctx.cb.dst_node = tcctx.cb.src_node;
6182 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6183 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6184 tcctx.cb.eh_region = -1;
6185 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6186 tcctx.cb.decl_map = pointer_map_create ();
6187 tcctx.ctx = ctx;
6189 if (record_needs_remap)
6190 record_type = task_copyfn_remap_type (&tcctx, record_type);
6191 if (srecord_needs_remap)
6192 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6194 else
6195 tcctx.cb.decl_map = NULL;
6197 push_cfun (child_cfun);
6199 arg = DECL_ARGUMENTS (child_fn);
6200 TREE_TYPE (arg) = build_pointer_type (record_type);
6201 sarg = TREE_CHAIN (arg);
6202 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6204 /* First pass: initialize temporaries used in record_type and srecord_type
6205 sizes and field offsets. */
6206 if (tcctx.cb.decl_map)
6207 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6208 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6210 tree *p;
6212 decl = OMP_CLAUSE_DECL (c);
6213 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6214 if (p == NULL)
6215 continue;
6216 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6217 sf = (tree) n->value;
6218 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6219 src = build_fold_indirect_ref (sarg);
6220 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6221 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6222 append_to_statement_list (t, &list);
6225 /* Second pass: copy shared var pointers and copy construct non-VLA
6226 firstprivate vars. */
6227 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6228 switch (OMP_CLAUSE_CODE (c))
6230 case OMP_CLAUSE_SHARED:
6231 decl = OMP_CLAUSE_DECL (c);
6232 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6233 if (n == NULL)
6234 break;
6235 f = (tree) n->value;
6236 if (tcctx.cb.decl_map)
6237 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6238 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6239 sf = (tree) n->value;
6240 if (tcctx.cb.decl_map)
6241 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6242 src = build_fold_indirect_ref (sarg);
6243 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6244 dst = build_fold_indirect_ref (arg);
6245 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6246 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6247 append_to_statement_list (t, &list);
6248 break;
6249 case OMP_CLAUSE_FIRSTPRIVATE:
6250 decl = OMP_CLAUSE_DECL (c);
6251 if (is_variable_sized (decl))
6252 break;
6253 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6254 if (n == NULL)
6255 break;
6256 f = (tree) n->value;
6257 if (tcctx.cb.decl_map)
6258 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6259 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6260 if (n != NULL)
6262 sf = (tree) n->value;
6263 if (tcctx.cb.decl_map)
6264 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6265 src = build_fold_indirect_ref (sarg);
6266 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6267 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6268 src = build_fold_indirect_ref (src);
6270 else
6271 src = decl;
6272 dst = build_fold_indirect_ref (arg);
6273 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6274 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6275 append_to_statement_list (t, &list);
6276 break;
6277 case OMP_CLAUSE_PRIVATE:
6278 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6279 break;
6280 decl = OMP_CLAUSE_DECL (c);
6281 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6282 f = (tree) n->value;
6283 if (tcctx.cb.decl_map)
6284 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6285 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6286 if (n != NULL)
6288 sf = (tree) n->value;
6289 if (tcctx.cb.decl_map)
6290 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6291 src = build_fold_indirect_ref (sarg);
6292 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6293 if (use_pointer_for_field (decl, NULL))
6294 src = build_fold_indirect_ref (src);
6296 else
6297 src = decl;
6298 dst = build_fold_indirect_ref (arg);
6299 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6300 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6301 append_to_statement_list (t, &list);
6302 break;
6303 default:
6304 break;
6307 /* Last pass: handle VLA firstprivates. */
6308 if (tcctx.cb.decl_map)
6309 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6310 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6312 tree ind, ptr, df;
6314 decl = OMP_CLAUSE_DECL (c);
6315 if (!is_variable_sized (decl))
6316 continue;
6317 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6318 if (n == NULL)
6319 continue;
6320 f = (tree) n->value;
6321 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6322 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6323 ind = DECL_VALUE_EXPR (decl);
6324 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6325 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6326 n = splay_tree_lookup (ctx->sfield_map,
6327 (splay_tree_key) TREE_OPERAND (ind, 0));
6328 sf = (tree) n->value;
6329 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6330 src = build_fold_indirect_ref (sarg);
6331 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6332 src = build_fold_indirect_ref (src);
6333 dst = build_fold_indirect_ref (arg);
6334 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6335 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6336 append_to_statement_list (t, &list);
6337 n = splay_tree_lookup (ctx->field_map,
6338 (splay_tree_key) TREE_OPERAND (ind, 0));
6339 df = (tree) n->value;
6340 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6341 ptr = build_fold_indirect_ref (arg);
6342 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6343 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6344 build_fold_addr_expr (dst));
6345 append_to_statement_list (t, &list);
6348 t = build1 (RETURN_EXPR, void_type_node, NULL);
6349 append_to_statement_list (t, &list);
6351 if (tcctx.cb.decl_map)
6352 pointer_map_destroy (tcctx.cb.decl_map);
6353 pop_gimplify_context (NULL);
6354 BIND_EXPR_BODY (bind) = list;
6355 pop_cfun ();
6356 current_function_decl = ctx->cb.src_fn;
6359 /* Lower the OpenMP parallel or task directive in the current statement
6360 in GSI_P. CTX holds context information for the directive. */
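/* Informally, the body of the child function is sequenced as

       .omp_data_i = &.omp_data_o;    <- only when a data-sharing record exists
       <input clause code>
       BODY
       <output/reduction clause code>
       GIMPLE_OMP_RETURN

   while the send-clause code built here stays in the parent function,
   wrapped in a GIMPLE_BIND around the directive itself.  (A sketch;
   .omp_data_* stand for the artificial sender/receiver decls.)  */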
6362 static void
6363 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6365 tree clauses;
6366 tree child_fn, t;
6367 gimple stmt = gsi_stmt (*gsi_p);
6368 gimple par_bind, bind;
6369 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6370 struct gimplify_ctx gctx;
6372 clauses = gimple_omp_taskreg_clauses (stmt);
6373 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6374 par_body = gimple_bind_body (par_bind);
6375 child_fn = ctx->cb.dst_fn;
6376 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6377 && !gimple_omp_parallel_combined_p (stmt))
6379 struct walk_stmt_info wi;
6380 int ws_num = 0;
6382 memset (&wi, 0, sizeof (wi));
6383 wi.info = &ws_num;
6384 wi.val_only = true;
6385 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6386 if (ws_num == 1)
6387 gimple_omp_parallel_set_combined_p (stmt, true);
6389 if (ctx->srecord_type)
6390 create_task_copyfn (stmt, ctx);
6392 push_gimplify_context (&gctx);
6394 par_olist = NULL;
6395 par_ilist = NULL;
6396 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6397 lower_omp (par_body, ctx);
6398 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6399 lower_reduction_clauses (clauses, &par_olist, ctx);
6401 /* Declare all the variables created by mapping and the variables
6402 declared in the scope of the parallel body. */
6403 record_vars_into (ctx->block_vars, child_fn);
6404 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6406 if (ctx->record_type)
6408 ctx->sender_decl
6409 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6410 : ctx->record_type, ".omp_data_o");
6411 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6412 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6415 olist = NULL;
6416 ilist = NULL;
6417 lower_send_clauses (clauses, &ilist, &olist, ctx);
6418 lower_send_shared_vars (&ilist, &olist, ctx);
6420 /* Once all the expansions are done, sequence all the different
6421 fragments inside gimple_omp_body. */
6423 new_body = NULL;
6425 if (ctx->record_type)
6427 t = build_fold_addr_expr (ctx->sender_decl);
6428 /* fixup_child_record_type might have changed receiver_decl's type. */
6429 t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
6430 gimple_seq_add_stmt (&new_body,
6431 gimple_build_assign (ctx->receiver_decl, t));
6434 gimple_seq_add_seq (&new_body, par_ilist);
6435 gimple_seq_add_seq (&new_body, par_body);
6436 gimple_seq_add_seq (&new_body, par_olist);
6437 new_body = maybe_catch_exception (new_body);
6438 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6439 gimple_omp_set_body (stmt, new_body);
6441 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6442 gimple_bind_add_stmt (bind, stmt);
6443 if (ilist || olist)
6445 gimple_seq_add_stmt (&ilist, bind);
6446 gimple_seq_add_seq (&ilist, olist);
6447 bind = gimple_build_bind (NULL, ilist, NULL);
6450 gsi_replace (gsi_p, bind, true);
6452 pop_gimplify_context (NULL);
6455 /* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
6456 regimplified.  If DATA is non-NULL, lower_omp_1 is being called outside
6457 of an OpenMP context, but with task_shared_vars set.  */
6459 static tree
6460 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6461 void *data)
6463 tree t = *tp;
6465 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6466 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6467 return t;
6469 if (task_shared_vars
6470 && DECL_P (t)
6471 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6472 return t;
6474 /* If a global variable has been privatized, TREE_CONSTANT on
6475 ADDR_EXPR might be wrong. */
6476 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6477 recompute_tree_invariant_for_addr_expr (t);
6479 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6480 return NULL_TREE;
6483 static void
6484 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6486 gimple stmt = gsi_stmt (*gsi_p);
6487 struct walk_stmt_info wi;
6489 if (gimple_has_location (stmt))
6490 input_location = gimple_location (stmt);
6492 if (task_shared_vars)
6493 memset (&wi, '\0', sizeof (wi));
6495 /* If we have issued syntax errors, avoid doing any heavy lifting.
6496 Just replace the OpenMP directives with a NOP to avoid
6497 confusing RTL expansion. */
6498 if (errorcount && is_gimple_omp (stmt))
6500 gsi_replace (gsi_p, gimple_build_nop (), true);
6501 return;
6504 switch (gimple_code (stmt))
6506 case GIMPLE_COND:
6507 if ((ctx || task_shared_vars)
6508 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6509 ctx ? NULL : &wi, NULL)
6510 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6511 ctx ? NULL : &wi, NULL)))
6512 gimple_regimplify_operands (stmt, gsi_p);
6513 break;
6514 case GIMPLE_CATCH:
6515 lower_omp (gimple_catch_handler (stmt), ctx);
6516 break;
6517 case GIMPLE_EH_FILTER:
6518 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6519 break;
6520 case GIMPLE_TRY:
6521 lower_omp (gimple_try_eval (stmt), ctx);
6522 lower_omp (gimple_try_cleanup (stmt), ctx);
6523 break;
6524 case GIMPLE_BIND:
6525 lower_omp (gimple_bind_body (stmt), ctx);
6526 break;
6527 case GIMPLE_OMP_PARALLEL:
6528 case GIMPLE_OMP_TASK:
6529 ctx = maybe_lookup_ctx (stmt);
6530 lower_omp_taskreg (gsi_p, ctx);
6531 break;
6532 case GIMPLE_OMP_FOR:
6533 ctx = maybe_lookup_ctx (stmt);
6534 gcc_assert (ctx);
6535 lower_omp_for (gsi_p, ctx);
6536 break;
6537 case GIMPLE_OMP_SECTIONS:
6538 ctx = maybe_lookup_ctx (stmt);
6539 gcc_assert (ctx);
6540 lower_omp_sections (gsi_p, ctx);
6541 break;
6542 case GIMPLE_OMP_SINGLE:
6543 ctx = maybe_lookup_ctx (stmt);
6544 gcc_assert (ctx);
6545 lower_omp_single (gsi_p, ctx);
6546 break;
6547 case GIMPLE_OMP_MASTER:
6548 ctx = maybe_lookup_ctx (stmt);
6549 gcc_assert (ctx);
6550 lower_omp_master (gsi_p, ctx);
6551 break;
6552 case GIMPLE_OMP_ORDERED:
6553 ctx = maybe_lookup_ctx (stmt);
6554 gcc_assert (ctx);
6555 lower_omp_ordered (gsi_p, ctx);
6556 break;
6557 case GIMPLE_OMP_CRITICAL:
6558 ctx = maybe_lookup_ctx (stmt);
6559 gcc_assert (ctx);
6560 lower_omp_critical (gsi_p, ctx);
6561 break;
6562 case GIMPLE_OMP_ATOMIC_LOAD:
6563 if ((ctx || task_shared_vars)
6564 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6565 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6566 gimple_regimplify_operands (stmt, gsi_p);
6567 break;
6568 default:
6569 if ((ctx || task_shared_vars)
6570 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6571 ctx ? NULL : &wi))
6572 gimple_regimplify_operands (stmt, gsi_p);
6573 break;
6577 static void
6578 lower_omp (gimple_seq body, omp_context *ctx)
6580 location_t saved_location = input_location;
6581 gimple_stmt_iterator gsi = gsi_start (body);
6582 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6583 lower_omp_1 (&gsi, ctx);
6584 input_location = saved_location;
6587 /* Main entry point. */
6589 static unsigned int
6590 execute_lower_omp (void)
6592 gimple_seq body;
6594 /* This pass always runs, to provide PROP_gimple_lomp.
6595 But there is nothing to do unless -fopenmp is given. */
6596 if (flag_openmp == 0)
6597 return 0;
6599 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6600 delete_omp_context);
6602 body = gimple_body (current_function_decl);
6603 scan_omp (body, NULL);
6604 gcc_assert (taskreg_nesting_level == 0);
6606 if (all_contexts->root)
6608 struct gimplify_ctx gctx;
6610 if (task_shared_vars)
6611 push_gimplify_context (&gctx);
6612 lower_omp (body, NULL);
6613 if (task_shared_vars)
6614 pop_gimplify_context (NULL);
6617 if (all_contexts)
6619 splay_tree_delete (all_contexts);
6620 all_contexts = NULL;
6622 BITMAP_FREE (task_shared_vars);
6623 return 0;
6626 struct gimple_opt_pass pass_lower_omp =
6629 GIMPLE_PASS,
6630 "omplower", /* name */
6631 NULL, /* gate */
6632 execute_lower_omp, /* execute */
6633 NULL, /* sub */
6634 NULL, /* next */
6635 0, /* static_pass_number */
6636 TV_NONE, /* tv_id */
6637 PROP_gimple_any, /* properties_required */
6638 PROP_gimple_lomp, /* properties_provided */
6639 0, /* properties_destroyed */
6640 0, /* todo_flags_start */
6641 TODO_dump_func /* todo_flags_finish */
6645 /* The following is a utility to diagnose OpenMP structured block violations.
6646 It is not part of the "omplower" pass, as that's invoked too late. It
6647 should be invoked by the respective front ends after gimplification. */
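/* For example, the front ends use this to reject code such as

       void f (int *p)
       {
       #pragma omp single
         {
           if (*p)
             goto out;        <- branches out of the structured block
           *p = 1;
         }
        out:;
       }

   which diagnose_sb_2 reports as an invalid branch to/from an OpenMP
   structured block.  */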
6649 static splay_tree all_labels;
6651 /* Check for mismatched contexts and generate an error if needed. Return
6652 true if an error is detected. */
6654 static bool
6655 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6656 gimple branch_ctx, gimple label_ctx)
6658 if (label_ctx == branch_ctx)
6659 return false;
/*
6663 Previously we kept track of the label's entire context in diagnose_sb_[12]
6664 so we could traverse it and issue a correct "exit" or "enter" error
6665 message upon a structured block violation.
6667 We built the context by building a list with tree_cons'ing, but there is
6668 no easy counterpart in gimple tuples.  It seems like far too much work
6669 for issuing exit/enter error messages.  If someone really misses the
6670 distinct error message... patches welcome.
*/
6673 #if 0
6674 /* Try to avoid confusing the user by producing an error message
6675 with correct "exit" or "enter" verbiage. We prefer "exit"
6676 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6677 if (branch_ctx == NULL)
6678 exit_p = false;
6679 else
6681 while (label_ctx)
6683 if (TREE_VALUE (label_ctx) == branch_ctx)
6685 exit_p = false;
6686 break;
6688 label_ctx = TREE_CHAIN (label_ctx);
6692 if (exit_p)
6693 error ("invalid exit from OpenMP structured block");
6694 else
6695 error ("invalid entry to OpenMP structured block");
6696 #endif
6698 /* If it's obvious we have an invalid entry, be specific about the error. */
6699 if (branch_ctx == NULL)
6700 error ("invalid entry to OpenMP structured block");
6701 else
6702 /* Otherwise, be vague and lazy, but efficient. */
6703 error ("invalid branch to/from an OpenMP structured block");
6705 gsi_replace (gsi_p, gimple_build_nop (), false);
6706 return true;
6709 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
6710 where each label is found. */
6712 static tree
6713 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6714 struct walk_stmt_info *wi)
6716 gimple context = (gimple) wi->info;
6717 gimple inner_context;
6718 gimple stmt = gsi_stmt (*gsi_p);
6720 *handled_ops_p = true;
6722 switch (gimple_code (stmt))
6724 WALK_SUBSTMTS;
6726 case GIMPLE_OMP_PARALLEL:
6727 case GIMPLE_OMP_TASK:
6728 case GIMPLE_OMP_SECTIONS:
6729 case GIMPLE_OMP_SINGLE:
6730 case GIMPLE_OMP_SECTION:
6731 case GIMPLE_OMP_MASTER:
6732 case GIMPLE_OMP_ORDERED:
6733 case GIMPLE_OMP_CRITICAL:
6734 /* The minimal context here is just the current OMP construct. */
6735 inner_context = stmt;
6736 wi->info = inner_context;
6737 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6738 wi->info = context;
6739 break;
6741 case GIMPLE_OMP_FOR:
6742 inner_context = stmt;
6743 wi->info = inner_context;
6744 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6745 walk them. */
6746 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6747 diagnose_sb_1, NULL, wi);
6748 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6749 wi->info = context;
6750 break;
6752 case GIMPLE_LABEL:
6753 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
6754 (splay_tree_value) context);
6755 break;
6757 default:
6758 break;
6761 return NULL_TREE;
6764 /* Pass 2: Check each branch and see if its context differs from that of
6765 the destination label's context. */
6767 static tree
6768 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6769 struct walk_stmt_info *wi)
6771 gimple context = (gimple) wi->info;
6772 splay_tree_node n;
6773 gimple stmt = gsi_stmt (*gsi_p);
6775 *handled_ops_p = true;
6777 switch (gimple_code (stmt))
6779 WALK_SUBSTMTS;
6781 case GIMPLE_OMP_PARALLEL:
6782 case GIMPLE_OMP_TASK:
6783 case GIMPLE_OMP_SECTIONS:
6784 case GIMPLE_OMP_SINGLE:
6785 case GIMPLE_OMP_SECTION:
6786 case GIMPLE_OMP_MASTER:
6787 case GIMPLE_OMP_ORDERED:
6788 case GIMPLE_OMP_CRITICAL:
6789 wi->info = stmt;
6790 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6791 wi->info = context;
6792 break;
6794 case GIMPLE_OMP_FOR:
6795 wi->info = stmt;
6796 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6797 walk them. */
6798 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6799 diagnose_sb_2, NULL, wi);
6800 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6801 wi->info = context;
6802 break;
6804 case GIMPLE_GOTO:
6806 tree lab = gimple_goto_dest (stmt);
6807 if (TREE_CODE (lab) != LABEL_DECL)
6808 break;
6810 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6811 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
6813 break;
6815 case GIMPLE_SWITCH:
6817 unsigned int i;
6818 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
6820 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
6821 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6822 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
6823 break;
6826 break;
6828 case GIMPLE_RETURN:
6829 diagnose_sb_0 (gsi_p, context, NULL);
6830 break;
6832 default:
6833 break;
6836 return NULL_TREE;
6839 void
6840 diagnose_omp_structured_block_errors (tree fndecl)
6842 tree save_current = current_function_decl;
6843 struct walk_stmt_info wi;
6844 struct function *old_cfun = cfun;
6845 gimple_seq body = gimple_body (fndecl);
6847 current_function_decl = fndecl;
6848 set_cfun (DECL_STRUCT_FUNCTION (fndecl));
6850 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
6852 memset (&wi, 0, sizeof (wi));
6853 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
6855 memset (&wi, 0, sizeof (wi));
6856 wi.want_locations = true;
6857 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
6859 splay_tree_delete (all_labels);
6860 all_labels = NULL;
6862 set_cfun (old_cfun);
6863 current_function_decl = save_current;
6866 #include "gt-omp-low.h"