gcc/omp-low.c (official-gcc.git)
1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
6 Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "gimple.h"
31 #include "tree-iterator.h"
32 #include "tree-inline.h"
33 #include "langhooks.h"
34 #include "diagnostic.h"
35 #include "tree-flow.h"
36 #include "timevar.h"
37 #include "flags.h"
38 #include "function.h"
39 #include "expr.h"
40 #include "toplev.h"
41 #include "tree-pass.h"
42 #include "ggc.h"
43 #include "except.h"
44 #include "splay-tree.h"
45 #include "optabs.h"
46 #include "cfgloop.h"
49 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
50 phases. The first phase scans the function looking for OMP statements
51 and then for variables that must be replaced to satisfy data sharing
52 clauses. The second phase expands code for the constructs, as well as
53 re-gimplifying things when variables have been replaced with complex
54 expressions.
56 Final code generation is done by pass_expand_omp. The flowgraph is
57 scanned for parallel regions which are then moved to a new
58 function, to be invoked by the thread library. */
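/* For orientation, an illustrative sketch (not taken from this file) of
   what the two phases produce.  Given

       #pragma omp parallel shared (a)
         a = a + 1;

   lowering rewrites the body to go through the communication record
   (.omp_data_s / .omp_data_o), and pass_expand_omp later outlines the
   body into a child function invoked via libgomp, roughly:

       .omp_data_o.a = a;
       GOMP_parallel_start (foo.omp_fn.0, &.omp_data_o, 0);
       foo.omp_fn.0 (&.omp_data_o);
       GOMP_parallel_end ();
       a = .omp_data_o.a;   <-- only when copy-in/out is used for A  */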
60 /* Context structure. Used to store information about each parallel
61 directive in the code. */
63 typedef struct omp_context
65 /* This field must be at the beginning, as we do "inheritance": Some
66 callback functions for tree-inline.c (e.g., omp_copy_decl)
67 receive a copy_body_data pointer that is up-casted to an
68 omp_context pointer. */
69 copy_body_data cb;
71 /* The tree of contexts corresponding to the encountered constructs. */
72 struct omp_context *outer;
73 gimple stmt;
75 /* Map variables to fields in a structure that allows communication
76 between sending and receiving threads. */
77 splay_tree field_map;
78 tree record_type;
79 tree sender_decl;
80 tree receiver_decl;
82 /* These are used just by task contexts, if task firstprivate fn is
83 needed. srecord_type is used to communicate from the thread
84 that encountered the task construct to task firstprivate fn;
85 record_type is allocated by GOMP_task, initialized by task firstprivate
86 fn and passed to the task body fn. */
87 splay_tree sfield_map;
88 tree srecord_type;
90 /* A chain of variables to add to the top-level block surrounding the
91 construct. In the case of a parallel, this is in the child function. */
92 tree block_vars;
94 /* What to do with variables with implicitly determined sharing
95 attributes. */
96 enum omp_clause_default_kind default_kind;
98 /* Nesting depth of this context. Used to beautify error messages re
99 invalid gotos. The outermost ctx is depth 1, with depth 0 being
100 reserved for the main body of the function. */
101 int depth;
103 /* True if this parallel directive is nested within another. */
104 bool is_nested;
105 } omp_context;
108 struct omp_for_data_loop
110 tree v, n1, n2, step;
111 enum tree_code cond_code;
114 /* A structure describing the main elements of a parallel loop. */
116 struct omp_for_data
118 struct omp_for_data_loop loop;
119 tree chunk_size;
120 gimple for_stmt;
121 tree pre, iter_type;
122 int collapse;
123 bool have_nowait, have_ordered;
124 enum omp_clause_schedule_kind sched_kind;
125 struct omp_for_data_loop *loops;
129 static splay_tree all_contexts;
130 static int taskreg_nesting_level;
131 struct omp_region *root_omp_region;
132 static bitmap task_shared_vars;
134 static void scan_omp (gimple_seq, omp_context *);
135 static tree scan_omp_1_op (tree *, int *, void *);
137 #define WALK_SUBSTMTS \
138 case GIMPLE_BIND: \
139 case GIMPLE_TRY: \
140 case GIMPLE_CATCH: \
141 case GIMPLE_EH_FILTER: \
142 /* The sub-statements for these should be walked. */ \
143 *handled_ops_p = false; \
144 break;
146 /* Convenience function for calling scan_omp_1_op on tree operands. */
148 static inline tree
149 scan_omp_op (tree *tp, omp_context *ctx)
151 struct walk_stmt_info wi;
153 memset (&wi, 0, sizeof (wi));
154 wi.info = ctx;
155 wi.want_locations = true;
157 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
160 static void lower_omp (gimple_seq, omp_context *);
161 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
162 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
164 /* Find an OpenMP clause of type KIND within CLAUSES. */
166 tree
167 find_omp_clause (tree clauses, enum omp_clause_code kind)
169 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
170 if (OMP_CLAUSE_CODE (clauses) == kind)
171 return clauses;
173 return NULL_TREE;
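/* A minimal usage sketch (illustrative only): callers probe a clause
   chain for one clause kind and then read its operands, e.g.

       tree c = find_omp_clause (gimple_omp_for_clauses (stmt),
                                 OMP_CLAUSE_SCHEDULE);
       if (c)
         kind = OMP_CLAUSE_SCHEDULE_KIND (c);

   which is how extract_omp_for_data and determine_parallel_type below
   use it.  */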
176 /* Return true if CTX is for an omp parallel. */
178 static inline bool
179 is_parallel_ctx (omp_context *ctx)
181 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
185 /* Return true if CTX is for an omp task. */
187 static inline bool
188 is_task_ctx (omp_context *ctx)
190 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
194 /* Return true if CTX is for an omp parallel or omp task. */
196 static inline bool
197 is_taskreg_ctx (omp_context *ctx)
199 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
200 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
204 /* Return true if REGION is a combined parallel+workshare region. */
206 static inline bool
207 is_combined_parallel (struct omp_region *region)
209 return region->is_combined_parallel;
213 /* Extract the header elements of parallel loop FOR_STMT and store
214 them into *FD. */
216 static void
217 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
218 struct omp_for_data_loop *loops)
220 tree t, var, *collapse_iter, *collapse_count;
221 tree count = NULL_TREE, iter_type = long_integer_type_node;
222 struct omp_for_data_loop *loop;
223 int i;
224 struct omp_for_data_loop dummy_loop;
226 fd->for_stmt = for_stmt;
227 fd->pre = NULL;
228 fd->collapse = gimple_omp_for_collapse (for_stmt);
229 if (fd->collapse > 1)
230 fd->loops = loops;
231 else
232 fd->loops = &fd->loop;
234 fd->have_nowait = fd->have_ordered = false;
235 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
236 fd->chunk_size = NULL_TREE;
237 collapse_iter = NULL;
238 collapse_count = NULL;
240 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
241 switch (OMP_CLAUSE_CODE (t))
243 case OMP_CLAUSE_NOWAIT:
244 fd->have_nowait = true;
245 break;
246 case OMP_CLAUSE_ORDERED:
247 fd->have_ordered = true;
248 break;
249 case OMP_CLAUSE_SCHEDULE:
250 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
251 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
252 break;
253 case OMP_CLAUSE_COLLAPSE:
254 if (fd->collapse > 1)
256 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
257 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
259 default:
260 break;
263 /* FIXME: for now map schedule(auto) to schedule(static).
264 There should be analysis to determine whether all iterations
265 are approximately the same amount of work (then schedule(static)
266 is best) or if it varies (then schedule(dynamic,N) is better). */
267 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
269 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
270 gcc_assert (fd->chunk_size == NULL);
272 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
273 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
274 gcc_assert (fd->chunk_size == NULL);
275 else if (fd->chunk_size == NULL)
277 /* We only need to compute a default chunk size for ordered
278 static loops and dynamic loops. */
279 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
280 || fd->have_ordered
281 || fd->collapse > 1)
282 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
283 ? integer_zero_node : integer_one_node;
286 for (i = 0; i < fd->collapse; i++)
288 if (fd->collapse == 1)
289 loop = &fd->loop;
290 else if (loops != NULL)
291 loop = loops + i;
292 else
293 loop = &dummy_loop;
296 loop->v = gimple_omp_for_index (for_stmt, i);
297 gcc_assert (SSA_VAR_P (loop->v));
298 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
299 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
300 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
301 loop->n1 = gimple_omp_for_initial (for_stmt, i);
303 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
304 loop->n2 = gimple_omp_for_final (for_stmt, i);
305 switch (loop->cond_code)
307 case LT_EXPR:
308 case GT_EXPR:
309 break;
310 case LE_EXPR:
311 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
312 loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
313 loop->n2, size_one_node);
314 else
315 loop->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
316 build_int_cst (TREE_TYPE (loop->n2), 1));
317 loop->cond_code = LT_EXPR;
318 break;
319 case GE_EXPR:
320 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
321 loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
322 loop->n2, size_int (-1));
323 else
324 loop->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
325 build_int_cst (TREE_TYPE (loop->n2), 1));
326 loop->cond_code = GT_EXPR;
327 break;
328 default:
329 gcc_unreachable ();
332 t = gimple_omp_for_incr (for_stmt, i);
333 gcc_assert (TREE_OPERAND (t, 0) == var);
334 switch (TREE_CODE (t))
336 case PLUS_EXPR:
337 case POINTER_PLUS_EXPR:
338 loop->step = TREE_OPERAND (t, 1);
339 break;
340 case MINUS_EXPR:
341 loop->step = TREE_OPERAND (t, 1);
342 loop->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (loop->step),
343 loop->step);
344 break;
345 default:
346 gcc_unreachable ();
349 if (iter_type != long_long_unsigned_type_node)
351 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
352 iter_type = long_long_unsigned_type_node;
353 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
354 && TYPE_PRECISION (TREE_TYPE (loop->v))
355 >= TYPE_PRECISION (iter_type))
357 tree n;
359 if (loop->cond_code == LT_EXPR)
360 n = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
361 loop->n2, loop->step);
362 else
363 n = loop->n1;
364 if (TREE_CODE (n) != INTEGER_CST
365 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
366 iter_type = long_long_unsigned_type_node;
368 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
369 > TYPE_PRECISION (iter_type))
371 tree n1, n2;
373 if (loop->cond_code == LT_EXPR)
375 n1 = loop->n1;
376 n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
377 loop->n2, loop->step);
379 else
381 n1 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->v),
382 loop->n2, loop->step);
383 n2 = loop->n1;
385 if (TREE_CODE (n1) != INTEGER_CST
386 || TREE_CODE (n2) != INTEGER_CST
387 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
388 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
389 iter_type = long_long_unsigned_type_node;
393 if (collapse_count && *collapse_count == NULL)
395 if ((i == 0 || count != NULL_TREE)
396 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
397 && TREE_CONSTANT (loop->n1)
398 && TREE_CONSTANT (loop->n2)
399 && TREE_CODE (loop->step) == INTEGER_CST)
401 tree itype = TREE_TYPE (loop->v);
403 if (POINTER_TYPE_P (itype))
404 itype
405 = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
406 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
407 t = fold_build2 (PLUS_EXPR, itype,
408 fold_convert (itype, loop->step), t);
409 t = fold_build2 (PLUS_EXPR, itype, t,
410 fold_convert (itype, loop->n2));
411 t = fold_build2 (MINUS_EXPR, itype, t,
412 fold_convert (itype, loop->n1));
413 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
414 t = fold_build2 (TRUNC_DIV_EXPR, itype,
415 fold_build1 (NEGATE_EXPR, itype, t),
416 fold_build1 (NEGATE_EXPR, itype,
417 fold_convert (itype,
418 loop->step)));
419 else
420 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
421 fold_convert (itype, loop->step));
422 t = fold_convert (long_long_unsigned_type_node, t);
423 if (count != NULL_TREE)
424 count = fold_build2 (MULT_EXPR, long_long_unsigned_type_node,
425 count, t);
426 else
427 count = t;
428 if (TREE_CODE (count) != INTEGER_CST)
429 count = NULL_TREE;
431 else
432 count = NULL_TREE;
436 if (count)
438 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
439 iter_type = long_long_unsigned_type_node;
440 else
441 iter_type = long_integer_type_node;
443 else if (collapse_iter && *collapse_iter != NULL)
444 iter_type = TREE_TYPE (*collapse_iter);
445 fd->iter_type = iter_type;
446 if (collapse_iter && *collapse_iter == NULL)
447 *collapse_iter = create_tmp_var (iter_type, ".iter");
448 if (collapse_count && *collapse_count == NULL)
450 if (count)
451 *collapse_count = fold_convert (iter_type, count);
452 else
453 *collapse_count = create_tmp_var (iter_type, ".count");
456 if (fd->collapse > 1)
458 fd->loop.v = *collapse_iter;
459 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
460 fd->loop.n2 = *collapse_count;
461 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
462 fd->loop.cond_code = LT_EXPR;
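/* As an illustration (not part of the pass logic): for the loop

       #pragma omp for schedule (dynamic, 4)
       for (i = 0; i < n; i++)
         ...

   this function fills in roughly

       fd->loop.v = i, fd->loop.n1 = 0, fd->loop.n2 = n, fd->loop.step = 1,
       fd->loop.cond_code = LT_EXPR, fd->collapse = 1,
       fd->sched_kind = OMP_CLAUSE_SCHEDULE_DYNAMIC, fd->chunk_size = 4.

   For collapse(N) loops the individual loops go into fd->loops[], and
   fd->loop describes the synthesized iteration space [0, count).  */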
467 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
468 is the immediate dominator of PAR_ENTRY_BB, return true if there
469 are no data dependencies that would prevent expanding the parallel
470 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
472 When expanding a combined parallel+workshare region, the call to
473 the child function may need additional arguments in the case of
474 GIMPLE_OMP_FOR regions. In some cases, these arguments are
475 computed out of variables passed in from the parent to the child
476 via 'struct .omp_data_s'. For instance:
478 #pragma omp parallel for schedule (guided, i * 4)
479 for (j ...)
481 Is lowered into:
483 # BLOCK 2 (PAR_ENTRY_BB)
484 .omp_data_o.i = i;
485 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
487 # BLOCK 3 (WS_ENTRY_BB)
488 .omp_data_i = &.omp_data_o;
489 D.1667 = .omp_data_i->i;
490 D.1598 = D.1667 * 4;
491 #pragma omp for schedule (guided, D.1598)
493 When we outline the parallel region, the call to the child function
494 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
495 that value is computed *after* the call site. So, in principle we
496 cannot do the transformation.
498 To see whether the code in WS_ENTRY_BB blocks the combined
499 parallel+workshare call, we collect all the variables used in the
500 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
501 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
502 call.
504 FIXME. If we had the SSA form built at this point, we could merely
505 hoist the code in block 3 into block 2 and be done with it. But at
506 this point we don't have dataflow information and though we could
507 hack something up here, it is really not worth the aggravation. */
509 static bool
510 workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
512 struct omp_for_data fd;
513 gimple par_stmt, ws_stmt;
515 par_stmt = last_stmt (par_entry_bb);
516 ws_stmt = last_stmt (ws_entry_bb);
518 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
519 return true;
521 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
523 extract_omp_for_data (ws_stmt, &fd, NULL);
525 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
526 return false;
527 if (fd.iter_type != long_integer_type_node)
528 return false;
530 /* FIXME. We give up too easily here. If any of these arguments
531 are not constants, they will likely involve variables that have
532 been mapped into fields of .omp_data_s for sharing with the child
533 function. With appropriate data flow, it would be possible to
534 see through this. */
535 if (!is_gimple_min_invariant (fd.loop.n1)
536 || !is_gimple_min_invariant (fd.loop.n2)
537 || !is_gimple_min_invariant (fd.loop.step)
538 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
539 return false;
541 return true;
545 /* Collect additional arguments needed to emit a combined
546 parallel+workshare call. WS_STMT is the workshare directive being
547 expanded. */
549 static tree
550 get_ws_args_for (gimple ws_stmt)
552 tree t;
554 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
556 struct omp_for_data fd;
557 tree ws_args;
559 extract_omp_for_data (ws_stmt, &fd, NULL);
561 ws_args = NULL_TREE;
562 if (fd.chunk_size)
564 t = fold_convert (long_integer_type_node, fd.chunk_size);
565 ws_args = tree_cons (NULL, t, ws_args);
568 t = fold_convert (long_integer_type_node, fd.loop.step);
569 ws_args = tree_cons (NULL, t, ws_args);
571 t = fold_convert (long_integer_type_node, fd.loop.n2);
572 ws_args = tree_cons (NULL, t, ws_args);
574 t = fold_convert (long_integer_type_node, fd.loop.n1);
575 ws_args = tree_cons (NULL, t, ws_args);
577 return ws_args;
579 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
581 /* Number of sections is equal to the number of edges from the
582 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
583 the exit of the sections region. */
584 basic_block bb = single_succ (gimple_bb (ws_stmt));
585 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
586 t = tree_cons (NULL, t, NULL);
587 return t;
590 gcc_unreachable ();
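/* Illustrative example: for "#pragma omp for schedule (dynamic, 8)" over
   [n1, n2) with step STEP, the list built above ends up in the order
   n1, n2, step, 8 (each converted to long), which matches the trailing
   arguments of the combined GOMP_parallel_loop_* entry points; for
   sections the list is just the section count.  */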
594 /* Discover whether REGION is a combined parallel+workshare region. */
596 static void
597 determine_parallel_type (struct omp_region *region)
599 basic_block par_entry_bb, par_exit_bb;
600 basic_block ws_entry_bb, ws_exit_bb;
602 if (region == NULL || region->inner == NULL
603 || region->exit == NULL || region->inner->exit == NULL
604 || region->inner->cont == NULL)
605 return;
607 /* We only support parallel+for and parallel+sections. */
608 if (region->type != GIMPLE_OMP_PARALLEL
609 || (region->inner->type != GIMPLE_OMP_FOR
610 && region->inner->type != GIMPLE_OMP_SECTIONS))
611 return;
613 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
614 WS_EXIT_BB -> PAR_EXIT_BB. */
615 par_entry_bb = region->entry;
616 par_exit_bb = region->exit;
617 ws_entry_bb = region->inner->entry;
618 ws_exit_bb = region->inner->exit;
620 if (single_succ (par_entry_bb) == ws_entry_bb
621 && single_succ (ws_exit_bb) == par_exit_bb
622 && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
623 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
624 || (last_and_only_stmt (ws_entry_bb)
625 && last_and_only_stmt (par_exit_bb))))
627 gimple ws_stmt = last_stmt (ws_entry_bb);
629 if (region->inner->type == GIMPLE_OMP_FOR)
631 /* If this is a combined parallel loop, we need to determine
632 whether or not to use the combined library calls. There
633 are two cases where we do not apply the transformation:
634 static loops and any kind of ordered loop. In the first
635 case, we already open code the loop so there is no need
636 to do anything else. In the latter case, the combined
637 parallel loop call would still need extra synchronization
638 to implement ordered semantics, so there would not be any
639 gain in using the combined call. */
640 tree clauses = gimple_omp_for_clauses (ws_stmt);
641 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
642 if (c == NULL
643 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
644 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
646 region->is_combined_parallel = false;
647 region->inner->is_combined_parallel = false;
648 return;
652 region->is_combined_parallel = true;
653 region->inner->is_combined_parallel = true;
654 region->ws_args = get_ws_args_for (ws_stmt);
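/* Example (illustrative): the nest

       #pragma omp parallel
       #pragma omp for schedule (dynamic)
       for (i = 0; i < n; i++)
         ...

   is marked as a combined region, so expansion can use a single
   GOMP_parallel_loop_dynamic_start call instead of GOMP_parallel_start
   plus GOMP_loop_dynamic_start.  A static schedule or an ordered clause
   keeps the regions separate, as explained above.  */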
659 /* Return true if EXPR is variable sized. */
661 static inline bool
662 is_variable_sized (const_tree expr)
664 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
667 /* Return true if DECL is a reference type. */
669 static inline bool
670 is_reference (tree decl)
672 return lang_hooks.decls.omp_privatize_by_reference (decl);
675 /* Look up variables in the decl or field splay trees. The "maybe" form
676 allows the variable not to have been entered; otherwise we assert
677 that it has been. */
679 static inline tree
680 lookup_decl (tree var, omp_context *ctx)
682 tree *n;
683 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
684 return *n;
687 static inline tree
688 maybe_lookup_decl (const_tree var, omp_context *ctx)
690 tree *n;
691 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
692 return n ? *n : NULL_TREE;
695 static inline tree
696 lookup_field (tree var, omp_context *ctx)
698 splay_tree_node n;
699 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
700 return (tree) n->value;
703 static inline tree
704 lookup_sfield (tree var, omp_context *ctx)
706 splay_tree_node n;
707 n = splay_tree_lookup (ctx->sfield_map
708 ? ctx->sfield_map : ctx->field_map,
709 (splay_tree_key) var);
710 return (tree) n->value;
713 static inline tree
714 maybe_lookup_field (tree var, omp_context *ctx)
716 splay_tree_node n;
717 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
718 return n ? (tree) n->value : NULL_TREE;
721 /* Return true if DECL should be copied by pointer. SHARED_CTX is
722 the parallel context if DECL is to be shared. */
724 static bool
725 use_pointer_for_field (tree decl, omp_context *shared_ctx)
727 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
728 return true;
730 /* We can only use copy-in/copy-out semantics for shared variables
731 when we know the value is not accessible from an outer scope. */
732 if (shared_ctx)
734 /* ??? Trivially accessible from anywhere. But why would we even
735 be passing an address in this case? Should we simply assert
736 this to be false, or should we have a cleanup pass that removes
737 these from the list of mappings? */
738 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
739 return true;
741 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
742 without analyzing the expression whether or not its location
743 is accessible to anyone else. In the case of nested parallel
744 regions it certainly may be. */
745 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
746 return true;
748 /* Do not use copy-in/copy-out for variables that have their
749 address taken. */
750 if (TREE_ADDRESSABLE (decl))
751 return true;
753 /* Disallow copy-in/out in nested parallel if
754 decl is shared in outer parallel, otherwise
755 each thread could store the shared variable
756 in its own copy-in location, making the
757 variable no longer really shared. */
758 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
760 omp_context *up;
762 for (up = shared_ctx->outer; up; up = up->outer)
763 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
764 break;
766 if (up)
768 tree c;
770 for (c = gimple_omp_taskreg_clauses (up->stmt);
771 c; c = OMP_CLAUSE_CHAIN (c))
772 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
773 && OMP_CLAUSE_DECL (c) == decl)
774 break;
776 if (c)
777 return true;
781 /* For tasks avoid using copy-in/out, unless they are readonly
782 (in which case just copy-in is used). As tasks can be
783 deferred or executed in a different thread, when GOMP_task
784 returns, the task hasn't necessarily terminated. */
785 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
787 tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
788 if (is_gimple_reg (outer))
790 /* Taking address of OUTER in lower_send_shared_vars
791 might need regimplification of everything that uses the
792 variable. */
793 if (!task_shared_vars)
794 task_shared_vars = BITMAP_ALLOC (NULL);
795 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
796 TREE_ADDRESSABLE (outer) = 1;
798 return true;
802 return false;
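/* Informal summary of the decision above (illustrative, not extra
   logic): aggregates, static/extern variables, addressable variables and
   variables already shared in an enclosing parallel are passed to the
   child by pointer, while a small non-addressable scalar such as

       int i = 0;
       #pragma omp parallel shared (i)

   can use copy-in/copy-out through the .omp_data_s record.  For tasks
   the function errs on the side of passing an address, since the task
   may still be running after the spawning code has moved on.  */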
805 /* Create a new VAR_DECL and copy information from VAR to it. */
807 tree
808 copy_var_decl (tree var, tree name, tree type)
810 tree copy = build_decl (VAR_DECL, name, type);
812 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
813 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
814 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
815 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
816 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
817 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
818 DECL_SOURCE_LOCATION (copy) = DECL_SOURCE_LOCATION (var);
819 TREE_USED (copy) = 1;
820 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
822 return copy;
825 /* Construct a new automatic decl similar to VAR. */
827 static tree
828 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
830 tree copy = copy_var_decl (var, name, type);
832 DECL_CONTEXT (copy) = current_function_decl;
833 TREE_CHAIN (copy) = ctx->block_vars;
834 ctx->block_vars = copy;
836 return copy;
839 static tree
840 omp_copy_decl_1 (tree var, omp_context *ctx)
842 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
845 /* Build tree nodes to access the field for VAR on the receiver side. */
847 static tree
848 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
850 tree x, field = lookup_field (var, ctx);
852 /* If the receiver record type was remapped in the child function,
853 remap the field into the new record type. */
854 x = maybe_lookup_field (field, ctx);
855 if (x != NULL)
856 field = x;
858 x = build_fold_indirect_ref (ctx->receiver_decl);
859 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
860 if (by_ref)
861 x = build_fold_indirect_ref (x);
863 return x;
866 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
867 of a parallel, this is a component reference; for workshare constructs
868 this is some variable. */
870 static tree
871 build_outer_var_ref (tree var, omp_context *ctx)
873 tree x;
875 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
876 x = var;
877 else if (is_variable_sized (var))
879 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
880 x = build_outer_var_ref (x, ctx);
881 x = build_fold_indirect_ref (x);
883 else if (is_taskreg_ctx (ctx))
885 bool by_ref = use_pointer_for_field (var, NULL);
886 x = build_receiver_ref (var, by_ref, ctx);
888 else if (ctx->outer)
889 x = lookup_decl (var, ctx->outer);
890 else if (is_reference (var))
891 /* This can happen with orphaned constructs. If var is a reference,
892 it may be shared and as such valid. */
893 x = var;
894 else
895 gcc_unreachable ();
897 if (is_reference (var))
898 x = build_fold_indirect_ref (x);
900 return x;
903 /* Build tree nodes to access the field for VAR on the sender side. */
905 static tree
906 build_sender_ref (tree var, omp_context *ctx)
908 tree field = lookup_sfield (var, ctx);
909 return build3 (COMPONENT_REF, TREE_TYPE (field),
910 ctx->sender_decl, field, NULL);
913 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
915 static void
916 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
918 tree field, type, sfield = NULL_TREE;
920 gcc_assert ((mask & 1) == 0
921 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
922 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
923 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
925 type = TREE_TYPE (var);
926 if (by_ref)
927 type = build_pointer_type (type);
928 else if ((mask & 3) == 1 && is_reference (var))
929 type = TREE_TYPE (type);
931 field = build_decl (FIELD_DECL, DECL_NAME (var), type);
933 /* Remember what variable this field was created for. This does have a
934 side effect of making dwarf2out ignore this member, so for helpful
935 debugging we clear it later in delete_omp_context. */
936 DECL_ABSTRACT_ORIGIN (field) = var;
937 if (type == TREE_TYPE (var))
939 DECL_ALIGN (field) = DECL_ALIGN (var);
940 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
941 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
943 else
944 DECL_ALIGN (field) = TYPE_ALIGN (type);
946 if ((mask & 3) == 3)
948 insert_field_into_struct (ctx->record_type, field);
949 if (ctx->srecord_type)
951 sfield = build_decl (FIELD_DECL, DECL_NAME (var), type);
952 DECL_ABSTRACT_ORIGIN (sfield) = var;
953 DECL_ALIGN (sfield) = DECL_ALIGN (field);
954 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
955 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
956 insert_field_into_struct (ctx->srecord_type, sfield);
959 else
961 if (ctx->srecord_type == NULL_TREE)
963 tree t;
965 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
966 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
967 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
969 sfield = build_decl (FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
970 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
971 insert_field_into_struct (ctx->srecord_type, sfield);
972 splay_tree_insert (ctx->sfield_map,
973 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
974 (splay_tree_value) sfield);
977 sfield = field;
978 insert_field_into_struct ((mask & 1) ? ctx->record_type
979 : ctx->srecord_type, field);
982 if (mask & 1)
983 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
984 (splay_tree_value) field);
985 if ((mask & 2) && ctx->sfield_map)
986 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
987 (splay_tree_value) sfield);
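/* MASK describes where the field lives (informal summary): bit 1 selects
   the receiver-side record (record_type, read through .omp_data_i),
   bit 2 the task sender-side record (srecord_type), and 3 means both.
   For example, scan_sharing_clauses below installs a shared variable
   with mask 3, while a firstprivate global on a task only needs
   mask 1.  */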
990 static tree
991 install_var_local (tree var, omp_context *ctx)
993 tree new_var = omp_copy_decl_1 (var, ctx);
994 insert_decl_map (&ctx->cb, var, new_var);
995 return new_var;
998 /* Adjust the replacement for DECL in CTX for the new context. This means
999 copying the DECL_VALUE_EXPR, and fixing up the type. */
1001 static void
1002 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1004 tree new_decl, size;
1006 new_decl = lookup_decl (decl, ctx);
1008 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1010 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1011 && DECL_HAS_VALUE_EXPR_P (decl))
1013 tree ve = DECL_VALUE_EXPR (decl);
1014 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1015 SET_DECL_VALUE_EXPR (new_decl, ve);
1016 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1019 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1021 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1022 if (size == error_mark_node)
1023 size = TYPE_SIZE (TREE_TYPE (new_decl));
1024 DECL_SIZE (new_decl) = size;
1026 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1027 if (size == error_mark_node)
1028 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1029 DECL_SIZE_UNIT (new_decl) = size;
1033 /* The callback for remap_decl. Search all containing contexts for a
1034 mapping of the variable; this avoids having to duplicate the splay
1035 tree ahead of time. We know a mapping doesn't already exist in the
1036 given context. Create new mappings to implement default semantics. */
1038 static tree
1039 omp_copy_decl (tree var, copy_body_data *cb)
1041 omp_context *ctx = (omp_context *) cb;
1042 tree new_var;
1044 if (TREE_CODE (var) == LABEL_DECL)
1046 new_var = create_artificial_label ();
1047 DECL_CONTEXT (new_var) = current_function_decl;
1048 insert_decl_map (&ctx->cb, var, new_var);
1049 return new_var;
1052 while (!is_taskreg_ctx (ctx))
1054 ctx = ctx->outer;
1055 if (ctx == NULL)
1056 return var;
1057 new_var = maybe_lookup_decl (var, ctx);
1058 if (new_var)
1059 return new_var;
1062 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1063 return var;
1065 return error_mark_node;
1069 /* Return the parallel region associated with STMT. */
1071 /* Debugging dumps for parallel regions. */
1072 void dump_omp_region (FILE *, struct omp_region *, int);
1073 void debug_omp_region (struct omp_region *);
1074 void debug_all_omp_regions (void);
1076 /* Dump the parallel region tree rooted at REGION. */
1078 void
1079 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1081 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1082 gimple_code_name[region->type]);
1084 if (region->inner)
1085 dump_omp_region (file, region->inner, indent + 4);
1087 if (region->cont)
1089 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1090 region->cont->index);
1093 if (region->exit)
1094 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1095 region->exit->index);
1096 else
1097 fprintf (file, "%*s[no exit marker]\n", indent, "");
1099 if (region->next)
1100 dump_omp_region (file, region->next, indent);
1103 void
1104 debug_omp_region (struct omp_region *region)
1106 dump_omp_region (stderr, region, 0);
1109 void
1110 debug_all_omp_regions (void)
1112 dump_omp_region (stderr, root_omp_region, 0);
1116 /* Create a new parallel region starting at STMT inside region PARENT. */
1118 struct omp_region *
1119 new_omp_region (basic_block bb, enum gimple_code type,
1120 struct omp_region *parent)
1122 struct omp_region *region = XCNEW (struct omp_region);
1124 region->outer = parent;
1125 region->entry = bb;
1126 region->type = type;
1128 if (parent)
1130 /* This is a nested region. Add it to the list of inner
1131 regions in PARENT. */
1132 region->next = parent->inner;
1133 parent->inner = region;
1135 else
1137 /* This is a toplevel region. Add it to the list of toplevel
1138 regions in ROOT_OMP_REGION. */
1139 region->next = root_omp_region;
1140 root_omp_region = region;
1143 return region;
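/* For instance (illustrative), building regions for

       #pragma omp parallel
         {
           #pragma omp for ...
           #pragma omp single ...
         }

   yields a GIMPLE_OMP_PARALLEL region on root_omp_region whose ->inner
   list holds the GIMPLE_OMP_FOR and GIMPLE_OMP_SINGLE regions, most
   recently created first, since new regions are pushed onto the head of
   the list.  */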
1146 /* Release the memory associated with the region tree rooted at REGION. */
1148 static void
1149 free_omp_region_1 (struct omp_region *region)
1151 struct omp_region *i, *n;
1153 for (i = region->inner; i ; i = n)
1155 n = i->next;
1156 free_omp_region_1 (i);
1159 free (region);
1162 /* Release the memory for the entire omp region tree. */
1164 void
1165 free_omp_regions (void)
1167 struct omp_region *r, *n;
1168 for (r = root_omp_region; r ; r = n)
1170 n = r->next;
1171 free_omp_region_1 (r);
1173 root_omp_region = NULL;
1177 /* Create a new context, with OUTER_CTX being the surrounding context. */
1179 static omp_context *
1180 new_omp_context (gimple stmt, omp_context *outer_ctx)
1182 omp_context *ctx = XCNEW (omp_context);
1184 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1185 (splay_tree_value) ctx);
1186 ctx->stmt = stmt;
1188 if (outer_ctx)
1190 ctx->outer = outer_ctx;
1191 ctx->cb = outer_ctx->cb;
1192 ctx->cb.block = NULL;
1193 ctx->depth = outer_ctx->depth + 1;
1195 else
1197 ctx->cb.src_fn = current_function_decl;
1198 ctx->cb.dst_fn = current_function_decl;
1199 ctx->cb.src_node = cgraph_node (current_function_decl);
1200 ctx->cb.dst_node = ctx->cb.src_node;
1201 ctx->cb.src_cfun = cfun;
1202 ctx->cb.copy_decl = omp_copy_decl;
1203 ctx->cb.eh_region = -1;
1204 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1205 ctx->depth = 1;
1208 ctx->cb.decl_map = pointer_map_create ();
1210 return ctx;
1213 static gimple_seq maybe_catch_exception (gimple_seq);
1215 /* Finalize task copyfn. */
1217 static void
1218 finalize_task_copyfn (gimple task_stmt)
1220 struct function *child_cfun;
1221 tree child_fn, old_fn;
1222 gimple_seq seq, new_seq;
1223 gimple bind;
1225 child_fn = gimple_omp_task_copy_fn (task_stmt);
1226 if (child_fn == NULL_TREE)
1227 return;
1229 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1231 /* Inform the callgraph about the new function. */
1232 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1233 = cfun->curr_properties;
1235 old_fn = current_function_decl;
1236 push_cfun (child_cfun);
1237 current_function_decl = child_fn;
1238 bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
1239 seq = gimple_seq_alloc ();
1240 gimple_seq_add_stmt (&seq, bind);
1241 new_seq = maybe_catch_exception (seq);
1242 if (new_seq != seq)
1244 bind = gimple_build_bind (NULL, new_seq, NULL);
1245 seq = gimple_seq_alloc ();
1246 gimple_seq_add_stmt (&seq, bind);
1248 gimple_set_body (child_fn, seq);
1249 pop_cfun ();
1250 current_function_decl = old_fn;
1252 cgraph_add_new_function (child_fn, false);
1255 /* Destroy an omp_context data structure. Called through the splay tree
1256 value delete callback. */
1258 static void
1259 delete_omp_context (splay_tree_value value)
1261 omp_context *ctx = (omp_context *) value;
1263 pointer_map_destroy (ctx->cb.decl_map);
1265 if (ctx->field_map)
1266 splay_tree_delete (ctx->field_map);
1267 if (ctx->sfield_map)
1268 splay_tree_delete (ctx->sfield_map);
1270 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it now,
1271 or it would produce corrupt debug information. */
1272 if (ctx->record_type)
1274 tree t;
1275 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1276 DECL_ABSTRACT_ORIGIN (t) = NULL;
1278 if (ctx->srecord_type)
1280 tree t;
1281 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
1282 DECL_ABSTRACT_ORIGIN (t) = NULL;
1285 if (is_task_ctx (ctx))
1286 finalize_task_copyfn (ctx->stmt);
1288 XDELETE (ctx);
1291 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1292 context. */
1294 static void
1295 fixup_child_record_type (omp_context *ctx)
1297 tree f, type = ctx->record_type;
1299 /* ??? It isn't sufficient to just call remap_type here, because
1300 variably_modified_type_p doesn't work the way we expect for
1301 record types. Testing each field for whether it needs remapping
1302 and creating a new record by hand works, however. */
1303 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
1304 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1305 break;
1306 if (f)
1308 tree name, new_fields = NULL;
1310 type = lang_hooks.types.make_type (RECORD_TYPE);
1311 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1312 name = build_decl (TYPE_DECL, name, type);
1313 TYPE_NAME (type) = name;
1315 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
1317 tree new_f = copy_node (f);
1318 DECL_CONTEXT (new_f) = type;
1319 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1320 TREE_CHAIN (new_f) = new_fields;
1321 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1322 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1323 &ctx->cb, NULL);
1324 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1325 &ctx->cb, NULL);
1326 new_fields = new_f;
1328 /* Arrange to be able to look up the receiver field
1329 given the sender field. */
1330 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1331 (splay_tree_value) new_f);
1333 TYPE_FIELDS (type) = nreverse (new_fields);
1334 layout_type (type);
1337 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1340 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1341 specified by CLAUSES. */
1343 static void
1344 scan_sharing_clauses (tree clauses, omp_context *ctx)
1346 tree c, decl;
1347 bool scan_array_reductions = false;
1349 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1351 bool by_ref;
1353 switch (OMP_CLAUSE_CODE (c))
1355 case OMP_CLAUSE_PRIVATE:
1356 decl = OMP_CLAUSE_DECL (c);
1357 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1358 goto do_private;
1359 else if (!is_variable_sized (decl))
1360 install_var_local (decl, ctx);
1361 break;
1363 case OMP_CLAUSE_SHARED:
1364 gcc_assert (is_taskreg_ctx (ctx));
1365 decl = OMP_CLAUSE_DECL (c);
1366 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1367 || !is_variable_sized (decl));
1368 /* Global variables don't need to be copied,
1369 the receiver side will use them directly. */
1370 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1371 break;
1372 by_ref = use_pointer_for_field (decl, ctx);
1373 if (! TREE_READONLY (decl)
1374 || TREE_ADDRESSABLE (decl)
1375 || by_ref
1376 || is_reference (decl))
1378 install_var_field (decl, by_ref, 3, ctx);
1379 install_var_local (decl, ctx);
1380 break;
1382 /* We don't need to copy const scalar vars back. */
1383 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1384 goto do_private;
1386 case OMP_CLAUSE_LASTPRIVATE:
1387 /* Let the corresponding firstprivate clause create
1388 the variable. */
1389 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1390 break;
1391 /* FALLTHRU */
1393 case OMP_CLAUSE_FIRSTPRIVATE:
1394 case OMP_CLAUSE_REDUCTION:
1395 decl = OMP_CLAUSE_DECL (c);
1396 do_private:
1397 if (is_variable_sized (decl))
1399 if (is_task_ctx (ctx))
1400 install_var_field (decl, false, 1, ctx);
1401 break;
1403 else if (is_taskreg_ctx (ctx))
1405 bool global
1406 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1407 by_ref = use_pointer_for_field (decl, NULL);
1409 if (is_task_ctx (ctx)
1410 && (global || by_ref || is_reference (decl)))
1412 install_var_field (decl, false, 1, ctx);
1413 if (!global)
1414 install_var_field (decl, by_ref, 2, ctx);
1416 else if (!global)
1417 install_var_field (decl, by_ref, 3, ctx);
1419 install_var_local (decl, ctx);
1420 break;
1422 case OMP_CLAUSE_COPYPRIVATE:
1423 if (ctx->outer)
1424 scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
1425 /* FALLTHRU */
1427 case OMP_CLAUSE_COPYIN:
1428 decl = OMP_CLAUSE_DECL (c);
1429 by_ref = use_pointer_for_field (decl, NULL);
1430 install_var_field (decl, by_ref, 3, ctx);
1431 break;
1433 case OMP_CLAUSE_DEFAULT:
1434 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1435 break;
1437 case OMP_CLAUSE_IF:
1438 case OMP_CLAUSE_NUM_THREADS:
1439 case OMP_CLAUSE_SCHEDULE:
1440 if (ctx->outer)
1441 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1442 break;
1444 case OMP_CLAUSE_NOWAIT:
1445 case OMP_CLAUSE_ORDERED:
1446 case OMP_CLAUSE_COLLAPSE:
1447 case OMP_CLAUSE_UNTIED:
1448 break;
1450 default:
1451 gcc_unreachable ();
1455 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1457 switch (OMP_CLAUSE_CODE (c))
1459 case OMP_CLAUSE_LASTPRIVATE:
1460 /* Let the corresponding firstprivate clause create
1461 the variable. */
1462 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1463 scan_array_reductions = true;
1464 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1465 break;
1466 /* FALLTHRU */
1468 case OMP_CLAUSE_PRIVATE:
1469 case OMP_CLAUSE_FIRSTPRIVATE:
1470 case OMP_CLAUSE_REDUCTION:
1471 decl = OMP_CLAUSE_DECL (c);
1472 if (is_variable_sized (decl))
1473 install_var_local (decl, ctx);
1474 fixup_remapped_decl (decl, ctx,
1475 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1476 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1477 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1478 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1479 scan_array_reductions = true;
1480 break;
1482 case OMP_CLAUSE_SHARED:
1483 decl = OMP_CLAUSE_DECL (c);
1484 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1485 fixup_remapped_decl (decl, ctx, false);
1486 break;
1488 case OMP_CLAUSE_COPYPRIVATE:
1489 case OMP_CLAUSE_COPYIN:
1490 case OMP_CLAUSE_DEFAULT:
1491 case OMP_CLAUSE_IF:
1492 case OMP_CLAUSE_NUM_THREADS:
1493 case OMP_CLAUSE_SCHEDULE:
1494 case OMP_CLAUSE_NOWAIT:
1495 case OMP_CLAUSE_ORDERED:
1496 case OMP_CLAUSE_COLLAPSE:
1497 case OMP_CLAUSE_UNTIED:
1498 break;
1500 default:
1501 gcc_unreachable ();
1505 if (scan_array_reductions)
1506 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1507 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1508 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1510 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1511 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1513 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1514 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1515 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1518 /* Create a new name for the omp child function. Returns an identifier. */
1520 static GTY(()) unsigned int tmp_ompfn_id_num;
1522 static tree
1523 create_omp_child_function_name (bool task_copy)
1525 tree name = DECL_ASSEMBLER_NAME (current_function_decl);
1526 size_t len = IDENTIFIER_LENGTH (name);
1527 char *tmp_name, *prefix;
1528 const char *suffix;
1530 suffix = task_copy ? "_omp_cpyfn" : "_omp_fn";
1531 prefix = XALLOCAVEC (char, len + strlen (suffix) + 1);
1532 memcpy (prefix, IDENTIFIER_POINTER (name), len);
1533 strcpy (prefix + len, suffix);
1534 #ifndef NO_DOT_IN_LABEL
1535 prefix[len] = '.';
1536 #elif !defined NO_DOLLAR_IN_LABEL
1537 prefix[len] = '$';
1538 #endif
1539 ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
1540 return get_identifier (tmp_name);
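/* Example of the generated names (illustrative): inside a function "bar"
   the outlined bodies are named along the lines of bar.omp_fn.0,
   bar.omp_fn.1, ... and task copy functions bar.omp_cpyfn.N, with '$'
   (or the original '_') replacing the dot on targets that do not allow
   dots in labels; cf. the 'bar.omp_fn.0' child function mentioned in
   the combined-parallel comment above.  */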
1543 /* Build a decl for the omp child function. It'll not contain a body
1544 yet, just the bare decl. */
1546 static void
1547 create_omp_child_function (omp_context *ctx, bool task_copy)
1549 tree decl, type, name, t;
1551 name = create_omp_child_function_name (task_copy);
1552 if (task_copy)
1553 type = build_function_type_list (void_type_node, ptr_type_node,
1554 ptr_type_node, NULL_TREE);
1555 else
1556 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1558 decl = build_decl (FUNCTION_DECL, name, type);
1559 decl = lang_hooks.decls.pushdecl (decl);
1561 if (!task_copy)
1562 ctx->cb.dst_fn = decl;
1563 else
1564 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1566 TREE_STATIC (decl) = 1;
1567 TREE_USED (decl) = 1;
1568 DECL_ARTIFICIAL (decl) = 1;
1569 DECL_IGNORED_P (decl) = 0;
1570 TREE_PUBLIC (decl) = 0;
1571 DECL_UNINLINABLE (decl) = 1;
1572 DECL_EXTERNAL (decl) = 0;
1573 DECL_CONTEXT (decl) = NULL_TREE;
1574 DECL_INITIAL (decl) = make_node (BLOCK);
1576 t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
1577 DECL_ARTIFICIAL (t) = 1;
1578 DECL_IGNORED_P (t) = 1;
1579 DECL_CONTEXT (t) = decl;
1580 DECL_RESULT (decl) = t;
1582 t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1583 DECL_ARTIFICIAL (t) = 1;
1584 DECL_ARG_TYPE (t) = ptr_type_node;
1585 DECL_CONTEXT (t) = current_function_decl;
1586 TREE_USED (t) = 1;
1587 DECL_ARGUMENTS (decl) = t;
1588 if (!task_copy)
1589 ctx->receiver_decl = t;
1590 else
1592 t = build_decl (PARM_DECL, get_identifier (".omp_data_o"),
1593 ptr_type_node);
1594 DECL_ARTIFICIAL (t) = 1;
1595 DECL_ARG_TYPE (t) = ptr_type_node;
1596 DECL_CONTEXT (t) = current_function_decl;
1597 TREE_USED (t) = 1;
1598 TREE_ADDRESSABLE (t) = 1;
1599 TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
1600 DECL_ARGUMENTS (decl) = t;
1603 /* Allocate memory for the function structure. The call to
1604 allocate_struct_function clobbers CFUN, so we need to restore
1605 it afterward. */
1606 push_struct_function (decl);
1607 DECL_SOURCE_LOCATION (decl) = gimple_location (ctx->stmt);
1608 cfun->function_end_locus = gimple_location (ctx->stmt);
1609 pop_cfun ();
1613 /* Scan an OpenMP parallel directive. */
1615 static void
1616 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1618 omp_context *ctx;
1619 tree name;
1620 gimple stmt = gsi_stmt (*gsi);
1622 /* Ignore parallel directives with empty bodies, unless there
1623 are copyin clauses. */
1624 if (optimize > 0
1625 && empty_body_p (gimple_omp_body (stmt))
1626 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1627 OMP_CLAUSE_COPYIN) == NULL)
1629 gsi_replace (gsi, gimple_build_nop (), false);
1630 return;
1633 ctx = new_omp_context (stmt, outer_ctx);
1634 if (taskreg_nesting_level > 1)
1635 ctx->is_nested = true;
1636 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1637 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1638 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1639 name = create_tmp_var_name (".omp_data_s");
1640 name = build_decl (TYPE_DECL, name, ctx->record_type);
1641 TYPE_NAME (ctx->record_type) = name;
1642 create_omp_child_function (ctx, false);
1643 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1645 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1646 scan_omp (gimple_omp_body (stmt), ctx);
1648 if (TYPE_FIELDS (ctx->record_type) == NULL)
1649 ctx->record_type = ctx->receiver_decl = NULL;
1650 else
1652 layout_type (ctx->record_type);
1653 fixup_child_record_type (ctx);
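/* As an illustration of the record built here: for

       #pragma omp parallel shared (a) firstprivate (b)

   where A is addressable and B is a plain scalar, scan_sharing_clauses
   creates a ".omp_data_s" record roughly like

       struct .omp_data_s { int *a; int b; };

   A is installed by reference (see use_pointer_for_field) and B by
   value; the child function accesses both through the .omp_data_i
   pointer.  */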
1657 /* Scan an OpenMP task directive. */
1659 static void
1660 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1662 omp_context *ctx;
1663 tree name, t;
1664 gimple stmt = gsi_stmt (*gsi);
1666 /* Ignore task directives with empty bodies. */
1667 if (optimize > 0
1668 && empty_body_p (gimple_omp_body (stmt)))
1670 gsi_replace (gsi, gimple_build_nop (), false);
1671 return;
1674 ctx = new_omp_context (stmt, outer_ctx);
1675 if (taskreg_nesting_level > 1)
1676 ctx->is_nested = true;
1677 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1678 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1679 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1680 name = create_tmp_var_name (".omp_data_s");
1681 name = build_decl (TYPE_DECL, name, ctx->record_type);
1682 TYPE_NAME (ctx->record_type) = name;
1683 create_omp_child_function (ctx, false);
1684 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1686 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1688 if (ctx->srecord_type)
1690 name = create_tmp_var_name (".omp_data_a");
1691 name = build_decl (TYPE_DECL, name, ctx->srecord_type);
1692 TYPE_NAME (ctx->srecord_type) = name;
1693 create_omp_child_function (ctx, true);
1696 scan_omp (gimple_omp_body (stmt), ctx);
1698 if (TYPE_FIELDS (ctx->record_type) == NULL)
1700 ctx->record_type = ctx->receiver_decl = NULL;
1701 t = build_int_cst (long_integer_type_node, 0);
1702 gimple_omp_task_set_arg_size (stmt, t);
1703 t = build_int_cst (long_integer_type_node, 1);
1704 gimple_omp_task_set_arg_align (stmt, t);
1706 else
1708 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1709 /* Move VLA fields to the end. */
1710 p = &TYPE_FIELDS (ctx->record_type);
1711 while (*p)
1712 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1713 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1715 *q = *p;
1716 *p = TREE_CHAIN (*p);
1717 TREE_CHAIN (*q) = NULL_TREE;
1718 q = &TREE_CHAIN (*q);
1720 else
1721 p = &TREE_CHAIN (*p);
1722 *p = vla_fields;
1723 layout_type (ctx->record_type);
1724 fixup_child_record_type (ctx);
1725 if (ctx->srecord_type)
1726 layout_type (ctx->srecord_type);
1727 t = fold_convert (long_integer_type_node,
1728 TYPE_SIZE_UNIT (ctx->record_type));
1729 gimple_omp_task_set_arg_size (stmt, t);
1730 t = build_int_cst (long_integer_type_node,
1731 TYPE_ALIGN_UNIT (ctx->record_type));
1732 gimple_omp_task_set_arg_align (stmt, t);
1737 /* Scan an OpenMP loop directive. */
1739 static void
1740 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1742 omp_context *ctx;
1743 size_t i;
1745 ctx = new_omp_context (stmt, outer_ctx);
1747 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1749 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1750 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1752 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1753 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1754 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1755 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1757 scan_omp (gimple_omp_body (stmt), ctx);
1760 /* Scan an OpenMP sections directive. */
1762 static void
1763 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1765 omp_context *ctx;
1767 ctx = new_omp_context (stmt, outer_ctx);
1768 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1769 scan_omp (gimple_omp_body (stmt), ctx);
1772 /* Scan an OpenMP single directive. */
1774 static void
1775 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1777 omp_context *ctx;
1778 tree name;
1780 ctx = new_omp_context (stmt, outer_ctx);
1781 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1782 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1783 name = create_tmp_var_name (".omp_copy_s");
1784 name = build_decl (TYPE_DECL, name, ctx->record_type);
1785 TYPE_NAME (ctx->record_type) = name;
1787 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1788 scan_omp (gimple_omp_body (stmt), ctx);
1790 if (TYPE_FIELDS (ctx->record_type) == NULL)
1791 ctx->record_type = NULL;
1792 else
1793 layout_type (ctx->record_type);
1797 /* Check OpenMP nesting restrictions. */
1798 static void
1799 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1801 switch (gimple_code (stmt))
1803 case GIMPLE_OMP_FOR:
1804 case GIMPLE_OMP_SECTIONS:
1805 case GIMPLE_OMP_SINGLE:
1806 case GIMPLE_CALL:
1807 for (; ctx != NULL; ctx = ctx->outer)
1808 switch (gimple_code (ctx->stmt))
1810 case GIMPLE_OMP_FOR:
1811 case GIMPLE_OMP_SECTIONS:
1812 case GIMPLE_OMP_SINGLE:
1813 case GIMPLE_OMP_ORDERED:
1814 case GIMPLE_OMP_MASTER:
1815 case GIMPLE_OMP_TASK:
1816 if (is_gimple_call (stmt))
1818 warning (0, "barrier region may not be closely nested inside "
1819 "of work-sharing, critical, ordered, master or "
1820 "explicit task region");
1821 return;
1823 warning (0, "work-sharing region may not be closely nested inside "
1824 "of work-sharing, critical, ordered, master or explicit "
1825 "task region");
1826 return;
1827 case GIMPLE_OMP_PARALLEL:
1828 return;
1829 default:
1830 break;
1832 break;
1833 case GIMPLE_OMP_MASTER:
1834 for (; ctx != NULL; ctx = ctx->outer)
1835 switch (gimple_code (ctx->stmt))
1837 case GIMPLE_OMP_FOR:
1838 case GIMPLE_OMP_SECTIONS:
1839 case GIMPLE_OMP_SINGLE:
1840 case GIMPLE_OMP_TASK:
1841 warning (0, "master region may not be closely nested inside "
1842 "of work-sharing or explicit task region");
1843 return;
1844 case GIMPLE_OMP_PARALLEL:
1845 return;
1846 default:
1847 break;
1849 break;
1850 case GIMPLE_OMP_ORDERED:
1851 for (; ctx != NULL; ctx = ctx->outer)
1852 switch (gimple_code (ctx->stmt))
1854 case GIMPLE_OMP_CRITICAL:
1855 case GIMPLE_OMP_TASK:
1856 warning (0, "ordered region may not be closely nested inside "
1857 "of critical or explicit task region");
1858 return;
1859 case GIMPLE_OMP_FOR:
1860 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1861 OMP_CLAUSE_ORDERED) == NULL)
1862 warning (0, "ordered region must be closely nested inside "
1863 "a loop region with an ordered clause");
1864 return;
1865 case GIMPLE_OMP_PARALLEL:
1866 return;
1867 default:
1868 break;
1870 break;
1871 case GIMPLE_OMP_CRITICAL:
1872 for (; ctx != NULL; ctx = ctx->outer)
1873 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1874 && (gimple_omp_critical_name (stmt)
1875 == gimple_omp_critical_name (ctx->stmt)))
1877 warning (0, "critical region may not be nested inside a critical "
1878 "region with the same name");
1879 return;
1881 break;
1882 default:
1883 break;
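/* Example of a violation diagnosed above (illustrative): a work-sharing
   construct closely nested in another work-sharing construct,

       #pragma omp for
       for (i = 0; i < n; i++)
         {
           #pragma omp for              <-- warned about
           for (j = 0; j < n; j++)
             ...
         }

   draws the "work-sharing region may not be closely nested ..." warning,
   whereas wrapping the inner loop in its own #pragma omp parallel starts
   a new nesting level and is accepted.  */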
1888 /* Helper function for scan_omp.
1890 Callback for walk_tree or operators in walk_gimple_stmt used to
1891 scan for OpenMP directives in TP. */
1893 static tree
1894 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1896 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1897 omp_context *ctx = (omp_context *) wi->info;
1898 tree t = *tp;
1900 switch (TREE_CODE (t))
1902 case VAR_DECL:
1903 case PARM_DECL:
1904 case LABEL_DECL:
1905 case RESULT_DECL:
1906 if (ctx)
1907 *tp = remap_decl (t, &ctx->cb);
1908 break;
1910 default:
1911 if (ctx && TYPE_P (t))
1912 *tp = remap_type (t, &ctx->cb);
1913 else if (!DECL_P (t))
1915 *walk_subtrees = 1;
1916 if (ctx)
1917 TREE_TYPE (t) = remap_type (TREE_TYPE (t), &ctx->cb);
1919 break;
1922 return NULL_TREE;
1926 /* Helper function for scan_omp.
1928 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1929 the current statement in GSI. */
1931 static tree
1932 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1933 struct walk_stmt_info *wi)
1935 gimple stmt = gsi_stmt (*gsi);
1936 omp_context *ctx = (omp_context *) wi->info;
1938 if (gimple_has_location (stmt))
1939 input_location = gimple_location (stmt);
1941 /* Check the OpenMP nesting restrictions. */
1942 if (ctx != NULL)
1944 if (is_gimple_omp (stmt))
1945 check_omp_nesting_restrictions (stmt, ctx);
1946 else if (is_gimple_call (stmt))
1948 tree fndecl = gimple_call_fndecl (stmt);
1949 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1950 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1951 check_omp_nesting_restrictions (stmt, ctx);
1955 *handled_ops_p = true;
1957 switch (gimple_code (stmt))
1959 case GIMPLE_OMP_PARALLEL:
1960 taskreg_nesting_level++;
1961 scan_omp_parallel (gsi, ctx);
1962 taskreg_nesting_level--;
1963 break;
1965 case GIMPLE_OMP_TASK:
1966 taskreg_nesting_level++;
1967 scan_omp_task (gsi, ctx);
1968 taskreg_nesting_level--;
1969 break;
1971 case GIMPLE_OMP_FOR:
1972 scan_omp_for (stmt, ctx);
1973 break;
1975 case GIMPLE_OMP_SECTIONS:
1976 scan_omp_sections (stmt, ctx);
1977 break;
1979 case GIMPLE_OMP_SINGLE:
1980 scan_omp_single (stmt, ctx);
1981 break;
1983 case GIMPLE_OMP_SECTION:
1984 case GIMPLE_OMP_MASTER:
1985 case GIMPLE_OMP_ORDERED:
1986 case GIMPLE_OMP_CRITICAL:
1987 ctx = new_omp_context (stmt, ctx);
1988 scan_omp (gimple_omp_body (stmt), ctx);
1989 break;
1991 case GIMPLE_BIND:
1993 tree var;
1995 *handled_ops_p = false;
1996 if (ctx)
1997 for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
1998 insert_decl_map (&ctx->cb, var, var);
2000 break;
2001 default:
2002 *handled_ops_p = false;
2003 break;
2006 return NULL_TREE;
2010 /* Scan all the statements starting at the current statement. CTX
2011 contains context information about the OpenMP directives and
2012 clauses found during the scan. */
2014 static void
2015 scan_omp (gimple_seq body, omp_context *ctx)
2017 location_t saved_location;
2018 struct walk_stmt_info wi;
2020 memset (&wi, 0, sizeof (wi));
2021 wi.info = ctx;
2022 wi.want_locations = true;
2024 saved_location = input_location;
2025 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2026 input_location = saved_location;
2029 /* Re-gimplification and code generation routines. */
2031 /* Build a call to GOMP_barrier. */
2033 static tree
2034 build_omp_barrier (void)
2036 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
2039 /* If a context was created for STMT when it was scanned, return it. */
2041 static omp_context *
2042 maybe_lookup_ctx (gimple stmt)
2044 splay_tree_node n;
2045 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2046 return n ? (omp_context *) n->value : NULL;
2050 /* Find the mapping for DECL in CTX or the immediately enclosing
2051 context that has a mapping for DECL.
2053 If CTX is a nested parallel directive, we may have to use the decl
2054 mappings created in CTX's parent context. Suppose that we have the
2055 following parallel nesting (variable UIDs shown for clarity):
2057 iD.1562 = 0;
2058 #omp parallel shared(iD.1562) -> outer parallel
2059 iD.1562 = iD.1562 + 1;
2061 #omp parallel shared (iD.1562) -> inner parallel
2062 iD.1562 = iD.1562 - 1;
2064 Each parallel structure will create a distinct .omp_data_s structure
2065 for copying iD.1562 in/out of the directive:
2067 outer parallel .omp_data_s.1.i -> iD.1562
2068 inner parallel .omp_data_s.2.i -> iD.1562
2070 A shared variable mapping will produce a copy-out operation before
2071 the parallel directive and a copy-in operation after it. So, in
2072 this case we would have:
2074 iD.1562 = 0;
2075 .omp_data_o.1.i = iD.1562;
2076 #omp parallel shared(iD.1562) -> outer parallel
2077 .omp_data_i.1 = &.omp_data_o.1
2078 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2080 .omp_data_o.2.i = iD.1562; -> **
2081 #omp parallel shared(iD.1562) -> inner parallel
2082 .omp_data_i.2 = &.omp_data_o.2
2083 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2086 ** This is a problem. The symbol iD.1562 cannot be referenced
2087 inside the body of the outer parallel region. But since we are
2088 emitting this copy operation while expanding the inner parallel
2089 directive, we need to access the CTX structure of the outer
2090 parallel directive to get the correct mapping:
2092 .omp_data_o.2.i = .omp_data_i.1->i
2094 Since there may be other workshare or parallel directives enclosing
2095 the parallel directive, it may be necessary to walk up the context
2096 parent chain. This is not a problem in general because nested
2097 parallelism happens only rarely. */
2099 static tree
2100 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2102 tree t;
2103 omp_context *up;
2105 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2106 t = maybe_lookup_decl (decl, up);
2108 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2110 return t ? t : decl;
2114 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2115 in outer contexts. */
2117 static tree
2118 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2120 tree t = NULL;
2121 omp_context *up;
2123 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2124 t = maybe_lookup_decl (decl, up);
2126 return t ? t : decl;
2130 /* Construct the initialization value for reduction CLAUSE. */
2132 tree
2133 omp_reduction_init (tree clause, tree type)
2135 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2137 case PLUS_EXPR:
2138 case MINUS_EXPR:
2139 case BIT_IOR_EXPR:
2140 case BIT_XOR_EXPR:
2141 case TRUTH_OR_EXPR:
2142 case TRUTH_ORIF_EXPR:
2143 case TRUTH_XOR_EXPR:
2144 case NE_EXPR:
2145 return fold_convert (type, integer_zero_node);
2147 case MULT_EXPR:
2148 case TRUTH_AND_EXPR:
2149 case TRUTH_ANDIF_EXPR:
2150 case EQ_EXPR:
2151 return fold_convert (type, integer_one_node);
2153 case BIT_AND_EXPR:
2154 return fold_convert (type, integer_minus_one_node);
2156 case MAX_EXPR:
2157 if (SCALAR_FLOAT_TYPE_P (type))
2159 REAL_VALUE_TYPE max, min;
2160 if (HONOR_INFINITIES (TYPE_MODE (type)))
2162 real_inf (&max);
2163 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2165 else
2166 real_maxval (&min, 1, TYPE_MODE (type));
2167 return build_real (type, min);
2169 else
2171 gcc_assert (INTEGRAL_TYPE_P (type));
2172 return TYPE_MIN_VALUE (type);
2175 case MIN_EXPR:
2176 if (SCALAR_FLOAT_TYPE_P (type))
2178 REAL_VALUE_TYPE max;
2179 if (HONOR_INFINITIES (TYPE_MODE (type)))
2180 real_inf (&max);
2181 else
2182 real_maxval (&max, 0, TYPE_MODE (type));
2183 return build_real (type, max);
2185 else
2187 gcc_assert (INTEGRAL_TYPE_P (type));
2188 return TYPE_MAX_VALUE (type);
2191 default:
2192 gcc_unreachable ();
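/* Purely illustrative summary of the initializers chosen above: the
   private reduction copy starts at the identity value of its operator,
   e.g. for an integer NEW_VAR

       PLUS_EXPR / BIT_IOR_EXPR / BIT_XOR_EXPR  ->  new_var = 0;
       MULT_EXPR                                ->  new_var = 1;
       BIT_AND_EXPR                             ->  new_var = -1;  (all bits)
       MAX_EXPR                                 ->  new_var = TYPE_MIN_VALUE;
       MIN_EXPR                                 ->  new_var = TYPE_MAX_VALUE;

   and for floating point MAX/MIN, -inf/+inf when the mode honors
   infinities, otherwise the most negative/positive finite value.  */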
2196 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2197 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2198 private variables. Initialization statements go in ILIST, while calls
2199 to destructors go in DLIST. */
2201 static void
2202 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2203 omp_context *ctx)
2205 gimple_stmt_iterator diter;
2206 tree c, dtor, copyin_seq, x, ptr;
2207 bool copyin_by_ref = false;
2208 bool lastprivate_firstprivate = false;
2209 int pass;
2211 *dlist = gimple_seq_alloc ();
2212 diter = gsi_start (*dlist);
2213 copyin_seq = NULL;
2215 /* Do all the fixed sized types in the first pass, and the variable sized
2216 types in the second pass. This makes sure that the scalar arguments to
2217 the variable sized types are processed before we use them in the
2218 variable sized operations. */
2219 for (pass = 0; pass < 2; ++pass)
2221 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2223 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2224 tree var, new_var;
2225 bool by_ref;
2227 switch (c_kind)
2229 case OMP_CLAUSE_PRIVATE:
2230 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2231 continue;
2232 break;
2233 case OMP_CLAUSE_SHARED:
2234 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2236 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2237 continue;
2239 case OMP_CLAUSE_FIRSTPRIVATE:
2240 case OMP_CLAUSE_COPYIN:
2241 case OMP_CLAUSE_REDUCTION:
2242 break;
2243 case OMP_CLAUSE_LASTPRIVATE:
2244 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2246 lastprivate_firstprivate = true;
2247 if (pass != 0)
2248 continue;
2250 break;
2251 default:
2252 continue;
2255 new_var = var = OMP_CLAUSE_DECL (c);
2256 if (c_kind != OMP_CLAUSE_COPYIN)
2257 new_var = lookup_decl (var, ctx);
2259 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2261 if (pass != 0)
2262 continue;
2264 else if (is_variable_sized (var))
2266 /* For variable sized types, we need to allocate the
2267 actual storage here. Call alloca and store the
2268 result in the pointer decl that we created elsewhere. */
2269 if (pass == 0)
2270 continue;
2272 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2274 gimple stmt;
2275 tree tmp;
2277 ptr = DECL_VALUE_EXPR (new_var);
2278 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2279 ptr = TREE_OPERAND (ptr, 0);
2280 gcc_assert (DECL_P (ptr));
2281 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2283 /* void *tmp = __builtin_alloca */
2284 stmt
2285 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2286 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2287 gimple_add_tmp_var (tmp);
2288 gimple_call_set_lhs (stmt, tmp);
2290 gimple_seq_add_stmt (ilist, stmt);
2292 x = fold_convert (TREE_TYPE (ptr), tmp);
2293 gimplify_assign (ptr, x, ilist);
2296 else if (is_reference (var))
2298 /* For references that are being privatized for Fortran,
2299 allocate new backing storage for the new pointer
2300 variable. This allows us to avoid rewriting all the
2301 code that expects a pointer into code that expects
2302 a direct variable. Note that this doesn't apply to
2303 C++, since reference types are disallowed in data
2304 sharing clauses there, except for NRV optimized
2305 return values. */
2306 if (pass == 0)
2307 continue;
2309 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2310 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2312 x = build_receiver_ref (var, false, ctx);
2313 x = build_fold_addr_expr (x);
2315 else if (TREE_CONSTANT (x))
2317 const char *name = NULL;
2318 if (DECL_NAME (var))
2319 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2321 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2322 name);
2323 gimple_add_tmp_var (x);
2324 TREE_ADDRESSABLE (x) = 1;
2325 x = build_fold_addr_expr (x);
2327 else
2329 x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2332 x = fold_convert (TREE_TYPE (new_var), x);
2333 gimplify_assign (new_var, x, ilist);
2335 new_var = build_fold_indirect_ref (new_var);
2337 else if (c_kind == OMP_CLAUSE_REDUCTION
2338 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2340 if (pass == 0)
2341 continue;
2343 else if (pass != 0)
2344 continue;
2346 switch (OMP_CLAUSE_CODE (c))
2348 case OMP_CLAUSE_SHARED:
2349 /* Shared global vars are just accessed directly. */
2350 if (is_global_var (new_var))
2351 break;
2352 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2353 needs to be delayed until after fixup_child_record_type so
2354 that we get the correct type during the dereference. */
2355 by_ref = use_pointer_for_field (var, ctx);
2356 x = build_receiver_ref (var, by_ref, ctx);
2357 SET_DECL_VALUE_EXPR (new_var, x);
2358 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2360 /* ??? If VAR is not passed by reference, and the variable
2361 hasn't been initialized yet, then we'll get a warning for
2362 the store into the omp_data_s structure. Ideally, we'd be
2363 able to notice this and not store anything at all, but
2364 we're generating code too early. Suppress the warning. */
2365 if (!by_ref)
2366 TREE_NO_WARNING (var) = 1;
2367 break;
2369 case OMP_CLAUSE_LASTPRIVATE:
2370 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2371 break;
2372 /* FALLTHRU */
2374 case OMP_CLAUSE_PRIVATE:
2375 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2376 x = build_outer_var_ref (var, ctx);
2377 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2379 if (is_task_ctx (ctx))
2380 x = build_receiver_ref (var, false, ctx);
2381 else
2382 x = build_outer_var_ref (var, ctx);
2384 else
2385 x = NULL;
2386 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2387 if (x)
2388 gimplify_and_add (x, ilist);
2389 /* FALLTHRU */
2391 do_dtor:
2392 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2393 if (x)
2395 gimple_seq tseq = NULL;
2397 dtor = x;
2398 gimplify_stmt (&dtor, &tseq);
2399 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2401 break;
2403 case OMP_CLAUSE_FIRSTPRIVATE:
2404 if (is_task_ctx (ctx))
2406 if (is_reference (var) || is_variable_sized (var))
2407 goto do_dtor;
2408 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2409 ctx))
2410 || use_pointer_for_field (var, NULL))
2412 x = build_receiver_ref (var, false, ctx);
2413 SET_DECL_VALUE_EXPR (new_var, x);
2414 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2415 goto do_dtor;
2418 x = build_outer_var_ref (var, ctx);
2419 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2420 gimplify_and_add (x, ilist);
2421 goto do_dtor;
2422 break;
2424 case OMP_CLAUSE_COPYIN:
2425 by_ref = use_pointer_for_field (var, NULL);
2426 x = build_receiver_ref (var, by_ref, ctx);
2427 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2428 append_to_statement_list (x, &copyin_seq);
2429 copyin_by_ref |= by_ref;
2430 break;
2432 case OMP_CLAUSE_REDUCTION:
2433 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2435 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2436 x = build_outer_var_ref (var, ctx);
2438 if (is_reference (var))
2439 x = build_fold_addr_expr (x);
2440 SET_DECL_VALUE_EXPR (placeholder, x);
2441 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2442 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2443 gimple_seq_add_seq (ilist,
2444 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2445 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2446 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2448 else
2450 x = omp_reduction_init (c, TREE_TYPE (new_var));
2451 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2452 gimplify_assign (new_var, x, ilist);
2454 break;
2456 default:
2457 gcc_unreachable ();
2462 /* The copyin sequence is not to be executed by the main thread, since
2463 that would result in self-copies. That might not be observable
2464 for scalar assignments, but it certainly is for C++ operator=. */
2465 if (copyin_seq)
2467 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2468 x = build2 (NE_EXPR, boolean_type_node, x,
2469 build_int_cst (TREE_TYPE (x), 0));
2470 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2471 gimplify_and_add (x, ilist);
2474 /* If any copyin variable is passed by reference, we must ensure the
2475 master thread doesn't modify it before it is copied over in all
2476 threads. Similarly for variables in both firstprivate and
2477 lastprivate clauses we need to ensure the lastprivate copying
2478 happens after firstprivate copying in all threads. */
2479 if (copyin_by_ref || lastprivate_firstprivate)
2480 gimplify_and_add (build_omp_barrier (), ilist);
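/* Purely illustrative sketch of the receiver-side code built above
   (names are made up; the pass emits GIMPLE, not source): for

       int x = 1;
       #pragma omp parallel firstprivate (x)
         use (x);

   the child function roughly starts with

       x.priv = .omp_data_i->x;     // copy-construct the private copy
       use (x.priv);

   and a GOMP_barrier () call is appended when a copyin variable is
   passed by reference or a lastprivate variable is also firstprivate,
   as explained in the comment just above.  */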
2484 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2485 both parallel and workshare constructs. PREDICATE may be NULL if it's
2486 always true. */
2488 static void
2489 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2490 omp_context *ctx)
2492 tree x, c, label = NULL;
2493 bool par_clauses = false;
2495 /* Early exit if there are no lastprivate clauses. */
2496 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2497 if (clauses == NULL)
2499 /* If this was a workshare clause, see if it had been combined
2500 with its parallel. In that case, look for the clauses on the
2501 parallel statement itself. */
2502 if (is_parallel_ctx (ctx))
2503 return;
2505 ctx = ctx->outer;
2506 if (ctx == NULL || !is_parallel_ctx (ctx))
2507 return;
2509 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2510 OMP_CLAUSE_LASTPRIVATE);
2511 if (clauses == NULL)
2512 return;
2513 par_clauses = true;
2516 if (predicate)
2518 gimple stmt;
2519 tree label_true, arm1, arm2;
2521 label = create_artificial_label ();
2522 label_true = create_artificial_label ();
2523 arm1 = TREE_OPERAND (predicate, 0);
2524 arm2 = TREE_OPERAND (predicate, 1);
2525 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2526 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2527 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2528 label_true, label);
2529 gimple_seq_add_stmt (stmt_list, stmt);
2530 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2533 for (c = clauses; c ;)
2535 tree var, new_var;
2537 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2539 var = OMP_CLAUSE_DECL (c);
2540 new_var = lookup_decl (var, ctx);
2542 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2544 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2545 gimple_seq_add_seq (stmt_list,
2546 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2548 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2550 x = build_outer_var_ref (var, ctx);
2551 if (is_reference (var))
2552 new_var = build_fold_indirect_ref (new_var);
2553 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2554 gimplify_and_add (x, stmt_list);
2556 c = OMP_CLAUSE_CHAIN (c);
2557 if (c == NULL && !par_clauses)
2559 /* If this was a workshare clause, see if it had been combined
2560 with its parallel. In that case, continue looking for the
2561 clauses also on the parallel statement itself. */
2562 if (is_parallel_ctx (ctx))
2563 break;
2565 ctx = ctx->outer;
2566 if (ctx == NULL || !is_parallel_ctx (ctx))
2567 break;
2569 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2570 OMP_CLAUSE_LASTPRIVATE);
2571 par_clauses = true;
2575 if (label)
2576 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
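/* Purely illustrative sketch: for

       #pragma omp parallel for lastprivate (x)
       for (i = 0; i < n; i++)
         x = f (i);

   the copy-back built above is guarded by PREDICATE, which the callers
   arrange to be true only in the thread that executed the sequentially
   last iteration, roughly

       if (PREDICATE) { x = x.priv; }

   where x.priv stands for the privatized copy (made-up name).  */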
2580 /* Generate code to implement the REDUCTION clauses. */
2582 static void
2583 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2585 gimple_seq sub_seq = NULL;
2586 gimple stmt;
2587 tree x, c;
2588 int count = 0;
2590 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2591 update in that case; otherwise use a lock. */
2592 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2593 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2595 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2597 /* Never use OMP_ATOMIC for array reductions. */
2598 count = -1;
2599 break;
2601 count++;
2604 if (count == 0)
2605 return;
2607 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2609 tree var, ref, new_var;
2610 enum tree_code code;
2612 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2613 continue;
2615 var = OMP_CLAUSE_DECL (c);
2616 new_var = lookup_decl (var, ctx);
2617 if (is_reference (var))
2618 new_var = build_fold_indirect_ref (new_var);
2619 ref = build_outer_var_ref (var, ctx);
2620 code = OMP_CLAUSE_REDUCTION_CODE (c);
2622 /* reduction(-:var) sums up the partial results, so it acts
2623 identically to reduction(+:var). */
2624 if (code == MINUS_EXPR)
2625 code = PLUS_EXPR;
2627 if (count == 1)
2629 tree addr = build_fold_addr_expr (ref);
2631 addr = save_expr (addr);
2632 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2633 x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
2634 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2635 gimplify_and_add (x, stmt_seqp);
2636 return;
2639 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2641 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2643 if (is_reference (var))
2644 ref = build_fold_addr_expr (ref);
2645 SET_DECL_VALUE_EXPR (placeholder, ref);
2646 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2647 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2648 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2649 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2650 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2652 else
2654 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2655 ref = build_outer_var_ref (var, ctx);
2656 gimplify_assign (ref, x, &sub_seq);
2660 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2661 gimple_seq_add_stmt (stmt_seqp, stmt);
2663 gimple_seq_add_seq (stmt_seqp, sub_seq);
2665 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2666 gimple_seq_add_stmt (stmt_seqp, stmt);
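/* Purely illustrative sketch: with a single scalar clause such as
   reduction (+:s) the merge emitted above is an atomic update,
   roughly

       #pragma omp atomic
       s = s + s.priv;

   while several reduction clauses (or a placeholder/array reduction)
   are instead merged between GOMP_atomic_start () and
   GOMP_atomic_end ().  s.priv is a made-up name for the private copy.  */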
2670 /* Generate code to implement the COPYPRIVATE clauses. */
2672 static void
2673 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2674 omp_context *ctx)
2676 tree c;
2678 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2680 tree var, ref, x;
2681 bool by_ref;
2683 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2684 continue;
2686 var = OMP_CLAUSE_DECL (c);
2687 by_ref = use_pointer_for_field (var, NULL);
2689 ref = build_sender_ref (var, ctx);
2690 x = lookup_decl_in_outer_ctx (var, ctx);
2691 x = by_ref ? build_fold_addr_expr (x) : x;
2692 gimplify_assign (ref, x, slist);
2694 ref = build_receiver_ref (var, by_ref, ctx);
2695 if (is_reference (var))
2697 ref = build_fold_indirect_ref (ref);
2698 var = build_fold_indirect_ref (var);
2700 x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
2701 gimplify_and_add (x, rlist);
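/* Purely illustrative sketch: for

       #pragma omp single copyprivate (x)
         x = compute ();

   SLIST (run by the thread that executed the single region) roughly gets
   .omp_data_o.x = x (or = &x when passed by reference), while RLIST,
   run by the other threads, assigns x = .omp_data_i->x back.  */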
2706 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2707 and REDUCTION from the sender (aka parent) side. */
2709 static void
2710 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2711 omp_context *ctx)
2713 tree c;
2715 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2717 tree val, ref, x, var;
2718 bool by_ref, do_in = false, do_out = false;
2720 switch (OMP_CLAUSE_CODE (c))
2722 case OMP_CLAUSE_PRIVATE:
2723 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2724 break;
2725 continue;
2726 case OMP_CLAUSE_FIRSTPRIVATE:
2727 case OMP_CLAUSE_COPYIN:
2728 case OMP_CLAUSE_LASTPRIVATE:
2729 case OMP_CLAUSE_REDUCTION:
2730 break;
2731 default:
2732 continue;
2735 val = OMP_CLAUSE_DECL (c);
2736 var = lookup_decl_in_outer_ctx (val, ctx);
2738 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2739 && is_global_var (var))
2740 continue;
2741 if (is_variable_sized (val))
2742 continue;
2743 by_ref = use_pointer_for_field (val, NULL);
2745 switch (OMP_CLAUSE_CODE (c))
2747 case OMP_CLAUSE_PRIVATE:
2748 case OMP_CLAUSE_FIRSTPRIVATE:
2749 case OMP_CLAUSE_COPYIN:
2750 do_in = true;
2751 break;
2753 case OMP_CLAUSE_LASTPRIVATE:
2754 if (by_ref || is_reference (val))
2756 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2757 continue;
2758 do_in = true;
2760 else
2762 do_out = true;
2763 if (lang_hooks.decls.omp_private_outer_ref (val))
2764 do_in = true;
2766 break;
2768 case OMP_CLAUSE_REDUCTION:
2769 do_in = true;
2770 do_out = !(by_ref || is_reference (val));
2771 break;
2773 default:
2774 gcc_unreachable ();
2777 if (do_in)
2779 ref = build_sender_ref (val, ctx);
2780 x = by_ref ? build_fold_addr_expr (var) : var;
2781 gimplify_assign (ref, x, ilist);
2782 if (is_task_ctx (ctx))
2783 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2786 if (do_out)
2788 ref = build_sender_ref (val, ctx);
2789 gimplify_assign (var, ref, olist);
2794 /* Generate code to implement SHARED from the sender (aka parent)
2795 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2796 list things that got automatically shared. */
2798 static void
2799 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2801 tree var, ovar, nvar, f, x, record_type;
2803 if (ctx->record_type == NULL)
2804 return;
2806 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2807 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
2809 ovar = DECL_ABSTRACT_ORIGIN (f);
2810 nvar = maybe_lookup_decl (ovar, ctx);
2811 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2812 continue;
2814 /* If CTX is a nested parallel directive, find the immediately
2815 enclosing parallel or workshare construct that contains a
2816 mapping for OVAR. */
2817 var = lookup_decl_in_outer_ctx (ovar, ctx);
2819 if (use_pointer_for_field (ovar, ctx))
2821 x = build_sender_ref (ovar, ctx);
2822 var = build_fold_addr_expr (var);
2823 gimplify_assign (x, var, ilist);
2825 else
2827 x = build_sender_ref (ovar, ctx);
2828 gimplify_assign (x, var, ilist);
2830 if (!TREE_READONLY (var)
2831 /* We don't need to receive a new reference to a result
2832 or parm decl. In fact we must not store to it, as doing so
2833 would invalidate any pending RSO (return slot optimization)
2834 and generate wrong gimple during inlining. */
2835 && !((TREE_CODE (var) == RESULT_DECL
2836 || TREE_CODE (var) == PARM_DECL)
2837 && DECL_BY_REFERENCE (var)))
2839 x = build_sender_ref (ovar, ctx);
2840 gimplify_assign (var, x, olist);
2847 /* A convenience function to build an empty GIMPLE_COND with just the
2848 condition. */
2850 static gimple
2851 gimple_build_cond_empty (tree cond)
2853 enum tree_code pred_code;
2854 tree lhs, rhs;
2856 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2857 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2861 /* Build the function calls to GOMP_parallel_start etc to actually
2862 generate the parallel operation. REGION is the parallel region
2863 being expanded. BB is the block where the code is to be inserted.
2864 WS_ARGS will be set if this is a call to a combined parallel+workshare
2865 construct; it contains the list of additional arguments needed by
2866 the workshare construct. */
2868 static void
2869 expand_parallel_call (struct omp_region *region, basic_block bb,
2870 gimple entry_stmt, tree ws_args)
2872 tree t, t1, t2, val, cond, c, clauses;
2873 gimple_stmt_iterator gsi;
2874 gimple stmt;
2875 int start_ix;
2877 clauses = gimple_omp_parallel_clauses (entry_stmt);
2879 /* Determine what flavor of GOMP_parallel_start we will be
2880 emitting. */
2881 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2882 if (is_combined_parallel (region))
2884 switch (region->inner->type)
2886 case GIMPLE_OMP_FOR:
2887 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2888 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2889 + (region->inner->sched_kind
2890 == OMP_CLAUSE_SCHEDULE_RUNTIME
2891 ? 3 : region->inner->sched_kind);
2892 break;
2893 case GIMPLE_OMP_SECTIONS:
2894 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2895 break;
2896 default:
2897 gcc_unreachable ();
2901 /* By default, the value of NUM_THREADS is zero (selected at run time)
2902 and there is no conditional. */
2903 cond = NULL_TREE;
2904 val = build_int_cst (unsigned_type_node, 0);
2906 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2907 if (c)
2908 cond = OMP_CLAUSE_IF_EXPR (c);
2910 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2911 if (c)
2912 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2914 /* Ensure 'val' is of the correct type. */
2915 val = fold_convert (unsigned_type_node, val);
2917 /* If we found the clause 'if (cond)', build either
2918 (cond != 0) or (cond ? val : 1u). */
2919 if (cond)
2921 gimple_stmt_iterator gsi;
2923 cond = gimple_boolify (cond);
2925 if (integer_zerop (val))
2926 val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
2927 build_int_cst (TREE_TYPE (cond), 0));
2928 else
2930 basic_block cond_bb, then_bb, else_bb;
2931 edge e, e_then, e_else;
2932 tree tmp_then, tmp_else, tmp_join, tmp_var;
2934 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2935 if (gimple_in_ssa_p (cfun))
2937 tmp_then = make_ssa_name (tmp_var, NULL);
2938 tmp_else = make_ssa_name (tmp_var, NULL);
2939 tmp_join = make_ssa_name (tmp_var, NULL);
2941 else
2943 tmp_then = tmp_var;
2944 tmp_else = tmp_var;
2945 tmp_join = tmp_var;
2948 e = split_block (bb, NULL);
2949 cond_bb = e->src;
2950 bb = e->dest;
2951 remove_edge (e);
2953 then_bb = create_empty_bb (cond_bb);
2954 else_bb = create_empty_bb (then_bb);
2955 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2956 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
2958 stmt = gimple_build_cond_empty (cond);
2959 gsi = gsi_start_bb (cond_bb);
2960 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2962 gsi = gsi_start_bb (then_bb);
2963 stmt = gimple_build_assign (tmp_then, val);
2964 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2966 gsi = gsi_start_bb (else_bb);
2967 stmt = gimple_build_assign
2968 (tmp_else, build_int_cst (unsigned_type_node, 1));
2969 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2971 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
2972 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
2973 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
2974 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
2976 if (gimple_in_ssa_p (cfun))
2978 gimple phi = create_phi_node (tmp_join, bb);
2979 SSA_NAME_DEF_STMT (tmp_join) = phi;
2980 add_phi_arg (phi, tmp_then, e_then);
2981 add_phi_arg (phi, tmp_else, e_else);
2984 val = tmp_join;
2987 gsi = gsi_start_bb (bb);
2988 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
2989 false, GSI_CONTINUE_LINKING);
2992 gsi = gsi_last_bb (bb);
2993 t = gimple_omp_parallel_data_arg (entry_stmt);
2994 if (t == NULL)
2995 t1 = null_pointer_node;
2996 else
2997 t1 = build_fold_addr_expr (t);
2998 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3000 if (ws_args)
3002 tree args = tree_cons (NULL, t2,
3003 tree_cons (NULL, t1,
3004 tree_cons (NULL, val, ws_args)));
3005 t = build_function_call_expr (built_in_decls[start_ix], args);
3007 else
3008 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
3010 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3011 false, GSI_CONTINUE_LINKING);
3013 t = gimple_omp_parallel_data_arg (entry_stmt);
3014 if (t == NULL)
3015 t = null_pointer_node;
3016 else
3017 t = build_fold_addr_expr (t);
3018 t = build_call_expr (gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3019 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3020 false, GSI_CONTINUE_LINKING);
3022 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3023 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3024 false, GSI_CONTINUE_LINKING);
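/* Purely illustrative sketch of the calls built above: for

       #pragma omp parallel num_threads (4)
         body ();

   the emitted sequence is roughly

       GOMP_parallel_start (__body_fn, &.omp_data_o, 4);
       __body_fn (&.omp_data_o);
       GOMP_parallel_end ();

   where __body_fn is a made-up name for the outlined child function; a
   combined parallel loop or sections construct uses one of the
   GOMP_parallel_*_start entry points instead.  */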
3028 /* Build the function call to GOMP_task to actually
3029 generate the task operation. BB is the block where the code is to be inserted. */
3031 static void
3032 expand_task_call (basic_block bb, gimple entry_stmt)
3034 tree t, t1, t2, t3, flags, cond, c, clauses;
3035 gimple_stmt_iterator gsi;
3037 clauses = gimple_omp_task_clauses (entry_stmt);
3039 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3040 if (c)
3041 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3042 else
3043 cond = boolean_true_node;
3045 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3046 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3048 gsi = gsi_last_bb (bb);
3049 t = gimple_omp_task_data_arg (entry_stmt);
3050 if (t == NULL)
3051 t2 = null_pointer_node;
3052 else
3053 t2 = build_fold_addr_expr (t);
3054 t1 = build_fold_addr_expr (gimple_omp_task_child_fn (entry_stmt));
3055 t = gimple_omp_task_copy_fn (entry_stmt);
3056 if (t == NULL)
3057 t3 = null_pointer_node;
3058 else
3059 t3 = build_fold_addr_expr (t);
3061 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3062 gimple_omp_task_arg_size (entry_stmt),
3063 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3065 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3066 false, GSI_CONTINUE_LINKING);
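/* Purely illustrative sketch: for  #pragma omp task  the call built
   above is roughly

       GOMP_task (__task_fn, &.omp_data_o, __copy_fn,
                  arg_size, arg_align, if_cond, flags);

   where flags is 1 when the untied clause is present and 0 otherwise,
   and __task_fn/__copy_fn are made-up names for the outlined task body
   and the task firstprivate copy function.  */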
3070 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3071 catch handler and return it. This prevents programs from violating the
3072 structured block semantics with throws. */
3074 static gimple_seq
3075 maybe_catch_exception (gimple_seq body)
3077 gimple f, t;
3079 if (!flag_exceptions)
3080 return body;
3082 if (lang_protect_cleanup_actions)
3083 t = lang_protect_cleanup_actions ();
3084 else
3085 t = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
3087 f = gimple_build_eh_filter (NULL, gimple_seq_alloc_with_stmt (t));
3088 gimple_eh_filter_set_must_not_throw (f, true);
3090 t = gimple_build_try (body, gimple_seq_alloc_with_stmt (f),
3091 GIMPLE_TRY_CATCH);
3093 return gimple_seq_alloc_with_stmt (t);
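/* Purely illustrative sketch: with -fexceptions the wrapping above
   behaves like

       try { BODY } catch (...) { std::terminate (); }

   in C++ (via lang_protect_cleanup_actions), or calls __builtin_trap ()
   for languages without a cleanup hook, so an exception cannot escape
   the structured block.  */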
3096 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
3098 static tree
3099 list2chain (tree list)
3101 tree t;
3103 for (t = list; t; t = TREE_CHAIN (t))
3105 tree var = TREE_VALUE (t);
3106 if (TREE_CHAIN (t))
3107 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
3108 else
3109 TREE_CHAIN (var) = NULL_TREE;
3112 return list ? TREE_VALUE (list) : NULL_TREE;
3116 /* Remove barriers in REGION->EXIT's block. Note that this is only
3117 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3118 is an implicit barrier, any barrier that a workshare inside the
3119 GIMPLE_OMP_PARALLEL left at the end of the GIMPLE_OMP_PARALLEL
3120 region can now be removed. */
3122 static void
3123 remove_exit_barrier (struct omp_region *region)
3125 gimple_stmt_iterator gsi;
3126 basic_block exit_bb;
3127 edge_iterator ei;
3128 edge e;
3129 gimple stmt;
3130 int any_addressable_vars = -1;
3132 exit_bb = region->exit;
3134 /* If the parallel region doesn't return, we don't have a REGION->EXIT
3135 block at all. */
3136 if (! exit_bb)
3137 return;
3139 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3140 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3141 statements that can appear in between are extremely limited -- no
3142 memory operations at all. Here, we allow nothing at all, so the
3143 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3144 gsi = gsi_last_bb (exit_bb);
3145 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3146 gsi_prev (&gsi);
3147 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3148 return;
3150 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3152 gsi = gsi_last_bb (e->src);
3153 if (gsi_end_p (gsi))
3154 continue;
3155 stmt = gsi_stmt (gsi);
3156 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3157 && !gimple_omp_return_nowait_p (stmt))
3159 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3160 in many cases. If there could be tasks queued, the barrier
3161 might be needed to let the tasks run before some local
3162 variable of the parallel that the task uses as shared
3163 runs out of scope. The task can be spawned either
3164 from within the current function (this would be easy to check)
3165 or from some function it calls and gets passed an address
3166 of such a variable. */
3167 if (any_addressable_vars < 0)
3169 gimple parallel_stmt = last_stmt (region->entry);
3170 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3171 tree local_decls = DECL_STRUCT_FUNCTION (child_fun)->local_decls;
3172 tree block;
3174 any_addressable_vars = 0;
3175 for (; local_decls; local_decls = TREE_CHAIN (local_decls))
3176 if (TREE_ADDRESSABLE (TREE_VALUE (local_decls)))
3178 any_addressable_vars = 1;
3179 break;
3181 for (block = gimple_block (stmt);
3182 !any_addressable_vars
3183 && block
3184 && TREE_CODE (block) == BLOCK;
3185 block = BLOCK_SUPERCONTEXT (block))
3187 for (local_decls = BLOCK_VARS (block);
3188 local_decls;
3189 local_decls = TREE_CHAIN (local_decls))
3190 if (TREE_ADDRESSABLE (local_decls))
3192 any_addressable_vars = 1;
3193 break;
3195 if (block == gimple_block (parallel_stmt))
3196 break;
3199 if (!any_addressable_vars)
3200 gimple_omp_return_set_nowait (stmt);
3205 static void
3206 remove_exit_barriers (struct omp_region *region)
3208 if (region->type == GIMPLE_OMP_PARALLEL)
3209 remove_exit_barrier (region);
3211 if (region->inner)
3213 region = region->inner;
3214 remove_exit_barriers (region);
3215 while (region->next)
3217 region = region->next;
3218 remove_exit_barriers (region);
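/* Purely illustrative example of the optimization above: in

       #pragma omp parallel
         {
       #pragma omp for
           for (i = 0; i < n; i++)
             a[i] = i;            // implicit barrier after the loop
         }                        // implicit barrier at parallel end

   the loop's barrier is redundant with the one ending the parallel, so
   its GIMPLE_OMP_RETURN is marked nowait -- unless queued tasks might
   still reference addressable locals of the parallel body.  */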
3223 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3224 calls. These can't be declared as const functions, but
3225 within one parallel body they are constant, so they can be
3226 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3227 which are declared const. Similarly for a task body, except
3228 that in an untied task omp_get_thread_num () can change at any task
3229 scheduling point. */
3231 static void
3232 optimize_omp_library_calls (gimple entry_stmt)
3234 basic_block bb;
3235 gimple_stmt_iterator gsi;
3236 tree thr_num_id
3237 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3238 tree num_thr_id
3239 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3240 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3241 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3242 OMP_CLAUSE_UNTIED) != NULL);
3244 FOR_EACH_BB (bb)
3245 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3247 gimple call = gsi_stmt (gsi);
3248 tree decl;
3250 if (is_gimple_call (call)
3251 && (decl = gimple_call_fndecl (call))
3252 && DECL_EXTERNAL (decl)
3253 && TREE_PUBLIC (decl)
3254 && DECL_INITIAL (decl) == NULL)
3256 tree built_in;
3258 if (DECL_NAME (decl) == thr_num_id)
3260 /* In #pragma omp task untied omp_get_thread_num () can change
3261 during the execution of the task region. */
3262 if (untied_task)
3263 continue;
3264 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3266 else if (DECL_NAME (decl) == num_thr_id)
3267 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3268 else
3269 continue;
3271 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3272 || gimple_call_num_args (call) != 0)
3273 continue;
3275 if (flag_exceptions && !TREE_NOTHROW (decl))
3276 continue;
3278 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3279 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
3280 != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
3281 continue;
3283 gimple_call_set_fndecl (call, built_in);
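/* Purely illustrative example: inside a parallel body (or a tied task)
   a library call such as

       nthr = omp_get_num_threads ();

   is redirected to the const builtin

       nthr = __builtin_omp_get_num_threads ();

   so later passes may CSE repeated queries within that body.  */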
3288 /* Expand the OpenMP parallel or task directive starting at REGION. */
3290 static void
3291 expand_omp_taskreg (struct omp_region *region)
3293 basic_block entry_bb, exit_bb, new_bb;
3294 struct function *child_cfun;
3295 tree child_fn, block, t, ws_args, *tp;
3296 tree save_current;
3297 gimple_stmt_iterator gsi;
3298 gimple entry_stmt, stmt;
3299 edge e;
3301 entry_stmt = last_stmt (region->entry);
3302 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3303 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3304 /* If this function has already been instrumented, make sure
3305 the child function isn't instrumented again. */
3306 child_cfun->after_tree_profile = cfun->after_tree_profile;
3308 entry_bb = region->entry;
3309 exit_bb = region->exit;
3311 if (is_combined_parallel (region))
3312 ws_args = region->ws_args;
3313 else
3314 ws_args = NULL_TREE;
3316 if (child_cfun->cfg)
3318 /* Due to inlining, it may happen that we have already outlined
3319 the region, in which case all we need to do is make the
3320 sub-graph unreachable and emit the parallel call. */
3321 edge entry_succ_e, exit_succ_e;
3322 gimple_stmt_iterator gsi;
3324 entry_succ_e = single_succ_edge (entry_bb);
3326 gsi = gsi_last_bb (entry_bb);
3327 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3328 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3329 gsi_remove (&gsi, true);
3331 new_bb = entry_bb;
3332 if (exit_bb)
3334 exit_succ_e = single_succ_edge (exit_bb);
3335 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3337 remove_edge_and_dominated_blocks (entry_succ_e);
3339 else
3341 /* If the parallel region needs data sent from the parent
3342 function, then the very first statement (except possible
3343 tree profile counter updates) of the parallel body
3344 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3345 &.OMP_DATA_O is passed as an argument to the child function,
3346 we need to replace it with the argument as seen by the child
3347 function.
3349 In most cases, this will end up being the identity assignment
3350 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3351 a function call that has been inlined, the original PARM_DECL
3352 .OMP_DATA_I may have been converted into a different local
3353 variable. In that case, we need to keep the assignment. */
3354 if (gimple_omp_taskreg_data_arg (entry_stmt))
3356 basic_block entry_succ_bb = single_succ (entry_bb);
3357 gimple_stmt_iterator gsi;
3358 tree arg, narg;
3359 gimple parcopy_stmt = NULL;
3361 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3363 gimple stmt;
3365 gcc_assert (!gsi_end_p (gsi));
3366 stmt = gsi_stmt (gsi);
3367 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3368 continue;
3370 if (gimple_num_ops (stmt) == 2)
3372 tree arg = gimple_assign_rhs1 (stmt);
3374 /* We're ignoring the subcode because we're
3375 effectively doing a STRIP_NOPS. */
3377 if (TREE_CODE (arg) == ADDR_EXPR
3378 && TREE_OPERAND (arg, 0)
3379 == gimple_omp_taskreg_data_arg (entry_stmt))
3381 parcopy_stmt = stmt;
3382 break;
3387 gcc_assert (parcopy_stmt != NULL);
3388 arg = DECL_ARGUMENTS (child_fn);
3390 if (!gimple_in_ssa_p (cfun))
3392 if (gimple_assign_lhs (parcopy_stmt) == arg)
3393 gsi_remove (&gsi, true);
3394 else
3396 /* ?? Is setting the subcode really necessary ?? */
3397 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3398 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3401 else
3403 /* If we are in ssa form, we must load the value from the default
3404 definition of the argument. That should not be defined now,
3405 since the argument is not used uninitialized. */
3406 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3407 narg = make_ssa_name (arg, gimple_build_nop ());
3408 set_default_def (arg, narg);
3409 /* ?? Is setting the subcode really necessary ?? */
3410 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3411 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3412 update_stmt (parcopy_stmt);
3416 /* Declare local variables needed in CHILD_CFUN. */
3417 block = DECL_INITIAL (child_fn);
3418 BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
3419 /* The gimplifier could record temporaries in parallel/task block
3420 rather than in containing function's local_decls chain,
3421 which would mean cgraph missed finalizing them. Do it now. */
3422 for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
3423 if (TREE_CODE (t) == VAR_DECL
3424 && TREE_STATIC (t)
3425 && !DECL_EXTERNAL (t))
3426 varpool_finalize_decl (t);
3427 DECL_SAVED_TREE (child_fn) = NULL;
3428 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3429 TREE_USED (block) = 1;
3431 /* Reset DECL_CONTEXT on function arguments. */
3432 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
3433 DECL_CONTEXT (t) = child_fn;
3435 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3436 so that it can be moved to the child function. */
3437 gsi = gsi_last_bb (entry_bb);
3438 stmt = gsi_stmt (gsi);
3439 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3440 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3441 gsi_remove (&gsi, true);
3442 e = split_block (entry_bb, stmt);
3443 entry_bb = e->dest;
3444 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3446 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3447 if (exit_bb)
3449 gsi = gsi_last_bb (exit_bb);
3450 gcc_assert (!gsi_end_p (gsi)
3451 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3452 stmt = gimple_build_return (NULL);
3453 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3454 gsi_remove (&gsi, true);
3457 /* Move the parallel region into CHILD_CFUN. */
3459 if (gimple_in_ssa_p (cfun))
3461 push_cfun (child_cfun);
3462 init_tree_ssa (child_cfun);
3463 init_ssa_operands ();
3464 cfun->gimple_df->in_ssa_p = true;
3465 pop_cfun ();
3466 block = NULL_TREE;
3468 else
3469 block = gimple_block (entry_stmt);
3471 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3472 if (exit_bb)
3473 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3475 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3476 for (tp = &child_cfun->local_decls; *tp; )
3477 if (DECL_CONTEXT (TREE_VALUE (*tp)) != cfun->decl)
3478 tp = &TREE_CHAIN (*tp);
3479 else
3480 *tp = TREE_CHAIN (*tp);
3482 /* Inform the callgraph about the new function. */
3483 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3484 = cfun->curr_properties;
3485 cgraph_add_new_function (child_fn, true);
3487 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3488 fixed in a following pass. */
3489 push_cfun (child_cfun);
3490 save_current = current_function_decl;
3491 current_function_decl = child_fn;
3492 if (optimize)
3493 optimize_omp_library_calls (entry_stmt);
3494 rebuild_cgraph_edges ();
3496 /* Some EH regions might become dead, see PR34608. If
3497 pass_cleanup_cfg isn't the first pass to happen with the
3498 new child, these dead EH edges might cause problems.
3499 Clean them up now. */
3500 if (flag_exceptions)
3502 basic_block bb;
3503 bool changed = false;
3505 FOR_EACH_BB (bb)
3506 changed |= gimple_purge_dead_eh_edges (bb);
3507 if (changed)
3508 cleanup_tree_cfg ();
3510 if (gimple_in_ssa_p (cfun))
3511 update_ssa (TODO_update_ssa);
3512 current_function_decl = save_current;
3513 pop_cfun ();
3516 /* Emit a library call to launch the children threads. */
3517 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3518 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3519 else
3520 expand_task_call (new_bb, entry_stmt);
3521 update_ssa (TODO_update_ssa_only_virtuals);
3525 /* A subroutine of expand_omp_for. Generate code for a parallel
3526 loop with any schedule. Given parameters:
3528 for (V = N1; V cond N2; V += STEP) BODY;
3530 where COND is "<" or ">", we generate pseudocode
3532 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3533 if (more) goto L0; else goto L3;
3535 V = istart0;
3536 iend = iend0;
3538 BODY;
3539 V += STEP;
3540 if (V cond iend) goto L1; else goto L2;
3542 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3545 If this is a combined omp parallel loop, instead of the call to
3546 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3548 For collapsed loops, given parameters:
3549 collapse(3)
3550 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3551 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3552 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3553 BODY;
3555 we generate pseudocode
3557 if (cond3 is <)
3558 adj = STEP3 - 1;
3559 else
3560 adj = STEP3 + 1;
3561 count3 = (adj + N32 - N31) / STEP3;
3562 if (cond2 is <)
3563 adj = STEP2 - 1;
3564 else
3565 adj = STEP2 + 1;
3566 count2 = (adj + N22 - N21) / STEP2;
3567 if (cond1 is <)
3568 adj = STEP1 - 1;
3569 else
3570 adj = STEP1 + 1;
3571 count1 = (adj + N12 - N11) / STEP1;
3572 count = count1 * count2 * count3;
3573 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3574 if (more) goto L0; else goto L3;
3576 V = istart0;
3577 T = V;
3578 V3 = N31 + (T % count3) * STEP3;
3579 T = T / count3;
3580 V2 = N21 + (T % count2) * STEP2;
3581 T = T / count2;
3582 V1 = N11 + T * STEP1;
3583 iend = iend0;
3585 BODY;
3586 V += 1;
3587 if (V < iend) goto L10; else goto L2;
3588 L10:
3589 V3 += STEP3;
3590 if (V3 cond3 N32) goto L1; else goto L11;
3591 L11:
3592 V3 = N31;
3593 V2 += STEP2;
3594 if (V2 cond2 N22) goto L1; else goto L12;
3595 L12:
3596 V2 = N21;
3597 V1 += STEP1;
3598 goto L1;
3600 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3605 static void
3606 expand_omp_for_generic (struct omp_region *region,
3607 struct omp_for_data *fd,
3608 enum built_in_function start_fn,
3609 enum built_in_function next_fn)
3611 tree type, istart0, iend0, iend;
3612 tree t, vmain, vback, bias = NULL_TREE;
3613 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3614 basic_block l2_bb = NULL, l3_bb = NULL;
3615 gimple_stmt_iterator gsi;
3616 gimple stmt;
3617 bool in_combined_parallel = is_combined_parallel (region);
3618 bool broken_loop = region->cont == NULL;
3619 edge e, ne;
3620 tree *counts = NULL;
3621 int i;
3623 gcc_assert (!broken_loop || !in_combined_parallel);
3624 gcc_assert (fd->iter_type == long_integer_type_node
3625 || !in_combined_parallel);
3627 type = TREE_TYPE (fd->loop.v);
3628 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3629 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3630 TREE_ADDRESSABLE (istart0) = 1;
3631 TREE_ADDRESSABLE (iend0) = 1;
3632 if (gimple_in_ssa_p (cfun))
3634 add_referenced_var (istart0);
3635 add_referenced_var (iend0);
3638 /* See if we need to bias by LLONG_MIN. */
3639 if (fd->iter_type == long_long_unsigned_type_node
3640 && TREE_CODE (type) == INTEGER_TYPE
3641 && !TYPE_UNSIGNED (type))
3643 tree n1, n2;
3645 if (fd->loop.cond_code == LT_EXPR)
3647 n1 = fd->loop.n1;
3648 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3650 else
3652 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3653 n2 = fd->loop.n1;
3655 if (TREE_CODE (n1) != INTEGER_CST
3656 || TREE_CODE (n2) != INTEGER_CST
3657 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3658 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3661 entry_bb = region->entry;
3662 cont_bb = region->cont;
3663 collapse_bb = NULL;
3664 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3665 gcc_assert (broken_loop
3666 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3667 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3668 l1_bb = single_succ (l0_bb);
3669 if (!broken_loop)
3671 l2_bb = create_empty_bb (cont_bb);
3672 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3673 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3675 else
3676 l2_bb = NULL;
3677 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3678 exit_bb = region->exit;
3680 gsi = gsi_last_bb (entry_bb);
3682 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3683 if (fd->collapse > 1)
3685 /* Collapsed loops need work for expansion in SSA form. */
3686 gcc_assert (!gimple_in_ssa_p (cfun));
3687 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3688 for (i = 0; i < fd->collapse; i++)
3690 tree itype = TREE_TYPE (fd->loops[i].v);
3692 if (POINTER_TYPE_P (itype))
3693 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3694 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3695 ? -1 : 1));
3696 t = fold_build2 (PLUS_EXPR, itype,
3697 fold_convert (itype, fd->loops[i].step), t);
3698 t = fold_build2 (PLUS_EXPR, itype, t,
3699 fold_convert (itype, fd->loops[i].n2));
3700 t = fold_build2 (MINUS_EXPR, itype, t,
3701 fold_convert (itype, fd->loops[i].n1));
3702 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3703 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3704 fold_build1 (NEGATE_EXPR, itype, t),
3705 fold_build1 (NEGATE_EXPR, itype,
3706 fold_convert (itype,
3707 fd->loops[i].step)));
3708 else
3709 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3710 fold_convert (itype, fd->loops[i].step));
3711 t = fold_convert (type, t);
3712 if (TREE_CODE (t) == INTEGER_CST)
3713 counts[i] = t;
3714 else
3716 counts[i] = create_tmp_var (type, ".count");
3717 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3718 true, GSI_SAME_STMT);
3719 stmt = gimple_build_assign (counts[i], t);
3720 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3722 if (SSA_VAR_P (fd->loop.n2))
3724 if (i == 0)
3725 t = counts[0];
3726 else
3728 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3729 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3730 true, GSI_SAME_STMT);
3732 stmt = gimple_build_assign (fd->loop.n2, t);
3733 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3737 if (in_combined_parallel)
3739 /* In a combined parallel loop, emit a call to
3740 GOMP_loop_foo_next. */
3741 t = build_call_expr (built_in_decls[next_fn], 2,
3742 build_fold_addr_expr (istart0),
3743 build_fold_addr_expr (iend0));
3745 else
3747 tree t0, t1, t2, t3, t4;
3748 /* If this is not a combined parallel loop, emit a call to
3749 GOMP_loop_foo_start in ENTRY_BB. */
3750 t4 = build_fold_addr_expr (iend0);
3751 t3 = build_fold_addr_expr (istart0);
3752 t2 = fold_convert (fd->iter_type, fd->loop.step);
3753 if (POINTER_TYPE_P (type)
3754 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3756 /* Avoid casting pointers to an integer of a different size. */
3757 tree itype
3758 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3759 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3760 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3762 else
3764 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3765 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3767 if (bias)
3769 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3770 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3772 if (fd->iter_type == long_integer_type_node)
3774 if (fd->chunk_size)
3776 t = fold_convert (fd->iter_type, fd->chunk_size);
3777 t = build_call_expr (built_in_decls[start_fn], 6,
3778 t0, t1, t2, t, t3, t4);
3780 else
3781 t = build_call_expr (built_in_decls[start_fn], 5,
3782 t0, t1, t2, t3, t4);
3784 else
3786 tree t5;
3787 tree c_bool_type;
3789 /* The GOMP_loop_ull_*start functions have an additional boolean
3790 argument, true for < loops and false for > loops.
3791 In Fortran, the C bool type can be different from
3792 boolean_type_node. */
3793 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3794 t5 = build_int_cst (c_bool_type,
3795 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3796 if (fd->chunk_size)
3798 t = fold_convert (fd->iter_type, fd->chunk_size);
3799 t = build_call_expr (built_in_decls[start_fn], 7,
3800 t5, t0, t1, t2, t, t3, t4);
3802 else
3803 t = build_call_expr (built_in_decls[start_fn], 6,
3804 t5, t0, t1, t2, t3, t4);
3807 if (TREE_TYPE (t) != boolean_type_node)
3808 t = fold_build2 (NE_EXPR, boolean_type_node,
3809 t, build_int_cst (TREE_TYPE (t), 0));
3810 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3811 true, GSI_SAME_STMT);
3812 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3814 /* Remove the GIMPLE_OMP_FOR statement. */
3815 gsi_remove (&gsi, true);
3817 /* Iteration setup for sequential loop goes in L0_BB. */
3818 gsi = gsi_start_bb (l0_bb);
3819 t = istart0;
3820 if (bias)
3821 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3822 if (POINTER_TYPE_P (type))
3823 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3824 0), t);
3825 t = fold_convert (type, t);
3826 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3827 false, GSI_CONTINUE_LINKING);
3828 stmt = gimple_build_assign (fd->loop.v, t);
3829 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3831 t = iend0;
3832 if (bias)
3833 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3834 if (POINTER_TYPE_P (type))
3835 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3836 0), t);
3837 t = fold_convert (type, t);
3838 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3839 false, GSI_CONTINUE_LINKING);
3840 if (fd->collapse > 1)
3842 tree tem = create_tmp_var (type, ".tem");
3844 stmt = gimple_build_assign (tem, fd->loop.v);
3845 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3846 for (i = fd->collapse - 1; i >= 0; i--)
3848 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3849 itype = vtype;
3850 if (POINTER_TYPE_P (vtype))
3851 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3852 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3853 t = fold_convert (itype, t);
3854 t = fold_build2 (MULT_EXPR, itype, t,
3855 fold_convert (itype, fd->loops[i].step));
3856 if (POINTER_TYPE_P (vtype))
3857 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3858 fd->loops[i].n1, fold_convert (sizetype, t));
3859 else
3860 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3861 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3862 false, GSI_CONTINUE_LINKING);
3863 stmt = gimple_build_assign (fd->loops[i].v, t);
3864 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3865 if (i != 0)
3867 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3868 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3869 false, GSI_CONTINUE_LINKING);
3870 stmt = gimple_build_assign (tem, t);
3871 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3876 if (!broken_loop)
3878 /* Code to control the increment and predicate for the sequential
3879 loop goes in the CONT_BB. */
3880 gsi = gsi_last_bb (cont_bb);
3881 stmt = gsi_stmt (gsi);
3882 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3883 vmain = gimple_omp_continue_control_use (stmt);
3884 vback = gimple_omp_continue_control_def (stmt);
3886 if (POINTER_TYPE_P (type))
3887 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3888 fold_convert (sizetype, fd->loop.step));
3889 else
3890 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3891 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3892 true, GSI_SAME_STMT);
3893 stmt = gimple_build_assign (vback, t);
3894 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3896 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3897 stmt = gimple_build_cond_empty (t);
3898 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3900 /* Remove GIMPLE_OMP_CONTINUE. */
3901 gsi_remove (&gsi, true);
3903 if (fd->collapse > 1)
3905 basic_block last_bb, bb;
3907 last_bb = cont_bb;
3908 for (i = fd->collapse - 1; i >= 0; i--)
3910 tree vtype = TREE_TYPE (fd->loops[i].v);
3912 bb = create_empty_bb (last_bb);
3913 gsi = gsi_start_bb (bb);
3915 if (i < fd->collapse - 1)
3917 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3918 e->probability = REG_BR_PROB_BASE / 8;
3920 t = fd->loops[i + 1].n1;
3921 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3922 false, GSI_CONTINUE_LINKING);
3923 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3924 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3926 else
3927 collapse_bb = bb;
3929 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3931 if (POINTER_TYPE_P (vtype))
3932 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3933 fd->loops[i].v,
3934 fold_convert (sizetype, fd->loops[i].step));
3935 else
3936 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3937 fd->loops[i].step);
3938 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3939 false, GSI_CONTINUE_LINKING);
3940 stmt = gimple_build_assign (fd->loops[i].v, t);
3941 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3943 if (i > 0)
3945 t = fd->loops[i].n2;
3946 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3947 false, GSI_CONTINUE_LINKING);
3948 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
3949 fd->loops[i].v, t);
3950 stmt = gimple_build_cond_empty (t);
3951 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3952 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
3953 e->probability = REG_BR_PROB_BASE * 7 / 8;
3955 else
3956 make_edge (bb, l1_bb, EDGE_FALLTHRU);
3957 last_bb = bb;
3961 /* Emit code to get the next parallel iteration in L2_BB. */
3962 gsi = gsi_start_bb (l2_bb);
3964 t = build_call_expr (built_in_decls[next_fn], 2,
3965 build_fold_addr_expr (istart0),
3966 build_fold_addr_expr (iend0));
3967 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3968 false, GSI_CONTINUE_LINKING);
3969 if (TREE_TYPE (t) != boolean_type_node)
3970 t = fold_build2 (NE_EXPR, boolean_type_node,
3971 t, build_int_cst (TREE_TYPE (t), 0));
3972 stmt = gimple_build_cond_empty (t);
3973 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3976 /* Add the loop cleanup function. */
3977 gsi = gsi_last_bb (exit_bb);
3978 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
3979 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
3980 else
3981 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
3982 stmt = gimple_build_call (t, 0);
3983 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3984 gsi_remove (&gsi, true);
3986 /* Connect the new blocks. */
3987 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
3988 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
3990 if (!broken_loop)
3992 gimple_seq phis;
3994 e = find_edge (cont_bb, l3_bb);
3995 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
3997 phis = phi_nodes (l3_bb);
3998 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4000 gimple phi = gsi_stmt (gsi);
4001 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4002 PHI_ARG_DEF_FROM_EDGE (phi, e));
4004 remove_edge (e);
4006 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4007 if (fd->collapse > 1)
4009 e = find_edge (cont_bb, l1_bb);
4010 remove_edge (e);
4011 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4013 else
4015 e = find_edge (cont_bb, l1_bb);
4016 e->flags = EDGE_TRUE_VALUE;
4018 e->probability = REG_BR_PROB_BASE * 7 / 8;
4019 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4020 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4022 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4023 recompute_dominator (CDI_DOMINATORS, l2_bb));
4024 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4025 recompute_dominator (CDI_DOMINATORS, l3_bb));
4026 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4027 recompute_dominator (CDI_DOMINATORS, l0_bb));
4028 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4029 recompute_dominator (CDI_DOMINATORS, l1_bb));
4034 /* A subroutine of expand_omp_for. Generate code for a parallel
4035 loop with static schedule and no specified chunk size. Given
4036 parameters:
4038 for (V = N1; V cond N2; V += STEP) BODY;
4040 where COND is "<" or ">", we generate pseudocode
4042 if (cond is <)
4043 adj = STEP - 1;
4044 else
4045 adj = STEP + 1;
4046 if ((__typeof (V)) -1 > 0 && cond is >)
4047 n = -(adj + N2 - N1) / -STEP;
4048 else
4049 n = (adj + N2 - N1) / STEP;
4050 q = n / nthreads;
4051 q += (q * nthreads != n);
4052 s0 = q * threadid;
4053 e0 = min(s0 + q, n);
4054 V = s0 * STEP + N1;
4055 if (s0 >= e0) goto L2; else goto L0;
4056 L0:
4057 e = e0 * STEP + N1;
4058 L1:
4059 BODY;
4060 V += STEP;
4061 if (V cond e) goto L1;
4062 L2:
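For illustration only (numbers not taken from this code): with N1 = 0,
N2 = 103, STEP = 1, cond "<", nthreads = 4 and threadid = 2, this gives
n = 103, q = 103/4 = 25 rounded up to 26 (since 25 * 4 != 103),
s0 = 26 * 2 = 52 and e0 = min (52 + 26, 103) = 78, so thread 2 runs
iterations 52 .. 77; threads 0-2 each get 26 iterations and thread 3
gets the remaining 25.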
4065 static void
4066 expand_omp_for_static_nochunk (struct omp_region *region,
4067 struct omp_for_data *fd)
4069 tree n, q, s0, e0, e, t, nthreads, threadid;
4070 tree type, itype, vmain, vback;
4071 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4072 basic_block fin_bb;
4073 gimple_stmt_iterator gsi;
4074 gimple stmt;
4076 itype = type = TREE_TYPE (fd->loop.v);
4077 if (POINTER_TYPE_P (type))
4078 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4080 entry_bb = region->entry;
4081 cont_bb = region->cont;
4082 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4083 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4084 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4085 body_bb = single_succ (seq_start_bb);
4086 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4087 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4088 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4089 exit_bb = region->exit;
4091 /* Iteration space partitioning goes in ENTRY_BB. */
4092 gsi = gsi_last_bb (entry_bb);
4093 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4095 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4096 t = fold_convert (itype, t);
4097 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4098 true, GSI_SAME_STMT);
4100 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4101 t = fold_convert (itype, t);
4102 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4103 true, GSI_SAME_STMT);
4105 fd->loop.n1
4106 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4107 true, NULL_TREE, true, GSI_SAME_STMT);
4108 fd->loop.n2
4109 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4110 true, NULL_TREE, true, GSI_SAME_STMT);
4111 fd->loop.step
4112 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4113 true, NULL_TREE, true, GSI_SAME_STMT);
4115 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4116 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4117 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4118 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4119 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4120 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4121 fold_build1 (NEGATE_EXPR, itype, t),
4122 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4123 else
4124 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4125 t = fold_convert (itype, t);
4126 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4128 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4129 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4131 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4132 t = fold_build2 (NE_EXPR, itype, t, n);
4133 t = fold_build2 (PLUS_EXPR, itype, q, t);
4134 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4136 t = build2 (MULT_EXPR, itype, q, threadid);
4137 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4139 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4140 t = fold_build2 (MIN_EXPR, itype, t, n);
4141 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4143 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4144 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4146 /* Remove the GIMPLE_OMP_FOR statement. */
4147 gsi_remove (&gsi, true);
4149 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4150 gsi = gsi_start_bb (seq_start_bb);
4152 t = fold_convert (itype, s0);
4153 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4154 if (POINTER_TYPE_P (type))
4155 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4156 fold_convert (sizetype, t));
4157 else
4158 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4159 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4160 false, GSI_CONTINUE_LINKING);
4161 stmt = gimple_build_assign (fd->loop.v, t);
4162 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4164 t = fold_convert (itype, e0);
4165 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4166 if (POINTER_TYPE_P (type))
4167 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4168 fold_convert (sizetype, t));
4169 else
4170 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4171 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4172 false, GSI_CONTINUE_LINKING);
4174 /* The code controlling the sequential loop replaces the
4175 GIMPLE_OMP_CONTINUE. */
4176 gsi = gsi_last_bb (cont_bb);
4177 stmt = gsi_stmt (gsi);
4178 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4179 vmain = gimple_omp_continue_control_use (stmt);
4180 vback = gimple_omp_continue_control_def (stmt);
4182 if (POINTER_TYPE_P (type))
4183 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4184 fold_convert (sizetype, fd->loop.step));
4185 else
4186 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4187 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4188 true, GSI_SAME_STMT);
4189 stmt = gimple_build_assign (vback, t);
4190 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4192 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4193 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4195 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4196 gsi_remove (&gsi, true);
4198 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4199 gsi = gsi_last_bb (exit_bb);
4200 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4201 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4202 false, GSI_SAME_STMT);
4203 gsi_remove (&gsi, true);
4205 /* Connect all the blocks. */
4206 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4207 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4209 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4210 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4212 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4213 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4214 recompute_dominator (CDI_DOMINATORS, body_bb));
4215 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4216 recompute_dominator (CDI_DOMINATORS, fin_bb));
4220 /* A subroutine of expand_omp_for. Generate code for a parallel
4221 loop with static schedule and a specified chunk size. Given
4222 parameters:
4224 for (V = N1; V cond N2; V += STEP) BODY;
4226 where COND is "<" or ">", we generate pseudocode
4228 if (cond is <)
4229 adj = STEP - 1;
4230 else
4231 adj = STEP + 1;
4232 if ((__typeof (V)) -1 > 0 && cond is >)
4233 n = -(adj + N2 - N1) / -STEP;
4234 else
4235 n = (adj + N2 - N1) / STEP;
4236 trip = 0;
4237 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4238 here so that V is defined
4239 if the loop is not entered
4240 L0:
4241 s0 = (trip * nthreads + threadid) * CHUNK;
4242 e0 = min(s0 + CHUNK, n);
4243 if (s0 < n) goto L1; else goto L4;
4244 L1:
4245 V = s0 * STEP + N1;
4246 e = e0 * STEP + N1;
4247 L2:
4248 BODY;
4249 V += STEP;
4250 if (V cond e) goto L2; else goto L3;
4251 L3:
4252 trip += 1;
4253 goto L0;
4254 L4:
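For illustration only (numbers not taken from this code): with n = 10,
nthreads = 2, CHUNK = 3 and threadid = 1, trip 0 yields
s0 = (0*2 + 1) * 3 = 3 and e0 = min (3 + 3, 10) = 6 (iterations 3 .. 5),
trip 1 yields s0 = (1*2 + 1) * 3 = 9 and e0 = min (12, 10) = 10
(iteration 9 only), and trip 2 yields s0 = 15 >= n, so the loop exits.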
4257 static void
4258 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4260 tree n, s0, e0, e, t;
4261 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4262 tree type, itype, v_main, v_back, v_extra;
4263 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4264 basic_block trip_update_bb, cont_bb, fin_bb;
4265 gimple_stmt_iterator si;
4266 gimple stmt;
4267 edge se;
4269 itype = type = TREE_TYPE (fd->loop.v);
4270 if (POINTER_TYPE_P (type))
4271 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4273 entry_bb = region->entry;
4274 se = split_block (entry_bb, last_stmt (entry_bb));
4275 entry_bb = se->src;
4276 iter_part_bb = se->dest;
4277 cont_bb = region->cont;
4278 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4279 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4280 == FALLTHRU_EDGE (cont_bb)->dest);
4281 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4282 body_bb = single_succ (seq_start_bb);
4283 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4284 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4285 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4286 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4287 exit_bb = region->exit;
4289 /* Trip and adjustment setup goes in ENTRY_BB. */
4290 si = gsi_last_bb (entry_bb);
4291 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4293 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4294 t = fold_convert (itype, t);
4295 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4296 true, GSI_SAME_STMT);
4298 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4299 t = fold_convert (itype, t);
4300 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4301 true, GSI_SAME_STMT);
4303 fd->loop.n1
4304 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4305 true, NULL_TREE, true, GSI_SAME_STMT);
4306 fd->loop.n2
4307 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4308 true, NULL_TREE, true, GSI_SAME_STMT);
4309 fd->loop.step
4310 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4311 true, NULL_TREE, true, GSI_SAME_STMT);
4312 fd->chunk_size
4313 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4314 true, NULL_TREE, true, GSI_SAME_STMT);
4316 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4317 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4318 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4319 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4320 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4321 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4322 fold_build1 (NEGATE_EXPR, itype, t),
4323 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4324 else
4325 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4326 t = fold_convert (itype, t);
4327 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4328 true, GSI_SAME_STMT);
4330 trip_var = create_tmp_var (itype, ".trip");
4331 if (gimple_in_ssa_p (cfun))
4333 add_referenced_var (trip_var);
4334 trip_init = make_ssa_name (trip_var, NULL);
4335 trip_main = make_ssa_name (trip_var, NULL);
4336 trip_back = make_ssa_name (trip_var, NULL);
4338 else
4340 trip_init = trip_var;
4341 trip_main = trip_var;
4342 trip_back = trip_var;
4345 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4346 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4348 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4349 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4350 if (POINTER_TYPE_P (type))
4351 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4352 fold_convert (sizetype, t));
4353 else
4354 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4355 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4356 true, GSI_SAME_STMT);
4358 /* Remove the GIMPLE_OMP_FOR. */
4359 gsi_remove (&si, true);
4361 /* Iteration space partitioning goes in ITER_PART_BB. */
4362 si = gsi_last_bb (iter_part_bb);
4364 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4365 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4366 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4367 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4368 false, GSI_CONTINUE_LINKING);
4370 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4371 t = fold_build2 (MIN_EXPR, itype, t, n);
4372 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4373 false, GSI_CONTINUE_LINKING);
4375 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4376 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4378 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4379 si = gsi_start_bb (seq_start_bb);
4381 t = fold_convert (itype, s0);
4382 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4383 if (POINTER_TYPE_P (type))
4384 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4385 fold_convert (sizetype, t));
4386 else
4387 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4388 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4389 false, GSI_CONTINUE_LINKING);
4390 stmt = gimple_build_assign (fd->loop.v, t);
4391 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4393 t = fold_convert (itype, e0);
4394 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4395 if (POINTER_TYPE_P (type))
4396 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4397 fold_convert (sizetype, t));
4398 else
4399 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4400 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4401 false, GSI_CONTINUE_LINKING);
4403 /* The code controlling the sequential loop goes in CONT_BB,
4404 replacing the GIMPLE_OMP_CONTINUE. */
4405 si = gsi_last_bb (cont_bb);
4406 stmt = gsi_stmt (si);
4407 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4408 v_main = gimple_omp_continue_control_use (stmt);
4409 v_back = gimple_omp_continue_control_def (stmt);
4411 if (POINTER_TYPE_P (type))
4412 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4413 fold_convert (sizetype, fd->loop.step));
4414 else
4415 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4416 stmt = gimple_build_assign (v_back, t);
4417 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4419 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4420 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4422 /* Remove GIMPLE_OMP_CONTINUE. */
4423 gsi_remove (&si, true);
4425 /* Trip update code goes into TRIP_UPDATE_BB. */
4426 si = gsi_start_bb (trip_update_bb);
4428 t = build_int_cst (itype, 1);
4429 t = build2 (PLUS_EXPR, itype, trip_main, t);
4430 stmt = gimple_build_assign (trip_back, t);
4431 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4433 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4434 si = gsi_last_bb (exit_bb);
4435 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4436 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4437 false, GSI_SAME_STMT);
4438 gsi_remove (&si, true);
4440 /* Connect the new blocks. */
4441 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4442 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4444 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4445 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4447 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4449 if (gimple_in_ssa_p (cfun))
4451 gimple_stmt_iterator psi;
4452 gimple phi;
4453 edge re, ene;
4454 edge_var_map_vector head;
4455 edge_var_map *vm;
4456 size_t i;
4458 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4459 remove arguments of the phi nodes in fin_bb. We need to create
4460 appropriate phi nodes in iter_part_bb instead. */
4461 se = single_pred_edge (fin_bb);
4462 re = single_succ_edge (trip_update_bb);
4463 head = redirect_edge_var_map_vector (re);
4464 ene = single_succ_edge (entry_bb);
4466 psi = gsi_start_phis (fin_bb);
4467 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4468 gsi_next (&psi), ++i)
4470 gimple nphi;
4472 phi = gsi_stmt (psi);
4473 t = gimple_phi_result (phi);
4474 gcc_assert (t == redirect_edge_var_map_result (vm));
4475 nphi = create_phi_node (t, iter_part_bb);
4476 SSA_NAME_DEF_STMT (t) = nphi;
4478 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4479 /* A special case -- fd->loop.v is not yet computed in
4480 iter_part_bb; we need to use v_extra instead. */
4481 if (t == fd->loop.v)
4482 t = v_extra;
4483 add_phi_arg (nphi, t, ene);
4484 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re);
4486 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4487 redirect_edge_var_map_clear (re);
4488 while (1)
4490 psi = gsi_start_phis (fin_bb);
4491 if (gsi_end_p (psi))
4492 break;
4493 remove_phi_node (&psi, false);
4496 /* Make phi node for trip. */
4497 phi = create_phi_node (trip_main, iter_part_bb);
4498 SSA_NAME_DEF_STMT (trip_main) = phi;
4499 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
4500 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
4503 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4504 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4505 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4506 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4507 recompute_dominator (CDI_DOMINATORS, fin_bb));
4508 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4509 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4510 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4511 recompute_dominator (CDI_DOMINATORS, body_bb));
4515 /* Expand the OpenMP loop defined by REGION. */
4517 static void
4518 expand_omp_for (struct omp_region *region)
4520 struct omp_for_data fd;
4521 struct omp_for_data_loop *loops;
4523 loops
4524 = (struct omp_for_data_loop *)
4525 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4526 * sizeof (struct omp_for_data_loop));
4527 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4528 region->sched_kind = fd.sched_kind;
4530 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4531 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4532 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4533 if (region->cont)
4535 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4536 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4537 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4540 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4541 && !fd.have_ordered
4542 && fd.collapse == 1
4543 && region->cont != NULL)
4545 if (fd.chunk_size == NULL)
4546 expand_omp_for_static_nochunk (region, &fd);
4547 else
4548 expand_omp_for_static_chunk (region, &fd);
4550 else
4552 int fn_index, start_ix, next_ix;
4554 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4555 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4556 ? 3 : fd.sched_kind;
4557 fn_index += fd.have_ordered * 4;
4558 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4559 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
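/* For example (assuming the usual layout of the GOMP_loop_* builtins in
   omp-builtins.def): schedule(dynamic) gives fn_index 1 and therefore
   GOMP_loop_dynamic_start/next, while schedule(guided) together with an
   ordered clause gives fn_index 2 + 4 = 6 and therefore
   GOMP_loop_ordered_guided_start/next.  */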
4560 if (fd.iter_type == long_long_unsigned_type_node)
4562 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4563 - BUILT_IN_GOMP_LOOP_STATIC_START;
4564 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4565 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4567 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4568 (enum built_in_function) next_ix);
4571 update_ssa (TODO_update_ssa_only_virtuals);
4575 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4577 v = GOMP_sections_start (n);
4578 L0:
4579 switch (v)
4580 {
4581 case 0:
4582 goto L2;
4583 case 1:
4584 section 1;
4585 goto L1;
4586 case 2:
4587 ...
4588 case n:
4589 ...
4590 default:
4591 abort ();
4592 }
4593 L1:
4594 v = GOMP_sections_next ();
4595 goto L0;
4596 L2:
4597 reduction;
4599 If this is a combined parallel sections, replace the call to
4600 GOMP_sections_start with a call to GOMP_sections_next. */
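/* For illustration only: a three-section construct such as

     #pragma omp sections
     {
       #pragma omp section
         a ();
       #pragma omp section
         b ();
       #pragma omp section
         c ();
     }

   becomes GOMP_sections_start (3) followed by a switch whose case 1,
   case 2 and case 3 run a (), b () and c (), whose case 0 jumps past
   the dispatch loop, and whose default case calls __builtin_trap ().  */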
4602 static void
4603 expand_omp_sections (struct omp_region *region)
4605 tree t, u, vin = NULL, vmain, vnext, l1, l2;
4606 VEC (tree,heap) *label_vec;
4607 unsigned len;
4608 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4609 gimple_stmt_iterator si, switch_si;
4610 gimple sections_stmt, stmt, cont;
4611 edge_iterator ei;
4612 edge e;
4613 struct omp_region *inner;
4614 unsigned i, casei;
4615 bool exit_reachable = region->cont != NULL;
4617 gcc_assert (exit_reachable == (region->exit != NULL));
4618 entry_bb = region->entry;
4619 l0_bb = single_succ (entry_bb);
4620 l1_bb = region->cont;
4621 l2_bb = region->exit;
4622 if (exit_reachable)
4624 if (single_pred (l2_bb) == l0_bb)
4625 l2 = gimple_block_label (l2_bb);
4626 else
4628 /* This can happen if there are reductions. */
4629 len = EDGE_COUNT (l0_bb->succs);
4630 gcc_assert (len > 0);
4631 e = EDGE_SUCC (l0_bb, len - 1);
4632 si = gsi_last_bb (e->dest);
4633 l2 = NULL_TREE;
4634 if (gsi_end_p (si)
4635 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4636 l2 = gimple_block_label (e->dest);
4637 else
4638 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4640 si = gsi_last_bb (e->dest);
4641 if (gsi_end_p (si)
4642 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4644 l2 = gimple_block_label (e->dest);
4645 break;
4649 default_bb = create_empty_bb (l1_bb->prev_bb);
4650 l1 = gimple_block_label (l1_bb);
4652 else
4654 default_bb = create_empty_bb (l0_bb);
4655 l1 = NULL_TREE;
4656 l2 = gimple_block_label (default_bb);
4659 /* We will build a switch() with enough cases for all the
4660 GIMPLE_OMP_SECTION regions, a '0' case taken when there is no more work
4661 and a default case to abort if something goes wrong. */
4662 len = EDGE_COUNT (l0_bb->succs);
4664 /* Use VEC_quick_push on label_vec throughout, since we know the size
4665 in advance. */
4666 label_vec = VEC_alloc (tree, heap, len);
4668 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4669 GIMPLE_OMP_SECTIONS statement. */
4670 si = gsi_last_bb (entry_bb);
4671 sections_stmt = gsi_stmt (si);
4672 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4673 vin = gimple_omp_sections_control (sections_stmt);
4674 if (!is_combined_parallel (region))
4676 /* If we are not inside a combined parallel+sections region,
4677 call GOMP_sections_start. */
4678 t = build_int_cst (unsigned_type_node,
4679 exit_reachable ? len - 1 : len);
4680 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4681 stmt = gimple_build_call (u, 1, t);
4683 else
4685 /* Otherwise, call GOMP_sections_next. */
4686 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4687 stmt = gimple_build_call (u, 0);
4689 gimple_call_set_lhs (stmt, vin);
4690 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4691 gsi_remove (&si, true);
4693 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4694 L0_BB. */
4695 switch_si = gsi_last_bb (l0_bb);
4696 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4697 if (exit_reachable)
4699 cont = last_stmt (l1_bb);
4700 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4701 vmain = gimple_omp_continue_control_use (cont);
4702 vnext = gimple_omp_continue_control_def (cont);
4704 else
4706 vmain = vin;
4707 vnext = NULL_TREE;
4710 i = 0;
4711 if (exit_reachable)
4713 t = build3 (CASE_LABEL_EXPR, void_type_node,
4714 build_int_cst (unsigned_type_node, 0), NULL, l2);
4715 VEC_quick_push (tree, label_vec, t);
4716 i++;
4719 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4720 for (inner = region->inner, casei = 1;
4721 inner;
4722 inner = inner->next, i++, casei++)
4724 basic_block s_entry_bb, s_exit_bb;
4726 /* Skip optional reduction region. */
4727 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4729 --i;
4730 --casei;
4731 continue;
4734 s_entry_bb = inner->entry;
4735 s_exit_bb = inner->exit;
4737 t = gimple_block_label (s_entry_bb);
4738 u = build_int_cst (unsigned_type_node, casei);
4739 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
4740 VEC_quick_push (tree, label_vec, u);
4742 si = gsi_last_bb (s_entry_bb);
4743 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4744 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4745 gsi_remove (&si, true);
4746 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4748 if (s_exit_bb == NULL)
4749 continue;
4751 si = gsi_last_bb (s_exit_bb);
4752 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4753 gsi_remove (&si, true);
4755 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4758 /* Error handling code goes in DEFAULT_BB. */
4759 t = gimple_block_label (default_bb);
4760 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
4761 make_edge (l0_bb, default_bb, 0);
4763 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4764 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4765 gsi_remove (&switch_si, true);
4766 VEC_free (tree, heap, label_vec);
4768 si = gsi_start_bb (default_bb);
4769 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4770 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4772 if (exit_reachable)
4774 /* Code to get the next section goes in L1_BB. */
4775 si = gsi_last_bb (l1_bb);
4776 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4778 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4779 gimple_call_set_lhs (stmt, vnext);
4780 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4781 gsi_remove (&si, true);
4783 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4785 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4786 si = gsi_last_bb (l2_bb);
4787 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4788 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4789 else
4790 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4791 stmt = gimple_build_call (t, 0);
4792 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4793 gsi_remove (&si, true);
4796 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4800 /* Expand code for an OpenMP single directive. We've already expanded
4801 much of the code; here we simply place the GOMP_barrier call. */
4803 static void
4804 expand_omp_single (struct omp_region *region)
4806 basic_block entry_bb, exit_bb;
4807 gimple_stmt_iterator si;
4808 bool need_barrier = false;
4810 entry_bb = region->entry;
4811 exit_bb = region->exit;
4813 si = gsi_last_bb (entry_bb);
4814 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4815 be removed. We need to ensure that the thread that entered the single
4816 does not exit before the data is copied out by the other threads. */
4817 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4818 OMP_CLAUSE_COPYPRIVATE))
4819 need_barrier = true;
4820 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4821 gsi_remove (&si, true);
4822 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4824 si = gsi_last_bb (exit_bb);
4825 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4826 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4827 false, GSI_SAME_STMT);
4828 gsi_remove (&si, true);
4829 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4833 /* Generic expansion for OpenMP synchronization directives: master,
4834 ordered and critical. All we need to do here is remove the entry
4835 and exit markers for REGION. */
4837 static void
4838 expand_omp_synch (struct omp_region *region)
4840 basic_block entry_bb, exit_bb;
4841 gimple_stmt_iterator si;
4843 entry_bb = region->entry;
4844 exit_bb = region->exit;
4846 si = gsi_last_bb (entry_bb);
4847 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4848 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4849 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4850 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4851 gsi_remove (&si, true);
4852 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4854 if (exit_bb)
4856 si = gsi_last_bb (exit_bb);
4857 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4858 gsi_remove (&si, true);
4859 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4863 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4864 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4865 size of the data type, and thus usable to find the index of the builtin
4866 decl. Returns false if the expression is not of the proper form. */
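/* For illustration only (temporaries are made up): the body of

     #pragma omp atomic
     x += y;

   reaches this point as

     load_bb:   GIMPLE_OMP_ATOMIC_LOAD (D.1, &x)
     store_bb:  D.2 = D.1 + y;
                GIMPLE_OMP_ATOMIC_STORE (D.2)

   and, when x is a 4-byte integer (INDEX == 2), collapses into a single
   call to __sync_fetch_and_add_4 (&x, y).  */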
4868 static bool
4869 expand_omp_atomic_fetch_op (basic_block load_bb,
4870 tree addr, tree loaded_val,
4871 tree stored_val, int index)
4873 enum built_in_function base;
4874 tree decl, itype, call;
4875 enum insn_code *optab;
4876 tree rhs;
4877 basic_block store_bb = single_succ (load_bb);
4878 gimple_stmt_iterator gsi;
4879 gimple stmt;
4881 /* We expect to find the following sequences:
4883 load_bb:
4884 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
4886 store_bb:
4887 val = tmp OP something; (or: something OP tmp)
4888 GIMPLE_OMP_ATOMIC_STORE (val)
4890 ???FIXME: Allow a more flexible sequence.
4891 Perhaps use data flow to pick the statements.
4895 gsi = gsi_after_labels (store_bb);
4896 stmt = gsi_stmt (gsi);
4897 if (!is_gimple_assign (stmt))
4898 return false;
4899 gsi_next (&gsi);
4900 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
4901 return false;
4903 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
4904 return false;
4906 /* Check for one of the supported fetch-op operations. */
4907 switch (gimple_assign_rhs_code (stmt))
4909 case PLUS_EXPR:
4910 case POINTER_PLUS_EXPR:
4911 base = BUILT_IN_FETCH_AND_ADD_N;
4912 optab = sync_add_optab;
4913 break;
4914 case MINUS_EXPR:
4915 base = BUILT_IN_FETCH_AND_SUB_N;
4916 optab = sync_add_optab;
4917 break;
4918 case BIT_AND_EXPR:
4919 base = BUILT_IN_FETCH_AND_AND_N;
4920 optab = sync_and_optab;
4921 break;
4922 case BIT_IOR_EXPR:
4923 base = BUILT_IN_FETCH_AND_OR_N;
4924 optab = sync_ior_optab;
4925 break;
4926 case BIT_XOR_EXPR:
4927 base = BUILT_IN_FETCH_AND_XOR_N;
4928 optab = sync_xor_optab;
4929 break;
4930 default:
4931 return false;
4933 /* Make sure the expression is of the proper form. */
4934 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
4935 rhs = gimple_assign_rhs2 (stmt);
4936 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
4937 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
4938 rhs = gimple_assign_rhs1 (stmt);
4939 else
4940 return false;
4942 decl = built_in_decls[base + index + 1];
4943 itype = TREE_TYPE (TREE_TYPE (decl));
4945 if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
4946 return false;
4948 gsi = gsi_last_bb (load_bb);
4949 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
4950 call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
4951 call = fold_convert (void_type_node, call);
4952 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
4953 gsi_remove (&gsi, true);
4955 gsi = gsi_last_bb (store_bb);
4956 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
4957 gsi_remove (&gsi, true);
4958 gsi = gsi_last_bb (store_bb);
4959 gsi_remove (&gsi, true);
4961 if (gimple_in_ssa_p (cfun))
4962 update_ssa (TODO_update_ssa_no_phi);
4964 return true;
4967 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
4969 oldval = *addr;
4970 repeat:
4971 newval = rhs; // with oldval replacing *addr in rhs
4972 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
4973 if (oldval != newval)
4974 goto repeat;
4976 INDEX is log2 of the size of the data type, and thus usable to find the
4977 index of the builtin decl. */
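/* For illustration only: for

     #pragma omp atomic
     d *= 1.5;

   with a double d there is no fetch-and-op builtin, so d is
   view-converted to an 8-byte integer and the loop above keeps retrying
   __sync_val_compare_and_swap_8 until no other thread has changed *addr
   between the load and the swap.  */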
4979 static bool
4980 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
4981 tree addr, tree loaded_val, tree stored_val,
4982 int index)
4984 tree loadedi, storedi, initial, new_storedi, old_vali;
4985 tree type, itype, cmpxchg, iaddr;
4986 gimple_stmt_iterator si;
4987 basic_block loop_header = single_succ (load_bb);
4988 gimple phi, stmt;
4989 edge e;
4991 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
4992 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
4993 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
4995 if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
4996 return false;
4998 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
4999 si = gsi_last_bb (load_bb);
5000 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5002 /* For floating-point values, we'll need to view-convert them to integers
5003 so that we can perform the atomic compare and swap. Simplify the
5004 following code by always setting up the "i"ntegral variables. */
5005 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5007 tree iaddr_val;
5009 iaddr = create_tmp_var (build_pointer_type (itype), NULL);
5010 iaddr_val
5011 = force_gimple_operand_gsi (&si,
5012 fold_convert (TREE_TYPE (iaddr), addr),
5013 false, NULL_TREE, true, GSI_SAME_STMT);
5014 stmt = gimple_build_assign (iaddr, iaddr_val);
5015 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5016 DECL_POINTER_ALIAS_SET (iaddr) = 0;
5017 loadedi = create_tmp_var (itype, NULL);
5018 if (gimple_in_ssa_p (cfun))
5020 add_referenced_var (iaddr);
5021 add_referenced_var (loadedi);
5022 loadedi = make_ssa_name (loadedi, NULL);
5025 else
5027 iaddr = addr;
5028 loadedi = loaded_val;
5031 initial = force_gimple_operand_gsi (&si, build_fold_indirect_ref (iaddr),
5032 true, NULL_TREE, true, GSI_SAME_STMT);
5034 /* Move the value to the LOADEDI temporary. */
5035 if (gimple_in_ssa_p (cfun))
5037 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5038 phi = create_phi_node (loadedi, loop_header);
5039 SSA_NAME_DEF_STMT (loadedi) = phi;
5040 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5041 initial);
5043 else
5044 gsi_insert_before (&si,
5045 gimple_build_assign (loadedi, initial),
5046 GSI_SAME_STMT);
5047 if (loadedi != loaded_val)
5049 gimple_stmt_iterator gsi2;
5050 tree x;
5052 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5053 gsi2 = gsi_start_bb (loop_header);
5054 if (gimple_in_ssa_p (cfun))
5056 gimple stmt;
5057 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5058 true, GSI_SAME_STMT);
5059 stmt = gimple_build_assign (loaded_val, x);
5060 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5062 else
5064 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5065 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5066 true, GSI_SAME_STMT);
5069 gsi_remove (&si, true);
5071 si = gsi_last_bb (store_bb);
5072 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5074 if (iaddr == addr)
5075 storedi = stored_val;
5076 else
5077 storedi =
5078 force_gimple_operand_gsi (&si,
5079 build1 (VIEW_CONVERT_EXPR, itype,
5080 stored_val), true, NULL_TREE, true,
5081 GSI_SAME_STMT);
5083 /* Build the compare&swap statement. */
5084 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5085 new_storedi = force_gimple_operand_gsi (&si,
5086 fold_convert (TREE_TYPE (loadedi),
5087 new_storedi),
5088 true, NULL_TREE,
5089 true, GSI_SAME_STMT);
5091 if (gimple_in_ssa_p (cfun))
5092 old_vali = loadedi;
5093 else
5095 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5096 if (gimple_in_ssa_p (cfun))
5097 add_referenced_var (old_vali);
5098 stmt = gimple_build_assign (old_vali, loadedi);
5099 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5101 stmt = gimple_build_assign (loadedi, new_storedi);
5102 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5105 /* Note that we always perform the comparison as an integer, even for
5106 floating point. This allows the atomic operation to properly
5107 succeed even with NaNs and -0.0. */
5108 stmt = gimple_build_cond_empty
5109 (build2 (NE_EXPR, boolean_type_node,
5110 new_storedi, old_vali));
5111 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5113 /* Update cfg. */
5114 e = single_succ_edge (store_bb);
5115 e->flags &= ~EDGE_FALLTHRU;
5116 e->flags |= EDGE_FALSE_VALUE;
5118 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5120 /* Copy the new value to loadedi (we already did that before the condition
5121 if we are not in SSA). */
5122 if (gimple_in_ssa_p (cfun))
5124 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5125 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5128 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5129 gsi_remove (&si, true);
5131 if (gimple_in_ssa_p (cfun))
5132 update_ssa (TODO_update_ssa_no_phi);
5134 return true;
5137 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5139 GOMP_atomic_start ();
5140 *addr = rhs;
5141 GOMP_atomic_end ();
5143 The result is not globally atomic, but works so long as all parallel
5144 references are within #pragma omp atomic directives. According to
5145 responses received from omp@openmp.org, this appears to be within spec,
5146 which makes sense, since that's how several other compilers handle
5147 this situation as well.
5148 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5149 expanding. STORED_VAL is the operand of the matching
5150 GIMPLE_OMP_ATOMIC_STORE.
5152 We replace
5153 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5154 loaded_val = *addr;
5156 and replace
5157 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5158 *addr = stored_val;
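For illustration only: a type with no matching __sync builtin on the
target (e.g. a 12-byte long double) ends up here, so

  #pragma omp atomic
  ld += 1.0L;

becomes GOMP_atomic_start (); loaded_val = *addr; ... *addr = stored_val;
GOMP_atomic_end ();, serializing all such updates on one global mutex.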
5161 static bool
5162 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5163 tree addr, tree loaded_val, tree stored_val)
5165 gimple_stmt_iterator si;
5166 gimple stmt;
5167 tree t;
5169 si = gsi_last_bb (load_bb);
5170 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5172 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5173 t = build_function_call_expr (t, 0);
5174 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5176 stmt = gimple_build_assign (loaded_val, build_fold_indirect_ref (addr));
5177 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5178 gsi_remove (&si, true);
5180 si = gsi_last_bb (store_bb);
5181 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5183 stmt = gimple_build_assign (build_fold_indirect_ref (unshare_expr (addr)),
5184 stored_val);
5185 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5187 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5188 t = build_function_call_expr (t, 0);
5189 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5190 gsi_remove (&si, true);
5192 if (gimple_in_ssa_p (cfun))
5193 update_ssa (TODO_update_ssa_no_phi);
5194 return true;
5197 /* Expand a GIMPLE_OMP_ATOMIC statement. We first try to expand it
5198 using expand_omp_atomic_fetch_op. If that fails, we try
5199 expand_omp_atomic_pipeline, and if that fails too, the
5200 ultimate fallback is wrapping the operation in a mutex
5201 (expand_omp_atomic_mutex). REGION is the atomic region built
5202 by build_omp_regions_1(). */
5204 static void
5205 expand_omp_atomic (struct omp_region *region)
5207 basic_block load_bb = region->entry, store_bb = region->exit;
5208 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5209 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5210 tree addr = gimple_omp_atomic_load_rhs (load);
5211 tree stored_val = gimple_omp_atomic_store_val (store);
5212 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5213 HOST_WIDE_INT index;
5215 /* Make sure the type is one of the supported sizes. */
5216 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5217 index = exact_log2 (index);
5218 if (index >= 0 && index <= 4)
5220 unsigned int align = TYPE_ALIGN_UNIT (type);
5222 /* __sync builtins require strict data alignment. */
5223 if (exact_log2 (align) >= index)
5225 /* When possible, use specialized atomic update functions. */
5226 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5227 && store_bb == single_succ (load_bb))
5229 if (expand_omp_atomic_fetch_op (load_bb, addr,
5230 loaded_val, stored_val, index))
5231 return;
5234 /* If we don't have specialized __sync builtins, try to implement it
5235 as a compare and swap loop. */
5236 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5237 loaded_val, stored_val, index))
5238 return;
5242 /* The ultimate fallback is wrapping the operation in a mutex. */
5243 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5247 /* Expand the parallel region tree rooted at REGION. Expansion
5248 proceeds in depth-first order. Innermost regions are expanded
5249 first. This way, parallel regions that require a new function to
5250 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5251 internal dependencies in their body. */
5253 static void
5254 expand_omp (struct omp_region *region)
5256 while (region)
5258 location_t saved_location;
5260 /* First, determine whether this is a combined parallel+workshare
5261 region. */
5262 if (region->type == GIMPLE_OMP_PARALLEL)
5263 determine_parallel_type (region);
5265 if (region->inner)
5266 expand_omp (region->inner);
5268 saved_location = input_location;
5269 if (gimple_has_location (last_stmt (region->entry)))
5270 input_location = gimple_location (last_stmt (region->entry));
5272 switch (region->type)
5274 case GIMPLE_OMP_PARALLEL:
5275 case GIMPLE_OMP_TASK:
5276 expand_omp_taskreg (region);
5277 break;
5279 case GIMPLE_OMP_FOR:
5280 expand_omp_for (region);
5281 break;
5283 case GIMPLE_OMP_SECTIONS:
5284 expand_omp_sections (region);
5285 break;
5287 case GIMPLE_OMP_SECTION:
5288 /* Individual omp sections are handled together with their
5289 parent GIMPLE_OMP_SECTIONS region. */
5290 break;
5292 case GIMPLE_OMP_SINGLE:
5293 expand_omp_single (region);
5294 break;
5296 case GIMPLE_OMP_MASTER:
5297 case GIMPLE_OMP_ORDERED:
5298 case GIMPLE_OMP_CRITICAL:
5299 expand_omp_synch (region);
5300 break;
5302 case GIMPLE_OMP_ATOMIC_LOAD:
5303 expand_omp_atomic (region);
5304 break;
5306 default:
5307 gcc_unreachable ();
5310 input_location = saved_location;
5311 region = region->next;
5316 /* Helper for build_omp_regions. Scan the dominator tree starting at
5317 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5318 true, the function ends once a single tree is built (otherwise, the whole
5319 forest of OMP constructs may be built). */
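/* For illustration only: for

     #pragma omp parallel
     {
       #pragma omp for
       for (i = 0; i < n; i++) ...
     }

   this builds a GIMPLE_OMP_PARALLEL region whose ->inner is the
   GIMPLE_OMP_FOR region; the FOR region's cont and exit fields point at
   the blocks containing its GIMPLE_OMP_CONTINUE and GIMPLE_OMP_RETURN.  */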
5321 static void
5322 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5323 bool single_tree)
5325 gimple_stmt_iterator gsi;
5326 gimple stmt;
5327 basic_block son;
5329 gsi = gsi_last_bb (bb);
5330 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5332 struct omp_region *region;
5333 enum gimple_code code;
5335 stmt = gsi_stmt (gsi);
5336 code = gimple_code (stmt);
5337 if (code == GIMPLE_OMP_RETURN)
5339 /* STMT is the return point out of region PARENT. Mark it
5340 as the exit point and make PARENT the immediately
5341 enclosing region. */
5342 gcc_assert (parent);
5343 region = parent;
5344 region->exit = bb;
5345 parent = parent->outer;
5347 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5349 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5350 GIMPLE_OMP_RETURN, but matches with
5351 GIMPLE_OMP_ATOMIC_LOAD. */
5352 gcc_assert (parent);
5353 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5354 region = parent;
5355 region->exit = bb;
5356 parent = parent->outer;
5359 else if (code == GIMPLE_OMP_CONTINUE)
5361 gcc_assert (parent);
5362 parent->cont = bb;
5364 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5366 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5367 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5370 else
5372 /* Otherwise, this directive becomes the parent for a new
5373 region. */
5374 region = new_omp_region (bb, code, parent);
5375 parent = region;
5379 if (single_tree && !parent)
5380 return;
5382 for (son = first_dom_son (CDI_DOMINATORS, bb);
5383 son;
5384 son = next_dom_son (CDI_DOMINATORS, son))
5385 build_omp_regions_1 (son, parent, single_tree);
5388 /* Builds the tree of OMP regions rooted at ROOT, storing it in
5389 root_omp_region. */
5391 static void
5392 build_omp_regions_root (basic_block root)
5394 gcc_assert (root_omp_region == NULL);
5395 build_omp_regions_1 (root, NULL, true);
5396 gcc_assert (root_omp_region != NULL);
5399 /* Expands the omp construct (and its subconstructs) starting in HEAD. */
5401 void
5402 omp_expand_local (basic_block head)
5404 build_omp_regions_root (head);
5405 if (dump_file && (dump_flags & TDF_DETAILS))
5407 fprintf (dump_file, "\nOMP region tree\n\n");
5408 dump_omp_region (dump_file, root_omp_region, 0);
5409 fprintf (dump_file, "\n");
5412 remove_exit_barriers (root_omp_region);
5413 expand_omp (root_omp_region);
5415 free_omp_regions ();
5418 /* Scan the CFG and build a tree of OMP regions, storing the root in
5419 root_omp_region. */
5421 static void
5422 build_omp_regions (void)
5424 gcc_assert (root_omp_region == NULL);
5425 calculate_dominance_info (CDI_DOMINATORS);
5426 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5429 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5431 static unsigned int
5432 execute_expand_omp (void)
5434 build_omp_regions ();
5436 if (!root_omp_region)
5437 return 0;
5439 if (dump_file)
5441 fprintf (dump_file, "\nOMP region tree\n\n");
5442 dump_omp_region (dump_file, root_omp_region, 0);
5443 fprintf (dump_file, "\n");
5446 remove_exit_barriers (root_omp_region);
5448 expand_omp (root_omp_region);
5450 cleanup_tree_cfg ();
5452 free_omp_regions ();
5454 return 0;
5457 /* OMP expansion -- the default pass, run before creation of SSA form. */
5459 static bool
5460 gate_expand_omp (void)
5462 return (flag_openmp != 0 && errorcount == 0);
5465 struct gimple_opt_pass pass_expand_omp =
5468 GIMPLE_PASS,
5469 "ompexp", /* name */
5470 gate_expand_omp, /* gate */
5471 execute_expand_omp, /* execute */
5472 NULL, /* sub */
5473 NULL, /* next */
5474 0, /* static_pass_number */
5475 TV_NONE, /* tv_id */
5476 PROP_gimple_any, /* properties_required */
5477 0, /* properties_provided */
5478 0, /* properties_destroyed */
5479 0, /* todo_flags_start */
5480 TODO_dump_func /* todo_flags_finish */
5484 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5486 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5487 CTX is the enclosing OMP context for the current statement. */
5489 static void
5490 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5492 tree block, control;
5493 gimple_stmt_iterator tgsi;
5494 unsigned i, len;
5495 gimple stmt, new_stmt, bind, t;
5496 gimple_seq ilist, dlist, olist, new_body, body;
5497 struct gimplify_ctx gctx;
5499 stmt = gsi_stmt (*gsi_p);
5501 push_gimplify_context (&gctx);
5503 dlist = NULL;
5504 ilist = NULL;
5505 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5506 &ilist, &dlist, ctx);
5508 tgsi = gsi_start (gimple_omp_body (stmt));
5509 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5510 continue;
5512 tgsi = gsi_start (gimple_omp_body (stmt));
5513 body = NULL;
5514 for (i = 0; i < len; i++, gsi_next (&tgsi))
5516 omp_context *sctx;
5517 gimple sec_start;
5519 sec_start = gsi_stmt (tgsi);
5520 sctx = maybe_lookup_ctx (sec_start);
5521 gcc_assert (sctx);
5523 gimple_seq_add_stmt (&body, sec_start);
5525 lower_omp (gimple_omp_body (sec_start), sctx);
5526 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5527 gimple_omp_set_body (sec_start, NULL);
5529 if (i == len - 1)
5531 gimple_seq l = NULL;
5532 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5533 &l, ctx);
5534 gimple_seq_add_seq (&body, l);
5535 gimple_omp_section_set_last (sec_start);
5538 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5541 block = make_node (BLOCK);
5542 bind = gimple_build_bind (NULL, body, block);
5544 olist = NULL;
5545 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5547 block = make_node (BLOCK);
5548 new_stmt = gimple_build_bind (NULL, NULL, block);
5550 pop_gimplify_context (new_stmt);
5551 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5552 BLOCK_VARS (block) = gimple_bind_vars (bind);
5553 if (BLOCK_VARS (block))
5554 TREE_USED (block) = 1;
5556 new_body = NULL;
5557 gimple_seq_add_seq (&new_body, ilist);
5558 gimple_seq_add_stmt (&new_body, stmt);
5559 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5560 gimple_seq_add_stmt (&new_body, bind);
5562 control = create_tmp_var (unsigned_type_node, ".section");
5563 t = gimple_build_omp_continue (control, control);
5564 gimple_omp_sections_set_control (stmt, control);
5565 gimple_seq_add_stmt (&new_body, t);
5567 gimple_seq_add_seq (&new_body, olist);
5568 gimple_seq_add_seq (&new_body, dlist);
5570 new_body = maybe_catch_exception (new_body);
5572 t = gimple_build_omp_return
5573 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5574 OMP_CLAUSE_NOWAIT));
5575 gimple_seq_add_stmt (&new_body, t);
5577 gimple_bind_set_body (new_stmt, new_body);
5578 gimple_omp_set_body (stmt, NULL);
5580 gsi_replace (gsi_p, new_stmt, true);
5584 /* A subroutine of lower_omp_single. Expand the simple form of
5585 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5587 if (GOMP_single_start ())
5588 BODY;
5589 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5591 FIXME. It may be better to delay expanding the logic of this until
5592 pass_expand_omp. The expanded logic may make the job more difficult
5593 for a synchronization analysis pass. */
5595 static void
5596 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5598 tree tlabel = create_artificial_label ();
5599 tree flabel = create_artificial_label ();
5600 gimple call, cond;
5601 tree lhs, decl;
5603 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5604 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5605 call = gimple_build_call (decl, 0);
5606 gimple_call_set_lhs (call, lhs);
5607 gimple_seq_add_stmt (pre_p, call);
5609 cond = gimple_build_cond (EQ_EXPR, lhs,
5610 fold_convert (TREE_TYPE (lhs), boolean_true_node),
5611 tlabel, flabel);
5612 gimple_seq_add_stmt (pre_p, cond);
5613 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5614 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5615 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5619 /* A subroutine of lower_omp_single. Expand the simple form of
5620 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
5622 #pragma omp single copyprivate (a, b, c)
5624 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5627 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5629 BODY;
5630 copyout.a = a;
5631 copyout.b = b;
5632 copyout.c = c;
5633 GOMP_single_copy_end (&copyout);
5635 else
5637 a = copyout_p->a;
5638 b = copyout_p->b;
5639 c = copyout_p->c;
5641 GOMP_barrier ();
5644 FIXME. It may be better to delay expanding the logic of this until
5645 pass_expand_omp. The expanded logic may make the job more difficult
5646 for a synchronization analysis pass. */
5648 static void
5649 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5651 tree ptr_type, t, l0, l1, l2;
5652 gimple_seq copyin_seq;
5654 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5656 ptr_type = build_pointer_type (ctx->record_type);
5657 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5659 l0 = create_artificial_label ();
5660 l1 = create_artificial_label ();
5661 l2 = create_artificial_label ();
5663 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5664 t = fold_convert (ptr_type, t);
5665 gimplify_assign (ctx->receiver_decl, t, pre_p);
5667 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5668 build_int_cst (ptr_type, 0));
5669 t = build3 (COND_EXPR, void_type_node, t,
5670 build_and_jump (&l0), build_and_jump (&l1));
5671 gimplify_and_add (t, pre_p);
5673 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5675 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5677 copyin_seq = NULL;
5678 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5679 &copyin_seq, ctx);
5681 t = build_fold_addr_expr (ctx->sender_decl);
5682 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
5683 gimplify_and_add (t, pre_p);
5685 t = build_and_jump (&l2);
5686 gimplify_and_add (t, pre_p);
5688 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5690 gimple_seq_add_seq (pre_p, copyin_seq);
5692 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5696 /* Expand code for an OpenMP single directive. */
5698 static void
5699 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5701 tree block;
5702 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5703 gimple_seq bind_body, dlist;
5704 struct gimplify_ctx gctx;
5706 push_gimplify_context (&gctx);
5708 bind_body = NULL;
5709 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5710 &bind_body, &dlist, ctx);
5711 lower_omp (gimple_omp_body (single_stmt), ctx);
5713 gimple_seq_add_stmt (&bind_body, single_stmt);
5715 if (ctx->record_type)
5716 lower_omp_single_copy (single_stmt, &bind_body, ctx);
5717 else
5718 lower_omp_single_simple (single_stmt, &bind_body);
5720 gimple_omp_set_body (single_stmt, NULL);
5722 gimple_seq_add_seq (&bind_body, dlist);
5724 bind_body = maybe_catch_exception (bind_body);
5726 t = gimple_build_omp_return
5727 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5728 OMP_CLAUSE_NOWAIT));
5729 gimple_seq_add_stmt (&bind_body, t);
5731 block = make_node (BLOCK);
5732 bind = gimple_build_bind (NULL, bind_body, block);
5734 pop_gimplify_context (bind);
5736 gimple_bind_append_vars (bind, ctx->block_vars);
5737 BLOCK_VARS (block) = ctx->block_vars;
5738 gsi_replace (gsi_p, bind, true);
5739 if (BLOCK_VARS (block))
5740 TREE_USED (block) = 1;
5744 /* Expand code for an OpenMP master directive. */
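/* A sketch of the result (illustration only): the master body ends up
   guarded roughly as

     if (omp_get_thread_num () != 0) goto lab;
     BODY;
     lab:

   followed by a GIMPLE_OMP_RETURN with the nowait flag set, since
   master has no implied barrier.  */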
5746 static void
5747 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5749 tree block, lab = NULL, x;
5750 gimple stmt = gsi_stmt (*gsi_p), bind;
5751 gimple_seq tseq;
5752 struct gimplify_ctx gctx;
5754 push_gimplify_context (&gctx);
5756 block = make_node (BLOCK);
5757 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5758 block);
5760 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5761 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5762 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
5763 tseq = NULL;
5764 gimplify_and_add (x, &tseq);
5765 gimple_bind_add_seq (bind, tseq);
5767 lower_omp (gimple_omp_body (stmt), ctx);
5768 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5769 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5770 gimple_omp_set_body (stmt, NULL);
5772 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5774 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5776 pop_gimplify_context (bind);
5778 gimple_bind_append_vars (bind, ctx->block_vars);
5779 BLOCK_VARS (block) = ctx->block_vars;
5780 gsi_replace (gsi_p, bind, true);
5784 /* Expand code for an OpenMP ordered directive. */
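/* Sketch of the lowering done below; the body is simply bracketed by the
   libgomp ordering calls:

	GOMP_ordered_start ();
	BODY;
	GOMP_ordered_end ();
	OMP_RETURN (nowait)
*/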
5786 static void
5787 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5789 tree block;
5790 gimple stmt = gsi_stmt (*gsi_p), bind, x;
5791 struct gimplify_ctx gctx;
5793 push_gimplify_context (&gctx);
5795 block = make_node (BLOCK);
5796 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5797 block);
5799 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5800 gimple_bind_add_stmt (bind, x);
5802 lower_omp (gimple_omp_body (stmt), ctx);
5803 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5804 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5805 gimple_omp_set_body (stmt, NULL);
5807 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5808 gimple_bind_add_stmt (bind, x);
5810 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5812 pop_gimplify_context (bind);
5814 gimple_bind_append_vars (bind, ctx->block_vars);
5815 BLOCK_VARS (block) = gimple_bind_vars (bind);
5816 gsi_replace (gsi_p, bind, true);
5820 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
5821 substitution of a couple of function calls. But in the NAMED case,
5822 it requires that the languages coordinate on a symbol name. It is therefore
5823 best put here in common code. */
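/* Sketch of the two forms generated below.  For the unnamed case:

	GOMP_critical_start ();   BODY;   GOMP_critical_end ();

   For  #pragma omp critical (foo)  a static, TREE_PUBLIC/DECL_COMMON mutex
   named ".gomp_critical_user_foo" is created (or reused via the splay tree
   below), so every translation unit that uses the same name ends up
   locking the same object:

	GOMP_critical_name_start (&.gomp_critical_user_foo);
	BODY;
	GOMP_critical_name_end (&.gomp_critical_user_foo);
*/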
5825 static GTY((param1_is (tree), param2_is (tree)))
5826 splay_tree critical_name_mutexes;
5828 static void
5829 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5831 tree block;
5832 tree name, lock, unlock;
5833 gimple stmt = gsi_stmt (*gsi_p), bind;
5834 gimple_seq tbody;
5835 struct gimplify_ctx gctx;
5837 name = gimple_omp_critical_name (stmt);
5838 if (name)
5840 tree decl;
5841 splay_tree_node n;
5843 if (!critical_name_mutexes)
5844 critical_name_mutexes
5845 = splay_tree_new_ggc (splay_tree_compare_pointers);
5847 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5848 if (n == NULL)
5850 char *new_str;
5852 decl = create_tmp_var_raw (ptr_type_node, NULL);
5854 new_str = ACONCAT ((".gomp_critical_user_",
5855 IDENTIFIER_POINTER (name), NULL));
5856 DECL_NAME (decl) = get_identifier (new_str);
5857 TREE_PUBLIC (decl) = 1;
5858 TREE_STATIC (decl) = 1;
5859 DECL_COMMON (decl) = 1;
5860 DECL_ARTIFICIAL (decl) = 1;
5861 DECL_IGNORED_P (decl) = 1;
5862 varpool_finalize_decl (decl);
5864 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5865 (splay_tree_value) decl);
5867 else
5868 decl = (tree) n->value;
5870 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5871 lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));
5873 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5874 unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
5876 else
5878 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5879 lock = build_call_expr (lock, 0);
5881 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
5882 unlock = build_call_expr (unlock, 0);
5885 push_gimplify_context (&gctx);
5887 block = make_node (BLOCK);
5888 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
5890 tbody = gimple_bind_body (bind);
5891 gimplify_and_add (lock, &tbody);
5892 gimple_bind_set_body (bind, tbody);
5894 lower_omp (gimple_omp_body (stmt), ctx);
5895 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5896 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5897 gimple_omp_set_body (stmt, NULL);
5899 tbody = gimple_bind_body (bind);
5900 gimplify_and_add (unlock, &tbody);
5901 gimple_bind_set_body (bind, tbody);
5903 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5905 pop_gimplify_context (bind);
5906 gimple_bind_append_vars (bind, ctx->block_vars);
5907 BLOCK_VARS (block) = gimple_bind_vars (bind);
5908 gsi_replace (gsi_p, bind, true);
5912 /* A subroutine of lower_omp_for. Generate code to emit the predicate
5913 for a lastprivate clause. Given a loop control predicate of (V
5914 cond N2), we gate the clause on (!(V cond N2)). The lowered form
5915 is appended to *DLIST, and iterator initialization is appended to
5916 *BODY_P. */
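/* For instance (a schematic example, not actual generated GIMPLE), given

	#pragma omp for lastprivate (x)
	for (i = 0; i < n; i++) ...

   the copy-back emitted by lower_lastprivate_clauses is gated as

	if (i >= n)            (or  i == n  when the step is +1 or -1)
	  x = <private copy of x>;

   and an initialization of the iteration variable is emitted up front so
   that threads which run no iterations never satisfy the predicate.  */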
5918 static void
5919 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
5920 gimple_seq *dlist, struct omp_context *ctx)
5922 tree clauses, cond, vinit;
5923 enum tree_code cond_code;
5924 gimple_seq stmts;
5926 cond_code = fd->loop.cond_code;
5927 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
5929 /* When possible, use a strict equality expression. This can let
5930 VRP-type optimizations deduce the value and remove a copy. */
5931 if (host_integerp (fd->loop.step, 0))
5933 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
5934 if (step == 1 || step == -1)
5935 cond_code = EQ_EXPR;
5938 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
5940 clauses = gimple_omp_for_clauses (fd->for_stmt);
5941 stmts = NULL;
5942 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
5943 if (!gimple_seq_empty_p (stmts))
5945 gimple_seq_add_seq (&stmts, *dlist);
5946 *dlist = stmts;
5948 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
5949 vinit = fd->loop.n1;
5950 if (cond_code == EQ_EXPR
5951 && host_integerp (fd->loop.n2, 0)
5952 && ! integer_zerop (fd->loop.n2))
5953 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
5955 /* Initialize the iterator variable, so that threads that don't execute
5956 any iterations don't execute the lastprivate clauses by accident. */
5957 gimplify_assign (fd->loop.v, vinit, body_p);
5962 /* Lower code for an OpenMP loop directive. */
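/* A sketch of the overall shape produced by lower_omp_for (details of the
   clause lowering vary):

	<bind>
	  private/firstprivate/reduction setup
	  pre-body; bounds and step forced into temporaries
	  #pragma omp for ...      <-- original GIMPLE_OMP_FOR, body emptied
	  BODY
	  OMP_CONTINUE (V, V)
	  reduction combination code
	  dlist: lastprivate copy-back, destructors
	  OMP_RETURN [nowait]

   The actual loop control code is generated later, in pass_expand_omp.  */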
5964 static void
5965 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5967 tree *rhs_p, block;
5968 struct omp_for_data fd;
5969 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
5970 gimple_seq omp_for_body, body, dlist, ilist;
5971 size_t i;
5972 struct gimplify_ctx gctx;
5974 push_gimplify_context (&gctx);
5976 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
5977 lower_omp (gimple_omp_body (stmt), ctx);
5979 block = make_node (BLOCK);
5980 new_stmt = gimple_build_bind (NULL, NULL, block);
5982 /* Move declarations of temporaries in the loop body before we make
5983 it go away. */
5984 omp_for_body = gimple_omp_body (stmt);
5985 if (!gimple_seq_empty_p (omp_for_body)
5986 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
5988 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
5989 gimple_bind_append_vars (new_stmt, vars);
5992 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
5993 ilist = NULL;
5994 dlist = NULL;
5995 body = NULL;
5996 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
5997 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
5999 /* Lower the header expressions. At this point, we can assume that
6000 the header is of the form:
6002 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6004 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6005 using the .omp_data_s mapping, if needed. */
6006 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6008 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6009 if (!is_gimple_min_invariant (*rhs_p))
6010 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6012 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6013 if (!is_gimple_min_invariant (*rhs_p))
6014 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6016 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6017 if (!is_gimple_min_invariant (*rhs_p))
6018 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6021 /* Once lowered, extract the bounds and clauses. */
6022 extract_omp_for_data (stmt, &fd, NULL);
6024 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6026 gimple_seq_add_stmt (&body, stmt);
6027 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6029 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6030 fd.loop.v));
6032 /* After the loop, add exit clauses. */
6033 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6034 gimple_seq_add_seq (&body, dlist);
6036 body = maybe_catch_exception (body);
6038 /* Region exit marker goes at the end of the loop body. */
6039 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6041 pop_gimplify_context (new_stmt);
6043 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6044 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6045 if (BLOCK_VARS (block))
6046 TREE_USED (block) = 1;
6048 gimple_bind_set_body (new_stmt, body);
6049 gimple_omp_set_body (stmt, NULL);
6050 gimple_omp_for_set_pre_body (stmt, NULL);
6051 gsi_replace (gsi_p, new_stmt, true);
6054 /* Callback for walk_stmts. Check if the current statement only contains
6055 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
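/* WI->INFO points to an int: 0 while nothing has been seen, 1 after exactly
   one workshare statement, -1 for anything else.  A hedged example of what
   this enables: a parallel whose body is nothing but

	#pragma omp for
	for (...) ...

   is marked "combined", which lets expansion use the specialized
   GOMP_parallel_loop_* and GOMP_parallel_sections_start entry points
   instead of a plain GOMP_parallel_start.  */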
6057 static tree
6058 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6059 bool *handled_ops_p,
6060 struct walk_stmt_info *wi)
6062 int *info = (int *) wi->info;
6063 gimple stmt = gsi_stmt (*gsi_p);
6065 *handled_ops_p = true;
6066 switch (gimple_code (stmt))
6068 WALK_SUBSTMTS;
6070 case GIMPLE_OMP_FOR:
6071 case GIMPLE_OMP_SECTIONS:
6072 *info = *info == 0 ? 1 : -1;
6073 break;
6074 default:
6075 *info = -1;
6076 break;
6078 return NULL;
6081 struct omp_taskcopy_context
6083 /* This field must be at the beginning, as we do "inheritance": Some
6084 callback functions for tree-inline.c (e.g., omp_copy_decl)
6085 receive a copy_body_data pointer that is up-casted to an
6086 omp_context pointer. */
6087 copy_body_data cb;
6088 omp_context *ctx;
6091 static tree
6092 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6094 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6096 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6097 return create_tmp_var (TREE_TYPE (var), NULL);
6099 return var;
6102 static tree
6103 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6105 tree name, new_fields = NULL, type, f;
6107 type = lang_hooks.types.make_type (RECORD_TYPE);
6108 name = DECL_NAME (TYPE_NAME (orig_type));
6109 name = build_decl (TYPE_DECL, name, type);
6110 TYPE_NAME (type) = name;
6112 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6114 tree new_f = copy_node (f);
6115 DECL_CONTEXT (new_f) = type;
6116 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6117 TREE_CHAIN (new_f) = new_fields;
6118 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6119 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6120 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6121 &tcctx->cb, NULL);
6122 new_fields = new_f;
6123 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6125 TYPE_FIELDS (type) = nreverse (new_fields);
6126 layout_type (type);
6127 return type;
6130 /* Create task copyfn. */
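/* Roughly, for   #pragma omp task shared (s) firstprivate (x)
   the copy function built here behaves like (field and type names are
   schematic, not the real generated identifiers):

	void copyfn (struct <record> *dst, struct <srecord> *src)
	{
	  dst->s = src->s;        forward pointers to shared variables
	  dst->x = src->x;        copy-construct firstprivate variables
	}

   GOMP_task invokes it to initialize the task's own data block from the
   data captured where the task construct was encountered; VLA
   firstprivates additionally get their pointer fields redirected to the
   freshly copied storage.  */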
6132 static void
6133 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6135 struct function *child_cfun;
6136 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6137 tree record_type, srecord_type, bind, list;
6138 bool record_needs_remap = false, srecord_needs_remap = false;
6139 splay_tree_node n;
6140 struct omp_taskcopy_context tcctx;
6141 struct gimplify_ctx gctx;
6143 child_fn = gimple_omp_task_copy_fn (task_stmt);
6144 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6145 gcc_assert (child_cfun->cfg == NULL);
6146 child_cfun->dont_save_pending_sizes_p = 1;
6147 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6149 /* Reset DECL_CONTEXT on function arguments. */
6150 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
6151 DECL_CONTEXT (t) = child_fn;
6153 /* Populate the function. */
6154 push_gimplify_context (&gctx);
6155 current_function_decl = child_fn;
6157 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6158 TREE_SIDE_EFFECTS (bind) = 1;
6159 list = NULL;
6160 DECL_SAVED_TREE (child_fn) = bind;
6161 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6163 /* Remap src and dst argument types if needed. */
6164 record_type = ctx->record_type;
6165 srecord_type = ctx->srecord_type;
6166 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
6167 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6169 record_needs_remap = true;
6170 break;
6172 for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
6173 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6175 srecord_needs_remap = true;
6176 break;
6179 if (record_needs_remap || srecord_needs_remap)
6181 memset (&tcctx, '\0', sizeof (tcctx));
6182 tcctx.cb.src_fn = ctx->cb.src_fn;
6183 tcctx.cb.dst_fn = child_fn;
6184 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6185 tcctx.cb.dst_node = tcctx.cb.src_node;
6186 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6187 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6188 tcctx.cb.eh_region = -1;
6189 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6190 tcctx.cb.decl_map = pointer_map_create ();
6191 tcctx.ctx = ctx;
6193 if (record_needs_remap)
6194 record_type = task_copyfn_remap_type (&tcctx, record_type);
6195 if (srecord_needs_remap)
6196 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6198 else
6199 tcctx.cb.decl_map = NULL;
6201 push_cfun (child_cfun);
6203 arg = DECL_ARGUMENTS (child_fn);
6204 TREE_TYPE (arg) = build_pointer_type (record_type);
6205 sarg = TREE_CHAIN (arg);
6206 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6208 /* First pass: initialize temporaries used in record_type and srecord_type
6209 sizes and field offsets. */
6210 if (tcctx.cb.decl_map)
6211 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6212 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6214 tree *p;
6216 decl = OMP_CLAUSE_DECL (c);
6217 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6218 if (p == NULL)
6219 continue;
6220 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6221 sf = (tree) n->value;
6222 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6223 src = build_fold_indirect_ref (sarg);
6224 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6225 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6226 append_to_statement_list (t, &list);
6229 /* Second pass: copy shared var pointers and copy construct non-VLA
6230 firstprivate vars. */
6231 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6232 switch (OMP_CLAUSE_CODE (c))
6234 case OMP_CLAUSE_SHARED:
6235 decl = OMP_CLAUSE_DECL (c);
6236 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6237 if (n == NULL)
6238 break;
6239 f = (tree) n->value;
6240 if (tcctx.cb.decl_map)
6241 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6242 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6243 sf = (tree) n->value;
6244 if (tcctx.cb.decl_map)
6245 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6246 src = build_fold_indirect_ref (sarg);
6247 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6248 dst = build_fold_indirect_ref (arg);
6249 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6250 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6251 append_to_statement_list (t, &list);
6252 break;
6253 case OMP_CLAUSE_FIRSTPRIVATE:
6254 decl = OMP_CLAUSE_DECL (c);
6255 if (is_variable_sized (decl))
6256 break;
6257 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6258 if (n == NULL)
6259 break;
6260 f = (tree) n->value;
6261 if (tcctx.cb.decl_map)
6262 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6263 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6264 if (n != NULL)
6266 sf = (tree) n->value;
6267 if (tcctx.cb.decl_map)
6268 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6269 src = build_fold_indirect_ref (sarg);
6270 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6271 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6272 src = build_fold_indirect_ref (src);
6274 else
6275 src = decl;
6276 dst = build_fold_indirect_ref (arg);
6277 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6278 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6279 append_to_statement_list (t, &list);
6280 break;
6281 case OMP_CLAUSE_PRIVATE:
6282 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6283 break;
6284 decl = OMP_CLAUSE_DECL (c);
6285 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6286 f = (tree) n->value;
6287 if (tcctx.cb.decl_map)
6288 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6289 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6290 if (n != NULL)
6292 sf = (tree) n->value;
6293 if (tcctx.cb.decl_map)
6294 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6295 src = build_fold_indirect_ref (sarg);
6296 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6297 if (use_pointer_for_field (decl, NULL))
6298 src = build_fold_indirect_ref (src);
6300 else
6301 src = decl;
6302 dst = build_fold_indirect_ref (arg);
6303 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6304 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6305 append_to_statement_list (t, &list);
6306 break;
6307 default:
6308 break;
6311 /* Last pass: handle VLA firstprivates. */
6312 if (tcctx.cb.decl_map)
6313 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6314 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6316 tree ind, ptr, df;
6318 decl = OMP_CLAUSE_DECL (c);
6319 if (!is_variable_sized (decl))
6320 continue;
6321 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6322 if (n == NULL)
6323 continue;
6324 f = (tree) n->value;
6325 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6326 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6327 ind = DECL_VALUE_EXPR (decl);
6328 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6329 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6330 n = splay_tree_lookup (ctx->sfield_map,
6331 (splay_tree_key) TREE_OPERAND (ind, 0));
6332 sf = (tree) n->value;
6333 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6334 src = build_fold_indirect_ref (sarg);
6335 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6336 src = build_fold_indirect_ref (src);
6337 dst = build_fold_indirect_ref (arg);
6338 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6339 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6340 append_to_statement_list (t, &list);
6341 n = splay_tree_lookup (ctx->field_map,
6342 (splay_tree_key) TREE_OPERAND (ind, 0));
6343 df = (tree) n->value;
6344 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6345 ptr = build_fold_indirect_ref (arg);
6346 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6347 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6348 build_fold_addr_expr (dst));
6349 append_to_statement_list (t, &list);
6352 t = build1 (RETURN_EXPR, void_type_node, NULL);
6353 append_to_statement_list (t, &list);
6355 if (tcctx.cb.decl_map)
6356 pointer_map_destroy (tcctx.cb.decl_map);
6357 pop_gimplify_context (NULL);
6358 BIND_EXPR_BODY (bind) = list;
6359 pop_cfun ();
6360 current_function_decl = ctx->cb.src_fn;
6363 /* Lower the OpenMP parallel or task directive in the current statement
6364 in GSI_P. CTX holds context information for the directive. */
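/* A sketch of the result for a parallel (the task case is analogous, but
   may also use the copy function built above):

	.omp_data_o.x = x;                  <-- send clauses / shared vars
	#pragma omp parallel [data arg = .omp_data_o]
	  .omp_data_i = &.omp_data_o;       <-- receiver setup, remapped later
	  private setup; BODY; reduction merge
	  OMP_RETURN
	x = .omp_data_o.x;                  <-- copy-back (olist), if any

   The body is still inline at this point; pass_expand_omp outlines it
   into the child function recorded during scanning.  */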
6366 static void
6367 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6369 tree clauses;
6370 tree child_fn, t;
6371 gimple stmt = gsi_stmt (*gsi_p);
6372 gimple par_bind, bind;
6373 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6374 struct gimplify_ctx gctx;
6376 clauses = gimple_omp_taskreg_clauses (stmt);
6377 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6378 par_body = gimple_bind_body (par_bind);
6379 child_fn = ctx->cb.dst_fn;
6380 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6381 && !gimple_omp_parallel_combined_p (stmt))
6383 struct walk_stmt_info wi;
6384 int ws_num = 0;
6386 memset (&wi, 0, sizeof (wi));
6387 wi.info = &ws_num;
6388 wi.val_only = true;
6389 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6390 if (ws_num == 1)
6391 gimple_omp_parallel_set_combined_p (stmt, true);
6393 if (ctx->srecord_type)
6394 create_task_copyfn (stmt, ctx);
6396 push_gimplify_context (&gctx);
6398 par_olist = NULL;
6399 par_ilist = NULL;
6400 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6401 lower_omp (par_body, ctx);
6402 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6403 lower_reduction_clauses (clauses, &par_olist, ctx);
6405 /* Declare all the variables created by mapping and the variables
6406 declared in the scope of the parallel body. */
6407 record_vars_into (ctx->block_vars, child_fn);
6408 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6410 if (ctx->record_type)
6412 ctx->sender_decl
6413 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6414 : ctx->record_type, ".omp_data_o");
6415 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6416 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6419 olist = NULL;
6420 ilist = NULL;
6421 lower_send_clauses (clauses, &ilist, &olist, ctx);
6422 lower_send_shared_vars (&ilist, &olist, ctx);
6424 /* Once all the expansions are done, sequence all the different
6425 fragments inside gimple_omp_body. */
6427 new_body = NULL;
6429 if (ctx->record_type)
6431 t = build_fold_addr_expr (ctx->sender_decl);
6432 /* fixup_child_record_type might have changed receiver_decl's type. */
6433 t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
6434 gimple_seq_add_stmt (&new_body,
6435 gimple_build_assign (ctx->receiver_decl, t));
6438 gimple_seq_add_seq (&new_body, par_ilist);
6439 gimple_seq_add_seq (&new_body, par_body);
6440 gimple_seq_add_seq (&new_body, par_olist);
6441 new_body = maybe_catch_exception (new_body);
6442 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6443 gimple_omp_set_body (stmt, new_body);
6445 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6446 gimple_bind_add_stmt (bind, stmt);
6447 if (ilist || olist)
6449 gimple_seq_add_stmt (&ilist, bind);
6450 gimple_seq_add_seq (&ilist, olist);
6451 bind = gimple_build_bind (NULL, ilist, NULL);
6454 gsi_replace (gsi_p, bind, true);
6456 pop_gimplify_context (NULL);
6459 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6460 regimplified. If DATA is non-NULL, lower_omp_1 is outside
6461 of OpenMP context, but with task_shared_vars set. */
6463 static tree
6464 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6465 void *data)
6467 tree t = *tp;
6469 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6470 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6471 return t;
6473 if (task_shared_vars
6474 && DECL_P (t)
6475 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6476 return t;
6478 /* If a global variable has been privatized, TREE_CONSTANT on
6479 ADDR_EXPR might be wrong. */
6480 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6481 recompute_tree_invariant_for_addr_expr (t);
6483 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6484 return NULL_TREE;
6487 static void
6488 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6490 gimple stmt = gsi_stmt (*gsi_p);
6491 struct walk_stmt_info wi;
6493 if (gimple_has_location (stmt))
6494 input_location = gimple_location (stmt);
6496 if (task_shared_vars)
6497 memset (&wi, '\0', sizeof (wi));
6499 /* If we have issued syntax errors, avoid doing any heavy lifting.
6500 Just replace the OpenMP directives with a NOP to avoid
6501 confusing RTL expansion. */
6502 if (errorcount && is_gimple_omp (stmt))
6504 gsi_replace (gsi_p, gimple_build_nop (), true);
6505 return;
6508 switch (gimple_code (stmt))
6510 case GIMPLE_COND:
6511 if ((ctx || task_shared_vars)
6512 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6513 ctx ? NULL : &wi, NULL)
6514 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6515 ctx ? NULL : &wi, NULL)))
6516 gimple_regimplify_operands (stmt, gsi_p);
6517 break;
6518 case GIMPLE_CATCH:
6519 lower_omp (gimple_catch_handler (stmt), ctx);
6520 break;
6521 case GIMPLE_EH_FILTER:
6522 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6523 break;
6524 case GIMPLE_TRY:
6525 lower_omp (gimple_try_eval (stmt), ctx);
6526 lower_omp (gimple_try_cleanup (stmt), ctx);
6527 break;
6528 case GIMPLE_BIND:
6529 lower_omp (gimple_bind_body (stmt), ctx);
6530 break;
6531 case GIMPLE_OMP_PARALLEL:
6532 case GIMPLE_OMP_TASK:
6533 ctx = maybe_lookup_ctx (stmt);
6534 lower_omp_taskreg (gsi_p, ctx);
6535 break;
6536 case GIMPLE_OMP_FOR:
6537 ctx = maybe_lookup_ctx (stmt);
6538 gcc_assert (ctx);
6539 lower_omp_for (gsi_p, ctx);
6540 break;
6541 case GIMPLE_OMP_SECTIONS:
6542 ctx = maybe_lookup_ctx (stmt);
6543 gcc_assert (ctx);
6544 lower_omp_sections (gsi_p, ctx);
6545 break;
6546 case GIMPLE_OMP_SINGLE:
6547 ctx = maybe_lookup_ctx (stmt);
6548 gcc_assert (ctx);
6549 lower_omp_single (gsi_p, ctx);
6550 break;
6551 case GIMPLE_OMP_MASTER:
6552 ctx = maybe_lookup_ctx (stmt);
6553 gcc_assert (ctx);
6554 lower_omp_master (gsi_p, ctx);
6555 break;
6556 case GIMPLE_OMP_ORDERED:
6557 ctx = maybe_lookup_ctx (stmt);
6558 gcc_assert (ctx);
6559 lower_omp_ordered (gsi_p, ctx);
6560 break;
6561 case GIMPLE_OMP_CRITICAL:
6562 ctx = maybe_lookup_ctx (stmt);
6563 gcc_assert (ctx);
6564 lower_omp_critical (gsi_p, ctx);
6565 break;
6566 case GIMPLE_OMP_ATOMIC_LOAD:
6567 if ((ctx || task_shared_vars)
6568 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6569 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6570 gimple_regimplify_operands (stmt, gsi_p);
6571 break;
6572 default:
6573 if ((ctx || task_shared_vars)
6574 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6575 ctx ? NULL : &wi))
6576 gimple_regimplify_operands (stmt, gsi_p);
6577 break;
6581 static void
6582 lower_omp (gimple_seq body, omp_context *ctx)
6584 location_t saved_location = input_location;
6585 gimple_stmt_iterator gsi;
6586 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6587 lower_omp_1 (&gsi, ctx);
6588 input_location = saved_location;
6591 /* Main entry point. */
6593 static unsigned int
6594 execute_lower_omp (void)
6596 gimple_seq body;
6598 /* This pass always runs, to provide PROP_gimple_lomp.
6599 But there is nothing to do unless -fopenmp is given. */
6600 if (flag_openmp == 0)
6601 return 0;
6603 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6604 delete_omp_context);
6606 body = gimple_body (current_function_decl);
6607 scan_omp (body, NULL);
6608 gcc_assert (taskreg_nesting_level == 0);
6610 if (all_contexts->root)
6612 struct gimplify_ctx gctx;
6614 if (task_shared_vars)
6615 push_gimplify_context (&gctx);
6616 lower_omp (body, NULL);
6617 if (task_shared_vars)
6618 pop_gimplify_context (NULL);
6621 if (all_contexts)
6623 splay_tree_delete (all_contexts);
6624 all_contexts = NULL;
6626 BITMAP_FREE (task_shared_vars);
6627 return 0;
6630 struct gimple_opt_pass pass_lower_omp =
6633 GIMPLE_PASS,
6634 "omplower", /* name */
6635 NULL, /* gate */
6636 execute_lower_omp, /* execute */
6637 NULL, /* sub */
6638 NULL, /* next */
6639 0, /* static_pass_number */
6640 TV_NONE, /* tv_id */
6641 PROP_gimple_any, /* properties_required */
6642 PROP_gimple_lomp, /* properties_provided */
6643 0, /* properties_destroyed */
6644 0, /* todo_flags_start */
6645 TODO_dump_func /* todo_flags_finish */
6649 /* The following is a utility to diagnose OpenMP structured block violations.
6650 It is not part of the "omplower" pass, as that's invoked too late. It
6651 should be invoked by the respective front ends after gimplification. */
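/* For example, the following (invalid) code branches out of a structured
   block and must be rejected:

	#pragma omp parallel
	  {
	    goto fail;
	  }
	fail: ;

   diagnose_sb_1 records, for every label, the innermost OpenMP construct
   containing it; diagnose_sb_2 then compares that context with the context
   of each goto/switch/return and reports a mismatch via diagnose_sb_0.  */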
6653 static splay_tree all_labels;
6655 /* Check for mismatched contexts and generate an error if needed. Return
6656 true if an error is detected. */
6658 static bool
6659 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6660 gimple branch_ctx, gimple label_ctx)
6662 if (label_ctx == branch_ctx)
6663 return false;
6667 /* Previously we kept track of the label's entire context in diagnose_sb_[12]
6668 so we could traverse it and issue a correct "exit" or "enter" error
6669 message upon a structured block violation.
6671 We built the context by tree_cons'ing a list, but there is
6672 no easy counterpart in gimple tuples. It seems like far too much work
6673 for issuing exit/enter error messages. If someone really misses the
6674 distinct error message... patches welcome. */
6677 #if 0
6678 /* Try to avoid confusing the user by producing an error message
6679 with correct "exit" or "enter" verbiage. We prefer "exit"
6680 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6681 if (branch_ctx == NULL)
6682 exit_p = false;
6683 else
6685 while (label_ctx)
6687 if (TREE_VALUE (label_ctx) == branch_ctx)
6689 exit_p = false;
6690 break;
6692 label_ctx = TREE_CHAIN (label_ctx);
6696 if (exit_p)
6697 error ("invalid exit from OpenMP structured block");
6698 else
6699 error ("invalid entry to OpenMP structured block");
6700 #endif
6702 /* If it's obvious we have an invalid entry, be specific about the error. */
6703 if (branch_ctx == NULL)
6704 error ("invalid entry to OpenMP structured block");
6705 else
6706 /* Otherwise, be vague and lazy, but efficient. */
6707 error ("invalid branch to/from an OpenMP structured block");
6709 gsi_replace (gsi_p, gimple_build_nop (), false);
6710 return true;
6713 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
6714 where each label is found. */
6716 static tree
6717 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6718 struct walk_stmt_info *wi)
6720 gimple context = (gimple) wi->info;
6721 gimple inner_context;
6722 gimple stmt = gsi_stmt (*gsi_p);
6724 *handled_ops_p = true;
6726 switch (gimple_code (stmt))
6728 WALK_SUBSTMTS;
6730 case GIMPLE_OMP_PARALLEL:
6731 case GIMPLE_OMP_TASK:
6732 case GIMPLE_OMP_SECTIONS:
6733 case GIMPLE_OMP_SINGLE:
6734 case GIMPLE_OMP_SECTION:
6735 case GIMPLE_OMP_MASTER:
6736 case GIMPLE_OMP_ORDERED:
6737 case GIMPLE_OMP_CRITICAL:
6738 /* The minimal context here is just the current OMP construct. */
6739 inner_context = stmt;
6740 wi->info = inner_context;
6741 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6742 wi->info = context;
6743 break;
6745 case GIMPLE_OMP_FOR:
6746 inner_context = stmt;
6747 wi->info = inner_context;
6748 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6749 walk them. */
6750 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6751 diagnose_sb_1, NULL, wi);
6752 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6753 wi->info = context;
6754 break;
6756 case GIMPLE_LABEL:
6757 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
6758 (splay_tree_value) context);
6759 break;
6761 default:
6762 break;
6765 return NULL_TREE;
6768 /* Pass 2: Check each branch and see if its context differs from that of
6769 the destination label. */
6771 static tree
6772 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6773 struct walk_stmt_info *wi)
6775 gimple context = (gimple) wi->info;
6776 splay_tree_node n;
6777 gimple stmt = gsi_stmt (*gsi_p);
6779 *handled_ops_p = true;
6781 switch (gimple_code (stmt))
6783 WALK_SUBSTMTS;
6785 case GIMPLE_OMP_PARALLEL:
6786 case GIMPLE_OMP_TASK:
6787 case GIMPLE_OMP_SECTIONS:
6788 case GIMPLE_OMP_SINGLE:
6789 case GIMPLE_OMP_SECTION:
6790 case GIMPLE_OMP_MASTER:
6791 case GIMPLE_OMP_ORDERED:
6792 case GIMPLE_OMP_CRITICAL:
6793 wi->info = stmt;
6794 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6795 wi->info = context;
6796 break;
6798 case GIMPLE_OMP_FOR:
6799 wi->info = stmt;
6800 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6801 walk them. */
6802 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6803 diagnose_sb_2, NULL, wi);
6804 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6805 wi->info = context;
6806 break;
6808 case GIMPLE_GOTO:
6810 tree lab = gimple_goto_dest (stmt);
6811 if (TREE_CODE (lab) != LABEL_DECL)
6812 break;
6814 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6815 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
6817 break;
6819 case GIMPLE_SWITCH:
6821 unsigned int i;
6822 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
6824 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
6825 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6826 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
6827 break;
6830 break;
6832 case GIMPLE_RETURN:
6833 diagnose_sb_0 (gsi_p, context, NULL);
6834 break;
6836 default:
6837 break;
6840 return NULL_TREE;
6843 void
6844 diagnose_omp_structured_block_errors (tree fndecl)
6846 tree save_current = current_function_decl;
6847 struct walk_stmt_info wi;
6848 struct function *old_cfun = cfun;
6849 gimple_seq body = gimple_body (fndecl);
6851 current_function_decl = fndecl;
6852 set_cfun (DECL_STRUCT_FUNCTION (fndecl));
6854 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
6856 memset (&wi, 0, sizeof (wi));
6857 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
6859 memset (&wi, 0, sizeof (wi));
6860 wi.want_locations = true;
6861 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
6863 splay_tree_delete (all_labels);
6864 all_labels = NULL;
6866 set_cfun (old_cfun);
6867 current_function_decl = save_current;
6870 #include "gt-omp-low.h"