/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "tree-flow.h"
#include "timevar.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "tree-pass.h"
#include "ggc.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"

/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */

/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages
     regarding invalid gotos.  The outermost ctx is depth 1, with depth 0
     being reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;

struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};


static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
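
/* Editor's note (illustrative sketch, not part of the original pass):
   a typical use of find_omp_clause is testing a loop for an ordered
   clause, as check_omp_nesting_restrictions does further below:

     tree c = find_omp_clause (gimple_omp_for_clauses (for_stmt),
                               OMP_CLAUSE_ORDERED);

   C is NULL_TREE when no such clause is present, otherwise it is the
   clause tree itself.  */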

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
         || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}

/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
                      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
        fd->have_nowait = true;
        break;
      case OMP_CLAUSE_ORDERED:
        fd->have_ordered = true;
        break;
      case OMP_CLAUSE_SCHEDULE:
        fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
        fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
        break;
      case OMP_CLAUSE_COLLAPSE:
        if (fd->collapse > 1)
          {
            collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
            collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
          }
      default:
        break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
         static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
          || fd->have_ordered
          || fd->collapse > 1)
        fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
                         ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
        loop = &fd->loop;
      else if (loops != NULL)
        loop = loops + i;
      else
        loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
                  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
        {
        case LT_EXPR:
        case GT_EXPR:
          break;
        case LE_EXPR:
          if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
            loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
          else
            loop->n2 = fold_build2_loc (loc,
                                        PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
                                        build_int_cst (TREE_TYPE (loop->n2), 1));
          loop->cond_code = LT_EXPR;
          break;
        case GE_EXPR:
          if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
            loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
          else
            loop->n2 = fold_build2_loc (loc,
                                        MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
                                        build_int_cst (TREE_TYPE (loop->n2), 1));
          loop->cond_code = GT_EXPR;
          break;
        default:
          gcc_unreachable ();
        }

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
        {
        case PLUS_EXPR:
        case POINTER_PLUS_EXPR:
          loop->step = TREE_OPERAND (t, 1);
          break;
        case MINUS_EXPR:
          loop->step = TREE_OPERAND (t, 1);
          loop->step = fold_build1_loc (loc,
                                        NEGATE_EXPR, TREE_TYPE (loop->step),
                                        loop->step);
          break;
        default:
          gcc_unreachable ();
        }

      if (iter_type != long_long_unsigned_type_node)
        {
          if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
            iter_type = long_long_unsigned_type_node;
          else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
                   && TYPE_PRECISION (TREE_TYPE (loop->v))
                      >= TYPE_PRECISION (iter_type))
            {
              tree n;

              if (loop->cond_code == LT_EXPR)
                n = fold_build2_loc (loc,
                                     PLUS_EXPR, TREE_TYPE (loop->v),
                                     loop->n2, loop->step);
              else
                n = loop->n1;
              if (TREE_CODE (n) != INTEGER_CST
                  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
                iter_type = long_long_unsigned_type_node;
            }
          else if (TYPE_PRECISION (TREE_TYPE (loop->v))
                   > TYPE_PRECISION (iter_type))
            {
              tree n1, n2;

              if (loop->cond_code == LT_EXPR)
                {
                  n1 = loop->n1;
                  n2 = fold_build2_loc (loc,
                                        PLUS_EXPR, TREE_TYPE (loop->v),
                                        loop->n2, loop->step);
                }
              else
                {
                  n1 = fold_build2_loc (loc,
                                        MINUS_EXPR, TREE_TYPE (loop->v),
                                        loop->n2, loop->step);
                  n2 = loop->n1;
                }
              if (TREE_CODE (n1) != INTEGER_CST
                  || TREE_CODE (n2) != INTEGER_CST
                  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
                  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
                iter_type = long_long_unsigned_type_node;
            }
        }

      if (collapse_count && *collapse_count == NULL)
        {
          if ((i == 0 || count != NULL_TREE)
              && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
              && TREE_CONSTANT (loop->n1)
              && TREE_CONSTANT (loop->n2)
              && TREE_CODE (loop->step) == INTEGER_CST)
            {
              tree itype = TREE_TYPE (loop->v);

              if (POINTER_TYPE_P (itype))
                itype
                  = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
              t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
              t = fold_build2_loc (loc,
                                   PLUS_EXPR, itype,
                                   fold_convert_loc (loc, itype, loop->step), t);
              t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
                                   fold_convert_loc (loc, itype, loop->n2));
              t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
                                   fold_convert_loc (loc, itype, loop->n1));
              if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
                                     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
                                     fold_build1_loc (loc, NEGATE_EXPR, itype,
                                                      fold_convert_loc (loc, itype,
                                                                        loop->step)));
              else
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
                                     fold_convert_loc (loc, itype, loop->step));
              t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
              if (count != NULL_TREE)
                count = fold_build2_loc (loc,
                                         MULT_EXPR, long_long_unsigned_type_node,
                                         count, t);
              else
                count = t;
              if (TREE_CODE (count) != INTEGER_CST)
                count = NULL_TREE;
            }
          else
            count = NULL_TREE;
        }
    }

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
        iter_type = long_long_unsigned_type_node;
      else
        iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
        *collapse_count = fold_convert_loc (loc, iter_type, count);
      else
        *collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
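
/* Editor's note (worked example, not from the original source): for

     #pragma omp for schedule (dynamic, 4)
     for (i = 0; i < n; i++) ...

   this routine fills *FD with loop.v = i, loop.n1 = 0, loop.n2 = n,
   loop.step = 1, loop.cond_code = LT_EXPR, collapse = 1,
   sched_kind = OMP_CLAUSE_SCHEDULE_DYNAMIC, chunk_size = 4 and
   iter_type = long.  A loop written with i <= n would have had its
   final value canonicalized to n + 1 with cond_code LT_EXPR, per the
   LE_EXPR case above.  */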

/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB.  If so, then we cannot emit the
   combined call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}

/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static VEC(tree,gc) *
get_ws_args_for (gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  VEC(tree,gc) *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      VEC_quick_push (tree, ws_args, t);

      if (fd.chunk_size)
        {
          t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
          VEC_quick_push (tree, ws_args, t);
        }

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
         GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
         the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      ws_args = VEC_alloc (tree, gc, 1);
      VEC_quick_push (tree, ws_args, t);
      return ws_args;
    }

  gcc_unreachable ();
}
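
/* Editor's note (illustrative, not from the original source): for the
   schedule (dynamic, 4) loop shown in the extract_omp_for_data example
   above, the returned vector holds the four values 0, n, 1 and 4
   converted to long, i.e. the n1, n2, step and chunk arguments expected
   by libgomp's combined parallel+loop entry points such as
   GOMP_parallel_loop_dynamic_start.  */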

/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
          && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
          || (last_and_only_stmt (ws_entry_bb)
              && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
        {
          /* If this is a combined parallel loop, we need to determine
             whether or not to use the combined library calls.  There
             are two cases where we do not apply the transformation:
             static loops and any kind of ordered loop.  In the first
             case, we already open code the loop so there is no need
             to do anything else.  In the latter case, the combined
             parallel loop call would still need extra synchronization
             to implement ordered semantics, so there would not be any
             gain in using the combined call.  */
          tree clauses = gimple_omp_for_clauses (ws_stmt);
          tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
          if (c == NULL
              || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
              || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
            {
              region->is_combined_parallel = false;
              region->inner->is_combined_parallel = false;
              return;
            }
        }

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}

/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows the variable not to have been entered; otherwise we assert
   that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
                         ? ctx->sfield_map : ctx->field_map,
                         (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
         be passing an address in this case?  Should we simply assert
         this to be false, or should we have a cleanup pass that removes
         these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
        return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
         without analyzing the expression whether or not its location
         is accessible to anyone else.  In the case of nested parallel
         regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
        return true;

      /* Do not use copy-in/copy-out for variables that have their
         address taken.  */
      if (TREE_ADDRESSABLE (decl))
        return true;

      /* Disallow copy-in/out in nested parallel if
         decl is shared in outer parallel, otherwise
         each thread could store the shared variable
         in its own copy-in location, making the
         variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
        {
          omp_context *up;

          for (up = shared_ctx->outer; up; up = up->outer)
            if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
              break;

          if (up)
            {
              tree c;

              for (c = gimple_omp_taskreg_clauses (up->stmt);
                   c; c = OMP_CLAUSE_CHAIN (c))
                if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
                    && OMP_CLAUSE_DECL (c) == decl)
                  break;

              if (c)
                goto maybe_mark_addressable_and_ret;
            }
        }

      /* For tasks avoid using copy-in/out, unless they are readonly
         (in which case just copy-in is used).  As tasks can be
         deferred or executed in a different thread, when GOMP_task
         returns, the task hasn't necessarily terminated.  */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
        {
          tree outer;
        maybe_mark_addressable_and_ret:
          outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
          if (is_gimple_reg (outer))
            {
              /* Taking address of OUTER in lower_send_shared_vars
                 might need regimplification of everything that uses the
                 variable.  */
              if (!task_shared_vars)
                task_shared_vars = BITMAP_ALLOC (NULL);
              bitmap_set_bit (task_shared_vars, DECL_UID (outer));
              TREE_ADDRESSABLE (outer) = 1;
            }
          return true;
        }
    }

  return false;
}
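
/* Editor's note (illustrative summary, not from the original source):
   for a shared clause the decision above works out to

     int x;          local scalar           -> copy-in/copy-out
     int a[10];      aggregate              -> passed by pointer
     static int s;   static storage         -> passed by pointer
     int y; ... &y;  address taken          -> passed by pointer

   and a non-readonly decl shared into a task always takes the
   by-pointer path, with the outer decl marked TREE_ADDRESSABLE.  */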

/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If VAR is a reference,
       it is possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
                 ctx->sender_decl, field, NULL);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
              || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
              || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
                      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
        {
          sfield = build_decl (DECL_SOURCE_LOCATION (var),
                               FIELD_DECL, DECL_NAME (var), type);
          DECL_ABSTRACT_ORIGIN (sfield) = var;
          DECL_ALIGN (sfield) = DECL_ALIGN (field);
          DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
          TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
          insert_field_into_struct (ctx->srecord_type, sfield);
        }
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
        {
          tree t;

          ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
          ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
          for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
            {
              sfield = build_decl (DECL_SOURCE_LOCATION (var),
                                   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
              DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
              insert_field_into_struct (ctx->srecord_type, sfield);
              splay_tree_insert (ctx->sfield_map,
                                 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
                                 (splay_tree_value) sfield);
            }
        }
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
                                : ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
                       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
                       (splay_tree_value) sfield);
}
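
/* Editor's note (not from the original source): MASK above is a bit
   mask selecting the receiving record(s): bit 0 puts the field in
   CTX->RECORD_TYPE (looked up via CTX->FIELD_MAP), bit 1 puts it in
   CTX->SRECORD_TYPE (via CTX->SFIELD_MAP).  So the common call

     install_var_field (decl, by_ref, 3, ctx);

   from scan_sharing_clauses installs the field in both records when a
   sender-side record exists.  */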

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
        return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
        return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}

/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
           gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
               region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
             region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}

/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
                struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
         regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
         regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}

/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
                     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_get_node (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn, old_fn;
  gimple_seq seq, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties;

  old_fn = current_function_decl;
  push_cfun (child_cfun);
  current_function_decl = child_fn;
  bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
  seq = gimple_seq_alloc ();
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();
  current_function_decl = old_fn;

  cgraph_add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
                         TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
        {
          tree new_f = copy_node (f);
          DECL_CONTEXT (new_f) = type;
          TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
          DECL_CHAIN (new_f) = new_fields;
          walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
          walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          new_fields = new_f;

          /* Arrange to be able to look up the receiver field
             given the sender field.  */
          splay_tree_insert (ctx->field_map, (splay_tree_key) f,
                             (splay_tree_value) new_f);
        }
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
          decl = OMP_CLAUSE_DECL (c);
          if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
            goto do_private;
          else if (!is_variable_sized (decl))
            install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_SHARED:
          gcc_assert (is_taskreg_ctx (ctx));
          decl = OMP_CLAUSE_DECL (c);
          gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
                      || !is_variable_sized (decl));
          /* Global variables don't need to be copied,
             the receiver side will use them directly.  */
          if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            break;
          by_ref = use_pointer_for_field (decl, ctx);
          if (! TREE_READONLY (decl)
              || TREE_ADDRESSABLE (decl)
              || by_ref
              || is_reference (decl))
            {
              install_var_field (decl, by_ref, 3, ctx);
              install_var_local (decl, ctx);
              break;
            }
          /* We don't need to copy const scalar vars back.  */
          OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
          goto do_private;

        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
        do_private:
          if (is_variable_sized (decl))
            {
              if (is_task_ctx (ctx))
                install_var_field (decl, false, 1, ctx);
              break;
            }
          else if (is_taskreg_ctx (ctx))
            {
              bool global
                = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
              by_ref = use_pointer_for_field (decl, NULL);

              if (is_task_ctx (ctx)
                  && (global || by_ref || is_reference (decl)))
                {
                  install_var_field (decl, false, 1, ctx);
                  if (!global)
                    install_var_field (decl, by_ref, 2, ctx);
                }
              else if (!global)
                install_var_field (decl, by_ref, 3, ctx);
            }
          install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_COPYIN:
          decl = OMP_CLAUSE_DECL (c);
          by_ref = use_pointer_for_field (decl, NULL);
          install_var_field (decl, by_ref, 3, ctx);
          break;

        case OMP_CLAUSE_DEFAULT:
          ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
          break;

        case OMP_CLAUSE_FINAL:
        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
          if (ctx->outer)
            scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
          break;

        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
        case OMP_CLAUSE_UNTIED:
        case OMP_CLAUSE_MERGEABLE:
          break;

        default:
          gcc_unreachable ();
        }
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
            scan_array_reductions = true;
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_PRIVATE:
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
          if (is_variable_sized (decl))
            install_var_local (decl, ctx);
          fixup_remapped_decl (decl, ctx,
                               OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
                               && OMP_CLAUSE_PRIVATE_DEBUG (c));
          if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
              && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
            scan_array_reductions = true;
          break;

        case OMP_CLAUSE_SHARED:
          decl = OMP_CLAUSE_DECL (c);
          if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            fixup_remapped_decl (decl, ctx, false);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_DEFAULT:
        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
        case OMP_CLAUSE_UNTIED:
        case OMP_CLAUSE_FINAL:
        case OMP_CLAUSE_MERGEABLE:
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
          && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
        {
          scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
          scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
        }
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
               && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
        scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}
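
/* Editor's note (illustrative, not from the original source): scanning

     #pragma omp parallel shared (a) firstprivate (b)

   where A is a local array and B a local scalar installs a
   pointer-typed field for A in .omp_data_s (shared aggregates go by
   reference per use_pointer_for_field), a by-value field for B, and
   local replacement decls for both in the child context, via the
   install_var_field / install_var_local calls above.  */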

/* Create a new name for the omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
                               task_copy ? "_omp_cpyfn" : "_omp_fn"));
}

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
                                     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
                     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_NAMELESS (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (DECL_SOURCE_LOCATION (decl),
                  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
                  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
                      PARM_DECL, get_identifier (".omp_data_o"),
                      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}
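
/* Editor's note (illustrative, not from the original source): for a
   function foo, the decl built above has roughly the shape

     static void foo._omp_fn.0 (void *.omp_data_i);

   or, for a task copy function,

     static void foo._omp_cpyfn.1 (void *.omp_data_o, void *.omp_data_i);

   the body is filled in by later stages of the pass.  */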

/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
                          OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
                     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}

/* Scan an OpenMP task directive.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);

  /* Ignore task directives with empty bodies.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt)))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
                     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (gimple_location (stmt),
                         TYPE_DECL, name, ctx->srecord_type);
      DECL_ARTIFICIAL (name) = 1;
      DECL_NAMELESS (name) = 1;
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
  else
    {
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
        if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
            || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
          {
            *q = *p;
            *p = TREE_CHAIN (*p);
            TREE_CHAIN (*q) = NULL_TREE;
            q = &TREE_CHAIN (*q);
          }
        else
          p = &DECL_CHAIN (*p);
      *p = vla_fields;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
        layout_type (ctx->srecord_type);
      t = fold_convert_loc (loc, long_integer_type_node,
                            TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node,
                         TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (stmt, t);
    }
}

/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  size_t i;

  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);

  scan_omp (gimple_omp_for_pre_body (stmt), ctx);
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
    }
  scan_omp (gimple_omp_body (stmt), ctx);
}

/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;

  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);
}

/* Scan an OpenMP single directive.  */

static void
scan_omp_single (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (gimple_location (stmt),
                     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}

/* Check OpenMP nesting restrictions.  */
static void
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_CALL:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (gimple_code (ctx->stmt))
          {
          case GIMPLE_OMP_FOR:
          case GIMPLE_OMP_SECTIONS:
          case GIMPLE_OMP_SINGLE:
          case GIMPLE_OMP_ORDERED:
          case GIMPLE_OMP_MASTER:
          case GIMPLE_OMP_TASK:
            if (is_gimple_call (stmt))
              {
                warning (0, "barrier region may not be closely nested inside "
                            "of work-sharing, critical, ordered, master or "
                            "explicit task region");
                return;
              }
            warning (0, "work-sharing region may not be closely nested inside "
                        "of work-sharing, critical, ordered, master or explicit "
                        "task region");
            return;
          case GIMPLE_OMP_PARALLEL:
            return;
          default:
            break;
          }
      break;
    case GIMPLE_OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (gimple_code (ctx->stmt))
          {
          case GIMPLE_OMP_FOR:
          case GIMPLE_OMP_SECTIONS:
          case GIMPLE_OMP_SINGLE:
          case GIMPLE_OMP_TASK:
            warning (0, "master region may not be closely nested inside "
                        "of work-sharing or explicit task region");
            return;
          case GIMPLE_OMP_PARALLEL:
            return;
          default:
            break;
          }
      break;
    case GIMPLE_OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (gimple_code (ctx->stmt))
          {
          case GIMPLE_OMP_CRITICAL:
          case GIMPLE_OMP_TASK:
            warning (0, "ordered region may not be closely nested inside "
                        "of critical or explicit task region");
            return;
          case GIMPLE_OMP_FOR:
            if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
                                 OMP_CLAUSE_ORDERED) == NULL)
              warning (0, "ordered region must be closely nested inside "
                          "a loop region with an ordered clause");
            return;
          case GIMPLE_OMP_PARALLEL:
            return;
          default:
            break;
          }
      break;
    case GIMPLE_OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
        if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
            && (gimple_omp_critical_name (stmt)
                == gimple_omp_critical_name (ctx->stmt)))
          {
            warning (0, "critical region may not be nested inside a critical "
                        "region with the same name");
            return;
          }
      break;
    default:
      break;
    }
}
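
/* Editor's note (illustrative, not from the original source): one
   violation diagnosed above would be

     #pragma omp for
     for (i = 0; i < n; i++)
       {
         #pragma omp single
         f (i);
       }

   where the GIMPLE_OMP_SINGLE, walking outward through its contexts,
   meets a GIMPLE_OMP_FOR before any GIMPLE_OMP_PARALLEL and so draws
   the "work-sharing region may not be closely nested" warning.  */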

/* Helper function for scan_omp.

   Callback for walk_tree or operators in walk_gimple_stmt used to
   scan for OpenMP directives in TP.  */

static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  omp_context *ctx = (omp_context *) wi->info;
  tree t = *tp;

  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
        *tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
        *tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
        {
          *walk_subtrees = 1;
          if (ctx)
            {
              tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
              if (tem != TREE_TYPE (t))
                {
                  if (TREE_CODE (t) == INTEGER_CST)
                    *tp = build_int_cst_wide (tem,
                                              TREE_INT_CST_LOW (t),
                                              TREE_INT_CST_HIGH (t));
                  else
                    TREE_TYPE (t) = tem;
                }
            }
        }
      break;
    }

  return NULL_TREE;
}

/* Helper function for scan_omp.

   Callback for walk_gimple_stmt used to scan for OpenMP directives in
   the current statement in GSI.  */

static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
                 struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the OpenMP nesting restrictions.  */
  if (ctx != NULL)
    {
      if (is_gimple_omp (stmt))
        check_omp_nesting_restrictions (stmt, ctx);
      else if (is_gimple_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
              && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
            check_omp_nesting_restrictions (stmt, ctx);
        }
    }

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_FOR:
      scan_omp_for (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (stmt, ctx);
      break;

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body (stmt), ctx);
      break;

    case GIMPLE_BIND:
      {
        tree var;

        *handled_ops_p = false;
        if (ctx)
          for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
            insert_decl_map (&ctx->cb, var, var);
      }
      break;
    default:
      *handled_ops_p = false;
      break;
    }

  return NULL_TREE;
}
2036 /* Scan all the statements starting at the current statement. CTX
2037 contains context information about the OpenMP directives and
2038 clauses found during the scan. */
2040 static void
2041 scan_omp (gimple_seq body, omp_context *ctx)
2043 location_t saved_location;
2044 struct walk_stmt_info wi;
2046 memset (&wi, 0, sizeof (wi));
2047 wi.info = ctx;
2048 wi.want_locations = true;
2050 saved_location = input_location;
2051 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2052 input_location = saved_location;
2055 /* Re-gimplification and code generation routines. */
2057 /* Build a call to GOMP_barrier. */
2059 static tree
2060 build_omp_barrier (void)
2062 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
2065 /* If a context was created for STMT when it was scanned, return it. */
2067 static omp_context *
2068 maybe_lookup_ctx (gimple stmt)
2070 splay_tree_node n;
2071 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2072 return n ? (omp_context *) n->value : NULL;
2076 /* Find the mapping for DECL in CTX or the immediately enclosing
2077 context that has a mapping for DECL.
2079 If CTX is a nested parallel directive, we may have to use the decl
2080 mappings created in CTX's parent context. Suppose that we have the
2081 following parallel nesting (variable UIDs shown for clarity):
2083 iD.1562 = 0;
2084 #omp parallel shared(iD.1562) -> outer parallel
2085 iD.1562 = iD.1562 + 1;
2087 #omp parallel shared (iD.1562) -> inner parallel
2088 iD.1562 = iD.1562 - 1;
2090 Each parallel structure will create a distinct .omp_data_s structure
2091 for copying iD.1562 in/out of the directive:
2093 outer parallel .omp_data_s.1.i -> iD.1562
2094 inner parallel .omp_data_s.2.i -> iD.1562
2096 A shared variable mapping will produce a copy-out operation before
2097 the parallel directive and a copy-in operation after it. So, in
2098 this case we would have:
2100 iD.1562 = 0;
2101 .omp_data_o.1.i = iD.1562;
2102 #omp parallel shared(iD.1562) -> outer parallel
2103 .omp_data_i.1 = &.omp_data_o.1
2104 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2106 .omp_data_o.2.i = iD.1562; -> **
2107 #omp parallel shared(iD.1562) -> inner parallel
2108 .omp_data_i.2 = &.omp_data_o.2
2109 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2112 ** This is a problem. The symbol iD.1562 cannot be referenced
2113 inside the body of the outer parallel region. But since we are
2114 emitting this copy operation while expanding the inner parallel
2115 directive, we need to access the CTX structure of the outer
2116 parallel directive to get the correct mapping:
2118 .omp_data_o.2.i = .omp_data_i.1->i
2120 Since there may be other workshare or parallel directives enclosing
2121 the parallel directive, it may be necessary to walk up the context
2122 parent chain. This is not a problem in general because nested
2123 parallelism happens only rarely. */
2125 static tree
2126 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2128 tree t;
2129 omp_context *up;
2131 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2132 t = maybe_lookup_decl (decl, up);
2134 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2136 return t ? t : decl;
2140 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2141 in outer contexts. */
2143 static tree
2144 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2146 tree t = NULL;
2147 omp_context *up;
2149 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2150 t = maybe_lookup_decl (decl, up);
2152 return t ? t : decl;
2156 /* Construct the initialization value for reduction CLAUSE. */
2158 tree
2159 omp_reduction_init (tree clause, tree type)
2161 location_t loc = OMP_CLAUSE_LOCATION (clause);
2162 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2164 case PLUS_EXPR:
2165 case MINUS_EXPR:
2166 case BIT_IOR_EXPR:
2167 case BIT_XOR_EXPR:
2168 case TRUTH_OR_EXPR:
2169 case TRUTH_ORIF_EXPR:
2170 case TRUTH_XOR_EXPR:
2171 case NE_EXPR:
2172 return build_zero_cst (type);
2174 case MULT_EXPR:
2175 case TRUTH_AND_EXPR:
2176 case TRUTH_ANDIF_EXPR:
2177 case EQ_EXPR:
2178 return fold_convert_loc (loc, type, integer_one_node);
2180 case BIT_AND_EXPR:
2181 return fold_convert_loc (loc, type, integer_minus_one_node);
2183 case MAX_EXPR:
2184 if (SCALAR_FLOAT_TYPE_P (type))
2186 REAL_VALUE_TYPE max, min;
2187 if (HONOR_INFINITIES (TYPE_MODE (type)))
2189 real_inf (&max);
2190 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2192 else
2193 real_maxval (&min, 1, TYPE_MODE (type));
2194 return build_real (type, min);
2196 else
2198 gcc_assert (INTEGRAL_TYPE_P (type));
2199 return TYPE_MIN_VALUE (type);
2202 case MIN_EXPR:
2203 if (SCALAR_FLOAT_TYPE_P (type))
2205 REAL_VALUE_TYPE max;
2206 if (HONOR_INFINITIES (TYPE_MODE (type)))
2207 real_inf (&max);
2208 else
2209 real_maxval (&max, 0, TYPE_MODE (type));
2210 return build_real (type, max);
2212 else
2214 gcc_assert (INTEGRAL_TYPE_P (type));
2215 return TYPE_MAX_VALUE (type);
2218 default:
2219 gcc_unreachable ();
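/* A worked example of the identities chosen above (illustrative only):
   with reduction(+:x) an int private copy starts at 0; with
   reduction(*:x) at 1; with reduction(&:x) at ~0 (all bits set); with
   reduction(max:f) a float starts at -INF when infinities are honored,
   otherwise at -FLT_MAX, and an int at INT_MIN (TYPE_MIN_VALUE).  */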
2223 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2224 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2225 private variables. Initialization statements go in ILIST, while calls
2226 to destructors go in DLIST. */
2228 static void
2229 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2230 omp_context *ctx)
2232 gimple_stmt_iterator diter;
2233 tree c, dtor, copyin_seq, x, ptr;
2234 bool copyin_by_ref = false;
2235 bool lastprivate_firstprivate = false;
2236 int pass;
2238 *dlist = gimple_seq_alloc ();
2239 diter = gsi_start (*dlist);
2240 copyin_seq = NULL;
2242 /* Do all the fixed sized types in the first pass, and the variable sized
2243 types in the second pass. This makes sure that the scalar arguments to
2244 the variable sized types are processed before we use them in the
2245 variable sized operations. */
2246 for (pass = 0; pass < 2; ++pass)
2248 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2250 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2251 tree var, new_var;
2252 bool by_ref;
2253 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2255 switch (c_kind)
2257 case OMP_CLAUSE_PRIVATE:
2258 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2259 continue;
2260 break;
2261 case OMP_CLAUSE_SHARED:
2262 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2264 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2265 continue;
2267 case OMP_CLAUSE_FIRSTPRIVATE:
2268 case OMP_CLAUSE_COPYIN:
2269 case OMP_CLAUSE_REDUCTION:
2270 break;
2271 case OMP_CLAUSE_LASTPRIVATE:
2272 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2274 lastprivate_firstprivate = true;
2275 if (pass != 0)
2276 continue;
2278 break;
2279 default:
2280 continue;
2283 new_var = var = OMP_CLAUSE_DECL (c);
2284 if (c_kind != OMP_CLAUSE_COPYIN)
2285 new_var = lookup_decl (var, ctx);
2287 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2289 if (pass != 0)
2290 continue;
2292 else if (is_variable_sized (var))
2294 /* For variable sized types, we need to allocate the
2295 actual storage here. Call alloca and store the
2296 result in the pointer decl that we created elsewhere. */
2297 if (pass == 0)
2298 continue;
2300 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2302 gimple stmt;
2303 tree tmp;
2305 ptr = DECL_VALUE_EXPR (new_var);
2306 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2307 ptr = TREE_OPERAND (ptr, 0);
2308 gcc_assert (DECL_P (ptr));
2309 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2311 /* void *tmp = __builtin_alloca (x);  */
2312 stmt
2313 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2314 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2315 gimple_add_tmp_var (tmp);
2316 gimple_call_set_lhs (stmt, tmp);
2318 gimple_seq_add_stmt (ilist, stmt);
2320 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2321 gimplify_assign (ptr, x, ilist);
2324 else if (is_reference (var))
2326 /* For references that are being privatized for Fortran,
2327 allocate new backing storage for the new pointer
2328 variable. This allows us to avoid changing all the
2329 code that expects a pointer to something that expects
2330 a direct variable. Note that this doesn't apply to
2331 C++, since reference types are disallowed in data
2332 sharing clauses there, except for NRV optimized
2333 return values. */
2334 if (pass == 0)
2335 continue;
2337 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2338 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2340 x = build_receiver_ref (var, false, ctx);
2341 x = build_fold_addr_expr_loc (clause_loc, x);
2343 else if (TREE_CONSTANT (x))
2345 const char *name = NULL;
2346 if (DECL_NAME (var))
2347 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2349 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2350 name);
2351 gimple_add_tmp_var (x);
2352 TREE_ADDRESSABLE (x) = 1;
2353 x = build_fold_addr_expr_loc (clause_loc, x);
2355 else
2357 x = build_call_expr_loc (clause_loc,
2358 built_in_decls[BUILT_IN_ALLOCA], 1, x);
2361 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2362 gimplify_assign (new_var, x, ilist);
2364 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2366 else if (c_kind == OMP_CLAUSE_REDUCTION
2367 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2369 if (pass == 0)
2370 continue;
2372 else if (pass != 0)
2373 continue;
2375 switch (OMP_CLAUSE_CODE (c))
2377 case OMP_CLAUSE_SHARED:
2378 /* Shared global vars are just accessed directly. */
2379 if (is_global_var (new_var))
2380 break;
2381 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2382 needs to be delayed until after fixup_child_record_type so
2383 that we get the correct type during the dereference. */
2384 by_ref = use_pointer_for_field (var, ctx);
2385 x = build_receiver_ref (var, by_ref, ctx);
2386 SET_DECL_VALUE_EXPR (new_var, x);
2387 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2389 /* ??? If VAR is not passed by reference, and the variable
2390 hasn't been initialized yet, then we'll get a warning for
2391 the store into the omp_data_s structure. Ideally, we'd be
2392 able to notice this and not store anything at all, but
2393 we're generating code too early. Suppress the warning. */
2394 if (!by_ref)
2395 TREE_NO_WARNING (var) = 1;
2396 break;
2398 case OMP_CLAUSE_LASTPRIVATE:
2399 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2400 break;
2401 /* FALLTHRU */
2403 case OMP_CLAUSE_PRIVATE:
2404 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2405 x = build_outer_var_ref (var, ctx);
2406 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2408 if (is_task_ctx (ctx))
2409 x = build_receiver_ref (var, false, ctx);
2410 else
2411 x = build_outer_var_ref (var, ctx);
2413 else
2414 x = NULL;
2415 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2416 if (x)
2417 gimplify_and_add (x, ilist);
2418 /* FALLTHRU */
2420 do_dtor:
2421 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2422 if (x)
2424 gimple_seq tseq = NULL;
2426 dtor = x;
2427 gimplify_stmt (&dtor, &tseq);
2428 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2430 break;
2432 case OMP_CLAUSE_FIRSTPRIVATE:
2433 if (is_task_ctx (ctx))
2435 if (is_reference (var) || is_variable_sized (var))
2436 goto do_dtor;
2437 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2438 ctx))
2439 || use_pointer_for_field (var, NULL))
2441 x = build_receiver_ref (var, false, ctx);
2442 SET_DECL_VALUE_EXPR (new_var, x);
2443 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2444 goto do_dtor;
2447 x = build_outer_var_ref (var, ctx);
2448 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2449 gimplify_and_add (x, ilist);
2450 goto do_dtor;
2451 break;
2453 case OMP_CLAUSE_COPYIN:
2454 by_ref = use_pointer_for_field (var, NULL);
2455 x = build_receiver_ref (var, by_ref, ctx);
2456 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2457 append_to_statement_list (x, &copyin_seq);
2458 copyin_by_ref |= by_ref;
2459 break;
2461 case OMP_CLAUSE_REDUCTION:
2462 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2464 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2465 x = build_outer_var_ref (var, ctx);
2467 if (is_reference (var))
2468 x = build_fold_addr_expr_loc (clause_loc, x);
2469 SET_DECL_VALUE_EXPR (placeholder, x);
2470 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2471 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2472 gimple_seq_add_seq (ilist,
2473 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2474 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2475 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2477 else
2479 x = omp_reduction_init (c, TREE_TYPE (new_var));
2480 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2481 gimplify_assign (new_var, x, ilist);
2483 break;
2485 default:
2486 gcc_unreachable ();
2491 /* The copyin sequence is not to be executed by the main thread, since
2492    that would result in self-copies.  The self-copy might be harmless
2493    for scalars, but it certainly is not for C++ operator=.  */
2494 if (copyin_seq)
2496 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2497 x = build2 (NE_EXPR, boolean_type_node, x,
2498 build_int_cst (TREE_TYPE (x), 0));
2499 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2500 gimplify_and_add (x, ilist);
2503 /* If any copyin variable is passed by reference, we must ensure the
2504 master thread doesn't modify it before it is copied over in all
2505 threads. Similarly for variables in both firstprivate and
2506 lastprivate clauses we need to ensure the lastprivate copying
2507 happens after firstprivate copying in all threads. */
2508 if (copyin_by_ref || lastprivate_firstprivate)
2509 gimplify_and_add (build_omp_barrier (), ilist);
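/* Illustrative sketch of what the guard above emits for copyin(x),
   with x passed by reference (names are for exposition only):

       if (__builtin_omp_get_thread_num () != 0)
         x_private = x_master;
       GOMP_barrier ();

   The master thread skips the copy to avoid copying onto itself, and
   the barrier keeps it from modifying x before the copies finish.  */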
2513 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2514 both parallel and workshare constructs. PREDICATE may be NULL if it's
2515 always true. */
2517 static void
2518 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2519 omp_context *ctx)
2521 tree x, c, label = NULL;
2522 bool par_clauses = false;
2524 /* Early exit if there are no lastprivate clauses. */
2525 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2526 if (clauses == NULL)
2528 /* If this was a workshare clause, see if it had been combined
2529 with its parallel. In that case, look for the clauses on the
2530 parallel statement itself. */
2531 if (is_parallel_ctx (ctx))
2532 return;
2534 ctx = ctx->outer;
2535 if (ctx == NULL || !is_parallel_ctx (ctx))
2536 return;
2538 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2539 OMP_CLAUSE_LASTPRIVATE);
2540 if (clauses == NULL)
2541 return;
2542 par_clauses = true;
2545 if (predicate)
2547 gimple stmt;
2548 tree label_true, arm1, arm2;
2550 label = create_artificial_label (UNKNOWN_LOCATION);
2551 label_true = create_artificial_label (UNKNOWN_LOCATION);
2552 arm1 = TREE_OPERAND (predicate, 0);
2553 arm2 = TREE_OPERAND (predicate, 1);
2554 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2555 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2556 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2557 label_true, label);
2558 gimple_seq_add_stmt (stmt_list, stmt);
2559 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2562 for (c = clauses; c ;)
2564 tree var, new_var;
2565 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2567 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2569 var = OMP_CLAUSE_DECL (c);
2570 new_var = lookup_decl (var, ctx);
2572 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2574 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2575 gimple_seq_add_seq (stmt_list,
2576 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2578 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2580 x = build_outer_var_ref (var, ctx);
2581 if (is_reference (var))
2582 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2583 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2584 gimplify_and_add (x, stmt_list);
2586 c = OMP_CLAUSE_CHAIN (c);
2587 if (c == NULL && !par_clauses)
2589 /* If this was a workshare clause, see if it had been combined
2590 with its parallel. In that case, continue looking for the
2591 clauses also on the parallel statement itself. */
2592 if (is_parallel_ctx (ctx))
2593 break;
2595 ctx = ctx->outer;
2596 if (ctx == NULL || !is_parallel_ctx (ctx))
2597 break;
2599 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2600 OMP_CLAUSE_LASTPRIVATE);
2601 par_clauses = true;
2605 if (label)
2606 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
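/* Illustrative example of the lowering above (names simplified): for

       #pragma omp for lastprivate(x)
       for (i = 0; i < n; i++)
         ...

   the copy-back runs only in the thread whose chunk contained the
   sequentially last iteration, roughly:

       if (i == n) goto lab_true; else goto lab;
     lab_true:
       x_outer = x_private;
     lab:
*/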
2610 /* Generate code to implement the REDUCTION clauses. */
2612 static void
2613 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2615 gimple_seq sub_seq = NULL;
2616 gimple stmt;
2617 tree x, c;
2618 int count = 0;
2620 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2621 update in that case, otherwise use a lock. */
2622 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2623 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2625 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2627 /* Never use OMP_ATOMIC for array reductions. */
2628 count = -1;
2629 break;
2631 count++;
2634 if (count == 0)
2635 return;
2637 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2639 tree var, ref, new_var;
2640 enum tree_code code;
2641 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2643 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2644 continue;
2646 var = OMP_CLAUSE_DECL (c);
2647 new_var = lookup_decl (var, ctx);
2648 if (is_reference (var))
2649 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2650 ref = build_outer_var_ref (var, ctx);
2651 code = OMP_CLAUSE_REDUCTION_CODE (c);
2653 /* reduction(-:var) sums up the partial results, so it acts
2654 identically to reduction(+:var). */
2655 if (code == MINUS_EXPR)
2656 code = PLUS_EXPR;
2658 if (count == 1)
2660 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2662 addr = save_expr (addr);
2663 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2664 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2665 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2666 gimplify_and_add (x, stmt_seqp);
2667 return;
2670 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2672 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2674 if (is_reference (var))
2675 ref = build_fold_addr_expr_loc (clause_loc, ref);
2676 SET_DECL_VALUE_EXPR (placeholder, ref);
2677 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2678 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2679 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2680 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2681 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2683 else
2685 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2686 ref = build_outer_var_ref (var, ctx);
2687 gimplify_assign (ref, x, &sub_seq);
2691 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2692 gimple_seq_add_stmt (stmt_seqp, stmt);
2694 gimple_seq_add_seq (stmt_seqp, sub_seq);
2696 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2697 gimple_seq_add_stmt (stmt_seqp, stmt);
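/* Illustrative sketch of the two strategies above.  With a single
   scalar reduction(+:s) the merge is a lone atomic update:

       *&s_outer = *&s_outer + s_private;      emitted as OMP_ATOMIC

   With two or more clauses (or an array reduction) the merges are
   bracketed by a lock instead:

       GOMP_atomic_start ();
       s_outer = s_outer + s_private;
       t_outer = t_outer * t_private;
       GOMP_atomic_end ();
*/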
2701 /* Generate code to implement the COPYPRIVATE clauses. */
2703 static void
2704 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2705 omp_context *ctx)
2707 tree c;
2709 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2711 tree var, new_var, ref, x;
2712 bool by_ref;
2713 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2715 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2716 continue;
2718 var = OMP_CLAUSE_DECL (c);
2719 by_ref = use_pointer_for_field (var, NULL);
2721 ref = build_sender_ref (var, ctx);
2722 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2723 if (by_ref)
2725 x = build_fold_addr_expr_loc (clause_loc, new_var);
2726 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2728 gimplify_assign (ref, x, slist);
2730 ref = build_receiver_ref (var, false, ctx);
2731 if (by_ref)
2733 ref = fold_convert_loc (clause_loc,
2734 build_pointer_type (TREE_TYPE (new_var)),
2735 ref);
2736 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2738 if (is_reference (var))
2740 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2741 ref = build_simple_mem_ref_loc (clause_loc, ref);
2742 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2744 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2745 gimplify_and_add (x, rlist);
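/* Illustrative sketch for copyprivate(x) on a single construct (struct
   and field names are for exposition only): the executing thread
   stores x, or &x when it is passed by reference, into the sender
   struct via SLIST, and after the broadcast every thread assigns it
   back out of the receiver struct via RLIST:

       .omp_copy_o.x = x;       executing thread (SLIST)
       x = .omp_copy_i->x;      every thread (RLIST)
*/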
2750 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2751 and REDUCTION from the sender (aka parent) side. */
2753 static void
2754 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2755 omp_context *ctx)
2757 tree c;
2759 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2761 tree val, ref, x, var;
2762 bool by_ref, do_in = false, do_out = false;
2763 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2765 switch (OMP_CLAUSE_CODE (c))
2767 case OMP_CLAUSE_PRIVATE:
2768 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2769 break;
2770 continue;
2771 case OMP_CLAUSE_FIRSTPRIVATE:
2772 case OMP_CLAUSE_COPYIN:
2773 case OMP_CLAUSE_LASTPRIVATE:
2774 case OMP_CLAUSE_REDUCTION:
2775 break;
2776 default:
2777 continue;
2780 val = OMP_CLAUSE_DECL (c);
2781 var = lookup_decl_in_outer_ctx (val, ctx);
2783 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2784 && is_global_var (var))
2785 continue;
2786 if (is_variable_sized (val))
2787 continue;
2788 by_ref = use_pointer_for_field (val, NULL);
2790 switch (OMP_CLAUSE_CODE (c))
2792 case OMP_CLAUSE_PRIVATE:
2793 case OMP_CLAUSE_FIRSTPRIVATE:
2794 case OMP_CLAUSE_COPYIN:
2795 do_in = true;
2796 break;
2798 case OMP_CLAUSE_LASTPRIVATE:
2799 if (by_ref || is_reference (val))
2801 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2802 continue;
2803 do_in = true;
2805 else
2807 do_out = true;
2808 if (lang_hooks.decls.omp_private_outer_ref (val))
2809 do_in = true;
2811 break;
2813 case OMP_CLAUSE_REDUCTION:
2814 do_in = true;
2815 do_out = !(by_ref || is_reference (val));
2816 break;
2818 default:
2819 gcc_unreachable ();
2822 if (do_in)
2824 ref = build_sender_ref (val, ctx);
2825 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2826 gimplify_assign (ref, x, ilist);
2827 if (is_task_ctx (ctx))
2828 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2831 if (do_out)
2833 ref = build_sender_ref (val, ctx);
2834 gimplify_assign (var, ref, olist);
2839 /* Generate code to implement SHARED from the sender (aka parent)
2840 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2841 list things that got automatically shared. */
2843 static void
2844 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2846 tree var, ovar, nvar, f, x, record_type;
2848 if (ctx->record_type == NULL)
2849 return;
2851 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2852 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
2854 ovar = DECL_ABSTRACT_ORIGIN (f);
2855 nvar = maybe_lookup_decl (ovar, ctx);
2856 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2857 continue;
2859 /* If CTX is a nested parallel directive, find the immediately
2860 enclosing parallel or workshare construct that contains a
2861 mapping for OVAR. */
2862 var = lookup_decl_in_outer_ctx (ovar, ctx);
2864 if (use_pointer_for_field (ovar, ctx))
2866 x = build_sender_ref (ovar, ctx);
2867 var = build_fold_addr_expr (var);
2868 gimplify_assign (x, var, ilist);
2870 else
2872 x = build_sender_ref (ovar, ctx);
2873 gimplify_assign (x, var, ilist);
2875 if (!TREE_READONLY (var)
2876 /* We don't need to receive a new reference to a result
2877 or parm decl. In fact we may not store to it as we will
2878 invalidate any pending RSO and generate wrong gimple
2879 during inlining. */
2880 && !((TREE_CODE (var) == RESULT_DECL
2881 || TREE_CODE (var) == PARM_DECL)
2882 && DECL_BY_REFERENCE (var)))
2884 x = build_sender_ref (ovar, ctx);
2885 gimplify_assign (var, x, olist);
2892 /* A convenience function to build an empty GIMPLE_COND with just the
2893 condition. */
2895 static gimple
2896 gimple_build_cond_empty (tree cond)
2898 enum tree_code pred_code;
2899 tree lhs, rhs;
2901 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2902 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2906 /* Build the function calls to GOMP_parallel_start etc to actually
2907 generate the parallel operation. REGION is the parallel region
2908 being expanded.  BB is the block into which to insert the code.  WS_ARGS
2909 will be set if this is a call to a combined parallel+workshare
2910 construct; it contains the list of additional arguments needed by
2911 the workshare construct.  */
2913 static void
2914 expand_parallel_call (struct omp_region *region, basic_block bb,
2915 gimple entry_stmt, VEC(tree,gc) *ws_args)
2917 tree t, t1, t2, val, cond, c, clauses;
2918 gimple_stmt_iterator gsi;
2919 gimple stmt;
2920 int start_ix;
2921 location_t clause_loc;
2922 VEC(tree,gc) *args;
2924 clauses = gimple_omp_parallel_clauses (entry_stmt);
2926 /* Determine what flavor of GOMP_parallel_start we will be
2927 emitting. */
2928 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2929 if (is_combined_parallel (region))
2931 switch (region->inner->type)
2933 case GIMPLE_OMP_FOR:
2934 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2935 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2936 + (region->inner->sched_kind
2937 == OMP_CLAUSE_SCHEDULE_RUNTIME
2938 ? 3 : region->inner->sched_kind);
2939 break;
2940 case GIMPLE_OMP_SECTIONS:
2941 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2942 break;
2943 default:
2944 gcc_unreachable ();
2948 /* By default, the value of NUM_THREADS is zero (selected at run time)
2949 and there is no conditional. */
2950 cond = NULL_TREE;
2951 val = build_int_cst (unsigned_type_node, 0);
2953 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2954 if (c)
2955 cond = OMP_CLAUSE_IF_EXPR (c);
2957 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2958 if (c)
2960 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2961 clause_loc = OMP_CLAUSE_LOCATION (c);
2963 else
2964 clause_loc = gimple_location (entry_stmt);
2966 /* Ensure 'val' is of the correct type. */
2967 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
2969 /* If we found the clause 'if (cond)', build either
2970 (cond != 0) or (cond ? val : 1u). */
2971 if (cond)
2973 gimple_stmt_iterator gsi;
2975 cond = gimple_boolify (cond);
2977 if (integer_zerop (val))
2978 val = fold_build2_loc (clause_loc,
2979 EQ_EXPR, unsigned_type_node, cond,
2980 build_int_cst (TREE_TYPE (cond), 0));
2981 else
2983 basic_block cond_bb, then_bb, else_bb;
2984 edge e, e_then, e_else;
2985 tree tmp_then, tmp_else, tmp_join, tmp_var;
2987 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2988 if (gimple_in_ssa_p (cfun))
2990 tmp_then = make_ssa_name (tmp_var, NULL);
2991 tmp_else = make_ssa_name (tmp_var, NULL);
2992 tmp_join = make_ssa_name (tmp_var, NULL);
2994 else
2996 tmp_then = tmp_var;
2997 tmp_else = tmp_var;
2998 tmp_join = tmp_var;
3001 e = split_block (bb, NULL);
3002 cond_bb = e->src;
3003 bb = e->dest;
3004 remove_edge (e);
3006 then_bb = create_empty_bb (cond_bb);
3007 else_bb = create_empty_bb (then_bb);
3008 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3009 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3011 stmt = gimple_build_cond_empty (cond);
3012 gsi = gsi_start_bb (cond_bb);
3013 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3015 gsi = gsi_start_bb (then_bb);
3016 stmt = gimple_build_assign (tmp_then, val);
3017 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3019 gsi = gsi_start_bb (else_bb);
3020 stmt = gimple_build_assign
3021 (tmp_else, build_int_cst (unsigned_type_node, 1));
3022 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3024 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3025 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3026 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3027 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3029 if (gimple_in_ssa_p (cfun))
3031 gimple phi = create_phi_node (tmp_join, bb);
3032 SSA_NAME_DEF_STMT (tmp_join) = phi;
3033 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3034 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3037 val = tmp_join;
3040 gsi = gsi_start_bb (bb);
3041 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3042 false, GSI_CONTINUE_LINKING);
3045 gsi = gsi_last_bb (bb);
3046 t = gimple_omp_parallel_data_arg (entry_stmt);
3047 if (t == NULL)
3048 t1 = null_pointer_node;
3049 else
3050 t1 = build_fold_addr_expr (t);
3051 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3053 args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
3054 VEC_quick_push (tree, args, t2);
3055 VEC_quick_push (tree, args, t1);
3056 VEC_quick_push (tree, args, val);
3057 VEC_splice (tree, args, ws_args);
3059 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3060 built_in_decls[start_ix], args);
3062 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3063 false, GSI_CONTINUE_LINKING);
3065 t = gimple_omp_parallel_data_arg (entry_stmt);
3066 if (t == NULL)
3067 t = null_pointer_node;
3068 else
3069 t = build_fold_addr_expr (t);
3070 t = build_call_expr_loc (gimple_location (entry_stmt),
3071 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3072 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3073 false, GSI_CONTINUE_LINKING);
3075 t = build_call_expr_loc (gimple_location (entry_stmt),
3076 built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3077 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3078 false, GSI_CONTINUE_LINKING);
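/* Putting the calls above together, a plain

       #pragma omp parallel num_threads (4)

   expands schematically (child function name invented here) into:

       GOMP_parallel_start (__child_fn, &.omp_data_o, 4);
       __child_fn (&.omp_data_o);
       GOMP_parallel_end ();

   The encountering thread participates by calling the outlined body
   directly between the start and end calls.  */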
3082 /* Build the function call to GOMP_task to actually
3083 generate the task operation.  BB is the block into which to insert the code.  */
3085 static void
3086 expand_task_call (basic_block bb, gimple entry_stmt)
3088 tree t, t1, t2, t3, flags, cond, c, c2, clauses;
3089 gimple_stmt_iterator gsi;
3090 location_t loc = gimple_location (entry_stmt);
3092 clauses = gimple_omp_task_clauses (entry_stmt);
3094 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3095 if (c)
3096 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3097 else
3098 cond = boolean_true_node;
3100 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3101 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
3102 flags = build_int_cst (unsigned_type_node,
3103 (c ? 1 : 0) + (c2 ? 4 : 0));
3105 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
3106 if (c)
3108 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
3109 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
3110 build_int_cst (unsigned_type_node, 2),
3111 build_int_cst (unsigned_type_node, 0));
3112 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
3115 gsi = gsi_last_bb (bb);
3116 t = gimple_omp_task_data_arg (entry_stmt);
3117 if (t == NULL)
3118 t2 = null_pointer_node;
3119 else
3120 t2 = build_fold_addr_expr_loc (loc, t);
3121 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3122 t = gimple_omp_task_copy_fn (entry_stmt);
3123 if (t == NULL)
3124 t3 = null_pointer_node;
3125 else
3126 t3 = build_fold_addr_expr_loc (loc, t);
3128 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3129 gimple_omp_task_arg_size (entry_stmt),
3130 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3132 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3133 false, GSI_CONTINUE_LINKING);
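/* The FLAGS argument built above packs the clauses into bits: bit 0
   for untied, bit 2 for mergeable, and 2 is added when the final
   clause evaluates to true (via the COND_EXPR when it is not a
   compile-time constant).  E.g. for

       #pragma omp task untied final (n < 8)

   flags ends up as 1 + (n < 8 ? 2 : 0).  */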
3137 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3138 catch handler and return it. This prevents programs from violating the
3139 structured block semantics with throws. */
3141 static gimple_seq
3142 maybe_catch_exception (gimple_seq body)
3144 gimple g;
3145 tree decl;
3147 if (!flag_exceptions)
3148 return body;
3150 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3151 decl = lang_hooks.eh_protect_cleanup_actions ();
3152 else
3153 decl = built_in_decls[BUILT_IN_TRAP];
3155 g = gimple_build_eh_must_not_throw (decl);
3156 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3157 GIMPLE_TRY_CATCH);
3159 return gimple_seq_alloc_with_stmt (g);
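/* Schematically, the wrapping above turns BODY into

       try { BODY } catch { MUST_NOT_THROW; }

   where the handler calls the language's cleanup action (std::terminate
   for C++) or __builtin_trap, so an exception cannot unwind out of the
   structured block.  */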
3162 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
3164 static tree
3165 vec2chain (VEC(tree,gc) *v)
3167 tree chain = NULL_TREE, t;
3168 unsigned ix;
3170 FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
3172 DECL_CHAIN (t) = chain;
3173 chain = t;
3176 return chain;
3180 /* Remove barriers in REGION->EXIT's block. Note that this is only
3181 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3182 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3183 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
3184 removed. */
3186 static void
3187 remove_exit_barrier (struct omp_region *region)
3189 gimple_stmt_iterator gsi;
3190 basic_block exit_bb;
3191 edge_iterator ei;
3192 edge e;
3193 gimple stmt;
3194 int any_addressable_vars = -1;
3196 exit_bb = region->exit;
3198 /* If the parallel region doesn't return, we don't have REGION->EXIT
3199 block at all. */
3200 if (! exit_bb)
3201 return;
3203 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3204 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3205 statements that can appear in between are extremely limited -- no
3206 memory operations at all. Here, we allow nothing at all, so the
3207 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3208 gsi = gsi_last_bb (exit_bb);
3209 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3210 gsi_prev (&gsi);
3211 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3212 return;
3214 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3216 gsi = gsi_last_bb (e->src);
3217 if (gsi_end_p (gsi))
3218 continue;
3219 stmt = gsi_stmt (gsi);
3220 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3221 && !gimple_omp_return_nowait_p (stmt))
3223 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3224 in many cases. If there could be tasks queued, the barrier
3225 might be needed to let the tasks run before some local
3226 variable of the parallel that the task uses as shared
3227 runs out of scope. The task can be spawned either
3228 from within current function (this would be easy to check)
3229 or from some function it calls and gets passed an address
3230 of such a variable. */
3231 if (any_addressable_vars < 0)
3233 gimple parallel_stmt = last_stmt (region->entry);
3234 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3235 tree local_decls, block, decl;
3236 unsigned ix;
3238 any_addressable_vars = 0;
3239 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3240 if (TREE_ADDRESSABLE (decl))
3242 any_addressable_vars = 1;
3243 break;
3245 for (block = gimple_block (stmt);
3246 !any_addressable_vars
3247 && block
3248 && TREE_CODE (block) == BLOCK;
3249 block = BLOCK_SUPERCONTEXT (block))
3251 for (local_decls = BLOCK_VARS (block);
3252 local_decls;
3253 local_decls = DECL_CHAIN (local_decls))
3254 if (TREE_ADDRESSABLE (local_decls))
3256 any_addressable_vars = 1;
3257 break;
3259 if (block == gimple_block (parallel_stmt))
3260 break;
3263 if (!any_addressable_vars)
3264 gimple_omp_return_set_nowait (stmt);
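/* As an illustration of the check above: in

       #pragma omp parallel
       {
         int local = 0;
         #pragma omp for
         for (...)
           ...                  <-- implicit barrier after the loop
       }                        <-- implicit barrier ends the parallel

   the workshare barrier is redundant with the one ending the parallel,
   so its GIMPLE_OMP_RETURN can be marked nowait -- unless some
   addressable local such as LOCAL might still be in use by a queued
   task.  */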
3269 static void
3270 remove_exit_barriers (struct omp_region *region)
3272 if (region->type == GIMPLE_OMP_PARALLEL)
3273 remove_exit_barrier (region);
3275 if (region->inner)
3277 region = region->inner;
3278 remove_exit_barriers (region);
3279 while (region->next)
3281 region = region->next;
3282 remove_exit_barriers (region);
3287 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3288 calls. These can't be declared as const functions, but
3289 within one parallel body they are constant, so they can be
3290 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3291 which are declared const.  Similarly for the task body, except
3292 that in an untied task omp_get_thread_num () can change at any task
3293 scheduling point.  */
3295 static void
3296 optimize_omp_library_calls (gimple entry_stmt)
3298 basic_block bb;
3299 gimple_stmt_iterator gsi;
3300 tree thr_num_id
3301 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3302 tree num_thr_id
3303 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3304 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3305 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3306 OMP_CLAUSE_UNTIED) != NULL);
3308 FOR_EACH_BB (bb)
3309 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3311 gimple call = gsi_stmt (gsi);
3312 tree decl;
3314 if (is_gimple_call (call)
3315 && (decl = gimple_call_fndecl (call))
3316 && DECL_EXTERNAL (decl)
3317 && TREE_PUBLIC (decl)
3318 && DECL_INITIAL (decl) == NULL)
3320 tree built_in;
3322 if (DECL_NAME (decl) == thr_num_id)
3324 /* In #pragma omp task untied omp_get_thread_num () can change
3325 during the execution of the task region. */
3326 if (untied_task)
3327 continue;
3328 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3330 else if (DECL_NAME (decl) == num_thr_id)
3331 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3332 else
3333 continue;
3335 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3336 || gimple_call_num_args (call) != 0)
3337 continue;
3339 if (flag_exceptions && !TREE_NOTHROW (decl))
3340 continue;
3342 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3343 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3344 TREE_TYPE (TREE_TYPE (built_in))))
3345 continue;
3347 gimple_call_set_fndecl (call, built_in);
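/* E.g., within one parallel body

       n = omp_get_num_threads ();

   is redirected to __builtin_omp_get_num_threads (), which is declared
   const and hence subject to CSE; omp_get_thread_num () is treated the
   same, except inside an untied task where its value may change at any
   task scheduling point.  */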
3352 /* Expand the OpenMP parallel or task directive starting at REGION. */
3354 static void
3355 expand_omp_taskreg (struct omp_region *region)
3357 basic_block entry_bb, exit_bb, new_bb;
3358 struct function *child_cfun;
3359 tree child_fn, block, t;
3360 tree save_current;
3361 gimple_stmt_iterator gsi;
3362 gimple entry_stmt, stmt;
3363 edge e;
3364 VEC(tree,gc) *ws_args;
3366 entry_stmt = last_stmt (region->entry);
3367 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3368 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3369 /* If this function has already been instrumented, make sure
3370 the child function isn't instrumented again. */
3371 child_cfun->after_tree_profile = cfun->after_tree_profile;
3373 entry_bb = region->entry;
3374 exit_bb = region->exit;
3376 if (is_combined_parallel (region))
3377 ws_args = region->ws_args;
3378 else
3379 ws_args = NULL;
3381 if (child_cfun->cfg)
3383 /* Due to inlining, it may happen that we have already outlined
3384 the region, in which case all we need to do is make the
3385 sub-graph unreachable and emit the parallel call. */
3386 edge entry_succ_e, exit_succ_e;
3387 gimple_stmt_iterator gsi;
3389 entry_succ_e = single_succ_edge (entry_bb);
3391 gsi = gsi_last_bb (entry_bb);
3392 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3393 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3394 gsi_remove (&gsi, true);
3396 new_bb = entry_bb;
3397 if (exit_bb)
3399 exit_succ_e = single_succ_edge (exit_bb);
3400 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3402 remove_edge_and_dominated_blocks (entry_succ_e);
3404 else
3406 unsigned srcidx, dstidx, num;
3408 /* If the parallel region needs data sent from the parent
3409 function, then the very first statement (except possible
3410 tree profile counter updates) of the parallel body
3411 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3412 &.OMP_DATA_O is passed as an argument to the child function,
3413 we need to replace it with the argument as seen by the child
3414 function.
3416 In most cases, this will end up being the identity assignment
3417 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3418 a function call that has been inlined, the original PARM_DECL
3419 .OMP_DATA_I may have been converted into a different local
3420 variable, in which case we need to keep the assignment.  */
3421 if (gimple_omp_taskreg_data_arg (entry_stmt))
3423 basic_block entry_succ_bb = single_succ (entry_bb);
3424 gimple_stmt_iterator gsi;
3425 tree arg, narg;
3426 gimple parcopy_stmt = NULL;
3428 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3430 gimple stmt;
3432 gcc_assert (!gsi_end_p (gsi));
3433 stmt = gsi_stmt (gsi);
3434 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3435 continue;
3437 if (gimple_num_ops (stmt) == 2)
3439 tree arg = gimple_assign_rhs1 (stmt);
3441 /* We ignore the subcode because we're
3442 effectively doing a STRIP_NOPS. */
3444 if (TREE_CODE (arg) == ADDR_EXPR
3445 && TREE_OPERAND (arg, 0)
3446 == gimple_omp_taskreg_data_arg (entry_stmt))
3448 parcopy_stmt = stmt;
3449 break;
3454 gcc_assert (parcopy_stmt != NULL);
3455 arg = DECL_ARGUMENTS (child_fn);
3457 if (!gimple_in_ssa_p (cfun))
3459 if (gimple_assign_lhs (parcopy_stmt) == arg)
3460 gsi_remove (&gsi, true);
3461 else
3463 /* ?? Is setting the subcode really necessary ?? */
3464 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3465 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3468 else
3470 /* If we are in ssa form, we must load the value from the default
3471 definition of the argument. That should not be defined now,
3472 since the argument is not used uninitialized. */
3473 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3474 narg = make_ssa_name (arg, gimple_build_nop ());
3475 set_default_def (arg, narg);
3476 /* ?? Is setting the subcode really necessary ?? */
3477 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3478 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3479 update_stmt (parcopy_stmt);
3483 /* Declare local variables needed in CHILD_CFUN. */
3484 block = DECL_INITIAL (child_fn);
3485 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3486 /* The gimplifier could record temporaries in parallel/task block
3487 rather than in the containing function's local_decls chain,
3488 which would mean cgraph missed finalizing them. Do it now. */
3489 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3490 if (TREE_CODE (t) == VAR_DECL
3491 && TREE_STATIC (t)
3492 && !DECL_EXTERNAL (t))
3493 varpool_finalize_decl (t);
3494 DECL_SAVED_TREE (child_fn) = NULL;
3495 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3496 TREE_USED (block) = 1;
3498 /* Reset DECL_CONTEXT on function arguments. */
3499 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3500 DECL_CONTEXT (t) = child_fn;
3502 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3503 so that it can be moved to the child function. */
3504 gsi = gsi_last_bb (entry_bb);
3505 stmt = gsi_stmt (gsi);
3506 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3507 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3508 gsi_remove (&gsi, true);
3509 e = split_block (entry_bb, stmt);
3510 entry_bb = e->dest;
3511 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3513 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3514 if (exit_bb)
3516 gsi = gsi_last_bb (exit_bb);
3517 gcc_assert (!gsi_end_p (gsi)
3518 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3519 stmt = gimple_build_return (NULL);
3520 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3521 gsi_remove (&gsi, true);
3524 /* Move the parallel region into CHILD_CFUN. */
3526 if (gimple_in_ssa_p (cfun))
3528 push_cfun (child_cfun);
3529 init_tree_ssa (child_cfun);
3530 init_ssa_operands ();
3531 cfun->gimple_df->in_ssa_p = true;
3532 pop_cfun ();
3533 block = NULL_TREE;
3535 else
3536 block = gimple_block (entry_stmt);
3538 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3539 if (exit_bb)
3540 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3542 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3543 num = VEC_length (tree, child_cfun->local_decls);
3544 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3546 t = VEC_index (tree, child_cfun->local_decls, srcidx);
3547 if (DECL_CONTEXT (t) == cfun->decl)
3548 continue;
3549 if (srcidx != dstidx)
3550 VEC_replace (tree, child_cfun->local_decls, dstidx, t);
3551 dstidx++;
3553 if (dstidx != num)
3554 VEC_truncate (tree, child_cfun->local_decls, dstidx);
3556 /* Inform the callgraph about the new function. */
3557 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3558 = cfun->curr_properties;
3559 cgraph_add_new_function (child_fn, true);
3561 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3562 fixed in a following pass. */
3563 push_cfun (child_cfun);
3564 save_current = current_function_decl;
3565 current_function_decl = child_fn;
3566 if (optimize)
3567 optimize_omp_library_calls (entry_stmt);
3568 rebuild_cgraph_edges ();
3570 /* Some EH regions might become dead, see PR34608. If
3571 pass_cleanup_cfg isn't the first pass to happen with the
3572 new child, these dead EH edges might cause problems.
3573 Clean them up now. */
3574 if (flag_exceptions)
3576 basic_block bb;
3577 bool changed = false;
3579 FOR_EACH_BB (bb)
3580 changed |= gimple_purge_dead_eh_edges (bb);
3581 if (changed)
3582 cleanup_tree_cfg ();
3584 if (gimple_in_ssa_p (cfun))
3585 update_ssa (TODO_update_ssa);
3586 current_function_decl = save_current;
3587 pop_cfun ();
3590 /* Emit a library call to launch the children threads. */
3591 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3592 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3593 else
3594 expand_task_call (new_bb, entry_stmt);
3595 update_ssa (TODO_update_ssa_only_virtuals);
3599 /* A subroutine of expand_omp_for. Generate code for a parallel
3600 loop with any schedule. Given parameters:
3602 for (V = N1; V cond N2; V += STEP) BODY;
3604 where COND is "<" or ">", we generate pseudocode
3606 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3607 if (more) goto L0; else goto L3;
3608 L0:
3609 V = istart0;
3610 iend = iend0;
3611 L1:
3612 BODY;
3613 V += STEP;
3614 if (V cond iend) goto L1; else goto L2;
3615 L2:
3616 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3617 L3:
3619 If this is a combined omp parallel loop, instead of the call to
3620 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3622 For collapsed loops, given parameters:
3623 collapse(3)
3624 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3625 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3626 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3627 BODY;
3629 we generate pseudocode
3631 if (cond3 is <)
3632 adj = STEP3 - 1;
3633 else
3634 adj = STEP3 + 1;
3635 count3 = (adj + N32 - N31) / STEP3;
3636 if (cond2 is <)
3637 adj = STEP2 - 1;
3638 else
3639 adj = STEP2 + 1;
3640 count2 = (adj + N22 - N21) / STEP2;
3641 if (cond1 is <)
3642 adj = STEP1 - 1;
3643 else
3644 adj = STEP1 + 1;
3645 count1 = (adj + N12 - N11) / STEP1;
3646 count = count1 * count2 * count3;
3647 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3648 if (more) goto L0; else goto L3;
3649 L0:
3650 V = istart0;
3651 T = V;
3652 V3 = N31 + (T % count3) * STEP3;
3653 T = T / count3;
3654 V2 = N21 + (T % count2) * STEP2;
3655 T = T / count2;
3656 V1 = N11 + T * STEP1;
3657 iend = iend0;
3658 L1:
3659 BODY;
3660 V += 1;
3661 if (V < iend) goto L10; else goto L2;
3662 L10:
3663 V3 += STEP3;
3664 if (V3 cond3 N32) goto L1; else goto L11;
3665 L11:
3666 V3 = N31;
3667 V2 += STEP2;
3668 if (V2 cond2 N22) goto L1; else goto L12;
3669 L12:
3670 V2 = N21;
3671 V1 += STEP1;
3672 goto L1;
3673 L2:
3674 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3675 L3:
3677 */
3679 static void
3680 expand_omp_for_generic (struct omp_region *region,
3681 struct omp_for_data *fd,
3682 enum built_in_function start_fn,
3683 enum built_in_function next_fn)
3685 tree type, istart0, iend0, iend;
3686 tree t, vmain, vback, bias = NULL_TREE;
3687 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3688 basic_block l2_bb = NULL, l3_bb = NULL;
3689 gimple_stmt_iterator gsi;
3690 gimple stmt;
3691 bool in_combined_parallel = is_combined_parallel (region);
3692 bool broken_loop = region->cont == NULL;
3693 edge e, ne;
3694 tree *counts = NULL;
3695 int i;
3697 gcc_assert (!broken_loop || !in_combined_parallel);
3698 gcc_assert (fd->iter_type == long_integer_type_node
3699 || !in_combined_parallel);
3701 type = TREE_TYPE (fd->loop.v);
3702 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3703 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3704 TREE_ADDRESSABLE (istart0) = 1;
3705 TREE_ADDRESSABLE (iend0) = 1;
3706 if (gimple_in_ssa_p (cfun))
3708 add_referenced_var (istart0);
3709 add_referenced_var (iend0);
3712 /* See if we need to bias by LLONG_MIN. */
3713 if (fd->iter_type == long_long_unsigned_type_node
3714 && TREE_CODE (type) == INTEGER_TYPE
3715 && !TYPE_UNSIGNED (type))
3717 tree n1, n2;
3719 if (fd->loop.cond_code == LT_EXPR)
3721 n1 = fd->loop.n1;
3722 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3724 else
3726 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3727 n2 = fd->loop.n1;
3729 if (TREE_CODE (n1) != INTEGER_CST
3730 || TREE_CODE (n2) != INTEGER_CST
3731 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3732 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3735 entry_bb = region->entry;
3736 cont_bb = region->cont;
3737 collapse_bb = NULL;
3738 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3739 gcc_assert (broken_loop
3740 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3741 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3742 l1_bb = single_succ (l0_bb);
3743 if (!broken_loop)
3745 l2_bb = create_empty_bb (cont_bb);
3746 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3747 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3749 else
3750 l2_bb = NULL;
3751 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3752 exit_bb = region->exit;
3754 gsi = gsi_last_bb (entry_bb);
3756 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3757 if (fd->collapse > 1)
3759 /* Collapsed loops need work for expansion in SSA form.  */
3760 gcc_assert (!gimple_in_ssa_p (cfun));
3761 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3762 for (i = 0; i < fd->collapse; i++)
3764 tree itype = TREE_TYPE (fd->loops[i].v);
3766 if (POINTER_TYPE_P (itype))
3767 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3768 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3769 ? -1 : 1));
3770 t = fold_build2 (PLUS_EXPR, itype,
3771 fold_convert (itype, fd->loops[i].step), t);
3772 t = fold_build2 (PLUS_EXPR, itype, t,
3773 fold_convert (itype, fd->loops[i].n2));
3774 t = fold_build2 (MINUS_EXPR, itype, t,
3775 fold_convert (itype, fd->loops[i].n1));
3776 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3777 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3778 fold_build1 (NEGATE_EXPR, itype, t),
3779 fold_build1 (NEGATE_EXPR, itype,
3780 fold_convert (itype,
3781 fd->loops[i].step)));
3782 else
3783 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3784 fold_convert (itype, fd->loops[i].step));
3785 t = fold_convert (type, t);
3786 if (TREE_CODE (t) == INTEGER_CST)
3787 counts[i] = t;
3788 else
3790 counts[i] = create_tmp_var (type, ".count");
3791 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3792 true, GSI_SAME_STMT);
3793 stmt = gimple_build_assign (counts[i], t);
3794 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3796 if (SSA_VAR_P (fd->loop.n2))
3798 if (i == 0)
3799 t = counts[0];
3800 else
3802 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3803 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3804 true, GSI_SAME_STMT);
3806 stmt = gimple_build_assign (fd->loop.n2, t);
3807 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3811 if (in_combined_parallel)
3813 /* In a combined parallel loop, emit a call to
3814 GOMP_loop_foo_next. */
3815 t = build_call_expr (built_in_decls[next_fn], 2,
3816 build_fold_addr_expr (istart0),
3817 build_fold_addr_expr (iend0));
3819 else
3821 tree t0, t1, t2, t3, t4;
3822 /* If this is not a combined parallel loop, emit a call to
3823 GOMP_loop_foo_start in ENTRY_BB. */
3824 t4 = build_fold_addr_expr (iend0);
3825 t3 = build_fold_addr_expr (istart0);
3826 t2 = fold_convert (fd->iter_type, fd->loop.step);
3827 if (POINTER_TYPE_P (type)
3828 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3830 /* Avoid casting pointers to an integer of a different size.  */
3831 tree itype
3832 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3833 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3834 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3836 else
3838 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3839 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3841 if (bias)
3843 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3844 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3846 if (fd->iter_type == long_integer_type_node)
3848 if (fd->chunk_size)
3850 t = fold_convert (fd->iter_type, fd->chunk_size);
3851 t = build_call_expr (built_in_decls[start_fn], 6,
3852 t0, t1, t2, t, t3, t4);
3854 else
3855 t = build_call_expr (built_in_decls[start_fn], 5,
3856 t0, t1, t2, t3, t4);
3858 else
3860 tree t5;
3861 tree c_bool_type;
3863 /* The GOMP_loop_ull_*start functions have an additional boolean
3864 argument, true for < loops and false for > loops.
3865 In Fortran, the C bool type can be different from
3866 boolean_type_node. */
3867 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3868 t5 = build_int_cst (c_bool_type,
3869 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3870 if (fd->chunk_size)
3872 t = fold_convert (fd->iter_type, fd->chunk_size);
3873 t = build_call_expr (built_in_decls[start_fn], 7,
3874 t5, t0, t1, t2, t, t3, t4);
3876 else
3877 t = build_call_expr (built_in_decls[start_fn], 6,
3878 t5, t0, t1, t2, t3, t4);
3881 if (TREE_TYPE (t) != boolean_type_node)
3882 t = fold_build2 (NE_EXPR, boolean_type_node,
3883 t, build_int_cst (TREE_TYPE (t), 0));
3884 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3885 true, GSI_SAME_STMT);
3886 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3888 /* Remove the GIMPLE_OMP_FOR statement. */
3889 gsi_remove (&gsi, true);
3891 /* Iteration setup for sequential loop goes in L0_BB. */
3892 gsi = gsi_start_bb (l0_bb);
3893 t = istart0;
3894 if (bias)
3895 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3896 if (POINTER_TYPE_P (type))
3897 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3898 0), t);
3899 t = fold_convert (type, t);
3900 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3901 false, GSI_CONTINUE_LINKING);
3902 stmt = gimple_build_assign (fd->loop.v, t);
3903 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3905 t = iend0;
3906 if (bias)
3907 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3908 if (POINTER_TYPE_P (type))
3909 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3910 0), t);
3911 t = fold_convert (type, t);
3912 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3913 false, GSI_CONTINUE_LINKING);
3914 if (fd->collapse > 1)
3916 tree tem = create_tmp_var (type, ".tem");
3918 stmt = gimple_build_assign (tem, fd->loop.v);
3919 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3920 for (i = fd->collapse - 1; i >= 0; i--)
3922 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3923 itype = vtype;
3924 if (POINTER_TYPE_P (vtype))
3925 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3926 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3927 t = fold_convert (itype, t);
3928 t = fold_build2 (MULT_EXPR, itype, t,
3929 fold_convert (itype, fd->loops[i].step));
3930 if (POINTER_TYPE_P (vtype))
3931 t = fold_build_pointer_plus (fd->loops[i].n1, t);
3932 else
3933 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3934 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3935 false, GSI_CONTINUE_LINKING);
3936 stmt = gimple_build_assign (fd->loops[i].v, t);
3937 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3938 if (i != 0)
3940 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3941 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3942 false, GSI_CONTINUE_LINKING);
3943 stmt = gimple_build_assign (tem, t);
3944 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3949 if (!broken_loop)
3951 /* Code to control the increment and predicate for the sequential
3952 loop goes in the CONT_BB. */
3953 gsi = gsi_last_bb (cont_bb);
3954 stmt = gsi_stmt (gsi);
3955 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3956 vmain = gimple_omp_continue_control_use (stmt);
3957 vback = gimple_omp_continue_control_def (stmt);
3959 if (POINTER_TYPE_P (type))
3960 t = fold_build_pointer_plus (vmain, fd->loop.step);
3961 else
3962 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3963 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3964 true, GSI_SAME_STMT);
3965 stmt = gimple_build_assign (vback, t);
3966 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3968 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3969 stmt = gimple_build_cond_empty (t);
3970 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3972 /* Remove GIMPLE_OMP_CONTINUE. */
3973 gsi_remove (&gsi, true);
3975 if (fd->collapse > 1)
3977 basic_block last_bb, bb;
3979 last_bb = cont_bb;
3980 for (i = fd->collapse - 1; i >= 0; i--)
3982 tree vtype = TREE_TYPE (fd->loops[i].v);
3984 bb = create_empty_bb (last_bb);
3985 gsi = gsi_start_bb (bb);
3987 if (i < fd->collapse - 1)
3989 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3990 e->probability = REG_BR_PROB_BASE / 8;
3992 t = fd->loops[i + 1].n1;
3993 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3994 false, GSI_CONTINUE_LINKING);
3995 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3996 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3998 else
3999 collapse_bb = bb;
4001 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
4003 if (POINTER_TYPE_P (vtype))
4004 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
4005 else
4006 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
4007 fd->loops[i].step);
4008 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4009 false, GSI_CONTINUE_LINKING);
4010 stmt = gimple_build_assign (fd->loops[i].v, t);
4011 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4013 if (i > 0)
4015 t = fd->loops[i].n2;
4016 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4017 false, GSI_CONTINUE_LINKING);
4018 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
4019 fd->loops[i].v, t);
4020 stmt = gimple_build_cond_empty (t);
4021 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4022 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4023 e->probability = REG_BR_PROB_BASE * 7 / 8;
4025 else
4026 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4027 last_bb = bb;
4031 /* Emit code to get the next parallel iteration in L2_BB. */
4032 gsi = gsi_start_bb (l2_bb);
4034 t = build_call_expr (built_in_decls[next_fn], 2,
4035 build_fold_addr_expr (istart0),
4036 build_fold_addr_expr (iend0));
4037 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4038 false, GSI_CONTINUE_LINKING);
4039 if (TREE_TYPE (t) != boolean_type_node)
4040 t = fold_build2 (NE_EXPR, boolean_type_node,
4041 t, build_int_cst (TREE_TYPE (t), 0));
4042 stmt = gimple_build_cond_empty (t);
4043 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4046 /* Add the loop cleanup function. */
4047 gsi = gsi_last_bb (exit_bb);
4048 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4049 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
4050 else
4051 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
4052 stmt = gimple_build_call (t, 0);
4053 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4054 gsi_remove (&gsi, true);
4056 /* Connect the new blocks. */
4057 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4058 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4060 if (!broken_loop)
4062 gimple_seq phis;
4064 e = find_edge (cont_bb, l3_bb);
4065 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4067 phis = phi_nodes (l3_bb);
4068 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4070 gimple phi = gsi_stmt (gsi);
4071 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4072 PHI_ARG_DEF_FROM_EDGE (phi, e));
4074 remove_edge (e);
4076 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4077 if (fd->collapse > 1)
4079 e = find_edge (cont_bb, l1_bb);
4080 remove_edge (e);
4081 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4083 else
4085 e = find_edge (cont_bb, l1_bb);
4086 e->flags = EDGE_TRUE_VALUE;
4088 e->probability = REG_BR_PROB_BASE * 7 / 8;
4089 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4090 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4092 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4093 recompute_dominator (CDI_DOMINATORS, l2_bb));
4094 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4095 recompute_dominator (CDI_DOMINATORS, l3_bb));
4096 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4097 recompute_dominator (CDI_DOMINATORS, l0_bb));
4098 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4099 recompute_dominator (CDI_DOMINATORS, l1_bb));
4104 /* A subroutine of expand_omp_for. Generate code for a parallel
4105 loop with static schedule and no specified chunk size. Given
4106 parameters:
4108 for (V = N1; V cond N2; V += STEP) BODY;
4110 where COND is "<" or ">", we generate pseudocode
4112 if (cond is <)
4113 adj = STEP - 1;
4114 else
4115 adj = STEP + 1;
4116 if ((__typeof (V)) -1 > 0 && cond is >)
4117 n = -(adj + N2 - N1) / -STEP;
4118 else
4119 n = (adj + N2 - N1) / STEP;
4120 q = n / nthreads;
4121 tt = n % nthreads;
4122 if (threadid < tt) goto L3; else goto L4;
4124 tt = 0;
4125 q = q + 1;
4127 s0 = q * threadid + tt;
4128 e0 = s0 + q;
4129 V = s0 * STEP + N1;
4130 if (s0 >= e0) goto L2; else goto L0;
4132 e = e0 * STEP + N1;
4134 BODY;
4135 V += STEP;
4136 if (V cond e) goto L1;
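/* A small worked example of the partitioning above (illustrative
   numbers, not taken from the source): for N1 = 0, N2 = 10, STEP = 1
   and nthreads = 4 we get n = 10, q = 2, tt = 2.  Threads 0 and 1
   satisfy threadid < tt, so they take q + 1 = 3 iterations each
   ([s0, e0) = [0, 3) and [3, 6)); threads 2 and 3 take q = 2 each
   ([6, 8) and [8, 10)).  Every iteration is assigned exactly once.  */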
4140 static void
4141 expand_omp_for_static_nochunk (struct omp_region *region,
4142 struct omp_for_data *fd)
4144 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
4145 tree type, itype, vmain, vback;
4146 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4147 basic_block body_bb, cont_bb;
4148 basic_block fin_bb;
4149 gimple_stmt_iterator gsi;
4150 gimple stmt;
4151 edge ep;
4153 itype = type = TREE_TYPE (fd->loop.v);
4154 if (POINTER_TYPE_P (type))
4155 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4157 entry_bb = region->entry;
4158 cont_bb = region->cont;
4159 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4160 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4161 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4162 body_bb = single_succ (seq_start_bb);
4163 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4164 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4165 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4166 exit_bb = region->exit;
4168 /* Iteration space partitioning goes in ENTRY_BB. */
4169 gsi = gsi_last_bb (entry_bb);
4170 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4172 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4173 t = fold_convert (itype, t);
4174 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4175 true, GSI_SAME_STMT);
4177 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4178 t = fold_convert (itype, t);
4179 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4180 true, GSI_SAME_STMT);
4182 fd->loop.n1
4183 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4184 true, NULL_TREE, true, GSI_SAME_STMT);
4185 fd->loop.n2
4186 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4187 true, NULL_TREE, true, GSI_SAME_STMT);
4188 fd->loop.step
4189 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4190 true, NULL_TREE, true, GSI_SAME_STMT);
4192 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4193 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4194 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4195 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4196 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4197 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4198 fold_build1 (NEGATE_EXPR, itype, t),
4199 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4200 else
4201 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4202 t = fold_convert (itype, t);
4203 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4205 q = create_tmp_var (itype, "q");
4206 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4207 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4208 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4210 tt = create_tmp_var (itype, "tt");
4211 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4212 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4213 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
4215 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4216 stmt = gimple_build_cond_empty (t);
4217 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4219 second_bb = split_block (entry_bb, stmt)->dest;
4220 gsi = gsi_last_bb (second_bb);
4221 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4223 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4224 GSI_SAME_STMT);
4225 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4226 build_int_cst (itype, 1));
4227 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4229 third_bb = split_block (second_bb, stmt)->dest;
4230 gsi = gsi_last_bb (third_bb);
4231 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4233 t = build2 (MULT_EXPR, itype, q, threadid);
4234 t = build2 (PLUS_EXPR, itype, t, tt);
4235 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4237 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4238 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4240 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4241 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4243 /* Remove the GIMPLE_OMP_FOR statement. */
4244 gsi_remove (&gsi, true);
4246 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4247 gsi = gsi_start_bb (seq_start_bb);
4249 t = fold_convert (itype, s0);
4250 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4251 if (POINTER_TYPE_P (type))
4252 t = fold_build_pointer_plus (fd->loop.n1, t);
4253 else
4254 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4255 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4256 false, GSI_CONTINUE_LINKING);
4257 stmt = gimple_build_assign (fd->loop.v, t);
4258 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4260 t = fold_convert (itype, e0);
4261 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4262 if (POINTER_TYPE_P (type))
4263 t = fold_build_pointer_plus (fd->loop.n1, t);
4264 else
4265 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4266 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4267 false, GSI_CONTINUE_LINKING);
4269 /* The code controlling the sequential loop replaces the
4270 GIMPLE_OMP_CONTINUE. */
4271 gsi = gsi_last_bb (cont_bb);
4272 stmt = gsi_stmt (gsi);
4273 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4274 vmain = gimple_omp_continue_control_use (stmt);
4275 vback = gimple_omp_continue_control_def (stmt);
4277 if (POINTER_TYPE_P (type))
4278 t = fold_build_pointer_plus (vmain, fd->loop.step);
4279 else
4280 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4281 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4282 true, GSI_SAME_STMT);
4283 stmt = gimple_build_assign (vback, t);
4284 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4286 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4287 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4289 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4290 gsi_remove (&gsi, true);
4292 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4293 gsi = gsi_last_bb (exit_bb);
4294 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4295 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4296 false, GSI_SAME_STMT);
4297 gsi_remove (&gsi, true);
4299 /* Connect all the blocks. */
4300 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
4301 ep->probability = REG_BR_PROB_BASE / 4 * 3;
4302 ep = find_edge (entry_bb, second_bb);
4303 ep->flags = EDGE_TRUE_VALUE;
4304 ep->probability = REG_BR_PROB_BASE / 4;
4305 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4306 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4308 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4309 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4311 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
4312 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
4313 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
4314 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4315 recompute_dominator (CDI_DOMINATORS, body_bb));
4316 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4317 recompute_dominator (CDI_DOMINATORS, fin_bb));
4321 /* A subroutine of expand_omp_for. Generate code for a parallel
4322 loop with static schedule and a specified chunk size. Given
4323 parameters:
4325 for (V = N1; V cond N2; V += STEP) BODY;
4327 where COND is "<" or ">", we generate pseudocode
4329 if (cond is <)
4330 adj = STEP - 1;
4331 else
4332 adj = STEP + 1;
4333 if ((__typeof (V)) -1 > 0 && cond is >)
4334 n = -(adj + N2 - N1) / -STEP;
4335 else
4336 n = (adj + N2 - N1) / STEP;
4337 trip = 0;
4338 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4339 here so that V is defined
4340 if the loop is not entered
4342 s0 = (trip * nthreads + threadid) * CHUNK;
4343 e0 = min(s0 + CHUNK, n);
4344 if (s0 < n) goto L1; else goto L4;
4346 V = s0 * STEP + N1;
4347 e = e0 * STEP + N1;
4349 BODY;
4350 V += STEP;
4351 if (V cond e) goto L2; else goto L3;
4353 trip += 1;
4354 goto L0;
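/* Worked example (illustrative numbers): for n = 10, nthreads = 2 and
   CHUNK = 3, trip 0 assigns [0, 3) to thread 0 and [3, 6) to thread 1;
   trip 1 assigns [6, 9) to thread 0 and [9, 10) to thread 1; on trip 2
   both threads compute s0 >= n and exit at L4.  */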
4358 static void
4359 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4361 tree n, s0, e0, e, t;
4362 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4363 tree type, itype, v_main, v_back, v_extra;
4364 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4365 basic_block trip_update_bb, cont_bb, fin_bb;
4366 gimple_stmt_iterator si;
4367 gimple stmt;
4368 edge se;
4370 itype = type = TREE_TYPE (fd->loop.v);
4371 if (POINTER_TYPE_P (type))
4372 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4374 entry_bb = region->entry;
4375 se = split_block (entry_bb, last_stmt (entry_bb));
4376 entry_bb = se->src;
4377 iter_part_bb = se->dest;
4378 cont_bb = region->cont;
4379 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4380 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4381 == FALLTHRU_EDGE (cont_bb)->dest);
4382 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4383 body_bb = single_succ (seq_start_bb);
4384 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4385 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4386 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4387 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4388 exit_bb = region->exit;
4390 /* Trip and adjustment setup goes in ENTRY_BB. */
4391 si = gsi_last_bb (entry_bb);
4392 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4394 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4395 t = fold_convert (itype, t);
4396 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4397 true, GSI_SAME_STMT);
4399 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4400 t = fold_convert (itype, t);
4401 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4402 true, GSI_SAME_STMT);
4404 fd->loop.n1
4405 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4406 true, NULL_TREE, true, GSI_SAME_STMT);
4407 fd->loop.n2
4408 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4409 true, NULL_TREE, true, GSI_SAME_STMT);
4410 fd->loop.step
4411 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4412 true, NULL_TREE, true, GSI_SAME_STMT);
4413 fd->chunk_size
4414 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4415 true, NULL_TREE, true, GSI_SAME_STMT);
4417 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4418 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4419 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4420 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4421 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4422 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4423 fold_build1 (NEGATE_EXPR, itype, t),
4424 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4425 else
4426 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4427 t = fold_convert (itype, t);
4428 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4429 true, GSI_SAME_STMT);
4431 trip_var = create_tmp_var (itype, ".trip");
4432 if (gimple_in_ssa_p (cfun))
4434 add_referenced_var (trip_var);
4435 trip_init = make_ssa_name (trip_var, NULL);
4436 trip_main = make_ssa_name (trip_var, NULL);
4437 trip_back = make_ssa_name (trip_var, NULL);
4439 else
4441 trip_init = trip_var;
4442 trip_main = trip_var;
4443 trip_back = trip_var;
4446 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4447 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4449 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4450 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4451 if (POINTER_TYPE_P (type))
4452 t = fold_build_pointer_plus (fd->loop.n1, t);
4453 else
4454 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4455 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4456 true, GSI_SAME_STMT);
4458 /* Remove the GIMPLE_OMP_FOR. */
4459 gsi_remove (&si, true);
4461 /* Iteration space partitioning goes in ITER_PART_BB. */
4462 si = gsi_last_bb (iter_part_bb);
4464 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4465 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4466 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4467 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4468 false, GSI_CONTINUE_LINKING);
4470 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4471 t = fold_build2 (MIN_EXPR, itype, t, n);
4472 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4473 false, GSI_CONTINUE_LINKING);
4475 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4476 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4478 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4479 si = gsi_start_bb (seq_start_bb);
4481 t = fold_convert (itype, s0);
4482 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4483 if (POINTER_TYPE_P (type))
4484 t = fold_build_pointer_plus (fd->loop.n1, t);
4485 else
4486 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4487 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4488 false, GSI_CONTINUE_LINKING);
4489 stmt = gimple_build_assign (fd->loop.v, t);
4490 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4492 t = fold_convert (itype, e0);
4493 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4494 if (POINTER_TYPE_P (type))
4495 t = fold_build_pointer_plus (fd->loop.n1, t);
4496 else
4497 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4498 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4499 false, GSI_CONTINUE_LINKING);
4501 /* The code controlling the sequential loop goes in CONT_BB,
4502 replacing the GIMPLE_OMP_CONTINUE. */
4503 si = gsi_last_bb (cont_bb);
4504 stmt = gsi_stmt (si);
4505 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4506 v_main = gimple_omp_continue_control_use (stmt);
4507 v_back = gimple_omp_continue_control_def (stmt);
4509 if (POINTER_TYPE_P (type))
4510 t = fold_build_pointer_plus (v_main, fd->loop.step);
4511 else
4512 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4513 stmt = gimple_build_assign (v_back, t);
4514 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4516 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4517 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4519 /* Remove GIMPLE_OMP_CONTINUE. */
4520 gsi_remove (&si, true);
4522 /* Trip update code goes into TRIP_UPDATE_BB. */
4523 si = gsi_start_bb (trip_update_bb);
4525 t = build_int_cst (itype, 1);
4526 t = build2 (PLUS_EXPR, itype, trip_main, t);
4527 stmt = gimple_build_assign (trip_back, t);
4528 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4530 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4531 si = gsi_last_bb (exit_bb);
4532 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4533 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4534 false, GSI_SAME_STMT);
4535 gsi_remove (&si, true);
4537 /* Connect the new blocks. */
4538 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4539 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4541 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4542 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4544 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4546 if (gimple_in_ssa_p (cfun))
4548 gimple_stmt_iterator psi;
4549 gimple phi;
4550 edge re, ene;
4551 edge_var_map_vector head;
4552 edge_var_map *vm;
4553 size_t i;
4555 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4556 remove the arguments of the phi nodes in fin_bb. We need to create
4557 appropriate phi nodes in iter_part_bb instead. */
4558 se = single_pred_edge (fin_bb);
4559 re = single_succ_edge (trip_update_bb);
4560 head = redirect_edge_var_map_vector (re);
4561 ene = single_succ_edge (entry_bb);
4563 psi = gsi_start_phis (fin_bb);
4564 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4565 gsi_next (&psi), ++i)
4567 gimple nphi;
4568 source_location locus;
4570 phi = gsi_stmt (psi);
4571 t = gimple_phi_result (phi);
4572 gcc_assert (t == redirect_edge_var_map_result (vm));
4573 nphi = create_phi_node (t, iter_part_bb);
4574 SSA_NAME_DEF_STMT (t) = nphi;
4576 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4577 locus = gimple_phi_arg_location_from_edge (phi, se);
4579 /* A special case -- fd->loop.v is not yet computed in
4580 iter_part_bb, we need to use v_extra instead. */
4581 if (t == fd->loop.v)
4582 t = v_extra;
4583 add_phi_arg (nphi, t, ene, locus);
4584 locus = redirect_edge_var_map_location (vm);
4585 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4587 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4588 redirect_edge_var_map_clear (re);
4589 while (1)
4591 psi = gsi_start_phis (fin_bb);
4592 if (gsi_end_p (psi))
4593 break;
4594 remove_phi_node (&psi, false);
4597 /* Make phi node for trip. */
4598 phi = create_phi_node (trip_main, iter_part_bb);
4599 SSA_NAME_DEF_STMT (trip_main) = phi;
4600 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4601 UNKNOWN_LOCATION);
4602 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4603 UNKNOWN_LOCATION);
4606 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4607 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4608 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4609 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4610 recompute_dominator (CDI_DOMINATORS, fin_bb));
4611 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4612 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4613 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4614 recompute_dominator (CDI_DOMINATORS, body_bb));
4618 /* Expand the OpenMP loop defined by REGION. */
4620 static void
4621 expand_omp_for (struct omp_region *region)
4623 struct omp_for_data fd;
4624 struct omp_for_data_loop *loops;
4626 loops
4627 = (struct omp_for_data_loop *)
4628 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4629 * sizeof (struct omp_for_data_loop));
4630 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4631 region->sched_kind = fd.sched_kind;
4633 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4634 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4635 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4636 if (region->cont)
4638 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4639 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4640 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4643 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4644 && !fd.have_ordered
4645 && fd.collapse == 1
4646 && region->cont != NULL)
4648 if (fd.chunk_size == NULL)
4649 expand_omp_for_static_nochunk (region, &fd);
4650 else
4651 expand_omp_for_static_chunk (region, &fd);
4653 else
4655 int fn_index, start_ix, next_ix;
4657 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4658 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4659 ? 3 : fd.sched_kind;
4660 fn_index += fd.have_ordered * 4;
4661 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4662 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
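/* For example (assuming the usual relative order of the GOMP_loop_*
   builtins in omp-builtins.def): schedule(dynamic) gives
   fn_index == 1 and selects BUILT_IN_GOMP_LOOP_DYNAMIC_START, while
   schedule(dynamic) combined with an ordered clause gives
   fn_index == 5 and selects
   BUILT_IN_GOMP_LOOP_ORDERED_DYNAMIC_START.  */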
4663 if (fd.iter_type == long_long_unsigned_type_node)
4665 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4666 - BUILT_IN_GOMP_LOOP_STATIC_START;
4667 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4668 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4670 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4671 (enum built_in_function) next_ix);
4674 update_ssa (TODO_update_ssa_only_virtuals);
4678 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4680 v = GOMP_sections_start (n);
4682 switch (v)
4684 case 0:
4685 goto L2;
4686 case 1:
4687 section 1;
4688 goto L1;
4689 case 2:
4691 case n:
4693 default:
4694 abort ();
4697 v = GOMP_sections_next ();
4698 goto L0;
4700 reduction;
4702 If this is a combined parallel sections region, replace the call to
4703 GOMP_sections_start with a call to GOMP_sections_next. */
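/* A source-level illustration (not part of this function): given

     #pragma omp sections
     {
       #pragma omp section
       foo ();
       #pragma omp section
       bar ();
     }

   the switch above dispatches case 1 to foo () and case 2 to bar (),
   with case 0 taken once GOMP_sections_next () reports no more work.  */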
4705 static void
4706 expand_omp_sections (struct omp_region *region)
4708 tree t, u, vin = NULL, vmain, vnext, l2;
4709 VEC (tree,heap) *label_vec;
4710 unsigned len;
4711 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4712 gimple_stmt_iterator si, switch_si;
4713 gimple sections_stmt, stmt, cont;
4714 edge_iterator ei;
4715 edge e;
4716 struct omp_region *inner;
4717 unsigned i, casei;
4718 bool exit_reachable = region->cont != NULL;
4720 gcc_assert (exit_reachable == (region->exit != NULL));
4721 entry_bb = region->entry;
4722 l0_bb = single_succ (entry_bb);
4723 l1_bb = region->cont;
4724 l2_bb = region->exit;
4725 if (exit_reachable)
4727 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
4728 l2 = gimple_block_label (l2_bb);
4729 else
4731 /* This can happen if there are reductions. */
4732 len = EDGE_COUNT (l0_bb->succs);
4733 gcc_assert (len > 0);
4734 e = EDGE_SUCC (l0_bb, len - 1);
4735 si = gsi_last_bb (e->dest);
4736 l2 = NULL_TREE;
4737 if (gsi_end_p (si)
4738 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4739 l2 = gimple_block_label (e->dest);
4740 else
4741 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4743 si = gsi_last_bb (e->dest);
4744 if (gsi_end_p (si)
4745 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4747 l2 = gimple_block_label (e->dest);
4748 break;
4752 default_bb = create_empty_bb (l1_bb->prev_bb);
4754 else
4756 default_bb = create_empty_bb (l0_bb);
4757 l2 = gimple_block_label (default_bb);
4760 /* We will build a switch() with enough cases for all the
4761 GIMPLE_OMP_SECTION regions, a '0' case taken when there is no more work
4762 and a default case to abort if something goes wrong. */
4763 len = EDGE_COUNT (l0_bb->succs);
4765 /* Use VEC_quick_push on label_vec throughout, since we know the size
4766 in advance. */
4767 label_vec = VEC_alloc (tree, heap, len);
4769 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4770 GIMPLE_OMP_SECTIONS statement. */
4771 si = gsi_last_bb (entry_bb);
4772 sections_stmt = gsi_stmt (si);
4773 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4774 vin = gimple_omp_sections_control (sections_stmt);
4775 if (!is_combined_parallel (region))
4777 /* If we are not inside a combined parallel+sections region,
4778 call GOMP_sections_start. */
4779 t = build_int_cst (unsigned_type_node,
4780 exit_reachable ? len - 1 : len);
4781 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4782 stmt = gimple_build_call (u, 1, t);
4784 else
4786 /* Otherwise, call GOMP_sections_next. */
4787 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4788 stmt = gimple_build_call (u, 0);
4790 gimple_call_set_lhs (stmt, vin);
4791 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4792 gsi_remove (&si, true);
4794 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4795 L0_BB. */
4796 switch_si = gsi_last_bb (l0_bb);
4797 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4798 if (exit_reachable)
4800 cont = last_stmt (l1_bb);
4801 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4802 vmain = gimple_omp_continue_control_use (cont);
4803 vnext = gimple_omp_continue_control_def (cont);
4805 else
4807 vmain = vin;
4808 vnext = NULL_TREE;
4811 i = 0;
4812 if (exit_reachable)
4814 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
4815 VEC_quick_push (tree, label_vec, t);
4816 i++;
4819 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4820 for (inner = region->inner, casei = 1;
4821 inner;
4822 inner = inner->next, i++, casei++)
4824 basic_block s_entry_bb, s_exit_bb;
4826 /* Skip optional reduction region. */
4827 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4829 --i;
4830 --casei;
4831 continue;
4834 s_entry_bb = inner->entry;
4835 s_exit_bb = inner->exit;
4837 t = gimple_block_label (s_entry_bb);
4838 u = build_int_cst (unsigned_type_node, casei);
4839 u = build_case_label (u, NULL, t);
4840 VEC_quick_push (tree, label_vec, u);
4842 si = gsi_last_bb (s_entry_bb);
4843 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4844 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4845 gsi_remove (&si, true);
4846 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4848 if (s_exit_bb == NULL)
4849 continue;
4851 si = gsi_last_bb (s_exit_bb);
4852 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4853 gsi_remove (&si, true);
4855 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4858 /* Error handling code goes in DEFAULT_BB. */
4859 t = gimple_block_label (default_bb);
4860 u = build_case_label (NULL, NULL, t);
4861 make_edge (l0_bb, default_bb, 0);
4863 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4864 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4865 gsi_remove (&switch_si, true);
4866 VEC_free (tree, heap, label_vec);
4868 si = gsi_start_bb (default_bb);
4869 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4870 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4872 if (exit_reachable)
4874 /* Code to get the next section goes in L1_BB. */
4875 si = gsi_last_bb (l1_bb);
4876 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4878 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4879 gimple_call_set_lhs (stmt, vnext);
4880 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4881 gsi_remove (&si, true);
4883 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4885 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4886 si = gsi_last_bb (l2_bb);
4887 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4888 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4889 else
4890 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4891 stmt = gimple_build_call (t, 0);
4892 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4893 gsi_remove (&si, true);
4896 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4900 /* Expand code for an OpenMP single directive. We've already expanded
4901 much of the code; here we simply place the GOMP_barrier call. */
4903 static void
4904 expand_omp_single (struct omp_region *region)
4906 basic_block entry_bb, exit_bb;
4907 gimple_stmt_iterator si;
4908 bool need_barrier = false;
4910 entry_bb = region->entry;
4911 exit_bb = region->exit;
4913 si = gsi_last_bb (entry_bb);
4914 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4915 be removed. We need to ensure that the thread that entered the single
4916 does not exit before the data is copied out by the other threads. */
4917 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4918 OMP_CLAUSE_COPYPRIVATE))
4919 need_barrier = true;
4920 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4921 gsi_remove (&si, true);
4922 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4924 si = gsi_last_bb (exit_bb);
4925 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4926 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4927 false, GSI_SAME_STMT);
4928 gsi_remove (&si, true);
4929 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4933 /* Generic expansion for OpenMP synchronization directives: master,
4934 ordered and critical. All we need to do here is remove the entry
4935 and exit markers for REGION. */
4937 static void
4938 expand_omp_synch (struct omp_region *region)
4940 basic_block entry_bb, exit_bb;
4941 gimple_stmt_iterator si;
4943 entry_bb = region->entry;
4944 exit_bb = region->exit;
4946 si = gsi_last_bb (entry_bb);
4947 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4948 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4949 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4950 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4951 gsi_remove (&si, true);
4952 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4954 if (exit_bb)
4956 si = gsi_last_bb (exit_bb);
4957 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4958 gsi_remove (&si, true);
4959 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4963 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4964 operation as a normal volatile load. */
4966 static bool
4967 expand_omp_atomic_load (basic_block load_bb, tree addr, tree loaded_val)
4969 /* FIXME */
4970 (void) load_bb;
4971 (void) addr;
4972 (void) loaded_val;
4973 return false;
4976 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4977 operation as a normal volatile store. */
4979 static bool
4980 expand_omp_atomic_store (basic_block load_bb, tree addr)
4982 /* FIXME */
4983 (void) load_bb;
4984 (void) addr;
4985 return false;
4988 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4989 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4990 size of the data type, and thus usable to find the index of the builtin
4991 decl. Returns false if the expression is not of the proper form. */
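/* Source-level illustration, assuming a 4-byte int (so INDEX == 2):

     #pragma omp atomic
     x += 5;

   matches the expected sequence and can be implemented as

     __sync_fetch_and_add (&x, 5);

   since neither the old nor the new value is needed.  */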
4993 static bool
4994 expand_omp_atomic_fetch_op (basic_block load_bb,
4995 tree addr, tree loaded_val,
4996 tree stored_val, int index)
4998 enum built_in_function oldbase, newbase;
4999 tree decl, itype, call;
5000 direct_optab optab, oldoptab, newoptab;
5001 tree lhs, rhs;
5002 basic_block store_bb = single_succ (load_bb);
5003 gimple_stmt_iterator gsi;
5004 gimple stmt;
5005 location_t loc;
5006 bool need_old, need_new;
5008 /* We expect to find the following sequences:
5010 load_bb:
5011 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
5013 store_bb:
5014 val = tmp OP something; (or: something OP tmp)
5015 GIMPLE_OMP_STORE (val)
5017 ???FIXME: Allow a more flexible sequence.
5018 Perhaps use data flow to pick the statements.
5022 gsi = gsi_after_labels (store_bb);
5023 stmt = gsi_stmt (gsi);
5024 loc = gimple_location (stmt);
5025 if (!is_gimple_assign (stmt))
5026 return false;
5027 gsi_next (&gsi);
5028 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
5029 return false;
5030 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
5031 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
5032 gcc_checking_assert (!need_old || !need_new);
5034 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
5035 return false;
5037 /* Check for one of the supported fetch-op operations. */
5038 switch (gimple_assign_rhs_code (stmt))
5040 case PLUS_EXPR:
5041 case POINTER_PLUS_EXPR:
5042 oldbase = BUILT_IN_SYNC_FETCH_AND_ADD_N;
5043 newbase = BUILT_IN_SYNC_ADD_AND_FETCH_N;
5044 optab = sync_add_optab;
5045 oldoptab = sync_old_add_optab;
5046 newoptab = sync_new_add_optab;
5047 break;
5048 case MINUS_EXPR:
5049 oldbase = BUILT_IN_SYNC_FETCH_AND_SUB_N;
5050 newbase = BUILT_IN_SYNC_SUB_AND_FETCH_N;
5051 optab = sync_add_optab;
5052 oldoptab = sync_old_add_optab;
5053 newoptab = sync_new_add_optab;
5054 break;
5055 case BIT_AND_EXPR:
5056 oldbase = BUILT_IN_SYNC_FETCH_AND_AND_N;
5057 newbase = BUILT_IN_SYNC_AND_AND_FETCH_N;
5058 optab = sync_and_optab;
5059 oldoptab = sync_old_and_optab;
5060 newoptab = sync_new_and_optab;
5061 break;
5062 case BIT_IOR_EXPR:
5063 oldbase = BUILT_IN_SYNC_FETCH_AND_OR_N;
5064 newbase = BUILT_IN_SYNC_OR_AND_FETCH_N;
5065 optab = sync_ior_optab;
5066 oldoptab = sync_old_ior_optab;
5067 newoptab = sync_new_ior_optab;
5068 break;
5069 case BIT_XOR_EXPR:
5070 oldbase = BUILT_IN_SYNC_FETCH_AND_XOR_N;
5071 newbase = BUILT_IN_SYNC_XOR_AND_FETCH_N;
5072 optab = sync_xor_optab;
5073 oldoptab = sync_old_xor_optab;
5074 newoptab = sync_new_xor_optab;
5075 break;
5076 default:
5077 return false;
5079 /* Make sure the expression is of the proper form. */
5080 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5081 rhs = gimple_assign_rhs2 (stmt);
5082 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5083 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5084 rhs = gimple_assign_rhs1 (stmt);
5085 else
5086 return false;
5088 decl = built_in_decls[(need_new ? newbase : oldbase) + index + 1];
5089 if (decl == NULL_TREE)
5090 return false;
5091 itype = TREE_TYPE (TREE_TYPE (decl));
5093 if (need_new)
5095 /* expand_sync_fetch_operation can always compensate when interested
5096 in the new value. */
5097 if (direct_optab_handler (newoptab, TYPE_MODE (itype))
5098 == CODE_FOR_nothing
5099 && direct_optab_handler (oldoptab, TYPE_MODE (itype))
5100 == CODE_FOR_nothing)
5101 return false;
5103 else if (need_old)
5105 /* When interested in the old value, expand_sync_fetch_operation
5106 can compensate only if the operation is reversible. AND and OR
5107 are not reversible. */
5108 if (direct_optab_handler (oldoptab, TYPE_MODE (itype))
5109 == CODE_FOR_nothing
5110 && (oldbase == BUILT_IN_SYNC_FETCH_AND_AND_N
5111 || oldbase == BUILT_IN_SYNC_FETCH_AND_OR_N
5112 || direct_optab_handler (newoptab, TYPE_MODE (itype))
5113 == CODE_FOR_nothing))
5114 return false;
5116 else if (direct_optab_handler (optab, TYPE_MODE (itype)) == CODE_FOR_nothing)
5117 return false;
5119 gsi = gsi_last_bb (load_bb);
5120 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
5121 call = build_call_expr_loc (loc, decl, 2, addr,
5122 fold_convert_loc (loc, itype, rhs));
5123 if (need_old || need_new)
5125 lhs = need_old ? loaded_val : stored_val;
5126 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
5127 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
5129 else
5130 call = fold_convert_loc (loc, void_type_node, call);
5131 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5132 gsi_remove (&gsi, true);
5134 gsi = gsi_last_bb (store_bb);
5135 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5136 gsi_remove (&gsi, true);
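/* After removing the GIMPLE_OMP_ATOMIC_STORE, the last statement left
   in STORE_BB is the assignment computing stored_val; remove it too.  */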
5137 gsi = gsi_last_bb (store_bb);
5138 gsi_remove (&gsi, true);
5140 if (gimple_in_ssa_p (cfun))
5141 update_ssa (TODO_update_ssa_no_phi);
5143 return true;
5146 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5148 oldval = *addr;
5149 repeat:
5150 newval = rhs; // with oldval replacing *addr in rhs
5151 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
5152 if (oldval != newval)
5153 goto repeat;
5155 INDEX is log2 of the size of the data type, and thus usable to find the
5156 index of the builtin decl. */
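/* A rough source-level sketch of the loop built below, assuming a
   4-byte type and the real __sync_val_compare_and_swap builtin;
   to_bits/from_bits stand in for the VIEW_CONVERT_EXPRs and are
   hypothetical helpers, not actual functions:

     unsigned int old = *(unsigned int *) addr, prev;
     do
       {
         prev = old;
         unsigned int newv = to_bits (from_bits (prev) OP rhs);
         old = __sync_val_compare_and_swap ((unsigned int *) addr,
                                            prev, newv);
       }
     while (old != prev);  */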
5158 static bool
5159 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5160 tree addr, tree loaded_val, tree stored_val,
5161 int index)
5163 tree loadedi, storedi, initial, new_storedi, old_vali;
5164 tree type, itype, cmpxchg, iaddr;
5165 gimple_stmt_iterator si;
5166 basic_block loop_header = single_succ (load_bb);
5167 gimple phi, stmt;
5168 edge e;
5170 cmpxchg = built_in_decls[BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N + index + 1];
5171 if (cmpxchg == NULL_TREE)
5172 return false;
5173 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5174 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5176 if (direct_optab_handler (sync_compare_and_swap_optab, TYPE_MODE (itype))
5177 == CODE_FOR_nothing)
5178 return false;
5180 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5181 si = gsi_last_bb (load_bb);
5182 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5184 /* For floating-point values, we'll need to view-convert them to integers
5185 so that we can perform the atomic compare and swap. Simplify the
5186 following code by always setting up the "i"ntegral variables. */
5187 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5189 tree iaddr_val;
5191 iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
5192 true), NULL);
5193 iaddr_val
5194 = force_gimple_operand_gsi (&si,
5195 fold_convert (TREE_TYPE (iaddr), addr),
5196 false, NULL_TREE, true, GSI_SAME_STMT);
5197 stmt = gimple_build_assign (iaddr, iaddr_val);
5198 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5199 loadedi = create_tmp_var (itype, NULL);
5200 if (gimple_in_ssa_p (cfun))
5202 add_referenced_var (iaddr);
5203 add_referenced_var (loadedi);
5204 loadedi = make_ssa_name (loadedi, NULL);
5207 else
5209 iaddr = addr;
5210 loadedi = loaded_val;
5213 initial
5214 = force_gimple_operand_gsi (&si,
5215 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5216 iaddr,
5217 build_int_cst (TREE_TYPE (iaddr), 0)),
5218 true, NULL_TREE, true, GSI_SAME_STMT);
5220 /* Move the value to the LOADEDI temporary. */
5221 if (gimple_in_ssa_p (cfun))
5223 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5224 phi = create_phi_node (loadedi, loop_header);
5225 SSA_NAME_DEF_STMT (loadedi) = phi;
5226 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5227 initial);
5229 else
5230 gsi_insert_before (&si,
5231 gimple_build_assign (loadedi, initial),
5232 GSI_SAME_STMT);
5233 if (loadedi != loaded_val)
5235 gimple_stmt_iterator gsi2;
5236 tree x;
5238 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5239 gsi2 = gsi_start_bb (loop_header);
5240 if (gimple_in_ssa_p (cfun))
5242 gimple stmt;
5243 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5244 true, GSI_SAME_STMT);
5245 stmt = gimple_build_assign (loaded_val, x);
5246 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5248 else
5250 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5251 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5252 true, GSI_SAME_STMT);
5255 gsi_remove (&si, true);
5257 si = gsi_last_bb (store_bb);
5258 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5260 if (iaddr == addr)
5261 storedi = stored_val;
5262 else
5263 storedi =
5264 force_gimple_operand_gsi (&si,
5265 build1 (VIEW_CONVERT_EXPR, itype,
5266 stored_val), true, NULL_TREE, true,
5267 GSI_SAME_STMT);
5269 /* Build the compare&swap statement. */
5270 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5271 new_storedi = force_gimple_operand_gsi (&si,
5272 fold_convert (TREE_TYPE (loadedi),
5273 new_storedi),
5274 true, NULL_TREE,
5275 true, GSI_SAME_STMT);
5277 if (gimple_in_ssa_p (cfun))
5278 old_vali = loadedi;
5279 else
5281 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5282 if (gimple_in_ssa_p (cfun))
5283 add_referenced_var (old_vali);
5284 stmt = gimple_build_assign (old_vali, loadedi);
5285 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5287 stmt = gimple_build_assign (loadedi, new_storedi);
5288 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5291 /* Note that we always perform the comparison as an integer, even for
5292 floating point. This allows the atomic operation to properly
5293 succeed even with NaNs and -0.0. */
5294 stmt = gimple_build_cond_empty
5295 (build2 (NE_EXPR, boolean_type_node,
5296 new_storedi, old_vali));
5297 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5299 /* Update cfg. */
5300 e = single_succ_edge (store_bb);
5301 e->flags &= ~EDGE_FALLTHRU;
5302 e->flags |= EDGE_FALSE_VALUE;
5304 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5306 /* Copy the new value to loadedi (we already did that before the condition
5307 if we are not in SSA). */
5308 if (gimple_in_ssa_p (cfun))
5310 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5311 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5314 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5315 gsi_remove (&si, true);
5317 if (gimple_in_ssa_p (cfun))
5318 update_ssa (TODO_update_ssa_no_phi);
5320 return true;
5323 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5325 GOMP_atomic_start ();
5326 *addr = rhs;
5327 GOMP_atomic_end ();
5329 The result is not globally atomic, but works so long as all parallel
5330 references are within #pragma omp atomic directives. According to
5331 responses received from omp@openmp.org, this appears to be within
5332 spec, which makes sense, since several other compilers handle this
5333 situation the same way.
5334 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5335 expanding. STORED_VAL is the operand of the matching
5336 GIMPLE_OMP_ATOMIC_STORE.
5338 We replace
5339 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5340 loaded_val = *addr;
5342 and replace
5343 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5344 *addr = stored_val;
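/* Source-level illustration: when none of the specialized expansions
   apply, e.g. on a target without compare-and-swap support,

     #pragma omp atomic
     x += 1.0;

   effectively becomes

     GOMP_atomic_start ();
     x = x + 1.0;
     GOMP_atomic_end ();  */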
5347 static bool
5348 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5349 tree addr, tree loaded_val, tree stored_val)
5351 gimple_stmt_iterator si;
5352 gimple stmt;
5353 tree t;
5355 si = gsi_last_bb (load_bb);
5356 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5358 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5359 t = build_call_expr (t, 0);
5360 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5362 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
5363 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5364 gsi_remove (&si, true);
5366 si = gsi_last_bb (store_bb);
5367 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5369 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5370 stored_val);
5371 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5373 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5374 t = build_call_expr (t, 0);
5375 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5376 gsi_remove (&si, true);
5378 if (gimple_in_ssa_p (cfun))
5379 update_ssa (TODO_update_ssa_no_phi);
5380 return true;
5383 /* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand it
5384 using expand_omp_atomic_fetch_op. If that fails, we try to
5385 call expand_omp_atomic_pipeline, and if that fails too, the
5386 ultimate fallback is wrapping the operation in a mutex
5387 (expand_omp_atomic_mutex). REGION is the atomic region built
5388 by build_omp_regions_1(). */
5390 static void
5391 expand_omp_atomic (struct omp_region *region)
5393 basic_block load_bb = region->entry, store_bb = region->exit;
5394 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5395 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5396 tree addr = gimple_omp_atomic_load_rhs (load);
5397 tree stored_val = gimple_omp_atomic_store_val (store);
5398 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5399 HOST_WIDE_INT index;
5401 /* Make sure the type is one of the supported sizes. */
5402 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5403 index = exact_log2 (index);
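/* E.g. a 2-byte short gives index == 1 and an 8-byte long long gives
   index == 3; for sizes that are not powers of two, exact_log2
   returns -1 and we fall through to the mutex fallback below.  */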
5404 if (index >= 0 && index <= 4)
5406 unsigned int align = TYPE_ALIGN_UNIT (type);
5408 /* __sync builtins require strict data alignment. */
5409 if (exact_log2 (align) >= index)
5411 /* Atomic load. FIXME: have some target hook signal which loads
5412 are actually atomic? */
5413 if (loaded_val == stored_val
5414 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5415 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5416 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5417 && expand_omp_atomic_load (load_bb, addr, loaded_val))
5418 return;
5420 /* Atomic store. FIXME: have some target hook signal which
5421 stores are actually atomic? */
5422 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5423 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5424 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5425 && store_bb == single_succ (load_bb)
5426 && first_stmt (store_bb) == store
5427 && expand_omp_atomic_store (load_bb, addr))
5428 return;
5430 /* When possible, use specialized atomic update functions. */
5431 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5432 && store_bb == single_succ (load_bb))
5434 if (expand_omp_atomic_fetch_op (load_bb, addr,
5435 loaded_val, stored_val, index))
5436 return;
5439 /* If we don't have specialized __sync builtins, try to implement
5440 it as a compare-and-swap loop. */
5441 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5442 loaded_val, stored_val, index))
5443 return;
5447 /* The ultimate fallback is wrapping the operation in a mutex. */
5448 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5452 /* Expand the parallel region tree rooted at REGION. Expansion
5453 proceeds in depth-first order. Innermost regions are expanded
5454 first. This way, parallel regions that require a new function to
5455 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5456 internal dependencies in their body. */
5458 static void
5459 expand_omp (struct omp_region *region)
5461 while (region)
5463 location_t saved_location;
5465 /* First, determine whether this is a combined parallel+workshare
5466 region. */
5467 if (region->type == GIMPLE_OMP_PARALLEL)
5468 determine_parallel_type (region);
5470 if (region->inner)
5471 expand_omp (region->inner);
5473 saved_location = input_location;
5474 if (gimple_has_location (last_stmt (region->entry)))
5475 input_location = gimple_location (last_stmt (region->entry));
5477 switch (region->type)
5479 case GIMPLE_OMP_PARALLEL:
5480 case GIMPLE_OMP_TASK:
5481 expand_omp_taskreg (region);
5482 break;
5484 case GIMPLE_OMP_FOR:
5485 expand_omp_for (region);
5486 break;
5488 case GIMPLE_OMP_SECTIONS:
5489 expand_omp_sections (region);
5490 break;
5492 case GIMPLE_OMP_SECTION:
5493 /* Individual omp sections are handled together with their
5494 parent GIMPLE_OMP_SECTIONS region. */
5495 break;
5497 case GIMPLE_OMP_SINGLE:
5498 expand_omp_single (region);
5499 break;
5501 case GIMPLE_OMP_MASTER:
5502 case GIMPLE_OMP_ORDERED:
5503 case GIMPLE_OMP_CRITICAL:
5504 expand_omp_synch (region);
5505 break;
5507 case GIMPLE_OMP_ATOMIC_LOAD:
5508 expand_omp_atomic (region);
5509 break;
5511 default:
5512 gcc_unreachable ();
5515 input_location = saved_location;
5516 region = region->next;
5521 /* Helper for build_omp_regions. Scan the dominator tree starting at
5522 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5523 true, the function ends once a single tree is built (otherwise, a
5524 whole forest of OMP constructs may be built).
5526 static void
5527 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5528 bool single_tree)
5530 gimple_stmt_iterator gsi;
5531 gimple stmt;
5532 basic_block son;
5534 gsi = gsi_last_bb (bb);
5535 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5537 struct omp_region *region;
5538 enum gimple_code code;
5540 stmt = gsi_stmt (gsi);
5541 code = gimple_code (stmt);
5542 if (code == GIMPLE_OMP_RETURN)
5544 /* STMT is the return point out of region PARENT. Mark it
5545 as the exit point and make PARENT the immediately
5546 enclosing region. */
5547 gcc_assert (parent);
5548 region = parent;
5549 region->exit = bb;
5550 parent = parent->outer;
5552 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5554 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5555 GIMPLE_OMP_RETURN, but matches with
5556 GIMPLE_OMP_ATOMIC_LOAD. */
5557 gcc_assert (parent);
5558 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5559 region = parent;
5560 region->exit = bb;
5561 parent = parent->outer;
5564 else if (code == GIMPLE_OMP_CONTINUE)
5566 gcc_assert (parent);
5567 parent->cont = bb;
5569 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5571 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5572 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5575 else
5577 /* Otherwise, this directive becomes the parent for a new
5578 region. */
5579 region = new_omp_region (bb, code, parent);
5580 parent = region;
5584 if (single_tree && !parent)
5585 return;
5587 for (son = first_dom_son (CDI_DOMINATORS, bb);
5588 son;
5589 son = next_dom_son (CDI_DOMINATORS, son))
5590 build_omp_regions_1 (son, parent, single_tree);
5593 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5594 root_omp_region. */
5596 static void
5597 build_omp_regions_root (basic_block root)
5599 gcc_assert (root_omp_region == NULL);
5600 build_omp_regions_1 (root, NULL, true);
5601 gcc_assert (root_omp_region != NULL);
5604 /* Expands the OMP construct (and its subconstructs) starting in HEAD. */
5606 void
5607 omp_expand_local (basic_block head)
5609 build_omp_regions_root (head);
5610 if (dump_file && (dump_flags & TDF_DETAILS))
5612 fprintf (dump_file, "\nOMP region tree\n\n");
5613 dump_omp_region (dump_file, root_omp_region, 0);
5614 fprintf (dump_file, "\n");
5617 remove_exit_barriers (root_omp_region);
5618 expand_omp (root_omp_region);
5620 free_omp_regions ();
5623 /* Scan the CFG and build a tree of OMP regions, storing the root in
5624 root_omp_region. */
5626 static void
5627 build_omp_regions (void)
5629 gcc_assert (root_omp_region == NULL);
5630 calculate_dominance_info (CDI_DOMINATORS);
5631 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5634 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5636 static unsigned int
5637 execute_expand_omp (void)
5639 build_omp_regions ();
5641 if (!root_omp_region)
5642 return 0;
5644 if (dump_file)
5646 fprintf (dump_file, "\nOMP region tree\n\n");
5647 dump_omp_region (dump_file, root_omp_region, 0);
5648 fprintf (dump_file, "\n");
5651 remove_exit_barriers (root_omp_region);
5653 expand_omp (root_omp_region);
5655 cleanup_tree_cfg ();
5657 free_omp_regions ();
5659 return 0;
5662 /* OMP expansion -- the default pass, run before creation of SSA form. */
5664 static bool
5665 gate_expand_omp (void)
5667 return (flag_openmp != 0 && !seen_error ());
5670 struct gimple_opt_pass pass_expand_omp =
5673 GIMPLE_PASS,
5674 "ompexp", /* name */
5675 gate_expand_omp, /* gate */
5676 execute_expand_omp, /* execute */
5677 NULL, /* sub */
5678 NULL, /* next */
5679 0, /* static_pass_number */
5680 TV_NONE, /* tv_id */
5681 PROP_gimple_any, /* properties_required */
5682 0, /* properties_provided */
5683 0, /* properties_destroyed */
5684 0, /* todo_flags_start */
5685 0 /* todo_flags_finish */
/* Routines to lower OpenMP directives into OMP-GIMPLE.  */

/* Lower the OpenMP sections directive in the current statement in GSI_P.
   CTX is the enclosing OMP context for the current statement.  */

static void
lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, control;
  gimple_stmt_iterator tgsi;
  unsigned i, len;
  gimple stmt, new_stmt, bind, t;
  gimple_seq ilist, dlist, olist, new_body, body;
  struct gimplify_ctx gctx;

  stmt = gsi_stmt (*gsi_p);

  push_gimplify_context (&gctx);

  dlist = NULL;
  ilist = NULL;
  lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
			   &ilist, &dlist, ctx);

  tgsi = gsi_start (gimple_omp_body (stmt));
  for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
    continue;

  tgsi = gsi_start (gimple_omp_body (stmt));
  body = NULL;
  for (i = 0; i < len; i++, gsi_next (&tgsi))
    {
      omp_context *sctx;
      gimple sec_start;

      sec_start = gsi_stmt (tgsi);
      sctx = maybe_lookup_ctx (sec_start);
      gcc_assert (sctx);

      gimple_seq_add_stmt (&body, sec_start);

      lower_omp (gimple_omp_body (sec_start), sctx);
      gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
      gimple_omp_set_body (sec_start, NULL);

      if (i == len - 1)
	{
	  gimple_seq l = NULL;
	  lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
				     &l, ctx);
	  gimple_seq_add_seq (&body, l);
	  gimple_omp_section_set_last (sec_start);
	}

      gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
    }

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, body, block);

  olist = NULL;
  lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);

  pop_gimplify_context (new_stmt);
  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  new_body = NULL;
  gimple_seq_add_seq (&new_body, ilist);
  gimple_seq_add_stmt (&new_body, stmt);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
  gimple_seq_add_stmt (&new_body, bind);

  control = create_tmp_var (unsigned_type_node, ".section");
  t = gimple_build_omp_continue (control, control);
  gimple_omp_sections_set_control (stmt, control);
  gimple_seq_add_stmt (&new_body, t);

  gimple_seq_add_seq (&new_body, olist);
  gimple_seq_add_seq (&new_body, dlist);

  new_body = maybe_catch_exception (new_body);

  t = gimple_build_omp_return
        (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
			    OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&new_body, t);

  gimple_bind_set_body (new_stmt, new_body);
  gimple_omp_set_body (stmt, NULL);

  gsi_replace (gsi_p, new_stmt, true);
}

/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, without a copyprivate clause:

	if (GOMP_single_start ())
	  BODY;
	[ GOMP_barrier (); ]	-> unless 'nowait' is present.

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  for a synchronization analysis pass.  */

static void
lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
{
  location_t loc = gimple_location (single_stmt);
  tree tlabel = create_artificial_label (loc);
  tree flabel = create_artificial_label (loc);
  gimple call, cond;
  tree lhs, decl;

  decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
  lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
  call = gimple_build_call (decl, 0);
  gimple_call_set_lhs (call, lhs);
  gimple_seq_add_stmt (pre_p, call);

  cond = gimple_build_cond (EQ_EXPR, lhs,
			    fold_convert_loc (loc, TREE_TYPE (lhs),
					      boolean_true_node),
			    tlabel, flabel);
  gimple_seq_add_stmt (pre_p, cond);
  gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
  gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
}

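/* Illustrative example (editorial sketch; 'init' is a placeholder):
   given

       #pragma omp single
	 x = init ();

   the sequence built above corresponds to

       .tmp = GOMP_single_start ();
       if (.tmp == true) goto tlabel; else goto flabel;
     tlabel:
       x = init ();
     flabel:

   The barrier, unless 'nowait' was given, comes from the
   GIMPLE_OMP_RETURN that lower_omp_single appends afterwards.  */
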
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, with a copyprivate clause:

	#pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

	if ((copyout_p = GOMP_single_copy_start ()) == NULL)
	  {
	    BODY;
	    copyout.a = a;
	    copyout.b = b;
	    copyout.c = c;
	    GOMP_single_copy_end (&copyout);
	  }
	else
	  {
	    a = copyout_p->a;
	    b = copyout_p->b;
	    c = copyout_p->c;
	  }
	GOMP_barrier ();

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  for a synchronization analysis pass.  */

static void
lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
{
  tree ptr_type, t, l0, l1, l2;
  gimple_seq copyin_seq;
  location_t loc = gimple_location (single_stmt);

  ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");

  ptr_type = build_pointer_type (ctx->record_type);
  ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");

  l0 = create_artificial_label (loc);
  l1 = create_artificial_label (loc);
  l2 = create_artificial_label (loc);

  t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
  t = fold_convert_loc (loc, ptr_type, t);
  gimplify_assign (ctx->receiver_decl, t, pre_p);

  t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
	      build_int_cst (ptr_type, 0));
  t = build3 (COND_EXPR, void_type_node, t,
	      build_and_jump (&l0), build_and_jump (&l1));
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l0));

  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));

  copyin_seq = NULL;
  lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
			     &copyin_seq, ctx);

  t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
  t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END],
			   1, t);
  gimplify_and_add (t, pre_p);

  t = build_and_jump (&l2);
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l1));

  gimple_seq_add_seq (pre_p, copyin_seq);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
}

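/* Illustrative example (editorial sketch; 'read_value' is a
   placeholder): with

       int a;
       #pragma omp single copyprivate (a)
	 a = read_value ();

   the thread for which GOMP_single_copy_start () returns NULL runs
   the body, stores its 'a' into the .omp_copy_o record and publishes
   it via GOMP_single_copy_end; every other thread receives the record
   pointer in .omp_copy_i and copies 'a' back out in COPYIN_SEQ.  */
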
/* Expand code for an OpenMP single directive.  */

static void
lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
  gimple_seq bind_body, dlist;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  bind_body = NULL;
  lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
			   &bind_body, &dlist, ctx);
  lower_omp (gimple_omp_body (single_stmt), ctx);

  gimple_seq_add_stmt (&bind_body, single_stmt);

  if (ctx->record_type)
    lower_omp_single_copy (single_stmt, &bind_body, ctx);
  else
    lower_omp_single_simple (single_stmt, &bind_body);

  gimple_omp_set_body (single_stmt, NULL);

  gimple_seq_add_seq (&bind_body, dlist);

  bind_body = maybe_catch_exception (bind_body);

  t = gimple_build_omp_return
        (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
			    OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&bind_body, t);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, bind_body, block);

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  gsi_replace (gsi_p, bind, true);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;
}

/* Expand code for an OpenMP master directive.  */

static void
lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, lab = NULL, x;
  gimple stmt = gsi_stmt (*gsi_p), bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tseq;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
			    block);

  x = build_call_expr_loc (loc, built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
  x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
  x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
  tseq = NULL;
  gimplify_and_add (x, &tseq);
  gimple_bind_add_seq (bind, tseq);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  gimple_bind_add_stmt (bind, gimple_build_label (lab));

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  gsi_replace (gsi_p, bind, true);
}

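/* Illustrative sketch (editorial): the bind built above amounts to

       if (omp_get_thread_num () != 0) goto lab;
       BODY;
     lab:

   i.e. only the team's master thread executes BODY.  The trailing
   GIMPLE_OMP_RETURN is built with the nowait flag set, since the
   master construct implies no barrier.  */
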
/* Expand code for an OpenMP ordered directive.  */

static void
lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple stmt = gsi_stmt (*gsi_p), bind, x;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
			    block);

  x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
  gimple_bind_add_stmt (bind, x);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
  gimple_bind_add_stmt (bind, x);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  gsi_replace (gsi_p, bind, true);
}

/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But the NAMED case
   requires that the front ends coordinate on a symbol name, so it is
   best handled here in common code.  */

static GTY((param1_is (tree), param2_is (tree)))
  splay_tree critical_name_mutexes;

static void
lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  tree name, lock, unlock;
  gimple stmt = gsi_stmt (*gsi_p), bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tbody;
  struct gimplify_ctx gctx;

  name = gimple_omp_critical_name (stmt);
  if (name)
    {
      tree decl;
      splay_tree_node n;

      if (!critical_name_mutexes)
	critical_name_mutexes
	  = splay_tree_new_ggc (splay_tree_compare_pointers,
				ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
				ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);

      n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
      if (n == NULL)
	{
	  char *new_str;

	  decl = create_tmp_var_raw (ptr_type_node, NULL);

	  new_str = ACONCAT ((".gomp_critical_user_",
			      IDENTIFIER_POINTER (name), NULL));
	  DECL_NAME (decl) = get_identifier (new_str);
	  TREE_PUBLIC (decl) = 1;
	  TREE_STATIC (decl) = 1;
	  DECL_COMMON (decl) = 1;
	  DECL_ARTIFICIAL (decl) = 1;
	  DECL_IGNORED_P (decl) = 1;
	  varpool_finalize_decl (decl);

	  splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
			     (splay_tree_value) decl);
	}
      else
	decl = (tree) n->value;

      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
      lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));

      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
      unlock = build_call_expr_loc (loc, unlock, 1,
				    build_fold_addr_expr_loc (loc, decl));
    }
  else
    {
      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
      lock = build_call_expr_loc (loc, lock, 0);

      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
      unlock = build_call_expr_loc (loc, unlock, 0);
    }

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (lock, &tbody);
  gimple_bind_set_body (bind, tbody);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (unlock, &tbody);
  gimple_bind_set_body (bind, tbody);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);
  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  gsi_replace (gsi_p, bind, true);
}

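/* Illustrative sketch (editorial; 'msg' is a placeholder): a named
   critical section

       #pragma omp critical (io)
	 fputs (msg, stderr);

   is lowered to roughly

       GOMP_critical_name_start (&.gomp_critical_user_io);
       fputs (msg, stderr);
       GOMP_critical_name_end (&.gomp_critical_user_io);

   The TREE_PUBLIC + DECL_COMMON mutex variable is what lets separately
   compiled translation units share a single lock per name.  */
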
/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST, iterator initialization is appended to
   *BODY_P.  */

static void
lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
			   gimple_seq *dlist, struct omp_context *ctx)
{
  tree clauses, cond, vinit;
  enum tree_code cond_code;
  gimple_seq stmts;

  cond_code = fd->loop.cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let VRP
     type optimizations deduce the value and remove a copy.  */
  if (host_integerp (fd->loop.step, 0))
    {
      HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
      if (step == 1 || step == -1)
	cond_code = EQ_EXPR;
    }

  cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);

  clauses = gimple_omp_for_clauses (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
  if (!gimple_seq_empty_p (stmts))
    {
      gimple_seq_add_seq (&stmts, *dlist);
      *dlist = stmts;

      /* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
      vinit = fd->loop.n1;
      if (cond_code == EQ_EXPR
	  && host_integerp (fd->loop.n2, 0)
	  && ! integer_zerop (fd->loop.n2))
	vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);

      /* Initialize the iterator variable, so that threads that don't execute
	 any iterations don't execute the lastprivate clauses by accident.  */
      gimplify_assign (fd->loop.v, vinit, body_p);
    }
}

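/* Worked example (editorial sketch; 'f' and 'x_private' are
   placeholders): for

       #pragma omp for lastprivate (x)
       for (i = 0; i < n; i++)
	 x_private = f (i);

   the step is 1, so the copy-out appended to *DLIST is gated as

       if (i == n)
	 x = x_private;

   and the assignment appended to *BODY_P pre-initializes 'i' so that
   a thread which runs no iterations cannot satisfy the predicate by
   accident.  */
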
/* Lower code for an OpenMP loop directive.  */

static void
lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree *rhs_p, block;
  struct omp_for_data fd;
  gimple stmt = gsi_stmt (*gsi_p), new_stmt;
  gimple_seq omp_for_body, body, dlist;
  size_t i;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  lower_omp (gimple_omp_for_pre_body (stmt), ctx);
  lower_omp (gimple_omp_body (stmt), ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);

  /* Move declaration of temporaries in the loop body before we make
     it go away.  */
  omp_for_body = gimple_omp_body (stmt);
  if (!gimple_seq_empty_p (omp_for_body)
      && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
    {
      tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
      gimple_bind_append_vars (new_stmt, vars);
    }

  /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR.  */
  dlist = NULL;
  body = NULL;
  lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
  gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));

  /* Lower the header expressions.  At this point, we can assume that
     the header is of the form:

	#pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)

     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
     using the .omp_data_s mapping, if needed.  */
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      rhs_p = gimple_omp_for_initial_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = gimple_omp_for_final_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);
    }

  /* Once lowered, extract the bounds and clauses.  */
  extract_omp_for_data (stmt, &fd, NULL);

  lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);

  gimple_seq_add_stmt (&body, stmt);
  gimple_seq_add_seq (&body, gimple_omp_body (stmt));

  gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
							 fd.loop.v));

  /* After the loop, add exit clauses.  */
  lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
  gimple_seq_add_seq (&body, dlist);

  body = maybe_catch_exception (body);

  /* Region exit marker goes at the end of the loop body.  */
  gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));

  pop_gimplify_context (new_stmt);

  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  gimple_bind_set_body (new_stmt, body);
  gimple_omp_set_body (stmt, NULL);
  gimple_omp_for_set_pre_body (stmt, NULL);
  gsi_replace (gsi_p, new_stmt, true);
}

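/* Editorial summary of the resulting shape: the GIMPLE_OMP_FOR is now
   bracketed as

       <input/firstprivate setup>	(lower_rec_input_clauses)
       <pre-body>
       GIMPLE_OMP_FOR
	 BODY
       GIMPLE_OMP_CONTINUE (v, v)
       <reduction merges>		(lower_reduction_clauses)
       <lastprivate/destructor cleanups> (DLIST)
       GIMPLE_OMP_RETURN [nowait]

   which is the sequence pass_expand_omp later turns into the actual
   runtime scheduling calls.  */
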
/* Callback for walk_stmts.  Check if the current statement only contains
   a single GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  */

static tree
check_combined_parallel (gimple_stmt_iterator *gsi_p,
			 bool *handled_ops_p,
			 struct walk_stmt_info *wi)
{
  int *info = (int *) wi->info;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
      *info = *info == 0 ? 1 : -1;
      break;
    default:
      *info = -1;
      break;
    }
  return NULL;
}

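/* Editorial note: *INFO ends up as 1 exactly when the parallel body
   contains a single worksharing construct and nothing else, e.g.

       #pragma omp parallel
       #pragma omp for
       for (i = 0; i < n; i++) ...

   lower_omp_taskreg then marks the statement via
   gimple_omp_parallel_set_combined_p, which lets expansion treat the
   pair as a combined "parallel for"/"parallel sections".  */
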
struct omp_taskcopy_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;
  omp_context *ctx;
};

static tree
task_copyfn_copy_decl (tree var, copy_body_data *cb)
{
  struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;

  if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
    return create_tmp_var (TREE_TYPE (var), NULL);

  return var;
}

static tree
task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
{
  tree name, new_fields = NULL, type, f;

  type = lang_hooks.types.make_type (RECORD_TYPE);
  name = DECL_NAME (TYPE_NAME (orig_type));
  name = build_decl (gimple_location (tcctx->ctx->stmt),
		     TYPE_DECL, name, type);
  TYPE_NAME (type) = name;

  for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
    {
      tree new_f = copy_node (f);
      DECL_CONTEXT (new_f) = type;
      TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
      TREE_CHAIN (new_f) = new_fields;
      walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		 &tcctx->cb, NULL);
      new_fields = new_f;
      *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
    }
  TYPE_FIELDS (type) = nreverse (new_fields);
  layout_type (type);
  return type;
}

/* Create the task copyfn: the function GOMP_task invokes to copy
   firstprivate data (and shared-variable pointers) from the sender
   record into the newly allocated task record.  */

static void
create_task_copyfn (gimple task_stmt, omp_context *ctx)
{
  struct function *child_cfun;
  tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
  tree record_type, srecord_type, bind, list;
  bool record_needs_remap = false, srecord_needs_remap = false;
  splay_tree_node n;
  struct omp_taskcopy_context tcctx;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (task_stmt);

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  gcc_assert (child_cfun->cfg == NULL);
  DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();

  /* Reset DECL_CONTEXT on function arguments.  */
  for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = child_fn;

  /* Populate the function.  */
  push_gimplify_context (&gctx);
  current_function_decl = child_fn;

  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  list = NULL;
  DECL_SAVED_TREE (child_fn) = bind;
  DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);

  /* Remap src and dst argument types if needed.  */
  record_type = ctx->record_type;
  srecord_type = ctx->srecord_type;
  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	record_needs_remap = true;
	break;
      }
  for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	srecord_needs_remap = true;
	break;
      }

  if (record_needs_remap || srecord_needs_remap)
    {
      memset (&tcctx, '\0', sizeof (tcctx));
      tcctx.cb.src_fn = ctx->cb.src_fn;
      tcctx.cb.dst_fn = child_fn;
      tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
      gcc_checking_assert (tcctx.cb.src_node);
      tcctx.cb.dst_node = tcctx.cb.src_node;
      tcctx.cb.src_cfun = ctx->cb.src_cfun;
      tcctx.cb.copy_decl = task_copyfn_copy_decl;
      tcctx.cb.eh_lp_nr = 0;
      tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
      tcctx.cb.decl_map = pointer_map_create ();
      tcctx.ctx = ctx;

      if (record_needs_remap)
	record_type = task_copyfn_remap_type (&tcctx, record_type);
      if (srecord_needs_remap)
	srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
    }
  else
    tcctx.cb.decl_map = NULL;

  push_cfun (child_cfun);

  arg = DECL_ARGUMENTS (child_fn);
  TREE_TYPE (arg) = build_pointer_type (record_type);
  sarg = DECL_CHAIN (arg);
  TREE_TYPE (sarg) = build_pointer_type (srecord_type);

  /* First pass: initialize temporaries used in record_type and srecord_type
     sizes and field offsets.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree *p;

	  decl = OMP_CLAUSE_DECL (c);
	  p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
	  if (p == NULL)
	    continue;
	  n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	  sf = (tree) n->value;
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
	  append_to_statement_list (t, &list);
	}

  /* Second pass: copy shared var pointers and copy construct non-VLA
     firstprivate vars.  */
  for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
      case OMP_CLAUSE_SHARED:
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	sf = (tree) n->value;
	if (tcctx.cb.decl_map)
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	src = build_simple_mem_ref_loc (loc, sarg);
	src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_FIRSTPRIVATE:
	decl = OMP_CLAUSE_DECL (c);
	if (is_variable_sized (decl))
	  break;
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	    if (use_pointer_for_field (decl, NULL) || is_reference (decl))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_PRIVATE:
	if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	  break;
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	    if (use_pointer_for_field (decl, NULL))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      default:
	break;
      }

  /* Last pass: handle VLA firstprivates.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree ind, ptr, df;

	  decl = OMP_CLAUSE_DECL (c);
	  if (!is_variable_sized (decl))
	    continue;
	  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	  if (n == NULL)
	    continue;
	  f = (tree) n->value;
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	  gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
	  ind = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
	  gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
	  n = splay_tree_lookup (ctx->sfield_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  sf = (tree) n->value;
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	  src = build_simple_mem_ref_loc (loc, src);
	  dst = build_simple_mem_ref_loc (loc, arg);
	  dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	  t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	  append_to_statement_list (t, &list);
	  n = splay_tree_lookup (ctx->field_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  df = (tree) n->value;
	  df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
	  ptr = build_simple_mem_ref_loc (loc, arg);
	  ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
		      build_fold_addr_expr_loc (loc, dst));
	  append_to_statement_list (t, &list);
	}

  t = build1 (RETURN_EXPR, void_type_node, NULL);
  append_to_statement_list (t, &list);

  if (tcctx.cb.decl_map)
    pointer_map_destroy (tcctx.cb.decl_map);
  pop_gimplify_context (NULL);
  BIND_EXPR_BODY (bind) = list;
  pop_cfun ();
  current_function_decl = ctx->cb.src_fn;
}

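/* Illustrative example (editorial; 'use' is a placeholder): for

       void f (int n)
       {
	 int vla[n];
	 #pragma omp task firstprivate (vla)
	   use (vla);
       }

   a flat memcpy of the task record would not construct 'vla'
   correctly, so the runtime calls the copyfn built here instead: it
   moves each field from the sender record (*SARG) into the task's own
   record (*ARG), remapping the variably sized types and fixing up the
   VLA pointer field afterwards.  */
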
/* Lower the OpenMP parallel or task directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  */

static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple stmt = gsi_stmt (*gsi_p);
  gimple par_bind, bind;
  gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (stmt);

  clauses = gimple_omp_taskreg_clauses (stmt);
  par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
	gimple_omp_parallel_set_combined_p (stmt, true);
    }
  if (ctx->srecord_type)
    create_task_copyfn (stmt, ctx);

  push_gimplify_context (&gctx);

  par_olist = NULL;
  par_ilist = NULL;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl
	= create_tmp_var (ctx->srecord_type ? ctx->srecord_type
			  : ctx->record_type, ".omp_data_o");
      DECL_NAMELESS (ctx->sender_decl) = 1;
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }

  olist = NULL;
  ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;

  if (ctx->record_type)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
			   gimple_build_assign (ctx->receiver_decl, t));
    }

  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
  gimple_omp_set_body (stmt, new_body);

  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gimple_bind_add_stmt (bind, stmt);
  if (ilist || olist)
    {
      gimple_seq_add_stmt (&ilist, bind);
      gimple_seq_add_seq (&ilist, olist);
      bind = gimple_build_bind (NULL, ilist, NULL);
    }

  gsi_replace (gsi_p, bind, true);

  pop_gimplify_context (NULL);
}

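/* Illustrative sketch (editorial): for

       #pragma omp parallel shared (s) firstprivate (x)
	 BODY;

   the sends in ILIST fill the local .omp_data_o record (the address
   of 's', the value of 'x'), the prologue built above loads the
   child-side receiver from that record, and BODY then runs against
   the mapped copies.  The actual outlining into CHILD_FN happens
   later, in pass_expand_omp.  */
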
/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OpenMP context, but with task_shared_vars set.  */

static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
			void *data)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (task_shared_vars
      && DECL_P (t)
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}

static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (seen_error () && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      if ((ctx || task_shared_vars)
	  && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
			 ctx ? NULL : &wi, NULL)
	      || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
			    ctx ? NULL : &wi, NULL)))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler (stmt), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval (stmt), ctx);
      lower_omp (gimple_try_cleanup (stmt), ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body (stmt), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
	  && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
			lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    default:
      if ((ctx || task_shared_vars)
	  && walk_gimple_op (stmt, lower_omp_regimplify_p,
			     ctx ? NULL : &wi))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    }
}

static void
lower_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  input_location = saved_location;
}

/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But there is nothing to do unless -fopenmp is given.  */
  if (flag_openmp == 0)
    return 0;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
				 delete_omp_context);

  body = gimple_body (current_function_decl);
  scan_omp (body, NULL);
  gcc_assert (taskreg_nesting_level == 0);

  if (all_contexts->root)
    {
      struct gimplify_ctx gctx;

      if (task_shared_vars)
	push_gimplify_context (&gctx);
      lower_omp (body, NULL);
      if (task_shared_vars)
	pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}

struct gimple_opt_pass pass_lower_omp =
{
 {
  GIMPLE_PASS,
  "omplower",				/* name */
  NULL,					/* gate */
  execute_lower_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};

/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */

static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
	       gimple branch_ctx, gimple label_ctx)
{
  if (label_ctx == branch_ctx)
    return false;

  /*
     Previously we kept track of the label's entire context in diagnose_sb_[12]
     so we could traverse it and issue a correct "exit" or "enter" error
     message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.
   */

#if 0
  /* Try to avoid confusing the user by producing an error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
	{
	  if (TREE_VALUE (label_ctx) == branch_ctx)
	    {
	      exit_p = false;
	      break;
	    }
	  label_ctx = TREE_CHAIN (label_ctx);
	}
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}

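/* Illustrative example (editorial): code such as

       #pragma omp critical
	 {
	   if (err)
	     goto bail;		/+ branch out of the structured block +/
	 }
     bail: ...

   is rejected here with "invalid branch to/from an OpenMP structured
   block", and the offending branch is replaced by a NOP so later
   passes see consistent IL.  */
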
/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.  */

static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_2, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_COND:
	{
	  tree lab = gimple_cond_true_label (stmt);
	  if (lab)
	    {
	      n = splay_tree_lookup (all_labels,
				     (splay_tree_key) lab);
	      diagnose_sb_0 (gsi_p, context,
			     n ? (gimple) n->value : NULL);
	    }
	  lab = gimple_cond_false_label (stmt);
	  if (lab)
	    {
	      n = splay_tree_lookup (all_labels,
				     (splay_tree_key) lab);
	      diagnose_sb_0 (gsi_p, context,
			     n ? (gimple) n->value : NULL);
	    }
	}
      break;

    case GIMPLE_GOTO:
      {
	tree lab = gimple_goto_dest (stmt);
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
	unsigned int i;
	for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
	  {
	    tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
	      break;
	  }
      }
      break;

    case GIMPLE_RETURN:
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}

static bool
gate_diagnose_omp_blocks (void)
{
  return flag_openmp != 0;
}

struct gimple_opt_pass pass_diagnose_omp_blocks =
{
 {
  GIMPLE_PASS,
  "*diagnose_omp_blocks",		/* name */
  gate_diagnose_omp_blocks,		/* gate */
  diagnose_omp_structured_block_errors,	/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0,					/* todo_flags_finish */
 }
};

#include "gt-omp-low.h"