1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
6 Copyright (C) 2005-2013 Free Software Foundation, Inc.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "gimple.h"
31 #include "tree-iterator.h"
32 #include "tree-inline.h"
33 #include "langhooks.h"
34 #include "diagnostic-core.h"
35 #include "tree-ssa.h"
36 #include "flags.h"
37 #include "function.h"
38 #include "expr.h"
39 #include "tree-pass.h"
40 #include "ggc.h"
41 #include "except.h"
42 #include "splay-tree.h"
43 #include "optabs.h"
44 #include "cfgloop.h"
45 #include "target.h"
48 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
49 phases. The first phase scans the function looking for OMP statements
50 and then for variables that must be replaced to satisfy data sharing
51 clauses. The second phase expands code for the constructs, as well as
52 re-gimplifying things when variables have been replaced with complex
53 expressions.
55 Final code generation is done by pass_expand_omp. The flowgraph is
56 scanned for parallel regions which are then moved to a new
57 function, to be invoked by the thread library. */
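/* For illustration only -- a rough sketch, not the exact code this pass
   emits (the libgomp entry points and argument lists shown here are the
   legacy ones and are actually chosen later, by pass_expand_omp):

	int x = 0;
	#pragma omp parallel shared (x)
	  foo (&x);

   is outlined into a child function, with the shared variable reaching
   it through a compiler-generated record, roughly:

	struct .omp_data_s { int *x; };

	main._omp_fn.0 (struct .omp_data_s *.omp_data_i)
	  foo (.omp_data_i->x);

	.omp_data_o.x = &x;
	GOMP_parallel_start (main._omp_fn.0, &.omp_data_o, 0);
	main._omp_fn.0 (&.omp_data_o);
	GOMP_parallel_end ();  */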
59 /* Context structure. Used to store information about each parallel
60 directive in the code. */
62 typedef struct omp_context
64 /* This field must be at the beginning, as we do "inheritance": Some
65 callback functions for tree-inline.c (e.g., omp_copy_decl)
66 receive a copy_body_data pointer that is up-casted to an
67 omp_context pointer. */
68 copy_body_data cb;
70 /* The tree of contexts corresponding to the encountered constructs. */
71 struct omp_context *outer;
72 gimple stmt;
74 /* Map variables to fields in a structure that allows communication
75 between sending and receiving threads. */
76 splay_tree field_map;
77 tree record_type;
78 tree sender_decl;
79 tree receiver_decl;
81 /* These are used just by task contexts, if task firstprivate fn is
82 needed. srecord_type is used to communicate from the thread
83 that encountered the task construct to task firstprivate fn,
84 record_type is allocated by GOMP_task, initialized by task firstprivate
85 fn and passed to the task body fn. */
86 splay_tree sfield_map;
87 tree srecord_type;
89 /* A chain of variables to add to the top-level block surrounding the
90 construct. In the case of a parallel, this is in the child function. */
91 tree block_vars;
93 /* What to do with variables with implicitly determined sharing
94 attributes. */
95 enum omp_clause_default_kind default_kind;
97 /* Nesting depth of this context. Used to beautify error messages re
98 invalid gotos. The outermost ctx is depth 1, with depth 0 being
99 reserved for the main body of the function. */
100 int depth;
102 /* True if this parallel directive is nested within another. */
103 bool is_nested;
104 } omp_context;
107 struct omp_for_data_loop
109 tree v, n1, n2, step;
110 enum tree_code cond_code;
113 /* A structure describing the main elements of a parallel loop. */
115 struct omp_for_data
117 struct omp_for_data_loop loop;
118 tree chunk_size;
119 gimple for_stmt;
120 tree pre, iter_type;
121 int collapse;
122 bool have_nowait, have_ordered;
123 enum omp_clause_schedule_kind sched_kind;
124 struct omp_for_data_loop *loops;
128 static splay_tree all_contexts;
129 static int taskreg_nesting_level;
130 struct omp_region *root_omp_region;
131 static bitmap task_shared_vars;
133 static void scan_omp (gimple_seq *, omp_context *);
134 static tree scan_omp_1_op (tree *, int *, void *);
136 #define WALK_SUBSTMTS \
137 case GIMPLE_BIND: \
138 case GIMPLE_TRY: \
139 case GIMPLE_CATCH: \
140 case GIMPLE_EH_FILTER: \
141 case GIMPLE_TRANSACTION: \
142 /* The sub-statements for these should be walked. */ \
143 *handled_ops_p = false; \
144 break;
146 /* Convenience function for calling scan_omp_1_op on tree operands. */
148 static inline tree
149 scan_omp_op (tree *tp, omp_context *ctx)
151 struct walk_stmt_info wi;
153 memset (&wi, 0, sizeof (wi));
154 wi.info = ctx;
155 wi.want_locations = true;
157 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
160 static void lower_omp (gimple_seq *, omp_context *);
161 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
162 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
164 /* Find an OpenMP clause of type KIND within CLAUSES. */
166 tree
167 find_omp_clause (tree clauses, enum omp_clause_code kind)
169 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
170 if (OMP_CLAUSE_CODE (clauses) == kind)
171 return clauses;
173 return NULL_TREE;
176 /* Return true if CTX is for an omp parallel. */
178 static inline bool
179 is_parallel_ctx (omp_context *ctx)
181 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
185 /* Return true if CTX is for an omp task. */
187 static inline bool
188 is_task_ctx (omp_context *ctx)
190 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
194 /* Return true if CTX is for an omp parallel or omp task. */
196 static inline bool
197 is_taskreg_ctx (omp_context *ctx)
199 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
200 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
204 /* Return true if REGION is a combined parallel+workshare region. */
206 static inline bool
207 is_combined_parallel (struct omp_region *region)
209 return region->is_combined_parallel;
213 /* Extract the header elements of parallel loop FOR_STMT and store
214 them into *FD. */
216 static void
217 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
218 struct omp_for_data_loop *loops)
220 tree t, var, *collapse_iter, *collapse_count;
221 tree count = NULL_TREE, iter_type = long_integer_type_node;
222 struct omp_for_data_loop *loop;
223 int i;
224 struct omp_for_data_loop dummy_loop;
225 location_t loc = gimple_location (for_stmt);
226 bool simd = gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_SIMD;
228 fd->for_stmt = for_stmt;
229 fd->pre = NULL;
230 fd->collapse = gimple_omp_for_collapse (for_stmt);
231 if (fd->collapse > 1)
232 fd->loops = loops;
233 else
234 fd->loops = &fd->loop;
236 fd->have_nowait = fd->have_ordered = false;
237 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
238 fd->chunk_size = NULL_TREE;
239 collapse_iter = NULL;
240 collapse_count = NULL;
242 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
243 switch (OMP_CLAUSE_CODE (t))
245 case OMP_CLAUSE_NOWAIT:
246 fd->have_nowait = true;
247 break;
248 case OMP_CLAUSE_ORDERED:
249 fd->have_ordered = true;
250 break;
251 case OMP_CLAUSE_SCHEDULE:
252 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
253 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
254 break;
255 case OMP_CLAUSE_COLLAPSE:
256 if (fd->collapse > 1)
258 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
259 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
261 default:
262 break;
265 /* FIXME: for now map schedule(auto) to schedule(static).
266 There should be analysis to determine whether all iterations
267 are approximately the same amount of work (then schedule(static)
268 is best) or if it varies (then schedule(dynamic,N) is better). */
269 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
271 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
272 gcc_assert (fd->chunk_size == NULL);
274 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
275 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
276 gcc_assert (fd->chunk_size == NULL);
277 else if (fd->chunk_size == NULL)
279 /* We only need to compute a default chunk size for ordered
280 static loops and dynamic loops. */
281 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
282 || fd->have_ordered
283 || fd->collapse > 1)
284 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
285 ? integer_zero_node : integer_one_node;
288 for (i = 0; i < fd->collapse; i++)
290 if (fd->collapse == 1)
291 loop = &fd->loop;
292 else if (loops != NULL)
293 loop = loops + i;
294 else
295 loop = &dummy_loop;
298 loop->v = gimple_omp_for_index (for_stmt, i);
299 gcc_assert (SSA_VAR_P (loop->v));
300 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
301 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
302 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
303 loop->n1 = gimple_omp_for_initial (for_stmt, i);
305 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
306 loop->n2 = gimple_omp_for_final (for_stmt, i);
307 switch (loop->cond_code)
309 case LT_EXPR:
310 case GT_EXPR:
311 break;
312 case LE_EXPR:
313 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
314 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
315 else
316 loop->n2 = fold_build2_loc (loc,
317 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
318 build_int_cst (TREE_TYPE (loop->n2), 1));
319 loop->cond_code = LT_EXPR;
320 break;
321 case GE_EXPR:
322 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
323 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
324 else
325 loop->n2 = fold_build2_loc (loc,
326 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
327 build_int_cst (TREE_TYPE (loop->n2), 1));
328 loop->cond_code = GT_EXPR;
329 break;
330 default:
331 gcc_unreachable ();
334 t = gimple_omp_for_incr (for_stmt, i);
335 gcc_assert (TREE_OPERAND (t, 0) == var);
336 switch (TREE_CODE (t))
338 case PLUS_EXPR:
339 loop->step = TREE_OPERAND (t, 1);
340 break;
341 case POINTER_PLUS_EXPR:
342 loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
343 break;
344 case MINUS_EXPR:
345 loop->step = TREE_OPERAND (t, 1);
346 loop->step = fold_build1_loc (loc,
347 NEGATE_EXPR, TREE_TYPE (loop->step),
348 loop->step);
349 break;
350 default:
351 gcc_unreachable ();
354 if (simd)
356 if (fd->collapse == 1)
357 iter_type = TREE_TYPE (loop->v);
358 else if (i == 0
359 || TYPE_PRECISION (iter_type)
360 < TYPE_PRECISION (TREE_TYPE (loop->v)))
361 iter_type
362 = build_nonstandard_integer_type
363 (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
365 else if (iter_type != long_long_unsigned_type_node)
367 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
368 iter_type = long_long_unsigned_type_node;
369 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
370 && TYPE_PRECISION (TREE_TYPE (loop->v))
371 >= TYPE_PRECISION (iter_type))
373 tree n;
375 if (loop->cond_code == LT_EXPR)
376 n = fold_build2_loc (loc,
377 PLUS_EXPR, TREE_TYPE (loop->v),
378 loop->n2, loop->step);
379 else
380 n = loop->n1;
381 if (TREE_CODE (n) != INTEGER_CST
382 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
383 iter_type = long_long_unsigned_type_node;
385 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
386 > TYPE_PRECISION (iter_type))
388 tree n1, n2;
390 if (loop->cond_code == LT_EXPR)
392 n1 = loop->n1;
393 n2 = fold_build2_loc (loc,
394 PLUS_EXPR, TREE_TYPE (loop->v),
395 loop->n2, loop->step);
397 else
399 n1 = fold_build2_loc (loc,
400 MINUS_EXPR, TREE_TYPE (loop->v),
401 loop->n2, loop->step);
402 n2 = loop->n1;
404 if (TREE_CODE (n1) != INTEGER_CST
405 || TREE_CODE (n2) != INTEGER_CST
406 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
407 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
408 iter_type = long_long_unsigned_type_node;
412 if (collapse_count && *collapse_count == NULL)
414 t = fold_binary (loop->cond_code, boolean_type_node,
415 fold_convert (TREE_TYPE (loop->v), loop->n1),
416 fold_convert (TREE_TYPE (loop->v), loop->n2));
417 if (t && integer_zerop (t))
418 count = build_zero_cst (long_long_unsigned_type_node);
419 else if ((i == 0 || count != NULL_TREE)
420 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
421 && TREE_CONSTANT (loop->n1)
422 && TREE_CONSTANT (loop->n2)
423 && TREE_CODE (loop->step) == INTEGER_CST)
425 tree itype = TREE_TYPE (loop->v);
427 if (POINTER_TYPE_P (itype))
428 itype = signed_type_for (itype);
429 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
430 t = fold_build2_loc (loc,
431 PLUS_EXPR, itype,
432 fold_convert_loc (loc, itype, loop->step), t);
433 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
434 fold_convert_loc (loc, itype, loop->n2));
435 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
436 fold_convert_loc (loc, itype, loop->n1));
437 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
438 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
439 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
440 fold_build1_loc (loc, NEGATE_EXPR, itype,
441 fold_convert_loc (loc, itype,
442 loop->step)));
443 else
444 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
445 fold_convert_loc (loc, itype, loop->step));
446 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
447 if (count != NULL_TREE)
448 count = fold_build2_loc (loc,
449 MULT_EXPR, long_long_unsigned_type_node,
450 count, t);
451 else
452 count = t;
453 if (TREE_CODE (count) != INTEGER_CST)
454 count = NULL_TREE;
456 else if (count && !integer_zerop (count))
457 count = NULL_TREE;
461 if (count
462 && !simd)
464 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
465 iter_type = long_long_unsigned_type_node;
466 else
467 iter_type = long_integer_type_node;
469 else if (collapse_iter && *collapse_iter != NULL)
470 iter_type = TREE_TYPE (*collapse_iter);
471 fd->iter_type = iter_type;
472 if (collapse_iter && *collapse_iter == NULL)
473 *collapse_iter = create_tmp_var (iter_type, ".iter");
474 if (collapse_count && *collapse_count == NULL)
476 if (count)
477 *collapse_count = fold_convert_loc (loc, iter_type, count);
478 else
479 *collapse_count = create_tmp_var (iter_type, ".count");
482 if (fd->collapse > 1)
484 fd->loop.v = *collapse_iter;
485 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
486 fd->loop.n2 = *collapse_count;
487 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
488 fd->loop.cond_code = LT_EXPR;
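/* A hedged example of the collapse handling above (illustrative only):

	#pragma omp for collapse (2)
	for (i = 0; i < N; i++)
	  for (j = 0; j < M; j++)
	    body;

   is treated as one logical loop: fd->loop runs a generated variable
   .iter from 0 up to the precomputed trip count .count (roughly N * M,
   folded at compile time when the bounds and steps are constants), with
   step 1 and LT_EXPR as the condition; the original i and j are later
   recomputed from .iter during expansion.  */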
493 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
494 is the immediate dominator of PAR_ENTRY_BB, return true if there
495 are no data dependencies that would prevent expanding the parallel
496 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
498 When expanding a combined parallel+workshare region, the call to
499 the child function may need additional arguments in the case of
500 GIMPLE_OMP_FOR regions. In some cases, these arguments are
501 computed out of variables passed in from the parent to the child
502 via 'struct .omp_data_s'. For instance:
504 #pragma omp parallel for schedule (guided, i * 4)
505 for (j ...)
507 Is lowered into:
509 # BLOCK 2 (PAR_ENTRY_BB)
510 .omp_data_o.i = i;
511 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
513 # BLOCK 3 (WS_ENTRY_BB)
514 .omp_data_i = &.omp_data_o;
515 D.1667 = .omp_data_i->i;
516 D.1598 = D.1667 * 4;
517 #pragma omp for schedule (guided, D.1598)
519 When we outline the parallel region, the call to the child function
520 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
521 that value is computed *after* the call site. So, in principle we
522 cannot do the transformation.
524 To see whether the code in WS_ENTRY_BB blocks the combined
525 parallel+workshare call, we collect all the variables used in the
 526    GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
527 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
528 call.
530 FIXME. If we had the SSA form built at this point, we could merely
531 hoist the code in block 3 into block 2 and be done with it. But at
532 this point we don't have dataflow information and though we could
533 hack something up here, it is really not worth the aggravation. */
535 static bool
536 workshare_safe_to_combine_p (basic_block ws_entry_bb)
538 struct omp_for_data fd;
539 gimple ws_stmt = last_stmt (ws_entry_bb);
541 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
542 return true;
544 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
546 extract_omp_for_data (ws_stmt, &fd, NULL);
548 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
549 return false;
550 if (fd.iter_type != long_integer_type_node)
551 return false;
553 /* FIXME. We give up too easily here. If any of these arguments
554 are not constants, they will likely involve variables that have
555 been mapped into fields of .omp_data_s for sharing with the child
556 function. With appropriate data flow, it would be possible to
557 see through this. */
558 if (!is_gimple_min_invariant (fd.loop.n1)
559 || !is_gimple_min_invariant (fd.loop.n2)
560 || !is_gimple_min_invariant (fd.loop.step)
561 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
562 return false;
564 return true;
568 /* Collect additional arguments needed to emit a combined
569 parallel+workshare call. WS_STMT is the workshare directive being
570 expanded. */
572 static vec<tree, va_gc> *
573 get_ws_args_for (gimple ws_stmt)
575 tree t;
576 location_t loc = gimple_location (ws_stmt);
577 vec<tree, va_gc> *ws_args;
579 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
581 struct omp_for_data fd;
583 extract_omp_for_data (ws_stmt, &fd, NULL);
585 vec_alloc (ws_args, 3 + (fd.chunk_size != 0));
587 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
588 ws_args->quick_push (t);
590 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
591 ws_args->quick_push (t);
593 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
594 ws_args->quick_push (t);
596 if (fd.chunk_size)
598 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
599 ws_args->quick_push (t);
602 return ws_args;
604 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
606 /* Number of sections is equal to the number of edges from the
607 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
608 the exit of the sections region. */
609 basic_block bb = single_succ (gimple_bb (ws_stmt));
610 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
611 vec_alloc (ws_args, 1);
612 ws_args->quick_push (t);
613 return ws_args;
616 gcc_unreachable ();
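/* A sketch of where these arguments end up (assumed shape of the call;
   the actual library call is only built during expansion):

	#pragma omp parallel for schedule (dynamic, 4)
	for (i = lo; i < hi; i++) ...

   becomes, when combined, roughly

	GOMP_parallel_loop_dynamic_start (fn, &.omp_data_o, 0,
					  lo, hi, 1, 4);

   i.e. the bounds, step and chunk size collected above are passed to
   the combined libgomp entry point instead of being recomputed inside
   the child function.  */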
620 /* Discover whether REGION is a combined parallel+workshare region. */
622 static void
623 determine_parallel_type (struct omp_region *region)
625 basic_block par_entry_bb, par_exit_bb;
626 basic_block ws_entry_bb, ws_exit_bb;
628 if (region == NULL || region->inner == NULL
629 || region->exit == NULL || region->inner->exit == NULL
630 || region->inner->cont == NULL)
631 return;
633 /* We only support parallel+for and parallel+sections. */
634 if (region->type != GIMPLE_OMP_PARALLEL
635 || (region->inner->type != GIMPLE_OMP_FOR
636 && region->inner->type != GIMPLE_OMP_SECTIONS))
637 return;
639 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
640 WS_EXIT_BB -> PAR_EXIT_BB. */
641 par_entry_bb = region->entry;
642 par_exit_bb = region->exit;
643 ws_entry_bb = region->inner->entry;
644 ws_exit_bb = region->inner->exit;
646 if (single_succ (par_entry_bb) == ws_entry_bb
647 && single_succ (ws_exit_bb) == par_exit_bb
648 && workshare_safe_to_combine_p (ws_entry_bb)
649 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
650 || (last_and_only_stmt (ws_entry_bb)
651 && last_and_only_stmt (par_exit_bb))))
653 gimple ws_stmt = last_stmt (ws_entry_bb);
655 if (region->inner->type == GIMPLE_OMP_FOR)
657 /* If this is a combined parallel loop, we need to determine
658 whether or not to use the combined library calls. There
659 are two cases where we do not apply the transformation:
660 static loops and any kind of ordered loop. In the first
661 case, we already open code the loop so there is no need
662 to do anything else. In the latter case, the combined
663 parallel loop call would still need extra synchronization
664 to implement ordered semantics, so there would not be any
665 gain in using the combined call. */
666 tree clauses = gimple_omp_for_clauses (ws_stmt);
667 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
668 if (c == NULL
669 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
670 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
672 region->is_combined_parallel = false;
673 region->inner->is_combined_parallel = false;
674 return;
678 region->is_combined_parallel = true;
679 region->inner->is_combined_parallel = true;
680 region->ws_args = get_ws_args_for (ws_stmt);
685 /* Return true if EXPR is variable sized. */
687 static inline bool
688 is_variable_sized (const_tree expr)
690 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
693 /* Return true if DECL is a reference type. */
695 static inline bool
696 is_reference (tree decl)
698 return lang_hooks.decls.omp_privatize_by_reference (decl);
701 /* Lookup variables in the decl or field splay trees. The "maybe" form
 702    allows the variable not to have been entered; otherwise we
 703    assert that the variable must have been entered.  */
705 static inline tree
706 lookup_decl (tree var, omp_context *ctx)
708 tree *n;
709 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
710 return *n;
713 static inline tree
714 maybe_lookup_decl (const_tree var, omp_context *ctx)
716 tree *n;
717 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
718 return n ? *n : NULL_TREE;
721 static inline tree
722 lookup_field (tree var, omp_context *ctx)
724 splay_tree_node n;
725 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
726 return (tree) n->value;
729 static inline tree
730 lookup_sfield (tree var, omp_context *ctx)
732 splay_tree_node n;
733 n = splay_tree_lookup (ctx->sfield_map
734 ? ctx->sfield_map : ctx->field_map,
735 (splay_tree_key) var);
736 return (tree) n->value;
739 static inline tree
740 maybe_lookup_field (tree var, omp_context *ctx)
742 splay_tree_node n;
743 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
744 return n ? (tree) n->value : NULL_TREE;
747 /* Return true if DECL should be copied by pointer. SHARED_CTX is
748 the parallel context if DECL is to be shared. */
750 static bool
751 use_pointer_for_field (tree decl, omp_context *shared_ctx)
753 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
754 return true;
756 /* We can only use copy-in/copy-out semantics for shared variables
757 when we know the value is not accessible from an outer scope. */
758 if (shared_ctx)
760 /* ??? Trivially accessible from anywhere. But why would we even
761 be passing an address in this case? Should we simply assert
762 this to be false, or should we have a cleanup pass that removes
763 these from the list of mappings? */
764 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
765 return true;
767 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
768 without analyzing the expression whether or not its location
769 is accessible to anyone else. In the case of nested parallel
770 regions it certainly may be. */
771 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
772 return true;
774 /* Do not use copy-in/copy-out for variables that have their
775 address taken. */
776 if (TREE_ADDRESSABLE (decl))
777 return true;
779 /* lower_send_shared_vars only uses copy-in, but not copy-out
780 for these. */
781 if (TREE_READONLY (decl)
782 || ((TREE_CODE (decl) == RESULT_DECL
783 || TREE_CODE (decl) == PARM_DECL)
784 && DECL_BY_REFERENCE (decl)))
785 return false;
787 /* Disallow copy-in/out in nested parallel if
788 decl is shared in outer parallel, otherwise
789 each thread could store the shared variable
790 in its own copy-in location, making the
791 variable no longer really shared. */
792 if (shared_ctx->is_nested)
794 omp_context *up;
796 for (up = shared_ctx->outer; up; up = up->outer)
797 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
798 break;
800 if (up)
802 tree c;
804 for (c = gimple_omp_taskreg_clauses (up->stmt);
805 c; c = OMP_CLAUSE_CHAIN (c))
806 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
807 && OMP_CLAUSE_DECL (c) == decl)
808 break;
810 if (c)
811 goto maybe_mark_addressable_and_ret;
 815   /* For tasks, avoid using copy-in/out.  As tasks can be
 816      deferred or executed in a different thread, when GOMP_task
817 returns, the task hasn't necessarily terminated. */
818 if (is_task_ctx (shared_ctx))
820 tree outer;
821 maybe_mark_addressable_and_ret:
822 outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
823 if (is_gimple_reg (outer))
825 /* Taking address of OUTER in lower_send_shared_vars
826 might need regimplification of everything that uses the
827 variable. */
828 if (!task_shared_vars)
829 task_shared_vars = BITMAP_ALLOC (NULL);
830 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
831 TREE_ADDRESSABLE (outer) = 1;
833 return true;
837 return false;
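/* Some illustrative outcomes of the checks above (they follow from the
   code, not from a separate rule; details such as whether an address
   escapes are approximate):

     firstprivate int i                 -> field holds the value
     shared int i, address not taken    -> copy-in/copy-out by value
     shared int i, &i taken             -> field holds a pointer
     shared struct big s                -> field holds a pointer
     shared from a task region          -> field holds a pointer; if the
					   outer var is a gimple register
					   it is marked addressable and
					   noted in task_shared_vars.  */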
840 /* Create a new VAR_DECL and copy information from VAR to it. */
842 tree
843 copy_var_decl (tree var, tree name, tree type)
845 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
847 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
848 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
849 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
850 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
851 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
852 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
853 TREE_NO_WARNING (copy) = TREE_NO_WARNING (var);
854 TREE_USED (copy) = 1;
855 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
857 return copy;
860 /* Construct a new automatic decl similar to VAR. */
862 static tree
863 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
865 tree copy = copy_var_decl (var, name, type);
867 DECL_CONTEXT (copy) = current_function_decl;
868 DECL_CHAIN (copy) = ctx->block_vars;
869 ctx->block_vars = copy;
871 return copy;
874 static tree
875 omp_copy_decl_1 (tree var, omp_context *ctx)
877 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
880 /* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
881 as appropriate. */
882 static tree
883 omp_build_component_ref (tree obj, tree field)
885 tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
886 if (TREE_THIS_VOLATILE (field))
887 TREE_THIS_VOLATILE (ret) |= 1;
888 if (TREE_READONLY (field))
889 TREE_READONLY (ret) |= 1;
890 return ret;
893 /* Build tree nodes to access the field for VAR on the receiver side. */
895 static tree
896 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
898 tree x, field = lookup_field (var, ctx);
900 /* If the receiver record type was remapped in the child function,
901 remap the field into the new record type. */
902 x = maybe_lookup_field (field, ctx);
903 if (x != NULL)
904 field = x;
906 x = build_simple_mem_ref (ctx->receiver_decl);
907 x = omp_build_component_ref (x, field);
908 if (by_ref)
909 x = build_simple_mem_ref (x);
911 return x;
914 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
915 of a parallel, this is a component reference; for workshare constructs
916 this is some variable. */
918 static tree
919 build_outer_var_ref (tree var, omp_context *ctx)
921 tree x;
923 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
924 x = var;
925 else if (is_variable_sized (var))
927 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
928 x = build_outer_var_ref (x, ctx);
929 x = build_simple_mem_ref (x);
931 else if (is_taskreg_ctx (ctx))
933 bool by_ref = use_pointer_for_field (var, NULL);
934 x = build_receiver_ref (var, by_ref, ctx);
936 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
937 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
939 /* #pragma omp simd isn't a worksharing construct, and can reference even
940 private vars in its linear etc. clauses. */
941 x = NULL_TREE;
942 if (ctx->outer && is_taskreg_ctx (ctx))
943 x = lookup_decl (var, ctx->outer);
944 else if (ctx->outer)
945 x = maybe_lookup_decl_in_outer_ctx (var, ctx);
946 if (x == NULL_TREE)
947 x = var;
949 else if (ctx->outer)
950 x = lookup_decl (var, ctx->outer);
951 else if (is_reference (var))
 952     /* This can happen with orphaned constructs.  If var is a reference,
 953        it may be shared and as such valid.  */
954 x = var;
955 else
956 gcc_unreachable ();
958 if (is_reference (var))
959 x = build_simple_mem_ref (x);
961 return x;
964 /* Build tree nodes to access the field for VAR on the sender side. */
966 static tree
967 build_sender_ref (tree var, omp_context *ctx)
969 tree field = lookup_sfield (var, ctx);
970 return omp_build_component_ref (ctx->sender_decl, field);
973 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
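/* MASK selects where the field goes: bit 0 adds a field to
   CTX->RECORD_TYPE (recorded in FIELD_MAP), bit 1 adds one to
   CTX->SRECORD_TYPE (recorded in SFIELD_MAP); a MASK of 3 creates a
   matching field in both.  */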
975 static void
976 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
978 tree field, type, sfield = NULL_TREE;
980 gcc_assert ((mask & 1) == 0
981 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
982 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
983 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
985 type = TREE_TYPE (var);
986 if (by_ref)
987 type = build_pointer_type (type);
988 else if ((mask & 3) == 1 && is_reference (var))
989 type = TREE_TYPE (type);
991 field = build_decl (DECL_SOURCE_LOCATION (var),
992 FIELD_DECL, DECL_NAME (var), type);
994 /* Remember what variable this field was created for. This does have a
995 side effect of making dwarf2out ignore this member, so for helpful
996 debugging we clear it later in delete_omp_context. */
997 DECL_ABSTRACT_ORIGIN (field) = var;
998 if (type == TREE_TYPE (var))
1000 DECL_ALIGN (field) = DECL_ALIGN (var);
1001 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
1002 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
1004 else
1005 DECL_ALIGN (field) = TYPE_ALIGN (type);
1007 if ((mask & 3) == 3)
1009 insert_field_into_struct (ctx->record_type, field);
1010 if (ctx->srecord_type)
1012 sfield = build_decl (DECL_SOURCE_LOCATION (var),
1013 FIELD_DECL, DECL_NAME (var), type);
1014 DECL_ABSTRACT_ORIGIN (sfield) = var;
1015 DECL_ALIGN (sfield) = DECL_ALIGN (field);
1016 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
1017 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
1018 insert_field_into_struct (ctx->srecord_type, sfield);
1021 else
1023 if (ctx->srecord_type == NULL_TREE)
1025 tree t;
1027 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
1028 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1029 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1031 sfield = build_decl (DECL_SOURCE_LOCATION (var),
1032 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
1033 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
1034 insert_field_into_struct (ctx->srecord_type, sfield);
1035 splay_tree_insert (ctx->sfield_map,
1036 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
1037 (splay_tree_value) sfield);
1040 sfield = field;
1041 insert_field_into_struct ((mask & 1) ? ctx->record_type
1042 : ctx->srecord_type, field);
1045 if (mask & 1)
1046 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
1047 (splay_tree_value) field);
1048 if ((mask & 2) && ctx->sfield_map)
1049 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
1050 (splay_tree_value) sfield);
1053 static tree
1054 install_var_local (tree var, omp_context *ctx)
1056 tree new_var = omp_copy_decl_1 (var, ctx);
1057 insert_decl_map (&ctx->cb, var, new_var);
1058 return new_var;
1061 /* Adjust the replacement for DECL in CTX for the new context. This means
1062 copying the DECL_VALUE_EXPR, and fixing up the type. */
1064 static void
1065 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1067 tree new_decl, size;
1069 new_decl = lookup_decl (decl, ctx);
1071 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1073 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1074 && DECL_HAS_VALUE_EXPR_P (decl))
1076 tree ve = DECL_VALUE_EXPR (decl);
1077 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1078 SET_DECL_VALUE_EXPR (new_decl, ve);
1079 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1082 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1084 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1085 if (size == error_mark_node)
1086 size = TYPE_SIZE (TREE_TYPE (new_decl));
1087 DECL_SIZE (new_decl) = size;
1089 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1090 if (size == error_mark_node)
1091 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1092 DECL_SIZE_UNIT (new_decl) = size;
1096 /* The callback for remap_decl. Search all containing contexts for a
1097 mapping of the variable; this avoids having to duplicate the splay
1098 tree ahead of time. We know a mapping doesn't already exist in the
1099 given context. Create new mappings to implement default semantics. */
1101 static tree
1102 omp_copy_decl (tree var, copy_body_data *cb)
1104 omp_context *ctx = (omp_context *) cb;
1105 tree new_var;
1107 if (TREE_CODE (var) == LABEL_DECL)
1109 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1110 DECL_CONTEXT (new_var) = current_function_decl;
1111 insert_decl_map (&ctx->cb, var, new_var);
1112 return new_var;
1115 while (!is_taskreg_ctx (ctx))
1117 ctx = ctx->outer;
1118 if (ctx == NULL)
1119 return var;
1120 new_var = maybe_lookup_decl (var, ctx);
1121 if (new_var)
1122 return new_var;
1125 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1126 return var;
1128 return error_mark_node;
1132 /* Return the parallel region associated with STMT. */
1134 /* Debugging dumps for parallel regions. */
1135 void dump_omp_region (FILE *, struct omp_region *, int);
1136 void debug_omp_region (struct omp_region *);
1137 void debug_all_omp_regions (void);
1139 /* Dump the parallel region tree rooted at REGION. */
1141 void
1142 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1144 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1145 gimple_code_name[region->type]);
1147 if (region->inner)
1148 dump_omp_region (file, region->inner, indent + 4);
1150 if (region->cont)
1152 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1153 region->cont->index);
1156 if (region->exit)
1157 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1158 region->exit->index);
1159 else
1160 fprintf (file, "%*s[no exit marker]\n", indent, "");
1162 if (region->next)
1163 dump_omp_region (file, region->next, indent);
1166 DEBUG_FUNCTION void
1167 debug_omp_region (struct omp_region *region)
1169 dump_omp_region (stderr, region, 0);
1172 DEBUG_FUNCTION void
1173 debug_all_omp_regions (void)
1175 dump_omp_region (stderr, root_omp_region, 0);
1179 /* Create a new parallel region starting at basic block BB inside region PARENT.  */
1181 struct omp_region *
1182 new_omp_region (basic_block bb, enum gimple_code type,
1183 struct omp_region *parent)
1185 struct omp_region *region = XCNEW (struct omp_region);
1187 region->outer = parent;
1188 region->entry = bb;
1189 region->type = type;
1191 if (parent)
1193 /* This is a nested region. Add it to the list of inner
1194 regions in PARENT. */
1195 region->next = parent->inner;
1196 parent->inner = region;
1198 else
1200 /* This is a toplevel region. Add it to the list of toplevel
1201 regions in ROOT_OMP_REGION. */
1202 region->next = root_omp_region;
1203 root_omp_region = region;
1206 return region;
1209 /* Release the memory associated with the region tree rooted at REGION. */
1211 static void
1212 free_omp_region_1 (struct omp_region *region)
1214 struct omp_region *i, *n;
1216 for (i = region->inner; i ; i = n)
1218 n = i->next;
1219 free_omp_region_1 (i);
1222 free (region);
1225 /* Release the memory for the entire omp region tree. */
1227 void
1228 free_omp_regions (void)
1230 struct omp_region *r, *n;
1231 for (r = root_omp_region; r ; r = n)
1233 n = r->next;
1234 free_omp_region_1 (r);
1236 root_omp_region = NULL;
1240 /* Create a new context, with OUTER_CTX being the surrounding context. */
1242 static omp_context *
1243 new_omp_context (gimple stmt, omp_context *outer_ctx)
1245 omp_context *ctx = XCNEW (omp_context);
1247 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1248 (splay_tree_value) ctx);
1249 ctx->stmt = stmt;
1251 if (outer_ctx)
1253 ctx->outer = outer_ctx;
1254 ctx->cb = outer_ctx->cb;
1255 ctx->cb.block = NULL;
1256 ctx->depth = outer_ctx->depth + 1;
1258 else
1260 ctx->cb.src_fn = current_function_decl;
1261 ctx->cb.dst_fn = current_function_decl;
1262 ctx->cb.src_node = cgraph_get_node (current_function_decl);
1263 gcc_checking_assert (ctx->cb.src_node);
1264 ctx->cb.dst_node = ctx->cb.src_node;
1265 ctx->cb.src_cfun = cfun;
1266 ctx->cb.copy_decl = omp_copy_decl;
1267 ctx->cb.eh_lp_nr = 0;
1268 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1269 ctx->depth = 1;
1272 ctx->cb.decl_map = pointer_map_create ();
1274 return ctx;
1277 static gimple_seq maybe_catch_exception (gimple_seq);
1279 /* Finalize task copyfn. */
1281 static void
1282 finalize_task_copyfn (gimple task_stmt)
1284 struct function *child_cfun;
1285 tree child_fn;
1286 gimple_seq seq = NULL, new_seq;
1287 gimple bind;
1289 child_fn = gimple_omp_task_copy_fn (task_stmt);
1290 if (child_fn == NULL_TREE)
1291 return;
1293 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1294 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
1296 push_cfun (child_cfun);
1297 bind = gimplify_body (child_fn, false);
1298 gimple_seq_add_stmt (&seq, bind);
1299 new_seq = maybe_catch_exception (seq);
1300 if (new_seq != seq)
1302 bind = gimple_build_bind (NULL, new_seq, NULL);
1303 seq = NULL;
1304 gimple_seq_add_stmt (&seq, bind);
1306 gimple_set_body (child_fn, seq);
1307 pop_cfun ();
1309 /* Inform the callgraph about the new function. */
1310 cgraph_add_new_function (child_fn, false);
1313 /* Destroy an omp_context data structure.  Called through the splay tree
1314 value delete callback. */
1316 static void
1317 delete_omp_context (splay_tree_value value)
1319 omp_context *ctx = (omp_context *) value;
1321 pointer_map_destroy (ctx->cb.decl_map);
1323 if (ctx->field_map)
1324 splay_tree_delete (ctx->field_map);
1325 if (ctx->sfield_map)
1326 splay_tree_delete (ctx->sfield_map);
1328   /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it now,
1329      otherwise it would produce corrupt debug information.  */
1330 if (ctx->record_type)
1332 tree t;
1333 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1334 DECL_ABSTRACT_ORIGIN (t) = NULL;
1336 if (ctx->srecord_type)
1338 tree t;
1339 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1340 DECL_ABSTRACT_ORIGIN (t) = NULL;
1343 if (is_task_ctx (ctx))
1344 finalize_task_copyfn (ctx->stmt);
1346 XDELETE (ctx);
1349 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1350 context. */
1352 static void
1353 fixup_child_record_type (omp_context *ctx)
1355 tree f, type = ctx->record_type;
1357 /* ??? It isn't sufficient to just call remap_type here, because
1358 variably_modified_type_p doesn't work the way we expect for
1359 record types. Testing each field for whether it needs remapping
1360 and creating a new record by hand works, however. */
1361 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1362 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1363 break;
1364 if (f)
1366 tree name, new_fields = NULL;
1368 type = lang_hooks.types.make_type (RECORD_TYPE);
1369 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1370 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1371 TYPE_DECL, name, type);
1372 TYPE_NAME (type) = name;
1374 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1376 tree new_f = copy_node (f);
1377 DECL_CONTEXT (new_f) = type;
1378 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1379 DECL_CHAIN (new_f) = new_fields;
1380 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1381 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1382 &ctx->cb, NULL);
1383 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1384 &ctx->cb, NULL);
1385 new_fields = new_f;
1387 /* Arrange to be able to look up the receiver field
1388 given the sender field. */
1389 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1390 (splay_tree_value) new_f);
1392 TYPE_FIELDS (type) = nreverse (new_fields);
1393 layout_type (type);
1396 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
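/* A hedged illustration: this matters when a field's type is variably
   modified, e.g. when it involves a VLA bound such as "n".  The loop
   above then rebuilds the field with its type, DECL_SIZE and field
   offset expressions remapped so that they refer to the child
   function's copy of "n" rather than the parent's.  */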
1399 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1400 specified by CLAUSES. */
1402 static void
1403 scan_sharing_clauses (tree clauses, omp_context *ctx)
1405 tree c, decl;
1406 bool scan_array_reductions = false;
1408 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1410 bool by_ref;
1412 switch (OMP_CLAUSE_CODE (c))
1414 case OMP_CLAUSE_PRIVATE:
1415 decl = OMP_CLAUSE_DECL (c);
1416 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1417 goto do_private;
1418 else if (!is_variable_sized (decl))
1419 install_var_local (decl, ctx);
1420 break;
1422 case OMP_CLAUSE_SHARED:
1423 gcc_assert (is_taskreg_ctx (ctx));
1424 decl = OMP_CLAUSE_DECL (c);
1425 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1426 || !is_variable_sized (decl));
1427      /* Global variables don't need to be copied;
1428 the receiver side will use them directly. */
1429 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1430 break;
1431 by_ref = use_pointer_for_field (decl, ctx);
1432 if (! TREE_READONLY (decl)
1433 || TREE_ADDRESSABLE (decl)
1434 || by_ref
1435 || is_reference (decl))
1437 install_var_field (decl, by_ref, 3, ctx);
1438 install_var_local (decl, ctx);
1439 break;
1441 /* We don't need to copy const scalar vars back. */
1442 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1443 goto do_private;
1445 case OMP_CLAUSE_LASTPRIVATE:
1446 /* Let the corresponding firstprivate clause create
1447 the variable. */
1448 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1449 break;
1450 /* FALLTHRU */
1452 case OMP_CLAUSE_FIRSTPRIVATE:
1453 case OMP_CLAUSE_REDUCTION:
1454 case OMP_CLAUSE_LINEAR:
1455 decl = OMP_CLAUSE_DECL (c);
1456 do_private:
1457 if (is_variable_sized (decl))
1459 if (is_task_ctx (ctx))
1460 install_var_field (decl, false, 1, ctx);
1461 break;
1463 else if (is_taskreg_ctx (ctx))
1465 bool global
1466 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1467 by_ref = use_pointer_for_field (decl, NULL);
1469 if (is_task_ctx (ctx)
1470 && (global || by_ref || is_reference (decl)))
1472 install_var_field (decl, false, 1, ctx);
1473 if (!global)
1474 install_var_field (decl, by_ref, 2, ctx);
1476 else if (!global)
1477 install_var_field (decl, by_ref, 3, ctx);
1479 install_var_local (decl, ctx);
1480 break;
1482 case OMP_CLAUSE_COPYPRIVATE:
1483 case OMP_CLAUSE_COPYIN:
1484 decl = OMP_CLAUSE_DECL (c);
1485 by_ref = use_pointer_for_field (decl, NULL);
1486 install_var_field (decl, by_ref, 3, ctx);
1487 break;
1489 case OMP_CLAUSE_DEFAULT:
1490 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1491 break;
1493 case OMP_CLAUSE_FINAL:
1494 case OMP_CLAUSE_IF:
1495 case OMP_CLAUSE_NUM_THREADS:
1496 case OMP_CLAUSE_SCHEDULE:
1497 if (ctx->outer)
1498 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1499 break;
1501 case OMP_CLAUSE_NOWAIT:
1502 case OMP_CLAUSE_ORDERED:
1503 case OMP_CLAUSE_COLLAPSE:
1504 case OMP_CLAUSE_UNTIED:
1505 case OMP_CLAUSE_MERGEABLE:
1506 case OMP_CLAUSE_SAFELEN:
1507 break;
1509 default:
1510 gcc_unreachable ();
1514 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1516 switch (OMP_CLAUSE_CODE (c))
1518 case OMP_CLAUSE_LASTPRIVATE:
1519 /* Let the corresponding firstprivate clause create
1520 the variable. */
1521 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1522 scan_array_reductions = true;
1523 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1524 break;
1525 /* FALLTHRU */
1527 case OMP_CLAUSE_PRIVATE:
1528 case OMP_CLAUSE_FIRSTPRIVATE:
1529 case OMP_CLAUSE_REDUCTION:
1530 case OMP_CLAUSE_LINEAR:
1531 decl = OMP_CLAUSE_DECL (c);
1532 if (is_variable_sized (decl))
1533 install_var_local (decl, ctx);
1534 fixup_remapped_decl (decl, ctx,
1535 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1536 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1537 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1538 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1539 scan_array_reductions = true;
1540 break;
1542 case OMP_CLAUSE_SHARED:
1543 decl = OMP_CLAUSE_DECL (c);
1544 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1545 fixup_remapped_decl (decl, ctx, false);
1546 break;
1548 case OMP_CLAUSE_COPYPRIVATE:
1549 case OMP_CLAUSE_COPYIN:
1550 case OMP_CLAUSE_DEFAULT:
1551 case OMP_CLAUSE_IF:
1552 case OMP_CLAUSE_NUM_THREADS:
1553 case OMP_CLAUSE_SCHEDULE:
1554 case OMP_CLAUSE_NOWAIT:
1555 case OMP_CLAUSE_ORDERED:
1556 case OMP_CLAUSE_COLLAPSE:
1557 case OMP_CLAUSE_UNTIED:
1558 case OMP_CLAUSE_FINAL:
1559 case OMP_CLAUSE_MERGEABLE:
1560 case OMP_CLAUSE_SAFELEN:
1561 break;
1563 default:
1564 gcc_unreachable ();
1568 if (scan_array_reductions)
1569 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1570 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1571 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1573 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1574 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1576 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1577 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1578 scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1581 /* Create a new name for the omp child function.  Returns an identifier.  */
1583 static GTY(()) unsigned int tmp_ompfn_id_num;
1585 static tree
1586 create_omp_child_function_name (bool task_copy)
1588 return (clone_function_name (current_function_decl,
1589 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1592 /* Build a decl for the omp child function. It'll not contain a body
1593 yet, just the bare decl. */
1595 static void
1596 create_omp_child_function (omp_context *ctx, bool task_copy)
1598 tree decl, type, name, t;
1600 name = create_omp_child_function_name (task_copy);
1601 if (task_copy)
1602 type = build_function_type_list (void_type_node, ptr_type_node,
1603 ptr_type_node, NULL_TREE);
1604 else
1605 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1607 decl = build_decl (gimple_location (ctx->stmt),
1608 FUNCTION_DECL, name, type);
1610 if (!task_copy)
1611 ctx->cb.dst_fn = decl;
1612 else
1613 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1615 TREE_STATIC (decl) = 1;
1616 TREE_USED (decl) = 1;
1617 DECL_ARTIFICIAL (decl) = 1;
1618 DECL_NAMELESS (decl) = 1;
1619 DECL_IGNORED_P (decl) = 0;
1620 TREE_PUBLIC (decl) = 0;
1621 DECL_UNINLINABLE (decl) = 1;
1622 DECL_EXTERNAL (decl) = 0;
1623 DECL_CONTEXT (decl) = NULL_TREE;
1624 DECL_INITIAL (decl) = make_node (BLOCK);
1626 t = build_decl (DECL_SOURCE_LOCATION (decl),
1627 RESULT_DECL, NULL_TREE, void_type_node);
1628 DECL_ARTIFICIAL (t) = 1;
1629 DECL_IGNORED_P (t) = 1;
1630 DECL_CONTEXT (t) = decl;
1631 DECL_RESULT (decl) = t;
1633 t = build_decl (DECL_SOURCE_LOCATION (decl),
1634 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1635 DECL_ARTIFICIAL (t) = 1;
1636 DECL_NAMELESS (t) = 1;
1637 DECL_ARG_TYPE (t) = ptr_type_node;
1638 DECL_CONTEXT (t) = current_function_decl;
1639 TREE_USED (t) = 1;
1640 DECL_ARGUMENTS (decl) = t;
1641 if (!task_copy)
1642 ctx->receiver_decl = t;
1643 else
1645 t = build_decl (DECL_SOURCE_LOCATION (decl),
1646 PARM_DECL, get_identifier (".omp_data_o"),
1647 ptr_type_node);
1648 DECL_ARTIFICIAL (t) = 1;
1649 DECL_NAMELESS (t) = 1;
1650 DECL_ARG_TYPE (t) = ptr_type_node;
1651 DECL_CONTEXT (t) = current_function_decl;
1652 TREE_USED (t) = 1;
1653 TREE_ADDRESSABLE (t) = 1;
1654 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1655 DECL_ARGUMENTS (decl) = t;
1658 /* Allocate memory for the function structure. The call to
1659 allocate_struct_function clobbers CFUN, so we need to restore
1660 it afterward. */
1661 push_struct_function (decl);
1662 cfun->function_end_locus = gimple_location (ctx->stmt);
1663 pop_cfun ();
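/* The decl built above therefore describes, roughly (a sketch, not the
   exact generated name):

	static void foo._omp_fn.0 (void *.omp_data_i);

   plus, for a task copy function, an additional ".omp_data_o" pointer
   parameter.  The body is filled in later, when the construct is
   lowered and the region outlined.  */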
1666 /* Scan an OpenMP parallel directive. */
1668 static void
1669 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1671 omp_context *ctx;
1672 tree name;
1673 gimple stmt = gsi_stmt (*gsi);
1675 /* Ignore parallel directives with empty bodies, unless there
1676 are copyin clauses. */
1677 if (optimize > 0
1678 && empty_body_p (gimple_omp_body (stmt))
1679 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1680 OMP_CLAUSE_COPYIN) == NULL)
1682 gsi_replace (gsi, gimple_build_nop (), false);
1683 return;
1686 ctx = new_omp_context (stmt, outer_ctx);
1687 if (taskreg_nesting_level > 1)
1688 ctx->is_nested = true;
1689 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1690 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1691 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1692 name = create_tmp_var_name (".omp_data_s");
1693 name = build_decl (gimple_location (stmt),
1694 TYPE_DECL, name, ctx->record_type);
1695 DECL_ARTIFICIAL (name) = 1;
1696 DECL_NAMELESS (name) = 1;
1697 TYPE_NAME (ctx->record_type) = name;
1698 create_omp_child_function (ctx, false);
1699 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1701 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1702 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1704 if (TYPE_FIELDS (ctx->record_type) == NULL)
1705 ctx->record_type = ctx->receiver_decl = NULL;
1706 else
1708 layout_type (ctx->record_type);
1709 fixup_child_record_type (ctx);
1713 /* Scan an OpenMP task directive. */
1715 static void
1716 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1718 omp_context *ctx;
1719 tree name, t;
1720 gimple stmt = gsi_stmt (*gsi);
1721 location_t loc = gimple_location (stmt);
1723 /* Ignore task directives with empty bodies. */
1724 if (optimize > 0
1725 && empty_body_p (gimple_omp_body (stmt)))
1727 gsi_replace (gsi, gimple_build_nop (), false);
1728 return;
1731 ctx = new_omp_context (stmt, outer_ctx);
1732 if (taskreg_nesting_level > 1)
1733 ctx->is_nested = true;
1734 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1735 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1736 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1737 name = create_tmp_var_name (".omp_data_s");
1738 name = build_decl (gimple_location (stmt),
1739 TYPE_DECL, name, ctx->record_type);
1740 DECL_ARTIFICIAL (name) = 1;
1741 DECL_NAMELESS (name) = 1;
1742 TYPE_NAME (ctx->record_type) = name;
1743 create_omp_child_function (ctx, false);
1744 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1746 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1748 if (ctx->srecord_type)
1750 name = create_tmp_var_name (".omp_data_a");
1751 name = build_decl (gimple_location (stmt),
1752 TYPE_DECL, name, ctx->srecord_type);
1753 DECL_ARTIFICIAL (name) = 1;
1754 DECL_NAMELESS (name) = 1;
1755 TYPE_NAME (ctx->srecord_type) = name;
1756 create_omp_child_function (ctx, true);
1759 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1761 if (TYPE_FIELDS (ctx->record_type) == NULL)
1763 ctx->record_type = ctx->receiver_decl = NULL;
1764 t = build_int_cst (long_integer_type_node, 0);
1765 gimple_omp_task_set_arg_size (stmt, t);
1766 t = build_int_cst (long_integer_type_node, 1);
1767 gimple_omp_task_set_arg_align (stmt, t);
1769 else
1771 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1772 /* Move VLA fields to the end. */
1773 p = &TYPE_FIELDS (ctx->record_type);
1774 while (*p)
1775 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1776 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1778 *q = *p;
1779 *p = TREE_CHAIN (*p);
1780 TREE_CHAIN (*q) = NULL_TREE;
1781 q = &TREE_CHAIN (*q);
1783 else
1784 p = &DECL_CHAIN (*p);
1785 *p = vla_fields;
1786 layout_type (ctx->record_type);
1787 fixup_child_record_type (ctx);
1788 if (ctx->srecord_type)
1789 layout_type (ctx->srecord_type);
1790 t = fold_convert_loc (loc, long_integer_type_node,
1791 TYPE_SIZE_UNIT (ctx->record_type));
1792 gimple_omp_task_set_arg_size (stmt, t);
1793 t = build_int_cst (long_integer_type_node,
1794 TYPE_ALIGN_UNIT (ctx->record_type));
1795 gimple_omp_task_set_arg_align (stmt, t);
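      /* Descriptive note: the size and alignment recorded here (and the
	 0/1 values used above when the record is empty) are what
	 GOMP_task is eventually passed so that it can allocate the
	 ".omp_data_s" block for a deferred task.  */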
1800 /* Scan an OpenMP loop directive. */
1802 static void
1803 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1805 omp_context *ctx;
1806 size_t i;
1808 ctx = new_omp_context (stmt, outer_ctx);
1810 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1812 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
1813 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1815 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1816 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1817 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1818 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1820 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1823 /* Scan an OpenMP sections directive. */
1825 static void
1826 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1828 omp_context *ctx;
1830 ctx = new_omp_context (stmt, outer_ctx);
1831 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1832 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1835 /* Scan an OpenMP single directive. */
1837 static void
1838 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1840 omp_context *ctx;
1841 tree name;
1843 ctx = new_omp_context (stmt, outer_ctx);
1844 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1845 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1846 name = create_tmp_var_name (".omp_copy_s");
1847 name = build_decl (gimple_location (stmt),
1848 TYPE_DECL, name, ctx->record_type);
1849 TYPE_NAME (ctx->record_type) = name;
1851 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1852 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1854 if (TYPE_FIELDS (ctx->record_type) == NULL)
1855 ctx->record_type = NULL;
1856 else
1857 layout_type (ctx->record_type);
1861 /* Check OpenMP nesting restrictions. */
1862 static bool
1863 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1865 if (ctx != NULL)
1867 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
1868 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
1870 error_at (gimple_location (stmt),
1871 "OpenMP constructs may not be nested inside simd region");
1872 return false;
1875 switch (gimple_code (stmt))
1877 case GIMPLE_OMP_FOR:
1878 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_SIMD)
1879 return true;
1880 /* FALLTHRU */
1881 case GIMPLE_OMP_SECTIONS:
1882 case GIMPLE_OMP_SINGLE:
1883 case GIMPLE_CALL:
1884 for (; ctx != NULL; ctx = ctx->outer)
1885 switch (gimple_code (ctx->stmt))
1887 case GIMPLE_OMP_FOR:
1888 case GIMPLE_OMP_SECTIONS:
1889 case GIMPLE_OMP_SINGLE:
1890 case GIMPLE_OMP_ORDERED:
1891 case GIMPLE_OMP_MASTER:
1892 case GIMPLE_OMP_TASK:
1893 if (is_gimple_call (stmt))
1895 error_at (gimple_location (stmt),
1896 "barrier region may not be closely nested inside "
1897 "of work-sharing, critical, ordered, master or "
1898 "explicit task region");
1899 return false;
1901 error_at (gimple_location (stmt),
1902 "work-sharing region may not be closely nested inside "
1903 "of work-sharing, critical, ordered, master or explicit "
1904 "task region");
1905 return false;
1906 case GIMPLE_OMP_PARALLEL:
1907 return true;
1908 default:
1909 break;
1911 break;
1912 case GIMPLE_OMP_MASTER:
1913 for (; ctx != NULL; ctx = ctx->outer)
1914 switch (gimple_code (ctx->stmt))
1916 case GIMPLE_OMP_FOR:
1917 case GIMPLE_OMP_SECTIONS:
1918 case GIMPLE_OMP_SINGLE:
1919 case GIMPLE_OMP_TASK:
1920 error_at (gimple_location (stmt),
1921 "master region may not be closely nested inside "
1922 "of work-sharing or explicit task region");
1923 return false;
1924 case GIMPLE_OMP_PARALLEL:
1925 return true;
1926 default:
1927 break;
1929 break;
1930 case GIMPLE_OMP_ORDERED:
1931 for (; ctx != NULL; ctx = ctx->outer)
1932 switch (gimple_code (ctx->stmt))
1934 case GIMPLE_OMP_CRITICAL:
1935 case GIMPLE_OMP_TASK:
1936 error_at (gimple_location (stmt),
1937 "ordered region may not be closely nested inside "
1938 "of critical or explicit task region");
1939 return false;
1940 case GIMPLE_OMP_FOR:
1941 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1942 OMP_CLAUSE_ORDERED) == NULL)
1944 error_at (gimple_location (stmt),
1945 "ordered region must be closely nested inside "
1946 "a loop region with an ordered clause");
1947 return false;
1949 return true;
1950 case GIMPLE_OMP_PARALLEL:
1951 return true;
1952 default:
1953 break;
1955 break;
1956 case GIMPLE_OMP_CRITICAL:
1957 for (; ctx != NULL; ctx = ctx->outer)
1958 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1959 && (gimple_omp_critical_name (stmt)
1960 == gimple_omp_critical_name (ctx->stmt)))
1962 error_at (gimple_location (stmt),
1963 "critical region may not be nested inside a critical "
1964 "region with the same name");
1965 return false;
1967 break;
1968 default:
1969 break;
1971 return true;
1975 /* Helper function for scan_omp.
1977 Callback for walk_tree or operators in walk_gimple_stmt used to
1978 scan for OpenMP directives in TP. */
1980 static tree
1981 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1983 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1984 omp_context *ctx = (omp_context *) wi->info;
1985 tree t = *tp;
1987 switch (TREE_CODE (t))
1989 case VAR_DECL:
1990 case PARM_DECL:
1991 case LABEL_DECL:
1992 case RESULT_DECL:
1993 if (ctx)
1994 *tp = remap_decl (t, &ctx->cb);
1995 break;
1997 default:
1998 if (ctx && TYPE_P (t))
1999 *tp = remap_type (t, &ctx->cb);
2000 else if (!DECL_P (t))
2002 *walk_subtrees = 1;
2003 if (ctx)
2005 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
2006 if (tem != TREE_TYPE (t))
2008 if (TREE_CODE (t) == INTEGER_CST)
2009 *tp = build_int_cst_wide (tem,
2010 TREE_INT_CST_LOW (t),
2011 TREE_INT_CST_HIGH (t));
2012 else
2013 TREE_TYPE (t) = tem;
2017 break;
2020 return NULL_TREE;
2024 /* Helper function for scan_omp.
2026 Callback for walk_gimple_stmt used to scan for OpenMP directives in
2027 the current statement in GSI. */
2029 static tree
2030 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
2031 struct walk_stmt_info *wi)
2033 gimple stmt = gsi_stmt (*gsi);
2034 omp_context *ctx = (omp_context *) wi->info;
2036 if (gimple_has_location (stmt))
2037 input_location = gimple_location (stmt);
2039 /* Check the OpenMP nesting restrictions. */
2040 if (ctx != NULL)
2042 bool remove = false;
2043 if (is_gimple_omp (stmt))
2044 remove = !check_omp_nesting_restrictions (stmt, ctx);
2045 else if (is_gimple_call (stmt))
2047 tree fndecl = gimple_call_fndecl (stmt);
2048 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
2049 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
2050 remove = !check_omp_nesting_restrictions (stmt, ctx);
2052 if (remove)
2054 stmt = gimple_build_nop ();
2055 gsi_replace (gsi, stmt, false);
2059 *handled_ops_p = true;
2061 switch (gimple_code (stmt))
2063 case GIMPLE_OMP_PARALLEL:
2064 taskreg_nesting_level++;
2065 scan_omp_parallel (gsi, ctx);
2066 taskreg_nesting_level--;
2067 break;
2069 case GIMPLE_OMP_TASK:
2070 taskreg_nesting_level++;
2071 scan_omp_task (gsi, ctx);
2072 taskreg_nesting_level--;
2073 break;
2075 case GIMPLE_OMP_FOR:
2076 scan_omp_for (stmt, ctx);
2077 break;
2079 case GIMPLE_OMP_SECTIONS:
2080 scan_omp_sections (stmt, ctx);
2081 break;
2083 case GIMPLE_OMP_SINGLE:
2084 scan_omp_single (stmt, ctx);
2085 break;
2087 case GIMPLE_OMP_SECTION:
2088 case GIMPLE_OMP_MASTER:
2089 case GIMPLE_OMP_ORDERED:
2090 case GIMPLE_OMP_CRITICAL:
2091 ctx = new_omp_context (stmt, ctx);
2092 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2093 break;
2095 case GIMPLE_BIND:
2097 tree var;
2099 *handled_ops_p = false;
2100 if (ctx)
2101 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2102 insert_decl_map (&ctx->cb, var, var);
2104 break;
2105 default:
2106 *handled_ops_p = false;
2107 break;
2110 return NULL_TREE;
2114 /* Scan all the statements starting at the current statement. CTX
2115 contains context information about the OpenMP directives and
2116 clauses found during the scan. */
2118 static void
2119 scan_omp (gimple_seq *body_p, omp_context *ctx)
2121 location_t saved_location;
2122 struct walk_stmt_info wi;
2124 memset (&wi, 0, sizeof (wi));
2125 wi.info = ctx;
2126 wi.want_locations = true;
2128 saved_location = input_location;
2129 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
2130 input_location = saved_location;
2133 /* Re-gimplification and code generation routines. */
2135 /* Build a call to GOMP_barrier. */
2137 static tree
2138 build_omp_barrier (void)
2140 return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
2143 /* If a context was created for STMT when it was scanned, return it. */
2145 static omp_context *
2146 maybe_lookup_ctx (gimple stmt)
2148 splay_tree_node n;
2149 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2150 return n ? (omp_context *) n->value : NULL;
2154 /* Find the mapping for DECL in CTX or the immediately enclosing
2155 context that has a mapping for DECL.
2157 If CTX is a nested parallel directive, we may have to use the decl
2158 mappings created in CTX's parent context. Suppose that we have the
2159 following parallel nesting (variable UIDs shown for clarity):
2161 iD.1562 = 0;
2162 #omp parallel shared(iD.1562) -> outer parallel
2163 iD.1562 = iD.1562 + 1;
2165 #omp parallel shared (iD.1562) -> inner parallel
2166 iD.1562 = iD.1562 - 1;
2168 Each parallel structure will create a distinct .omp_data_s structure
2169 for copying iD.1562 in/out of the directive:
2171 outer parallel .omp_data_s.1.i -> iD.1562
2172 inner parallel .omp_data_s.2.i -> iD.1562
2174 A shared variable mapping will produce a copy-out operation before
2175 the parallel directive and a copy-in operation after it. So, in
2176 this case we would have:
2178 iD.1562 = 0;
2179 .omp_data_o.1.i = iD.1562;
2180 #omp parallel shared(iD.1562) -> outer parallel
2181 .omp_data_i.1 = &.omp_data_o.1
2182 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2184 .omp_data_o.2.i = iD.1562; -> **
2185 #omp parallel shared(iD.1562) -> inner parallel
2186 .omp_data_i.2 = &.omp_data_o.2
2187 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2190 ** This is a problem. The symbol iD.1562 cannot be referenced
2191 inside the body of the outer parallel region. But since we are
2192 emitting this copy operation while expanding the inner parallel
2193 directive, we need to access the CTX structure of the outer
2194 parallel directive to get the correct mapping:
2196 .omp_data_o.2.i = .omp_data_i.1->i
2198 Since there may be other workshare or parallel directives enclosing
2199 the parallel directive, it may be necessary to walk up the context
2200 parent chain. This is not a problem in general because nested
2201 parallelism happens only rarely. */
2203 static tree
2204 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2206 tree t;
2207 omp_context *up;
2209 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2210 t = maybe_lookup_decl (decl, up);
2212 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2214 return t ? t : decl;
2218 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2219 in outer contexts. */
2221 static tree
2222 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2224 tree t = NULL;
2225 omp_context *up;
2227 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2228 t = maybe_lookup_decl (decl, up);
2230 return t ? t : decl;
2234 /* Construct the initialization value for reduction CLAUSE. */
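/* As an informal illustration of the cases handled below: the private
   copy for reduction(+:x) or reduction(|:x) starts at 0, for
   reduction(*:x) or reduction(&&:x) at 1, for reduction(&:x) at ~0
   (all bits set), and for reduction(max:x) / reduction(min:x) at the
   minimum / maximum value representable in the type (or -Inf / +Inf
   for floating types that honor infinities).  */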
2236 tree
2237 omp_reduction_init (tree clause, tree type)
2239 location_t loc = OMP_CLAUSE_LOCATION (clause);
2240 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2242 case PLUS_EXPR:
2243 case MINUS_EXPR:
2244 case BIT_IOR_EXPR:
2245 case BIT_XOR_EXPR:
2246 case TRUTH_OR_EXPR:
2247 case TRUTH_ORIF_EXPR:
2248 case TRUTH_XOR_EXPR:
2249 case NE_EXPR:
2250 return build_zero_cst (type);
2252 case MULT_EXPR:
2253 case TRUTH_AND_EXPR:
2254 case TRUTH_ANDIF_EXPR:
2255 case EQ_EXPR:
2256 return fold_convert_loc (loc, type, integer_one_node);
2258 case BIT_AND_EXPR:
2259 return fold_convert_loc (loc, type, integer_minus_one_node);
2261 case MAX_EXPR:
2262 if (SCALAR_FLOAT_TYPE_P (type))
2264 REAL_VALUE_TYPE max, min;
2265 if (HONOR_INFINITIES (TYPE_MODE (type)))
2267 real_inf (&max);
2268 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2270 else
2271 real_maxval (&min, 1, TYPE_MODE (type));
2272 return build_real (type, min);
2274 else
2276 gcc_assert (INTEGRAL_TYPE_P (type));
2277 return TYPE_MIN_VALUE (type);
2280 case MIN_EXPR:
2281 if (SCALAR_FLOAT_TYPE_P (type))
2283 REAL_VALUE_TYPE max;
2284 if (HONOR_INFINITIES (TYPE_MODE (type)))
2285 real_inf (&max);
2286 else
2287 real_maxval (&max, 0, TYPE_MODE (type));
2288 return build_real (type, max);
2290 else
2292 gcc_assert (INTEGRAL_TYPE_P (type));
2293 return TYPE_MAX_VALUE (type);
2296 default:
2297 gcc_unreachable ();
2301 /* Return maximum possible vectorization factor for the target. */
2303 static int
2304 omp_max_vf (void)
2306 if (!optimize
2307 || optimize_debug
2308 || (!flag_tree_loop_vectorize
2309 && (global_options_set.x_flag_tree_loop_vectorize
2310 || global_options_set.x_flag_tree_vectorize)))
2311 return 1;
2313 int vs = targetm.vectorize.autovectorize_vector_sizes ();
2314 if (vs)
2316 vs = 1 << floor_log2 (vs);
2317 return vs;
2319 enum machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
2320 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
2321 return GET_MODE_NUNITS (vqimode);
2322 return 1;
2325 /* Helper function of lower_rec_input_clauses, used for #pragma omp simd
2326 privatization. */
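/* Rough sketch of the transformation, with assumed names for
   illustration only: a scalar  T priv;  privatized in a simd loop is
   given a backing array

       T priv_arr[max_vf] __attribute__((omp simd array));

   references to priv are redirected through DECL_VALUE_EXPR to
   priv_arr[lane], while per-iteration accesses use priv_arr[idx];
   IDX and LANE are the temporaries created below.  */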
2328 static bool
2329 lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
2330 tree &idx, tree &lane, tree &ivar, tree &lvar)
2332 if (max_vf == 0)
2334 max_vf = omp_max_vf ();
2335 if (max_vf > 1)
2337 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2338 OMP_CLAUSE_SAFELEN);
2339 if (c
2340 && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c), max_vf) == -1)
2341 max_vf = tree_low_cst (OMP_CLAUSE_SAFELEN_EXPR (c), 0);
2343 if (max_vf > 1)
2345 idx = create_tmp_var (unsigned_type_node, NULL);
2346 lane = create_tmp_var (unsigned_type_node, NULL);
2349 if (max_vf == 1)
2350 return false;
2352 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
2353 tree avar = create_tmp_var_raw (atype, NULL);
2354 if (TREE_ADDRESSABLE (new_var))
2355 TREE_ADDRESSABLE (avar) = 1;
2356 DECL_ATTRIBUTES (avar)
2357 = tree_cons (get_identifier ("omp simd array"), NULL,
2358 DECL_ATTRIBUTES (avar));
2359 gimple_add_tmp_var (avar);
2360 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
2361 NULL_TREE, NULL_TREE);
2362 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
2363 NULL_TREE, NULL_TREE);
2364 SET_DECL_VALUE_EXPR (new_var, lvar);
2365 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2366 return true;
2369 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2370 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2371 private variables. Initialization statements go in ILIST, while calls
2372 to destructors go in DLIST. */
2374 static void
2375 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2376 omp_context *ctx)
2378 tree c, dtor, copyin_seq, x, ptr;
2379 bool copyin_by_ref = false;
2380 bool lastprivate_firstprivate = false;
2381 int pass;
2382 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2383 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD);
2384 int max_vf = 0;
2385 tree lane = NULL_TREE, idx = NULL_TREE;
2386 tree ivar = NULL_TREE, lvar = NULL_TREE;
2387 gimple_seq llist[2] = { NULL, NULL };
2389 copyin_seq = NULL;
2391 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
2392 with data sharing clauses referencing variable sized vars. That
2393 is unnecessarily hard to support and very unlikely to result in
2394 vectorized code anyway. */
2395 if (is_simd)
2396 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2397 switch (OMP_CLAUSE_CODE (c))
2399 case OMP_CLAUSE_REDUCTION:
2400 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2401 max_vf = 1;
2402 /* FALLTHRU */
2403 case OMP_CLAUSE_PRIVATE:
2404 case OMP_CLAUSE_FIRSTPRIVATE:
2405 case OMP_CLAUSE_LASTPRIVATE:
2406 case OMP_CLAUSE_LINEAR:
2407 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
2408 max_vf = 1;
2409 break;
2410 default:
2411 continue;
2414 /* Do all the fixed sized types in the first pass, and the variable sized
2415 types in the second pass. This makes sure that the scalar arguments to
2416 the variable sized types are processed before we use them in the
2417 variable sized operations. */
2418 for (pass = 0; pass < 2; ++pass)
2420 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2422 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2423 tree var, new_var;
2424 bool by_ref;
2425 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2427 switch (c_kind)
2429 case OMP_CLAUSE_PRIVATE:
2430 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2431 continue;
2432 break;
2433 case OMP_CLAUSE_SHARED:
2434 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2436 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2437 continue;
2439 case OMP_CLAUSE_FIRSTPRIVATE:
2440 case OMP_CLAUSE_COPYIN:
2441 case OMP_CLAUSE_REDUCTION:
2442 break;
2443 case OMP_CLAUSE_LINEAR:
2444 break;
2445 case OMP_CLAUSE_LASTPRIVATE:
2446 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2448 lastprivate_firstprivate = true;
2449 if (pass != 0)
2450 continue;
2452 break;
2453 default:
2454 continue;
2457 new_var = var = OMP_CLAUSE_DECL (c);
2458 if (c_kind != OMP_CLAUSE_COPYIN)
2459 new_var = lookup_decl (var, ctx);
2461 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2463 if (pass != 0)
2464 continue;
2466 else if (is_variable_sized (var))
2468 /* For variable sized types, we need to allocate the
2469 actual storage here. Call alloca and store the
2470 result in the pointer decl that we created elsewhere. */
2471 if (pass == 0)
2472 continue;
2474 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2476 gimple stmt;
2477 tree tmp, atmp;
2479 ptr = DECL_VALUE_EXPR (new_var);
2480 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2481 ptr = TREE_OPERAND (ptr, 0);
2482 gcc_assert (DECL_P (ptr));
2483 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2485 /* void *tmp = __builtin_alloca (x); */
2486 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2487 stmt = gimple_build_call (atmp, 1, x);
2488 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2489 gimple_add_tmp_var (tmp);
2490 gimple_call_set_lhs (stmt, tmp);
2492 gimple_seq_add_stmt (ilist, stmt);
2494 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2495 gimplify_assign (ptr, x, ilist);
2498 else if (is_reference (var))
2500 /* For references that are being privatized for Fortran,
2501 allocate new backing storage for the new pointer
2502 variable. This allows us to avoid changing all the
2503 code that expects a pointer to something that expects
2504 a direct variable. Note that this doesn't apply to
2505 C++, since reference types are disallowed in data
2506 sharing clauses there, except for NRV optimized
2507 return values. */
2508 if (pass == 0)
2509 continue;
2511 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2512 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2514 x = build_receiver_ref (var, false, ctx);
2515 x = build_fold_addr_expr_loc (clause_loc, x);
2517 else if (TREE_CONSTANT (x))
2519 const char *name = NULL;
2520 if (DECL_NAME (var))
2521 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2523 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2524 name);
2525 gimple_add_tmp_var (x);
2526 TREE_ADDRESSABLE (x) = 1;
2527 x = build_fold_addr_expr_loc (clause_loc, x);
2529 else
2531 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2532 x = build_call_expr_loc (clause_loc, atmp, 1, x);
2535 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2536 gimplify_assign (new_var, x, ilist);
2538 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2540 else if (c_kind == OMP_CLAUSE_REDUCTION
2541 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2543 if (pass == 0)
2544 continue;
2546 else if (pass != 0)
2547 continue;
2549 switch (OMP_CLAUSE_CODE (c))
2551 case OMP_CLAUSE_SHARED:
2552 /* Shared global vars are just accessed directly. */
2553 if (is_global_var (new_var))
2554 break;
2555 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2556 needs to be delayed until after fixup_child_record_type so
2557 that we get the correct type during the dereference. */
2558 by_ref = use_pointer_for_field (var, ctx);
2559 x = build_receiver_ref (var, by_ref, ctx);
2560 SET_DECL_VALUE_EXPR (new_var, x);
2561 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2563 /* ??? If VAR is not passed by reference, and the variable
2564 hasn't been initialized yet, then we'll get a warning for
2565 the store into the omp_data_s structure. Ideally, we'd be
2566 able to notice this and not store anything at all, but
2567 we're generating code too early. Suppress the warning. */
2568 if (!by_ref)
2569 TREE_NO_WARNING (var) = 1;
2570 break;
2572 case OMP_CLAUSE_LASTPRIVATE:
2573 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2574 break;
2575 /* FALLTHRU */
2577 case OMP_CLAUSE_PRIVATE:
2578 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2579 x = build_outer_var_ref (var, ctx);
2580 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2582 if (is_task_ctx (ctx))
2583 x = build_receiver_ref (var, false, ctx);
2584 else
2585 x = build_outer_var_ref (var, ctx);
2587 else
2588 x = NULL;
2589 do_private:
2590 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2591 if (is_simd)
2593 tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
2594 if ((TREE_ADDRESSABLE (new_var) || x || y
2595 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2596 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
2597 idx, lane, ivar, lvar))
2599 if (x)
2600 x = lang_hooks.decls.omp_clause_default_ctor
2601 (c, unshare_expr (ivar), x);
2602 if (x)
2603 gimplify_and_add (x, &llist[0]);
2604 if (y)
2606 y = lang_hooks.decls.omp_clause_dtor (c, ivar);
2607 if (y)
2609 gimple_seq tseq = NULL;
2611 dtor = y;
2612 gimplify_stmt (&dtor, &tseq);
2613 gimple_seq_add_seq (&llist[1], tseq);
2616 break;
2619 if (x)
2620 gimplify_and_add (x, ilist);
2621 /* FALLTHRU */
2623 do_dtor:
2624 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2625 if (x)
2627 gimple_seq tseq = NULL;
2629 dtor = x;
2630 gimplify_stmt (&dtor, &tseq);
2631 gimple_seq_add_seq (dlist, tseq);
2633 break;
2635 case OMP_CLAUSE_LINEAR:
2636 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
2637 goto do_firstprivate;
2638 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
2639 x = NULL;
2640 else
2641 x = build_outer_var_ref (var, ctx);
2642 goto do_private;
2644 case OMP_CLAUSE_FIRSTPRIVATE:
2645 if (is_task_ctx (ctx))
2647 if (is_reference (var) || is_variable_sized (var))
2648 goto do_dtor;
2649 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2650 ctx))
2651 || use_pointer_for_field (var, NULL))
2653 x = build_receiver_ref (var, false, ctx);
2654 SET_DECL_VALUE_EXPR (new_var, x);
2655 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2656 goto do_dtor;
2659 do_firstprivate:
2660 x = build_outer_var_ref (var, ctx);
2661 if (is_simd)
2663 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
2664 || TREE_ADDRESSABLE (new_var))
2665 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
2666 idx, lane, ivar, lvar))
2668 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
2670 tree iv = create_tmp_var (TREE_TYPE (new_var), NULL);
2671 x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
2672 gimplify_and_add (x, ilist);
2673 gimple_stmt_iterator gsi
2674 = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
2675 gimple g
2676 = gimple_build_assign (unshare_expr (lvar), iv);
2677 gsi_insert_before_without_update (&gsi, g,
2678 GSI_SAME_STMT);
2679 tree stept = POINTER_TYPE_P (TREE_TYPE (x))
2680 ? sizetype : TREE_TYPE (x);
2681 tree t = fold_convert (stept,
2682 OMP_CLAUSE_LINEAR_STEP (c));
2683 enum tree_code code = PLUS_EXPR;
2684 if (POINTER_TYPE_P (TREE_TYPE (new_var)))
2685 code = POINTER_PLUS_EXPR;
2686 g = gimple_build_assign_with_ops (code, iv, iv, t);
2687 gsi_insert_before_without_update (&gsi, g,
2688 GSI_SAME_STMT);
2689 break;
2691 x = lang_hooks.decls.omp_clause_copy_ctor
2692 (c, unshare_expr (ivar), x);
2693 gimplify_and_add (x, &llist[0]);
2694 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
2695 if (x)
2697 gimple_seq tseq = NULL;
2699 dtor = x;
2700 gimplify_stmt (&dtor, &tseq);
2701 gimple_seq_add_seq (&llist[1], tseq);
2703 break;
2706 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2707 gimplify_and_add (x, ilist);
2708 goto do_dtor;
2710 case OMP_CLAUSE_COPYIN:
2711 by_ref = use_pointer_for_field (var, NULL);
2712 x = build_receiver_ref (var, by_ref, ctx);
2713 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2714 append_to_statement_list (x, &copyin_seq);
2715 copyin_by_ref |= by_ref;
2716 break;
2718 case OMP_CLAUSE_REDUCTION:
2719 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2721 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2722 x = build_outer_var_ref (var, ctx);
2724 /* FIXME: Not handled yet. */
2725 gcc_assert (!is_simd);
2726 if (is_reference (var))
2727 x = build_fold_addr_expr_loc (clause_loc, x);
2728 SET_DECL_VALUE_EXPR (placeholder, x);
2729 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2730 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2731 gimple_seq_add_seq (ilist,
2732 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2733 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2734 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2736 else
2738 x = omp_reduction_init (c, TREE_TYPE (new_var));
2739 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2740 if (is_simd
2741 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
2742 idx, lane, ivar, lvar))
2744 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
2745 tree ref = build_outer_var_ref (var, ctx);
2747 gimplify_assign (unshare_expr (ivar), x, &llist[0]);
2749 /* reduction(-:var) sums up the partial results, so it
2750 acts identically to reduction(+:var). */
2751 if (code == MINUS_EXPR)
2752 code = PLUS_EXPR;
2754 x = build2 (code, TREE_TYPE (ref), ref, ivar);
2755 ref = build_outer_var_ref (var, ctx);
2756 gimplify_assign (ref, x, &llist[1]);
2758 else
2760 gimplify_assign (new_var, x, ilist);
2761 if (is_simd)
2762 gimplify_assign (build_outer_var_ref (var, ctx),
2763 new_var, dlist);
2766 break;
2768 default:
2769 gcc_unreachable ();
2774 if (lane)
2776 tree uid = create_tmp_var (ptr_type_node, "simduid");
2777 gimple g
2778 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
2779 gimple_call_set_lhs (g, lane);
2780 gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
2781 gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
2782 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
2783 OMP_CLAUSE__SIMDUID__DECL (c) = uid;
2784 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
2785 gimple_omp_for_set_clauses (ctx->stmt, c);
2786 g = gimple_build_assign_with_ops (INTEGER_CST, lane,
2787 build_int_cst (unsigned_type_node, 0),
2788 NULL_TREE);
2789 gimple_seq_add_stmt (ilist, g);
2790 for (int i = 0; i < 2; i++)
2791 if (llist[i])
2793 tree vf = create_tmp_var (unsigned_type_node, NULL);
2794 g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
2795 gimple_call_set_lhs (g, vf);
2796 gimple_seq *seq = i == 0 ? ilist : dlist;
2797 gimple_seq_add_stmt (seq, g);
2798 tree t = build_int_cst (unsigned_type_node, 0);
2799 g = gimple_build_assign_with_ops (INTEGER_CST, idx, t, NULL_TREE);
2800 gimple_seq_add_stmt (seq, g);
2801 tree body = create_artificial_label (UNKNOWN_LOCATION);
2802 tree header = create_artificial_label (UNKNOWN_LOCATION);
2803 tree end = create_artificial_label (UNKNOWN_LOCATION);
2804 gimple_seq_add_stmt (seq, gimple_build_goto (header));
2805 gimple_seq_add_stmt (seq, gimple_build_label (body));
2806 gimple_seq_add_seq (seq, llist[i]);
2807 t = build_int_cst (unsigned_type_node, 1);
2808 g = gimple_build_assign_with_ops (PLUS_EXPR, idx, idx, t);
2809 gimple_seq_add_stmt (seq, g);
2810 gimple_seq_add_stmt (seq, gimple_build_label (header));
2811 g = gimple_build_cond (LT_EXPR, idx, vf, body, end);
2812 gimple_seq_add_stmt (seq, g);
2813 gimple_seq_add_stmt (seq, gimple_build_label (end));
2817 /* The copyin sequence is not to be executed by the main thread, since
2818 that would result in self-copies. That may be harmless for scalars,
2819 but it certainly is not for C++ operator=. */
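/* In rough terms (illustrative only), the guard emitted below is

       if (__builtin_omp_get_thread_num () != 0)
         <copyin_seq>;

   so the master thread, whose threadprivate copies are the source of
   the copyin, skips the assignments.  */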
2820 if (copyin_seq)
2822 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
2824 x = build2 (NE_EXPR, boolean_type_node, x,
2825 build_int_cst (TREE_TYPE (x), 0));
2826 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2827 gimplify_and_add (x, ilist);
2830 /* If any copyin variable is passed by reference, we must ensure the
2831 master thread doesn't modify it before it is copied over in all
2832 threads. Similarly for variables in both firstprivate and
2833 lastprivate clauses we need to ensure the lastprivate copying
2834 happens after firstprivate copying in all threads. */
2835 if (copyin_by_ref || lastprivate_firstprivate)
2837 /* Don't add any barrier for #pragma omp simd or
2838 #pragma omp distribute. */
2839 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
2840 || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
2841 gimplify_and_add (build_omp_barrier (), ilist);
2844 /* If max_vf is non-zero, then we can use only a vectorization factor
2845 up to the max_vf we chose. So stick it into the safelen clause. */
2846 if (max_vf)
2848 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2849 OMP_CLAUSE_SAFELEN);
2850 if (c == NULL_TREE
2851 || compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
2852 max_vf) == 1)
2854 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
2855 OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
2856 max_vf);
2857 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
2858 gimple_omp_for_set_clauses (ctx->stmt, c);
2864 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2865 both parallel and workshare constructs. PREDICATE may be NULL if it's
2866 always true. */
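/* Informally (assumed names, for illustration): for lastprivate(x)
   the generated code has the shape

       if (<predicate>)
         x_orig = x_priv;

   where the predicate, when present, selects the thread that executed
   the sequentially last iteration (or the last section).  */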
2868 static void
2869 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2870 omp_context *ctx)
2872 tree x, c, label = NULL, orig_clauses = clauses;
2873 bool par_clauses = false;
2874 tree simduid = NULL, lastlane = NULL;
2876 /* Early exit if there are no lastprivate or linear clauses. */
2877 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
2878 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
2879 || (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
2880 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
2881 break;
2882 if (clauses == NULL)
2884 /* If this was a workshare construct, see if it had been combined
2885 with its parallel. In that case, look for the clauses on the
2886 parallel statement itself. */
2887 if (is_parallel_ctx (ctx))
2888 return;
2890 ctx = ctx->outer;
2891 if (ctx == NULL || !is_parallel_ctx (ctx))
2892 return;
2894 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2895 OMP_CLAUSE_LASTPRIVATE);
2896 if (clauses == NULL)
2897 return;
2898 par_clauses = true;
2901 if (predicate)
2903 gimple stmt;
2904 tree label_true, arm1, arm2;
2906 label = create_artificial_label (UNKNOWN_LOCATION);
2907 label_true = create_artificial_label (UNKNOWN_LOCATION);
2908 arm1 = TREE_OPERAND (predicate, 0);
2909 arm2 = TREE_OPERAND (predicate, 1);
2910 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2911 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2912 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2913 label_true, label);
2914 gimple_seq_add_stmt (stmt_list, stmt);
2915 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2918 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2919 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
2921 simduid = find_omp_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
2922 if (simduid)
2923 simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
2926 for (c = clauses; c ;)
2928 tree var, new_var;
2929 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2931 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
2932 || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
2933 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
2935 var = OMP_CLAUSE_DECL (c);
2936 new_var = lookup_decl (var, ctx);
2938 if (simduid && DECL_HAS_VALUE_EXPR_P (new_var))
2940 tree val = DECL_VALUE_EXPR (new_var);
2941 if (TREE_CODE (val) == ARRAY_REF
2942 && VAR_P (TREE_OPERAND (val, 0))
2943 && lookup_attribute ("omp simd array",
2944 DECL_ATTRIBUTES (TREE_OPERAND (val,
2945 0))))
2947 if (lastlane == NULL)
2949 lastlane = create_tmp_var (unsigned_type_node, NULL);
2950 gimple g
2951 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
2952 2, simduid,
2953 TREE_OPERAND (val, 1));
2954 gimple_call_set_lhs (g, lastlane);
2955 gimple_seq_add_stmt (stmt_list, g);
2957 new_var = build4 (ARRAY_REF, TREE_TYPE (val),
2958 TREE_OPERAND (val, 0), lastlane,
2959 NULL_TREE, NULL_TREE);
2963 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
2964 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2966 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2967 gimple_seq_add_seq (stmt_list,
2968 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2969 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2972 x = build_outer_var_ref (var, ctx);
2973 if (is_reference (var))
2974 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2975 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2976 gimplify_and_add (x, stmt_list);
2978 c = OMP_CLAUSE_CHAIN (c);
2979 if (c == NULL && !par_clauses)
2981 /* If this was a workshare construct, see if it had been combined
2982 with its parallel. In that case, continue looking for the
2983 clauses also on the parallel statement itself. */
2984 if (is_parallel_ctx (ctx))
2985 break;
2987 ctx = ctx->outer;
2988 if (ctx == NULL || !is_parallel_ctx (ctx))
2989 break;
2991 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2992 OMP_CLAUSE_LASTPRIVATE);
2993 par_clauses = true;
2997 if (label)
2998 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
3002 /* Generate code to implement the REDUCTION clauses. */
3004 static void
3005 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
3007 gimple_seq sub_seq = NULL;
3008 gimple stmt;
3009 tree x, c;
3010 int count = 0;
3012 /* SIMD reductions are handled in lower_rec_input_clauses. */
3013 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3014 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
3015 return;
3017 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
3018 update in that case, otherwise use a lock. */
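/* Illustrative shapes only: with a single reduction(+:s) clause the
   merge becomes an atomic update,

       #pragma omp atomic
       s_orig = s_orig + s_priv;

   whereas with several reduction clauses the merges are wrapped in
   GOMP_atomic_start () / GOMP_atomic_end () as done at the end of
   this function.  */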
3019 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
3020 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
3022 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3024 /* Never use OMP_ATOMIC for array reductions. */
3025 count = -1;
3026 break;
3028 count++;
3031 if (count == 0)
3032 return;
3034 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3036 tree var, ref, new_var;
3037 enum tree_code code;
3038 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3040 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
3041 continue;
3043 var = OMP_CLAUSE_DECL (c);
3044 new_var = lookup_decl (var, ctx);
3045 if (is_reference (var))
3046 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3047 ref = build_outer_var_ref (var, ctx);
3048 code = OMP_CLAUSE_REDUCTION_CODE (c);
3050 /* reduction(-:var) sums up the partial results, so it acts
3051 identically to reduction(+:var). */
3052 if (code == MINUS_EXPR)
3053 code = PLUS_EXPR;
3055 if (count == 1)
3057 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
3059 addr = save_expr (addr);
3060 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
3061 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
3062 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
3063 gimplify_and_add (x, stmt_seqp);
3064 return;
3067 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3069 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
3071 if (is_reference (var))
3072 ref = build_fold_addr_expr_loc (clause_loc, ref);
3073 SET_DECL_VALUE_EXPR (placeholder, ref);
3074 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
3075 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
3076 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
3077 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3078 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
3080 else
3082 x = build2 (code, TREE_TYPE (ref), ref, new_var);
3083 ref = build_outer_var_ref (var, ctx);
3084 gimplify_assign (ref, x, &sub_seq);
3088 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
3090 gimple_seq_add_stmt (stmt_seqp, stmt);
3092 gimple_seq_add_seq (stmt_seqp, sub_seq);
3094 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
3096 gimple_seq_add_stmt (stmt_seqp, stmt);
3100 /* Generate code to implement the COPYPRIVATE clauses. */
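/* Informal sketch (assumed field names): for copyprivate(x) on a
   single construct, the thread that executed the single stores x
   (or &x when the field is a pointer) into the .omp_copy_s record in
   SLIST; once the record is broadcast, the other threads assign from
   the received record back into their own x in RLIST.  */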
3102 static void
3103 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
3104 omp_context *ctx)
3106 tree c;
3108 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3110 tree var, new_var, ref, x;
3111 bool by_ref;
3112 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3114 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
3115 continue;
3117 var = OMP_CLAUSE_DECL (c);
3118 by_ref = use_pointer_for_field (var, NULL);
3120 ref = build_sender_ref (var, ctx);
3121 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
3122 if (by_ref)
3124 x = build_fold_addr_expr_loc (clause_loc, new_var);
3125 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
3127 gimplify_assign (ref, x, slist);
3129 ref = build_receiver_ref (var, false, ctx);
3130 if (by_ref)
3132 ref = fold_convert_loc (clause_loc,
3133 build_pointer_type (TREE_TYPE (new_var)),
3134 ref);
3135 ref = build_fold_indirect_ref_loc (clause_loc, ref);
3137 if (is_reference (var))
3139 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
3140 ref = build_simple_mem_ref_loc (clause_loc, ref);
3141 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3143 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
3144 gimplify_and_add (x, rlist);
3149 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
3150 and REDUCTION from the sender (aka parent) side. */
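/* E.g. (sketch, field name assumed): for firstprivate(x) the parent
   thread emits  .omp_data_o.x = x;  (or stores &x when the field is a
   pointer) in ILIST before the region is launched; lastprivate and
   reduction results flow back to the parent through OLIST after the
   region completes.  */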
3152 static void
3153 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
3154 omp_context *ctx)
3156 tree c;
3158 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3160 tree val, ref, x, var;
3161 bool by_ref, do_in = false, do_out = false;
3162 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3164 switch (OMP_CLAUSE_CODE (c))
3166 case OMP_CLAUSE_PRIVATE:
3167 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
3168 break;
3169 continue;
3170 case OMP_CLAUSE_FIRSTPRIVATE:
3171 case OMP_CLAUSE_COPYIN:
3172 case OMP_CLAUSE_LASTPRIVATE:
3173 case OMP_CLAUSE_REDUCTION:
3174 break;
3175 default:
3176 continue;
3179 val = OMP_CLAUSE_DECL (c);
3180 var = lookup_decl_in_outer_ctx (val, ctx);
3182 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
3183 && is_global_var (var))
3184 continue;
3185 if (is_variable_sized (val))
3186 continue;
3187 by_ref = use_pointer_for_field (val, NULL);
3189 switch (OMP_CLAUSE_CODE (c))
3191 case OMP_CLAUSE_PRIVATE:
3192 case OMP_CLAUSE_FIRSTPRIVATE:
3193 case OMP_CLAUSE_COPYIN:
3194 do_in = true;
3195 break;
3197 case OMP_CLAUSE_LASTPRIVATE:
3198 if (by_ref || is_reference (val))
3200 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3201 continue;
3202 do_in = true;
3204 else
3206 do_out = true;
3207 if (lang_hooks.decls.omp_private_outer_ref (val))
3208 do_in = true;
3210 break;
3212 case OMP_CLAUSE_REDUCTION:
3213 do_in = true;
3214 do_out = !(by_ref || is_reference (val));
3215 break;
3217 default:
3218 gcc_unreachable ();
3221 if (do_in)
3223 ref = build_sender_ref (val, ctx);
3224 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
3225 gimplify_assign (ref, x, ilist);
3226 if (is_task_ctx (ctx))
3227 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
3230 if (do_out)
3232 ref = build_sender_ref (val, ctx);
3233 gimplify_assign (var, ref, olist);
3238 /* Generate code to implement SHARED from the sender (aka parent)
3239 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
3240 list things that got automatically shared. */
3242 static void
3243 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
3245 tree var, ovar, nvar, f, x, record_type;
3247 if (ctx->record_type == NULL)
3248 return;
3250 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
3251 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
3253 ovar = DECL_ABSTRACT_ORIGIN (f);
3254 nvar = maybe_lookup_decl (ovar, ctx);
3255 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
3256 continue;
3258 /* If CTX is a nested parallel directive, find the immediately
3259 enclosing parallel or workshare construct that contains a
3260 mapping for OVAR. */
3261 var = lookup_decl_in_outer_ctx (ovar, ctx);
3263 if (use_pointer_for_field (ovar, ctx))
3265 x = build_sender_ref (ovar, ctx);
3266 var = build_fold_addr_expr (var);
3267 gimplify_assign (x, var, ilist);
3269 else
3271 x = build_sender_ref (ovar, ctx);
3272 gimplify_assign (x, var, ilist);
3274 if (!TREE_READONLY (var)
3275 /* We don't need to receive a new reference to a result
3276 or parm decl. In fact we may not store to it as we will
3277 invalidate any pending RSO and generate wrong gimple
3278 during inlining. */
3279 && !((TREE_CODE (var) == RESULT_DECL
3280 || TREE_CODE (var) == PARM_DECL)
3281 && DECL_BY_REFERENCE (var)))
3283 x = build_sender_ref (ovar, ctx);
3284 gimplify_assign (var, x, olist);
3291 /* A convenience function to build an empty GIMPLE_COND with just the
3292 condition. */
3294 static gimple
3295 gimple_build_cond_empty (tree cond)
3297 enum tree_code pred_code;
3298 tree lhs, rhs;
3300 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
3301 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
3305 /* Build the function calls to GOMP_parallel_start etc to actually
3306 generate the parallel operation. REGION is the parallel region
3307 being expanded. BB is the block where the code is to be inserted. WS_ARGS
3308 will be set if this is a call to a combined parallel+workshare
3309 construct, it contains the list of additional arguments needed by
3310 the workshare construct. */
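/* For a plain (non-combined) parallel the emitted sequence is roughly
   (sketch, argument names assumed):

       GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
       child_fn (&.omp_data_o);
       GOMP_parallel_end ();

   combined parallel+workshare regions call one of the
   GOMP_parallel_loop_*_start or GOMP_parallel_sections_start entry
   points instead, with the extra WS_ARGS appended.  */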
3312 static void
3313 expand_parallel_call (struct omp_region *region, basic_block bb,
3314 gimple entry_stmt, vec<tree, va_gc> *ws_args)
3316 tree t, t1, t2, val, cond, c, clauses;
3317 gimple_stmt_iterator gsi;
3318 gimple stmt;
3319 enum built_in_function start_ix;
3320 int start_ix2;
3321 location_t clause_loc;
3322 vec<tree, va_gc> *args;
3324 clauses = gimple_omp_parallel_clauses (entry_stmt);
3326 /* Determine what flavor of GOMP_parallel_start we will be
3327 emitting. */
3328 start_ix = BUILT_IN_GOMP_PARALLEL_START;
3329 if (is_combined_parallel (region))
3331 switch (region->inner->type)
3333 case GIMPLE_OMP_FOR:
3334 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
3335 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
3336 + (region->inner->sched_kind
3337 == OMP_CLAUSE_SCHEDULE_RUNTIME
3338 ? 3 : region->inner->sched_kind));
3339 start_ix = (enum built_in_function)start_ix2;
3340 break;
3341 case GIMPLE_OMP_SECTIONS:
3342 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
3343 break;
3344 default:
3345 gcc_unreachable ();
3349 /* By default, the value of NUM_THREADS is zero (selected at run time)
3350 and there is no conditional. */
3351 cond = NULL_TREE;
3352 val = build_int_cst (unsigned_type_node, 0);
3354 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3355 if (c)
3356 cond = OMP_CLAUSE_IF_EXPR (c);
3358 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
3359 if (c)
3361 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
3362 clause_loc = OMP_CLAUSE_LOCATION (c);
3364 else
3365 clause_loc = gimple_location (entry_stmt);
3367 /* Ensure 'val' is of the correct type. */
3368 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
3370 /* If we found the clause 'if (cond)', build either
3371 (cond != 0) or (cond ? val : 1u). */
3372 if (cond)
3374 gimple_stmt_iterator gsi;
3376 cond = gimple_boolify (cond);
3378 if (integer_zerop (val))
3379 val = fold_build2_loc (clause_loc,
3380 EQ_EXPR, unsigned_type_node, cond,
3381 build_int_cst (TREE_TYPE (cond), 0));
3382 else
3384 basic_block cond_bb, then_bb, else_bb;
3385 edge e, e_then, e_else;
3386 tree tmp_then, tmp_else, tmp_join, tmp_var;
3388 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
3389 if (gimple_in_ssa_p (cfun))
3391 tmp_then = make_ssa_name (tmp_var, NULL);
3392 tmp_else = make_ssa_name (tmp_var, NULL);
3393 tmp_join = make_ssa_name (tmp_var, NULL);
3395 else
3397 tmp_then = tmp_var;
3398 tmp_else = tmp_var;
3399 tmp_join = tmp_var;
3402 e = split_block (bb, NULL);
3403 cond_bb = e->src;
3404 bb = e->dest;
3405 remove_edge (e);
3407 then_bb = create_empty_bb (cond_bb);
3408 else_bb = create_empty_bb (then_bb);
3409 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3410 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3412 stmt = gimple_build_cond_empty (cond);
3413 gsi = gsi_start_bb (cond_bb);
3414 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3416 gsi = gsi_start_bb (then_bb);
3417 stmt = gimple_build_assign (tmp_then, val);
3418 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3420 gsi = gsi_start_bb (else_bb);
3421 stmt = gimple_build_assign
3422 (tmp_else, build_int_cst (unsigned_type_node, 1));
3423 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3425 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3426 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3427 if (current_loops)
3429 add_bb_to_loop (then_bb, cond_bb->loop_father);
3430 add_bb_to_loop (else_bb, cond_bb->loop_father);
3432 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3433 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3435 if (gimple_in_ssa_p (cfun))
3437 gimple phi = create_phi_node (tmp_join, bb);
3438 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3439 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3442 val = tmp_join;
3445 gsi = gsi_start_bb (bb);
3446 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3447 false, GSI_CONTINUE_LINKING);
3450 gsi = gsi_last_bb (bb);
3451 t = gimple_omp_parallel_data_arg (entry_stmt);
3452 if (t == NULL)
3453 t1 = null_pointer_node;
3454 else
3455 t1 = build_fold_addr_expr (t);
3456 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3458 vec_alloc (args, 3 + vec_safe_length (ws_args));
3459 args->quick_push (t2);
3460 args->quick_push (t1);
3461 args->quick_push (val);
3462 if (ws_args)
3463 args->splice (*ws_args);
3465 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3466 builtin_decl_explicit (start_ix), args);
3468 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3469 false, GSI_CONTINUE_LINKING);
3471 t = gimple_omp_parallel_data_arg (entry_stmt);
3472 if (t == NULL)
3473 t = null_pointer_node;
3474 else
3475 t = build_fold_addr_expr (t);
3476 t = build_call_expr_loc (gimple_location (entry_stmt),
3477 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3478 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3479 false, GSI_CONTINUE_LINKING);
3481 t = build_call_expr_loc (gimple_location (entry_stmt),
3482 builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
3484 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3485 false, GSI_CONTINUE_LINKING);
3489 /* Build the function call to GOMP_task to actually
3490 generate the task operation. BB is the block where the code is to be inserted. */
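/* The emitted call has the shape (sketch only, names assumed):

       GOMP_task (child_fn, &.omp_data_o, copy_fn,
                  arg_size, arg_align, if_cond, flags);

   where FLAGS encodes untied (bit 0), final (bit 1) and mergeable
   (bit 2) as computed below.  */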
3492 static void
3493 expand_task_call (basic_block bb, gimple entry_stmt)
3495 tree t, t1, t2, t3, flags, cond, c, c2, clauses;
3496 gimple_stmt_iterator gsi;
3497 location_t loc = gimple_location (entry_stmt);
3499 clauses = gimple_omp_task_clauses (entry_stmt);
3501 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3502 if (c)
3503 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3504 else
3505 cond = boolean_true_node;
3507 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3508 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
3509 flags = build_int_cst (unsigned_type_node,
3510 (c ? 1 : 0) + (c2 ? 4 : 0));
3512 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
3513 if (c)
3515 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
3516 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
3517 build_int_cst (unsigned_type_node, 2),
3518 build_int_cst (unsigned_type_node, 0));
3519 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
3522 gsi = gsi_last_bb (bb);
3523 t = gimple_omp_task_data_arg (entry_stmt);
3524 if (t == NULL)
3525 t2 = null_pointer_node;
3526 else
3527 t2 = build_fold_addr_expr_loc (loc, t);
3528 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3529 t = gimple_omp_task_copy_fn (entry_stmt);
3530 if (t == NULL)
3531 t3 = null_pointer_node;
3532 else
3533 t3 = build_fold_addr_expr_loc (loc, t);
3535 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
3536 7, t1, t2, t3,
3537 gimple_omp_task_arg_size (entry_stmt),
3538 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3540 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3541 false, GSI_CONTINUE_LINKING);
3545 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3546 catch handler and return it. This prevents programs from violating the
3547 structured block semantics with throws. */
3549 static gimple_seq
3550 maybe_catch_exception (gimple_seq body)
3552 gimple g;
3553 tree decl;
3555 if (!flag_exceptions)
3556 return body;
3558 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3559 decl = lang_hooks.eh_protect_cleanup_actions ();
3560 else
3561 decl = builtin_decl_explicit (BUILT_IN_TRAP);
3563 g = gimple_build_eh_must_not_throw (decl);
3564 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3565 GIMPLE_TRY_CATCH);
3567 return gimple_seq_alloc_with_stmt (g);
3570 /* Chain all the DECLs in the vector V by their DECL_CHAIN fields. */
3572 static tree
3573 vec2chain (vec<tree, va_gc> *v)
3575 tree chain = NULL_TREE, t;
3576 unsigned ix;
3578 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
3580 DECL_CHAIN (t) = chain;
3581 chain = t;
3584 return chain;
3588 /* Remove barriers in REGION->EXIT's block. Note that this is only
3589 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3590 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3591 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
3592 removed. */
3594 static void
3595 remove_exit_barrier (struct omp_region *region)
3597 gimple_stmt_iterator gsi;
3598 basic_block exit_bb;
3599 edge_iterator ei;
3600 edge e;
3601 gimple stmt;
3602 int any_addressable_vars = -1;
3604 exit_bb = region->exit;
3606 /* If the parallel region doesn't return, we don't have REGION->EXIT
3607 block at all. */
3608 if (! exit_bb)
3609 return;
3611 /* The last statement in the block will be the parallel's GIMPLE_OMP_RETURN. The
3612 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3613 statements that can appear in between are extremely limited -- no
3614 memory operations at all. Here, we allow nothing at all, so the
3615 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3616 gsi = gsi_last_bb (exit_bb);
3617 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3618 gsi_prev (&gsi);
3619 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3620 return;
3622 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3624 gsi = gsi_last_bb (e->src);
3625 if (gsi_end_p (gsi))
3626 continue;
3627 stmt = gsi_stmt (gsi);
3628 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3629 && !gimple_omp_return_nowait_p (stmt))
3631 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3632 in many cases. If there could be tasks queued, the barrier
3633 might be needed to let the tasks run before some local
3634 variable of the parallel that the task uses as shared
3635 runs out of scope. The task can be spawned either
3636 from within current function (this would be easy to check)
3637 or from some function it calls and gets passed an address
3638 of such a variable. */
3639 if (any_addressable_vars < 0)
3641 gimple parallel_stmt = last_stmt (region->entry);
3642 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3643 tree local_decls, block, decl;
3644 unsigned ix;
3646 any_addressable_vars = 0;
3647 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3648 if (TREE_ADDRESSABLE (decl))
3650 any_addressable_vars = 1;
3651 break;
3653 for (block = gimple_block (stmt);
3654 !any_addressable_vars
3655 && block
3656 && TREE_CODE (block) == BLOCK;
3657 block = BLOCK_SUPERCONTEXT (block))
3659 for (local_decls = BLOCK_VARS (block);
3660 local_decls;
3661 local_decls = DECL_CHAIN (local_decls))
3662 if (TREE_ADDRESSABLE (local_decls))
3664 any_addressable_vars = 1;
3665 break;
3667 if (block == gimple_block (parallel_stmt))
3668 break;
3671 if (!any_addressable_vars)
3672 gimple_omp_return_set_nowait (stmt);
3677 static void
3678 remove_exit_barriers (struct omp_region *region)
3680 if (region->type == GIMPLE_OMP_PARALLEL)
3681 remove_exit_barrier (region);
3683 if (region->inner)
3685 region = region->inner;
3686 remove_exit_barriers (region);
3687 while (region->next)
3689 region = region->next;
3690 remove_exit_barriers (region);
3695 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3696 calls. These can't be declared as const functions, but
3697 within one parallel body they are constant, so they can be
3698 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3699 which are declared const. Similarly for task body, except
3700 that in untied task omp_get_thread_num () can change at any task
3701 scheduling point. */
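/* For example (illustrative), a call to the external omp_get_num_threads ()
   inside the outlined parallel body is redirected to the const builtin
   __builtin_omp_get_num_threads (), which allows repeated calls within
   the body to be CSEd.  */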
3703 static void
3704 optimize_omp_library_calls (gimple entry_stmt)
3706 basic_block bb;
3707 gimple_stmt_iterator gsi;
3708 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3709 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
3710 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3711 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
3712 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3713 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3714 OMP_CLAUSE_UNTIED) != NULL);
3716 FOR_EACH_BB (bb)
3717 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3719 gimple call = gsi_stmt (gsi);
3720 tree decl;
3722 if (is_gimple_call (call)
3723 && (decl = gimple_call_fndecl (call))
3724 && DECL_EXTERNAL (decl)
3725 && TREE_PUBLIC (decl)
3726 && DECL_INITIAL (decl) == NULL)
3728 tree built_in;
3730 if (DECL_NAME (decl) == thr_num_id)
3732 /* In #pragma omp task untied omp_get_thread_num () can change
3733 during the execution of the task region. */
3734 if (untied_task)
3735 continue;
3736 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3738 else if (DECL_NAME (decl) == num_thr_id)
3739 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3740 else
3741 continue;
3743 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3744 || gimple_call_num_args (call) != 0)
3745 continue;
3747 if (flag_exceptions && !TREE_NOTHROW (decl))
3748 continue;
3750 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3751 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3752 TREE_TYPE (TREE_TYPE (built_in))))
3753 continue;
3755 gimple_call_set_fndecl (call, built_in);
3760 /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
3761 regimplified. */
3763 static tree
3764 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
3766 tree t = *tp;
3768 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
3769 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
3770 return t;
3772 if (TREE_CODE (t) == ADDR_EXPR)
3773 recompute_tree_invariant_for_addr_expr (t);
3775 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
3776 return NULL_TREE;
3779 /* Prepend TO = FROM assignment before *GSI_P. */
3781 static void
3782 expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from)
3784 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
3785 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
3786 true, GSI_SAME_STMT);
3787 gimple stmt = gimple_build_assign (to, from);
3788 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
3789 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
3790 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
3792 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
3793 gimple_regimplify_operands (stmt, &gsi);
3797 /* Expand the OpenMP parallel or task directive starting at REGION. */
3799 static void
3800 expand_omp_taskreg (struct omp_region *region)
3802 basic_block entry_bb, exit_bb, new_bb;
3803 struct function *child_cfun;
3804 tree child_fn, block, t;
3805 gimple_stmt_iterator gsi;
3806 gimple entry_stmt, stmt;
3807 edge e;
3808 vec<tree, va_gc> *ws_args;
3810 entry_stmt = last_stmt (region->entry);
3811 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3812 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3814 entry_bb = region->entry;
3815 exit_bb = region->exit;
3817 if (is_combined_parallel (region))
3818 ws_args = region->ws_args;
3819 else
3820 ws_args = NULL;
3822 if (child_cfun->cfg)
3824 /* Due to inlining, it may happen that we have already outlined
3825 the region, in which case all we need to do is make the
3826 sub-graph unreachable and emit the parallel call. */
3827 edge entry_succ_e, exit_succ_e;
3828 gimple_stmt_iterator gsi;
3830 entry_succ_e = single_succ_edge (entry_bb);
3832 gsi = gsi_last_bb (entry_bb);
3833 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3834 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3835 gsi_remove (&gsi, true);
3837 new_bb = entry_bb;
3838 if (exit_bb)
3840 exit_succ_e = single_succ_edge (exit_bb);
3841 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3843 remove_edge_and_dominated_blocks (entry_succ_e);
3845 else
3847 unsigned srcidx, dstidx, num;
3849 /* If the parallel region needs data sent from the parent
3850 function, then the very first statement (except possible
3851 tree profile counter updates) of the parallel body
3852 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3853 &.OMP_DATA_O is passed as an argument to the child function,
3854 we need to replace it with the argument as seen by the child
3855 function.
3857 In most cases, this will end up being the identity assignment
3858 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3859 a function call that has been inlined, the original PARM_DECL
3860 .OMP_DATA_I may have been converted into a different local
3861 variable, in which case we need to keep the assignment. */
3862 if (gimple_omp_taskreg_data_arg (entry_stmt))
3864 basic_block entry_succ_bb = single_succ (entry_bb);
3865 gimple_stmt_iterator gsi;
3866 tree arg, narg;
3867 gimple parcopy_stmt = NULL;
3869 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3871 gimple stmt;
3873 gcc_assert (!gsi_end_p (gsi));
3874 stmt = gsi_stmt (gsi);
3875 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3876 continue;
3878 if (gimple_num_ops (stmt) == 2)
3880 tree arg = gimple_assign_rhs1 (stmt);
3882 /* We're ignoring the subcode because we're
3883 effectively doing a STRIP_NOPS. */
3885 if (TREE_CODE (arg) == ADDR_EXPR
3886 && TREE_OPERAND (arg, 0)
3887 == gimple_omp_taskreg_data_arg (entry_stmt))
3889 parcopy_stmt = stmt;
3890 break;
3895 gcc_assert (parcopy_stmt != NULL);
3896 arg = DECL_ARGUMENTS (child_fn);
3898 if (!gimple_in_ssa_p (cfun))
3900 if (gimple_assign_lhs (parcopy_stmt) == arg)
3901 gsi_remove (&gsi, true);
3902 else
3904 /* ?? Is setting the subcode really necessary ?? */
3905 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3906 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3909 else
3911 /* If we are in ssa form, we must load the value from the default
3912 definition of the argument. That should not be defined now,
3913 since the argument is not used uninitialized. */
3914 gcc_assert (ssa_default_def (cfun, arg) == NULL);
3915 narg = make_ssa_name (arg, gimple_build_nop ());
3916 set_ssa_default_def (cfun, arg, narg);
3917 /* ?? Is setting the subcode really necessary ?? */
3918 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3919 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3920 update_stmt (parcopy_stmt);
3924 /* Declare local variables needed in CHILD_CFUN. */
3925 block = DECL_INITIAL (child_fn);
3926 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3927 /* The gimplifier could record temporaries in the parallel/task block
3928 rather than in the containing function's local_decls chain,
3929 which would mean cgraph missed finalizing them. Do it now. */
3930 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3931 if (TREE_CODE (t) == VAR_DECL
3932 && TREE_STATIC (t)
3933 && !DECL_EXTERNAL (t))
3934 varpool_finalize_decl (t);
3935 DECL_SAVED_TREE (child_fn) = NULL;
3936 /* We'll create a CFG for child_fn, so no gimple body is needed. */
3937 gimple_set_body (child_fn, NULL);
3938 TREE_USED (block) = 1;
3940 /* Reset DECL_CONTEXT on function arguments. */
3941 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3942 DECL_CONTEXT (t) = child_fn;
3944 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3945 so that it can be moved to the child function. */
3946 gsi = gsi_last_bb (entry_bb);
3947 stmt = gsi_stmt (gsi);
3948 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3949 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3950 gsi_remove (&gsi, true);
3951 e = split_block (entry_bb, stmt);
3952 entry_bb = e->dest;
3953 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3955 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3956 if (exit_bb)
3958 gsi = gsi_last_bb (exit_bb);
3959 gcc_assert (!gsi_end_p (gsi)
3960 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3961 stmt = gimple_build_return (NULL);
3962 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3963 gsi_remove (&gsi, true);
3966 /* Move the parallel region into CHILD_CFUN. */
3968 if (gimple_in_ssa_p (cfun))
3970 init_tree_ssa (child_cfun);
3971 init_ssa_operands (child_cfun);
3972 child_cfun->gimple_df->in_ssa_p = true;
3973 block = NULL_TREE;
3975 else
3976 block = gimple_block (entry_stmt);
3978 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3979 if (exit_bb)
3980 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3981 /* When the OMP expansion process cannot guarantee an up-to-date
3982 loop tree, arrange for the child function to fix up loops. */
3983 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
3984 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
3986 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3987 num = vec_safe_length (child_cfun->local_decls);
3988 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3990 t = (*child_cfun->local_decls)[srcidx];
3991 if (DECL_CONTEXT (t) == cfun->decl)
3992 continue;
3993 if (srcidx != dstidx)
3994 (*child_cfun->local_decls)[dstidx] = t;
3995 dstidx++;
3997 if (dstidx != num)
3998 vec_safe_truncate (child_cfun->local_decls, dstidx);
4000 /* Inform the callgraph about the new function. */
4001 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
4002 cgraph_add_new_function (child_fn, true);
4004 /* Fix the callgraph edges for child_cfun. Those for cfun will be
4005 fixed in a following pass. */
4006 push_cfun (child_cfun);
4007 if (optimize)
4008 optimize_omp_library_calls (entry_stmt);
4009 rebuild_cgraph_edges ();
4011 /* Some EH regions might become dead, see PR34608. If
4012 pass_cleanup_cfg isn't the first pass to happen with the
4013 new child, these dead EH edges might cause problems.
4014 Clean them up now. */
4015 if (flag_exceptions)
4017 basic_block bb;
4018 bool changed = false;
4020 FOR_EACH_BB (bb)
4021 changed |= gimple_purge_dead_eh_edges (bb);
4022 if (changed)
4023 cleanup_tree_cfg ();
4025 if (gimple_in_ssa_p (cfun))
4026 update_ssa (TODO_update_ssa);
4027 pop_cfun ();
4030 /* Emit a library call to launch the children threads. */
4031 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
4032 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
4033 else
4034 expand_task_call (new_bb, entry_stmt);
4035 if (gimple_in_ssa_p (cfun))
4036 update_ssa (TODO_update_ssa_only_virtuals);
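/* Editorial illustration, not GCC output: conceptually, the outlining done
   above (plus expand_parallel_call) turns

       #pragma omp parallel shared (a)
       use (a);

   into a child function that receives the sender/receiver record described
   earlier (.OMP_DATA_O in the parent, .OMP_DATA_I in the child) plus a
   runtime launch.  The record layout and the exact libgomp entry points
   below are assumptions for illustration only:

       struct omp_data_s { int *a; };              // stands for .OMP_DATA_O/.OMP_DATA_I

       static void
       child_fn (struct omp_data_s *omp_data_i)
       {
         use (*omp_data_i->a);                     // shared references rewritten through the record
       }

       struct omp_data_s omp_data_o = { &a };
       GOMP_parallel_start (child_fn, &omp_data_o, 0);   // assumed runtime entry point
       child_fn (&omp_data_o);                           // the encountering thread joins the team
       GOMP_parallel_end ();
   */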
4040 /* Helper function for expand_omp_{for_*,simd}. If this is the outermost
4041 of the combined collapse > 1 loop constructs, generate code like:
4042 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
4043 if (cond3 is <)
4044 adj = STEP3 - 1;
4045 else
4046 adj = STEP3 + 1;
4047 count3 = (adj + N32 - N31) / STEP3;
4048 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
4049 if (cond2 is <)
4050 adj = STEP2 - 1;
4051 else
4052 adj = STEP2 + 1;
4053 count2 = (adj + N22 - N21) / STEP2;
4054 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
4055 if (cond1 is <)
4056 adj = STEP1 - 1;
4057 else
4058 adj = STEP1 + 1;
4059 count1 = (adj + N12 - N11) / STEP1;
4060 count = count1 * count2 * count3;
4061 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
4062 count = 0;
4063 and set ZERO_ITER_BB to that bb. */
4065 /* NOTE: It *could* be better to moosh all of the BBs together,
4066 creating one larger BB with all the computation and the unexpected
4067 jump at the end. I.e.
4069 bool zero3, zero2, zero1, zero;
4071 zero3 = N32 c3 N31;
4072 count3 = (N32 - N31) /[cl] STEP3;
4073 zero2 = N22 c2 N21;
4074 count2 = (N22 - N21) /[cl] STEP2;
4075 zero1 = N12 c1 N11;
4076 count1 = (N12 - N11) /[cl] STEP1;
4077 zero = zero3 || zero2 || zero1;
4078 count = count1 * count2 * count3;
4079 if (__builtin_expect(zero, false)) goto zero_iter_bb;
4081 After all, we expect zero to be false, and thus we expect to have to
4082 evaluate all of the comparison expressions, so short-circuiting
4083 oughtn't be a win. Since the condition isn't protecting a
4084 denominator, we're not concerned about divide-by-zero, so we can
4085 fully evaluate count even if a numerator turned out to be wrong.
4087 It seems like putting this all together would create much better
4088 scheduling opportunities, and less pressure on the chip's branch
4089 predictor. */
4091 static void
4092 expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
4093 basic_block &entry_bb, tree *counts,
4094 basic_block &zero_iter_bb, int &first_zero_iter,
4095 basic_block &l2_dom_bb)
4097 tree t, type = TREE_TYPE (fd->loop.v);
4098 gimple stmt;
4099 edge e, ne;
4100 int i;
4102 /* Collapsed loops need work for expansion into SSA form. */
4103 gcc_assert (!gimple_in_ssa_p (cfun));
4105 for (i = 0; i < fd->collapse; i++)
4107 tree itype = TREE_TYPE (fd->loops[i].v);
4109 if (SSA_VAR_P (fd->loop.n2)
4110 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
4111 fold_convert (itype, fd->loops[i].n1),
4112 fold_convert (itype, fd->loops[i].n2)))
4113 == NULL_TREE || !integer_onep (t)))
4115 tree n1, n2;
4116 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
4117 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
4118 true, GSI_SAME_STMT);
4119 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
4120 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
4121 true, GSI_SAME_STMT);
4122 stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
4123 NULL_TREE, NULL_TREE);
4124 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
4125 if (walk_tree (gimple_cond_lhs_ptr (stmt),
4126 expand_omp_regimplify_p, NULL, NULL)
4127 || walk_tree (gimple_cond_rhs_ptr (stmt),
4128 expand_omp_regimplify_p, NULL, NULL))
4130 *gsi = gsi_for_stmt (stmt);
4131 gimple_regimplify_operands (stmt, gsi);
4133 e = split_block (entry_bb, stmt);
4134 if (zero_iter_bb == NULL)
4136 first_zero_iter = i;
4137 zero_iter_bb = create_empty_bb (entry_bb);
4138 if (current_loops)
4139 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
4140 *gsi = gsi_after_labels (zero_iter_bb);
4141 stmt = gimple_build_assign (fd->loop.n2,
4142 build_zero_cst (type));
4143 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
4144 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
4145 entry_bb);
4147 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
4148 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
4149 e->flags = EDGE_TRUE_VALUE;
4150 e->probability = REG_BR_PROB_BASE - ne->probability;
4151 if (l2_dom_bb == NULL)
4152 l2_dom_bb = entry_bb;
4153 entry_bb = e->dest;
4154 *gsi = gsi_last_bb (entry_bb);
4157 if (POINTER_TYPE_P (itype))
4158 itype = signed_type_for (itype);
4159 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
4160 ? -1 : 1));
4161 t = fold_build2 (PLUS_EXPR, itype,
4162 fold_convert (itype, fd->loops[i].step), t);
4163 t = fold_build2 (PLUS_EXPR, itype, t,
4164 fold_convert (itype, fd->loops[i].n2));
4165 t = fold_build2 (MINUS_EXPR, itype, t,
4166 fold_convert (itype, fd->loops[i].n1));
4167 /* ?? We could probably use CEIL_DIV_EXPR instead of
4168 TRUNC_DIV_EXPR plus the manual adjustment, unless that can't
4169 generate the same code in the end because, generically, we
4170 don't know that the values involved must be negative for
4171 GT. ?? */
4172 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
4173 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4174 fold_build1 (NEGATE_EXPR, itype, t),
4175 fold_build1 (NEGATE_EXPR, itype,
4176 fold_convert (itype,
4177 fd->loops[i].step)));
4178 else
4179 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
4180 fold_convert (itype, fd->loops[i].step));
4181 t = fold_convert (type, t);
4182 if (TREE_CODE (t) == INTEGER_CST)
4183 counts[i] = t;
4184 else
4186 counts[i] = create_tmp_reg (type, ".count");
4187 expand_omp_build_assign (gsi, counts[i], t);
4189 if (SSA_VAR_P (fd->loop.n2))
4191 if (i == 0)
4192 t = counts[0];
4193 else
4194 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
4195 expand_omp_build_assign (gsi, fd->loop.n2, t);
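/* Editorial sketch, not GCC code: the count computation emitted above, as
   plain C for a collapse(3) nest whose conditions are all "<".  N11/N12,
   STEP1 etc. are the bounds named in the comment before this function; the
   concrete numbers below are illustrative.

     #include <stdio.h>

     static long
     tripcount (long n1, long n2, long step)    // assumes cond is "<"
     {
       if (!(n1 < n2))
         return 0;                              // the ZERO_ITER_BB case
       long adj = step - 1;                     // cond is "<", so STEP - 1
       return (adj + n2 - n1) / step;
     }

     int
     main (void)
     {
       long count1 = tripcount (0, 10, 3);      // 4 iterations: 0, 3, 6, 9
       long count2 = tripcount (0, 5, 1);       // 5 iterations
       long count3 = tripcount (0, 7, 2);       // 4 iterations: 0, 2, 4, 6
       printf ("%ld\n", count1 * count2 * count3);   // 80 = total trip count
       return 0;
     }
   */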
4201 /* Helper function for expand_omp_{for_*,simd}. Generate code like:
4202 T = V;
4203 V3 = N31 + (T % count3) * STEP3;
4204 T = T / count3;
4205 V2 = N21 + (T % count2) * STEP2;
4206 T = T / count2;
4207 V1 = N11 + T * STEP1;
4208 if this loop doesn't have an inner loop construct combined with it. */
4210 static void
4211 expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
4212 tree *counts, tree startvar)
4214 int i;
4215 tree type = TREE_TYPE (fd->loop.v);
4216 tree tem = create_tmp_reg (type, ".tem");
4217 gimple stmt = gimple_build_assign (tem, startvar);
4218 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
4220 for (i = fd->collapse - 1; i >= 0; i--)
4222 tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
4223 itype = vtype;
4224 if (POINTER_TYPE_P (vtype))
4225 itype = signed_type_for (vtype);
4226 if (i != 0)
4227 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
4228 else
4229 t = tem;
4230 t = fold_convert (itype, t);
4231 t = fold_build2 (MULT_EXPR, itype, t,
4232 fold_convert (itype, fd->loops[i].step));
4233 if (POINTER_TYPE_P (vtype))
4234 t = fold_build_pointer_plus (fd->loops[i].n1, t);
4235 else
4236 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
4237 t = force_gimple_operand_gsi (gsi, t,
4238 DECL_P (fd->loops[i].v)
4239 && TREE_ADDRESSABLE (fd->loops[i].v),
4240 NULL_TREE, false,
4241 GSI_CONTINUE_LINKING);
4242 stmt = gimple_build_assign (fd->loops[i].v, t);
4243 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
4244 if (i != 0)
4246 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
4247 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
4248 false, GSI_CONTINUE_LINKING);
4249 stmt = gimple_build_assign (tem, t);
4250 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
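/* Editorial sketch, not GCC code: the mod/div chain emitted above,
   recovering the collapsed induction variables from the single linear
   iteration number V.  The bounds, steps and trip counts are illustrative.

     static void
     decompose (long v, long *v1, long *v2, long *v3)
     {
       long count2 = 5, count3 = 4;      // trip counts of the two inner loops
       long t = v;
       *v3 = 0 + (t % count3) * 2;       // V3 = N31 + (T % count3) * STEP3
       t /= count3;
       *v2 = 0 + (t % count2) * 1;       // V2 = N21 + (T % count2) * STEP2
       t /= count2;
       *v1 = 0 + t * 3;                  // V1 = N11 + T * STEP1
     }
   */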
4256 /* Helper function for expand_omp_for_*. Generate code like:
4257 L10:
4258 V3 += STEP3;
4259 if (V3 cond3 N32) goto BODY_BB; else goto L11;
4260 L11:
4261 V3 = N31;
4262 V2 += STEP2;
4263 if (V2 cond2 N22) goto BODY_BB; else goto L12;
4264 L12:
4265 V2 = N21;
4266 V1 += STEP1;
4267 goto BODY_BB; */
4269 static basic_block
4270 extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
4271 basic_block body_bb)
4273 basic_block last_bb, bb, collapse_bb = NULL;
4274 int i;
4275 gimple_stmt_iterator gsi;
4276 edge e;
4277 tree t;
4278 gimple stmt;
4280 last_bb = cont_bb;
4281 for (i = fd->collapse - 1; i >= 0; i--)
4283 tree vtype = TREE_TYPE (fd->loops[i].v);
4285 bb = create_empty_bb (last_bb);
4286 if (current_loops)
4287 add_bb_to_loop (bb, last_bb->loop_father);
4288 gsi = gsi_start_bb (bb);
4290 if (i < fd->collapse - 1)
4292 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
4293 e->probability = REG_BR_PROB_BASE / 8;
4295 t = fd->loops[i + 1].n1;
4296 t = force_gimple_operand_gsi (&gsi, t,
4297 DECL_P (fd->loops[i + 1].v)
4298 && TREE_ADDRESSABLE (fd->loops[i
4299 + 1].v),
4300 NULL_TREE, false,
4301 GSI_CONTINUE_LINKING);
4302 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
4303 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4305 else
4306 collapse_bb = bb;
4308 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
4310 if (POINTER_TYPE_P (vtype))
4311 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
4312 else
4313 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
4314 t = force_gimple_operand_gsi (&gsi, t,
4315 DECL_P (fd->loops[i].v)
4316 && TREE_ADDRESSABLE (fd->loops[i].v),
4317 NULL_TREE, false, GSI_CONTINUE_LINKING);
4318 stmt = gimple_build_assign (fd->loops[i].v, t);
4319 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4321 if (i > 0)
4323 t = fd->loops[i].n2;
4324 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4325 false, GSI_CONTINUE_LINKING);
4326 tree v = fd->loops[i].v;
4327 if (DECL_P (v) && TREE_ADDRESSABLE (v))
4328 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
4329 false, GSI_CONTINUE_LINKING);
4330 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
4331 stmt = gimple_build_cond_empty (t);
4332 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4333 e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
4334 e->probability = REG_BR_PROB_BASE * 7 / 8;
4336 else
4337 make_edge (bb, body_bb, EDGE_FALLTHRU);
4338 last_bb = bb;
4341 return collapse_bb;
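/* Editorial sketch, not GCC code: the effect of the update blocks built
   above, as structured C.  The innermost variable advances on every
   iteration; when it runs off its range it is reset and the next outer
   variable advances, exactly as in the L10/L11/L12 pseudocode (all
   conditions here assume "<"):

     v3 += step3;
     if (!(v3 < n32))
       {
         v3 = n31;
         v2 += step2;
         if (!(v2 < n22))
           {
             v2 = n21;
             v1 += step1;
           }
       }
     // control then continues at BODY_BB
   */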
4345 /* A subroutine of expand_omp_for. Generate code for a parallel
4346 loop with any schedule. Given parameters:
4348 for (V = N1; V cond N2; V += STEP) BODY;
4350 where COND is "<" or ">", we generate pseudocode
4352 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
4353 if (more) goto L0; else goto L3;
4355 V = istart0;
4356 iend = iend0;
4358 BODY;
4359 V += STEP;
4360 if (V cond iend) goto L1; else goto L2;
4362 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
4365 If this is a combined omp parallel loop, instead of the call to
4366 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
4368 For collapsed loops, given parameters:
4369 collapse(3)
4370 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
4371 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
4372 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
4373 BODY;
4375 we generate pseudocode
4377 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
4378 if (cond3 is <)
4379 adj = STEP3 - 1;
4380 else
4381 adj = STEP3 + 1;
4382 count3 = (adj + N32 - N31) / STEP3;
4383 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
4384 if (cond2 is <)
4385 adj = STEP2 - 1;
4386 else
4387 adj = STEP2 + 1;
4388 count2 = (adj + N22 - N21) / STEP2;
4389 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
4390 if (cond1 is <)
4391 adj = STEP1 - 1;
4392 else
4393 adj = STEP1 + 1;
4394 count1 = (adj + N12 - N11) / STEP1;
4395 count = count1 * count2 * count3;
4396 goto Z1;
4398 count = 0;
4400 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
4401 if (more) goto L0; else goto L3;
4403 V = istart0;
4404 T = V;
4405 V3 = N31 + (T % count3) * STEP3;
4406 T = T / count3;
4407 V2 = N21 + (T % count2) * STEP2;
4408 T = T / count2;
4409 V1 = N11 + T * STEP1;
4410 iend = iend0;
4412 BODY;
4413 V += 1;
4414 if (V < iend) goto L10; else goto L2;
4415 L10:
4416 V3 += STEP3;
4417 if (V3 cond3 N32) goto L1; else goto L11;
4418 L11:
4419 V3 = N31;
4420 V2 += STEP2;
4421 if (V2 cond2 N22) goto L1; else goto L12;
4422 L12:
4423 V2 = N21;
4424 V1 += STEP1;
4425 goto L1;
4427 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
4432 static void
4433 expand_omp_for_generic (struct omp_region *region,
4434 struct omp_for_data *fd,
4435 enum built_in_function start_fn,
4436 enum built_in_function next_fn)
4438 tree type, istart0, iend0, iend;
4439 tree t, vmain, vback, bias = NULL_TREE;
4440 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
4441 basic_block l2_bb = NULL, l3_bb = NULL;
4442 gimple_stmt_iterator gsi;
4443 gimple stmt;
4444 bool in_combined_parallel = is_combined_parallel (region);
4445 bool broken_loop = region->cont == NULL;
4446 edge e, ne;
4447 tree *counts = NULL;
4448 int i;
4450 gcc_assert (!broken_loop || !in_combined_parallel);
4451 gcc_assert (fd->iter_type == long_integer_type_node
4452 || !in_combined_parallel);
4454 type = TREE_TYPE (fd->loop.v);
4455 istart0 = create_tmp_var (fd->iter_type, ".istart0");
4456 iend0 = create_tmp_var (fd->iter_type, ".iend0");
4457 TREE_ADDRESSABLE (istart0) = 1;
4458 TREE_ADDRESSABLE (iend0) = 1;
4460 /* See if we need to bias by LLONG_MIN. */
4461 if (fd->iter_type == long_long_unsigned_type_node
4462 && TREE_CODE (type) == INTEGER_TYPE
4463 && !TYPE_UNSIGNED (type))
4465 tree n1, n2;
4467 if (fd->loop.cond_code == LT_EXPR)
4469 n1 = fd->loop.n1;
4470 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
4472 else
4474 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
4475 n2 = fd->loop.n1;
4477 if (TREE_CODE (n1) != INTEGER_CST
4478 || TREE_CODE (n2) != INTEGER_CST
4479 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
4480 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
4483 entry_bb = region->entry;
4484 cont_bb = region->cont;
4485 collapse_bb = NULL;
4486 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4487 gcc_assert (broken_loop
4488 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4489 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4490 l1_bb = single_succ (l0_bb);
4491 if (!broken_loop)
4493 l2_bb = create_empty_bb (cont_bb);
4494 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
4495 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4497 else
4498 l2_bb = NULL;
4499 l3_bb = BRANCH_EDGE (entry_bb)->dest;
4500 exit_bb = region->exit;
4502 gsi = gsi_last_bb (entry_bb);
4504 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4505 if (fd->collapse > 1)
4507 int first_zero_iter = -1;
4508 basic_block zero_iter_bb = NULL, l2_dom_bb = NULL;
4510 counts = XALLOCAVEC (tree, fd->collapse);
4511 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
4512 zero_iter_bb, first_zero_iter,
4513 l2_dom_bb);
4515 if (zero_iter_bb)
4517 /* Some counts[i] vars might be uninitialized if
4518 some loop has zero iterations. But the body shouldn't
4519 be executed in that case, so just avoid uninit warnings. */
4520 for (i = first_zero_iter; i < fd->collapse; i++)
4521 if (SSA_VAR_P (counts[i]))
4522 TREE_NO_WARNING (counts[i]) = 1;
4523 gsi_prev (&gsi);
4524 e = split_block (entry_bb, gsi_stmt (gsi));
4525 entry_bb = e->dest;
4526 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
4527 gsi = gsi_last_bb (entry_bb);
4528 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
4529 get_immediate_dominator (CDI_DOMINATORS,
4530 zero_iter_bb));
4533 if (in_combined_parallel)
4535 /* In a combined parallel loop, emit a call to
4536 GOMP_loop_foo_next. */
4537 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
4538 build_fold_addr_expr (istart0),
4539 build_fold_addr_expr (iend0));
4541 else
4543 tree t0, t1, t2, t3, t4;
4544 /* If this is not a combined parallel loop, emit a call to
4545 GOMP_loop_foo_start in ENTRY_BB. */
4546 t4 = build_fold_addr_expr (iend0);
4547 t3 = build_fold_addr_expr (istart0);
4548 t2 = fold_convert (fd->iter_type, fd->loop.step);
4549 t1 = fd->loop.n2;
4550 t0 = fd->loop.n1;
4551 if (POINTER_TYPE_P (TREE_TYPE (t0))
4552 && TYPE_PRECISION (TREE_TYPE (t0))
4553 != TYPE_PRECISION (fd->iter_type))
4555 /* Avoid casting pointers to an integer of a different size. */
4556 tree itype = signed_type_for (type);
4557 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
4558 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
4560 else
4562 t1 = fold_convert (fd->iter_type, t1);
4563 t0 = fold_convert (fd->iter_type, t0);
4565 if (bias)
4567 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
4568 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
4570 if (fd->iter_type == long_integer_type_node)
4572 if (fd->chunk_size)
4574 t = fold_convert (fd->iter_type, fd->chunk_size);
4575 t = build_call_expr (builtin_decl_explicit (start_fn),
4576 6, t0, t1, t2, t, t3, t4);
4578 else
4579 t = build_call_expr (builtin_decl_explicit (start_fn),
4580 5, t0, t1, t2, t3, t4);
4582 else
4584 tree t5;
4585 tree c_bool_type;
4586 tree bfn_decl;
4588 /* The GOMP_loop_ull_*start functions have an additional boolean
4589 argument, true for < loops and false for > loops.
4590 In Fortran, the C bool type can be different from
4591 boolean_type_node. */
4592 bfn_decl = builtin_decl_explicit (start_fn);
4593 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
4594 t5 = build_int_cst (c_bool_type,
4595 fd->loop.cond_code == LT_EXPR ? 1 : 0);
4596 if (fd->chunk_size)
4598 tree bfn_decl = builtin_decl_explicit (start_fn);
4599 t = fold_convert (fd->iter_type, fd->chunk_size);
4600 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
4602 else
4603 t = build_call_expr (builtin_decl_explicit (start_fn),
4604 6, t5, t0, t1, t2, t3, t4);
4607 if (TREE_TYPE (t) != boolean_type_node)
4608 t = fold_build2 (NE_EXPR, boolean_type_node,
4609 t, build_int_cst (TREE_TYPE (t), 0));
4610 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4611 true, GSI_SAME_STMT);
4612 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4614 /* Remove the GIMPLE_OMP_FOR statement. */
4615 gsi_remove (&gsi, true);
4617 /* Iteration setup for sequential loop goes in L0_BB. */
4618 tree startvar = fd->loop.v;
4619 tree endvar = NULL_TREE;
4621 gsi = gsi_start_bb (l0_bb);
4622 t = istart0;
4623 if (bias)
4624 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
4625 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
4626 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
4627 t = fold_convert (TREE_TYPE (startvar), t);
4628 t = force_gimple_operand_gsi (&gsi, t,
4629 DECL_P (startvar)
4630 && TREE_ADDRESSABLE (startvar),
4631 NULL_TREE, false, GSI_CONTINUE_LINKING);
4632 stmt = gimple_build_assign (startvar, t);
4633 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4635 t = iend0;
4636 if (bias)
4637 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
4638 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
4639 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
4640 t = fold_convert (TREE_TYPE (startvar), t);
4641 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4642 false, GSI_CONTINUE_LINKING);
4643 if (endvar)
4645 stmt = gimple_build_assign (endvar, iend);
4646 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4648 if (fd->collapse > 1)
4649 expand_omp_for_init_vars (fd, &gsi, counts, startvar);
4651 if (!broken_loop)
4653 /* Code to control the increment and predicate for the sequential
4654 loop goes in the CONT_BB. */
4655 gsi = gsi_last_bb (cont_bb);
4656 stmt = gsi_stmt (gsi);
4657 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4658 vmain = gimple_omp_continue_control_use (stmt);
4659 vback = gimple_omp_continue_control_def (stmt);
4661 /* OMP4 placeholder: if (!gimple_omp_for_combined_p (fd->for_stmt)). */
4662 if (1)
4664 if (POINTER_TYPE_P (type))
4665 t = fold_build_pointer_plus (vmain, fd->loop.step);
4666 else
4667 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4668 t = force_gimple_operand_gsi (&gsi, t,
4669 DECL_P (vback)
4670 && TREE_ADDRESSABLE (vback),
4671 NULL_TREE, true, GSI_SAME_STMT);
4672 stmt = gimple_build_assign (vback, t);
4673 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4675 t = build2 (fd->loop.cond_code, boolean_type_node,
4676 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
4677 iend);
4678 stmt = gimple_build_cond_empty (t);
4679 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4682 /* Remove GIMPLE_OMP_CONTINUE. */
4683 gsi_remove (&gsi, true);
4685 if (fd->collapse > 1)
4686 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
4688 /* Emit code to get the next parallel iteration in L2_BB. */
4689 gsi = gsi_start_bb (l2_bb);
4691 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
4692 build_fold_addr_expr (istart0),
4693 build_fold_addr_expr (iend0));
4694 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4695 false, GSI_CONTINUE_LINKING);
4696 if (TREE_TYPE (t) != boolean_type_node)
4697 t = fold_build2 (NE_EXPR, boolean_type_node,
4698 t, build_int_cst (TREE_TYPE (t), 0));
4699 stmt = gimple_build_cond_empty (t);
4700 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4703 /* Add the loop cleanup function. */
4704 gsi = gsi_last_bb (exit_bb);
4705 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4706 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
4707 else
4708 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
4709 stmt = gimple_build_call (t, 0);
4710 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4711 gsi_remove (&gsi, true);
4713 /* Connect the new blocks. */
4714 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4715 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4717 if (!broken_loop)
4719 gimple_seq phis;
4721 e = find_edge (cont_bb, l3_bb);
4722 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4724 phis = phi_nodes (l3_bb);
4725 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4727 gimple phi = gsi_stmt (gsi);
4728 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4729 PHI_ARG_DEF_FROM_EDGE (phi, e));
4731 remove_edge (e);
4733 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4734 if (current_loops)
4735 add_bb_to_loop (l2_bb, cont_bb->loop_father);
4736 e = find_edge (cont_bb, l1_bb);
4737 /* OMP4 placeholder for gimple_omp_for_combined_p (fd->for_stmt). */
4738 if (0)
4740 else if (fd->collapse > 1)
4742 remove_edge (e);
4743 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4745 else
4746 e->flags = EDGE_TRUE_VALUE;
4747 if (e)
4749 e->probability = REG_BR_PROB_BASE * 7 / 8;
4750 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4752 else
4754 e = find_edge (cont_bb, l2_bb);
4755 e->flags = EDGE_FALLTHRU;
4757 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4759 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4760 recompute_dominator (CDI_DOMINATORS, l2_bb));
4761 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4762 recompute_dominator (CDI_DOMINATORS, l3_bb));
4763 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4764 recompute_dominator (CDI_DOMINATORS, l0_bb));
4765 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4766 recompute_dominator (CDI_DOMINATORS, l1_bb));
4768 struct loop *outer_loop = alloc_loop ();
4769 outer_loop->header = l0_bb;
4770 outer_loop->latch = l2_bb;
4771 add_loop (outer_loop, l0_bb->loop_father);
4773 /* OMP4 placeholder: if (!gimple_omp_for_combined_p (fd->for_stmt)). */
4774 if (1)
4776 struct loop *loop = alloc_loop ();
4777 loop->header = l1_bb;
4778 /* The loop may have multiple latches. */
4779 add_loop (loop, outer_loop);
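/* Editorial sketch, not GCC code: the overall shape of the code emitted by
   this function for a non-collapsed, non-combined loop, written as plain C.
   GOMP_loop_foo_start/next stand for the schedule-specific libgomp entry
   points picked by the caller; argument lists follow the pseudocode in the
   comment before this function, and "<" stands for the loop condition.

     long istart0, iend0;

     if (GOMP_loop_foo_start (n1, n2, step, chunk, &istart0, &iend0))
       do
         {
           long iend = iend0;
           for (long v = istart0; v < iend; v += step)
             body (v);
         }
       while (GOMP_loop_foo_next (&istart0, &iend0));
     GOMP_loop_end ();          // or GOMP_loop_end_nowait, as chosen above
   */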
4785 /* A subroutine of expand_omp_for. Generate code for a parallel
4786 loop with static schedule and no specified chunk size. Given
4787 parameters:
4789 for (V = N1; V cond N2; V += STEP) BODY;
4791 where COND is "<" or ">", we generate pseudocode
4793 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
4794 if (cond is <)
4795 adj = STEP - 1;
4796 else
4797 adj = STEP + 1;
4798 if ((__typeof (V)) -1 > 0 && cond is >)
4799 n = -(adj + N2 - N1) / -STEP;
4800 else
4801 n = (adj + N2 - N1) / STEP;
4802 q = n / nthreads;
4803 tt = n % nthreads;
4804 if (threadid < tt) goto L3; else goto L4;
4806 tt = 0;
4807 q = q + 1;
4809 s0 = q * threadid + tt;
4810 e0 = s0 + q;
4811 V = s0 * STEP + N1;
4812 if (s0 >= e0) goto L2; else goto L0;
4814 e = e0 * STEP + N1;
4816 BODY;
4817 V += STEP;
4818 if (V cond e) goto L1;
4822 static void
4823 expand_omp_for_static_nochunk (struct omp_region *region,
4824 struct omp_for_data *fd)
4826 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
4827 tree type, itype, vmain, vback;
4828 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4829 basic_block body_bb, cont_bb;
4830 basic_block fin_bb;
4831 gimple_stmt_iterator gsi;
4832 gimple stmt;
4833 edge ep;
4835 itype = type = TREE_TYPE (fd->loop.v);
4836 if (POINTER_TYPE_P (type))
4837 itype = signed_type_for (type);
4839 entry_bb = region->entry;
4840 cont_bb = region->cont;
4841 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4842 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4843 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4844 body_bb = single_succ (seq_start_bb);
4845 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4846 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4847 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4848 exit_bb = region->exit;
4850 /* Iteration space partitioning goes in ENTRY_BB. */
4851 gsi = gsi_last_bb (entry_bb);
4852 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4854 t = fold_binary (fd->loop.cond_code, boolean_type_node,
4855 fold_convert (type, fd->loop.n1),
4856 fold_convert (type, fd->loop.n2));
4857 if (TYPE_UNSIGNED (type)
4858 && (t == NULL_TREE || !integer_onep (t)))
4860 tree n1, n2;
4861 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
4862 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
4863 true, GSI_SAME_STMT);
4864 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
4865 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
4866 true, GSI_SAME_STMT);
4867 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
4868 NULL_TREE, NULL_TREE);
4869 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4870 if (walk_tree (gimple_cond_lhs_ptr (stmt),
4871 expand_omp_regimplify_p, NULL, NULL)
4872 || walk_tree (gimple_cond_rhs_ptr (stmt),
4873 expand_omp_regimplify_p, NULL, NULL))
4875 gsi = gsi_for_stmt (stmt);
4876 gimple_regimplify_operands (stmt, &gsi);
4878 ep = split_block (entry_bb, stmt);
4879 ep->flags = EDGE_TRUE_VALUE;
4880 entry_bb = ep->dest;
4881 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
4882 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
4883 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
4884 if (gimple_in_ssa_p (cfun))
4886 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
4887 for (gsi = gsi_start_phis (fin_bb);
4888 !gsi_end_p (gsi); gsi_next (&gsi))
4890 gimple phi = gsi_stmt (gsi);
4891 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
4892 ep, UNKNOWN_LOCATION);
4895 gsi = gsi_last_bb (entry_bb);
4898 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4899 t = fold_convert (itype, t);
4900 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4901 true, GSI_SAME_STMT);
4903 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4904 t = fold_convert (itype, t);
4905 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4906 true, GSI_SAME_STMT);
4908 fd->loop.n1
4909 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4910 true, NULL_TREE, true, GSI_SAME_STMT);
4911 fd->loop.n2
4912 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4913 true, NULL_TREE, true, GSI_SAME_STMT);
4914 fd->loop.step
4915 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4916 true, NULL_TREE, true, GSI_SAME_STMT);
4918 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4919 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4920 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4921 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4922 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4923 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4924 fold_build1 (NEGATE_EXPR, itype, t),
4925 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4926 else
4927 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4928 t = fold_convert (itype, t);
4929 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4931 q = create_tmp_reg (itype, "q");
4932 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4933 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4934 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4936 tt = create_tmp_reg (itype, "tt");
4937 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4938 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4939 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
4941 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4942 stmt = gimple_build_cond_empty (t);
4943 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4945 second_bb = split_block (entry_bb, stmt)->dest;
4946 gsi = gsi_last_bb (second_bb);
4947 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4949 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4950 GSI_SAME_STMT);
4951 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4952 build_int_cst (itype, 1));
4953 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4955 third_bb = split_block (second_bb, stmt)->dest;
4956 gsi = gsi_last_bb (third_bb);
4957 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4959 t = build2 (MULT_EXPR, itype, q, threadid);
4960 t = build2 (PLUS_EXPR, itype, t, tt);
4961 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4963 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4964 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4966 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4967 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4969 /* Remove the GIMPLE_OMP_FOR statement. */
4970 gsi_remove (&gsi, true);
4972 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4973 gsi = gsi_start_bb (seq_start_bb);
4975 t = fold_convert (itype, s0);
4976 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4977 if (POINTER_TYPE_P (type))
4978 t = fold_build_pointer_plus (fd->loop.n1, t);
4979 else
4980 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4981 t = force_gimple_operand_gsi (&gsi, t,
4982 DECL_P (fd->loop.v)
4983 && TREE_ADDRESSABLE (fd->loop.v),
4984 NULL_TREE, false, GSI_CONTINUE_LINKING);
4985 stmt = gimple_build_assign (fd->loop.v, t);
4986 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4988 t = fold_convert (itype, e0);
4989 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4990 if (POINTER_TYPE_P (type))
4991 t = fold_build_pointer_plus (fd->loop.n1, t);
4992 else
4993 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4994 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4995 false, GSI_CONTINUE_LINKING);
4997 /* The code controlling the sequential loop replaces the
4998 GIMPLE_OMP_CONTINUE. */
4999 gsi = gsi_last_bb (cont_bb);
5000 stmt = gsi_stmt (gsi);
5001 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5002 vmain = gimple_omp_continue_control_use (stmt);
5003 vback = gimple_omp_continue_control_def (stmt);
5005 if (POINTER_TYPE_P (type))
5006 t = fold_build_pointer_plus (vmain, fd->loop.step);
5007 else
5008 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
5009 t = force_gimple_operand_gsi (&gsi, t,
5010 DECL_P (vback) && TREE_ADDRESSABLE (vback),
5011 NULL_TREE, true, GSI_SAME_STMT);
5012 stmt = gimple_build_assign (vback, t);
5013 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5015 t = build2 (fd->loop.cond_code, boolean_type_node,
5016 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e);
5017 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
5019 /* Remove the GIMPLE_OMP_CONTINUE statement. */
5020 gsi_remove (&gsi, true);
5022 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
5023 gsi = gsi_last_bb (exit_bb);
5024 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
5025 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
5026 false, GSI_SAME_STMT);
5027 gsi_remove (&gsi, true);
5029 /* Connect all the blocks. */
5030 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
5031 ep->probability = REG_BR_PROB_BASE / 4 * 3;
5032 ep = find_edge (entry_bb, second_bb);
5033 ep->flags = EDGE_TRUE_VALUE;
5034 ep->probability = REG_BR_PROB_BASE / 4;
5035 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
5036 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
5038 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
5039 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
5041 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
5042 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
5043 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
5044 set_immediate_dominator (CDI_DOMINATORS, body_bb,
5045 recompute_dominator (CDI_DOMINATORS, body_bb));
5046 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
5047 recompute_dominator (CDI_DOMINATORS, fin_bb));
5049 struct loop *loop = alloc_loop ();
5050 loop->header = body_bb;
5051 loop->latch = cont_bb;
5052 add_loop (loop, body_bb->loop_father);
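/* Editorial sketch, not GCC code: the static no-chunk partitioning computed
   above, as a standalone C program.  With n = 10 logical iterations and
   nthreads = 4, threads 0 and 1 receive 3 iterations and threads 2 and 3
   receive 2, which is what the q/tt adjustment in the pseudocode achieves.

     #include <stdio.h>

     int
     main (void)
     {
       long n = 10, nthreads = 4;
       for (long threadid = 0; threadid < nthreads; threadid++)
         {
           long q = n / nthreads;        // 2
           long tt = n % nthreads;       // 2 leftover iterations
           if (threadid < tt)
             {
               tt = 0;
               q = q + 1;                // the first n % nthreads threads take one extra
             }
           long s0 = q * threadid + tt;
           long e0 = s0 + q;
           printf ("thread %ld: [%ld, %ld)\n", threadid, s0, e0);
         }
       return 0;
     }
   */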
5056 /* A subroutine of expand_omp_for. Generate code for a parallel
5057 loop with static schedule and a specified chunk size. Given
5058 parameters:
5060 for (V = N1; V cond N2; V += STEP) BODY;
5062 where COND is "<" or ">", we generate pseudocode
5064 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
5065 if (cond is <)
5066 adj = STEP - 1;
5067 else
5068 adj = STEP + 1;
5069 if ((__typeof (V)) -1 > 0 && cond is >)
5070 n = -(adj + N2 - N1) / -STEP;
5071 else
5072 n = (adj + N2 - N1) / STEP;
5073 trip = 0;
5074 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
5075 here so that V is defined
5076 if the loop is not entered
5078 s0 = (trip * nthreads + threadid) * CHUNK;
5079 e0 = min(s0 + CHUNK, n);
5080 if (s0 < n) goto L1; else goto L4;
5082 V = s0 * STEP + N1;
5083 e = e0 * STEP + N1;
5085 BODY;
5086 V += STEP;
5087 if (V cond e) goto L2; else goto L3;
5089 trip += 1;
5090 goto L0;
5094 static void
5095 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
5097 tree n, s0, e0, e, t;
5098 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
5099 tree type, itype, v_main, v_back, v_extra;
5100 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
5101 basic_block trip_update_bb, cont_bb, fin_bb;
5102 gimple_stmt_iterator si;
5103 gimple stmt;
5104 edge se;
5106 itype = type = TREE_TYPE (fd->loop.v);
5107 if (POINTER_TYPE_P (type))
5108 itype = signed_type_for (type);
5110 entry_bb = region->entry;
5111 se = split_block (entry_bb, last_stmt (entry_bb));
5112 entry_bb = se->src;
5113 iter_part_bb = se->dest;
5114 cont_bb = region->cont;
5115 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
5116 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
5117 == FALLTHRU_EDGE (cont_bb)->dest);
5118 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
5119 body_bb = single_succ (seq_start_bb);
5120 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
5121 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5122 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
5123 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
5124 exit_bb = region->exit;
5126 /* Trip and adjustment setup goes in ENTRY_BB. */
5127 si = gsi_last_bb (entry_bb);
5128 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
5130 t = fold_binary (fd->loop.cond_code, boolean_type_node,
5131 fold_convert (type, fd->loop.n1),
5132 fold_convert (type, fd->loop.n2));
5133 if (TYPE_UNSIGNED (type)
5134 && (t == NULL_TREE || !integer_onep (t)))
5136 tree n1, n2;
5137 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
5138 n1 = force_gimple_operand_gsi (&si, n1, true, NULL_TREE,
5139 true, GSI_SAME_STMT);
5140 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
5141 n2 = force_gimple_operand_gsi (&si, n2, true, NULL_TREE,
5142 true, GSI_SAME_STMT);
5143 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
5144 NULL_TREE, NULL_TREE);
5145 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5146 if (walk_tree (gimple_cond_lhs_ptr (stmt),
5147 expand_omp_regimplify_p, NULL, NULL)
5148 || walk_tree (gimple_cond_rhs_ptr (stmt),
5149 expand_omp_regimplify_p, NULL, NULL))
5151 si = gsi_for_stmt (stmt);
5152 gimple_regimplify_operands (stmt, &si);
5154 se = split_block (entry_bb, stmt);
5155 se->flags = EDGE_TRUE_VALUE;
5156 entry_bb = se->dest;
5157 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
5158 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
5159 se->probability = REG_BR_PROB_BASE / 2000 - 1;
5160 if (gimple_in_ssa_p (cfun))
5162 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
5163 for (si = gsi_start_phis (fin_bb);
5164 !gsi_end_p (si); gsi_next (&si))
5166 gimple phi = gsi_stmt (si);
5167 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
5168 se, UNKNOWN_LOCATION);
5171 si = gsi_last_bb (entry_bb);
5174 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
5175 t = fold_convert (itype, t);
5176 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5177 true, GSI_SAME_STMT);
5179 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
5180 t = fold_convert (itype, t);
5181 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5182 true, GSI_SAME_STMT);
5184 fd->loop.n1
5185 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
5186 true, NULL_TREE, true, GSI_SAME_STMT);
5187 fd->loop.n2
5188 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
5189 true, NULL_TREE, true, GSI_SAME_STMT);
5190 fd->loop.step
5191 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
5192 true, NULL_TREE, true, GSI_SAME_STMT);
5193 fd->chunk_size
5194 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
5195 true, NULL_TREE, true, GSI_SAME_STMT);
5197 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
5198 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
5199 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
5200 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
5201 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
5202 t = fold_build2 (TRUNC_DIV_EXPR, itype,
5203 fold_build1 (NEGATE_EXPR, itype, t),
5204 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
5205 else
5206 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
5207 t = fold_convert (itype, t);
5208 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5209 true, GSI_SAME_STMT);
5211 trip_var = create_tmp_reg (itype, ".trip");
5212 if (gimple_in_ssa_p (cfun))
5214 trip_init = make_ssa_name (trip_var, NULL);
5215 trip_main = make_ssa_name (trip_var, NULL);
5216 trip_back = make_ssa_name (trip_var, NULL);
5218 else
5220 trip_init = trip_var;
5221 trip_main = trip_var;
5222 trip_back = trip_var;
5225 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
5226 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5228 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
5229 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
5230 if (POINTER_TYPE_P (type))
5231 t = fold_build_pointer_plus (fd->loop.n1, t);
5232 else
5233 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
5234 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5235 true, GSI_SAME_STMT);
5237 /* Remove the GIMPLE_OMP_FOR. */
5238 gsi_remove (&si, true);
5240 /* Iteration space partitioning goes in ITER_PART_BB. */
5241 si = gsi_last_bb (iter_part_bb);
5243 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
5244 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
5245 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
5246 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5247 false, GSI_CONTINUE_LINKING);
5249 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
5250 t = fold_build2 (MIN_EXPR, itype, t, n);
5251 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5252 false, GSI_CONTINUE_LINKING);
5254 t = build2 (LT_EXPR, boolean_type_node, s0, n);
5255 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
5257 /* Setup code for sequential iteration goes in SEQ_START_BB. */
5258 si = gsi_start_bb (seq_start_bb);
5260 t = fold_convert (itype, s0);
5261 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
5262 if (POINTER_TYPE_P (type))
5263 t = fold_build_pointer_plus (fd->loop.n1, t);
5264 else
5265 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
5266 t = force_gimple_operand_gsi (&si, t,
5267 DECL_P (fd->loop.v)
5268 && TREE_ADDRESSABLE (fd->loop.v),
5269 NULL_TREE, false, GSI_CONTINUE_LINKING);
5270 stmt = gimple_build_assign (fd->loop.v, t);
5271 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
5273 t = fold_convert (itype, e0);
5274 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
5275 if (POINTER_TYPE_P (type))
5276 t = fold_build_pointer_plus (fd->loop.n1, t);
5277 else
5278 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
5279 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5280 false, GSI_CONTINUE_LINKING);
5282 /* The code controlling the sequential loop goes in CONT_BB,
5283 replacing the GIMPLE_OMP_CONTINUE. */
5284 si = gsi_last_bb (cont_bb);
5285 stmt = gsi_stmt (si);
5286 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5287 v_main = gimple_omp_continue_control_use (stmt);
5288 v_back = gimple_omp_continue_control_def (stmt);
5290 if (POINTER_TYPE_P (type))
5291 t = fold_build_pointer_plus (v_main, fd->loop.step);
5292 else
5293 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
5294 if (DECL_P (v_back) && TREE_ADDRESSABLE (v_back))
5295 t = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5296 true, GSI_SAME_STMT);
5297 stmt = gimple_build_assign (v_back, t);
5298 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5300 t = build2 (fd->loop.cond_code, boolean_type_node,
5301 DECL_P (v_back) && TREE_ADDRESSABLE (v_back)
5302 ? t : v_back, e);
5303 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
5305 /* Remove GIMPLE_OMP_CONTINUE. */
5306 gsi_remove (&si, true);
5308 /* Trip update code goes into TRIP_UPDATE_BB. */
5309 si = gsi_start_bb (trip_update_bb);
5311 t = build_int_cst (itype, 1);
5312 t = build2 (PLUS_EXPR, itype, trip_main, t);
5313 stmt = gimple_build_assign (trip_back, t);
5314 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
5316 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
5317 si = gsi_last_bb (exit_bb);
5318 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
5319 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
5320 false, GSI_SAME_STMT);
5321 gsi_remove (&si, true);
5323 /* Connect the new blocks. */
5324 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
5325 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
5327 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
5328 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
5330 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
5332 if (gimple_in_ssa_p (cfun))
5334 gimple_stmt_iterator psi;
5335 gimple phi;
5336 edge re, ene;
5337 edge_var_map_vector *head;
5338 edge_var_map *vm;
5339 size_t i;
5341 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
5342 remove arguments of the phi nodes in fin_bb. We need to create
5343 appropriate phi nodes in iter_part_bb instead. */
5344 se = single_pred_edge (fin_bb);
5345 re = single_succ_edge (trip_update_bb);
5346 head = redirect_edge_var_map_vector (re);
5347 ene = single_succ_edge (entry_bb);
5349 psi = gsi_start_phis (fin_bb);
5350 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
5351 gsi_next (&psi), ++i)
5353 gimple nphi;
5354 source_location locus;
5356 phi = gsi_stmt (psi);
5357 t = gimple_phi_result (phi);
5358 gcc_assert (t == redirect_edge_var_map_result (vm));
5359 nphi = create_phi_node (t, iter_part_bb);
5361 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
5362 locus = gimple_phi_arg_location_from_edge (phi, se);
5364 /* A special case -- fd->loop.v is not yet computed in
5365 iter_part_bb, so we need to use v_extra instead. */
5366 if (t == fd->loop.v)
5367 t = v_extra;
5368 add_phi_arg (nphi, t, ene, locus);
5369 locus = redirect_edge_var_map_location (vm);
5370 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
5372 gcc_assert (!gsi_end_p (psi) && i == head->length ());
5373 redirect_edge_var_map_clear (re);
5374 while (1)
5376 psi = gsi_start_phis (fin_bb);
5377 if (gsi_end_p (psi))
5378 break;
5379 remove_phi_node (&psi, false);
5382 /* Make phi node for trip. */
5383 phi = create_phi_node (trip_main, iter_part_bb);
5384 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
5385 UNKNOWN_LOCATION);
5386 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
5387 UNKNOWN_LOCATION);
5390 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
5391 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
5392 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
5393 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
5394 recompute_dominator (CDI_DOMINATORS, fin_bb));
5395 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
5396 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
5397 set_immediate_dominator (CDI_DOMINATORS, body_bb,
5398 recompute_dominator (CDI_DOMINATORS, body_bb));
5400 struct loop *trip_loop = alloc_loop ();
5401 trip_loop->header = iter_part_bb;
5402 trip_loop->latch = trip_update_bb;
5403 add_loop (trip_loop, iter_part_bb->loop_father);
5405 struct loop *loop = alloc_loop ();
5406 loop->header = body_bb;
5407 loop->latch = cont_bb;
5408 add_loop (loop, trip_loop);
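/* Editorial sketch, not GCC code: the chunked static schedule computed
   above.  Each thread walks "trips"; on trip T it owns the logical
   iterations [s0, e0) given below.  n, nthreads and chunk are illustrative.

     long n = 10, nthreads = 4, chunk = 2;
     for (long threadid = 0; threadid < nthreads; threadid++)
       for (long trip = 0; ; trip++)
         {
           long s0 = (trip * nthreads + threadid) * chunk;
           long e0 = s0 + chunk < n ? s0 + chunk : n;   // min (s0 + CHUNK, n)
           if (s0 >= n)
             break;                                     // the "goto L4" exit
           // logical iterations s0 .. e0 - 1 execute here (L1/L2)
         }
   */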
5411 /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
5412 loop. Given parameters:
5414 for (V = N1; V cond N2; V += STEP) BODY;
5416 where COND is "<" or ">", we generate pseudocode
5418 V = N1;
5419 goto L1;
5421 BODY;
5422 V += STEP;
5424 if (V cond N2) goto L0; else goto L2;
5427 For collapsed loops, given parameters:
5428 collapse(3)
5429 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
5430 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
5431 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
5432 BODY;
5434 we generate pseudocode
5436 if (cond3 is <)
5437 adj = STEP3 - 1;
5438 else
5439 adj = STEP3 + 1;
5440 count3 = (adj + N32 - N31) / STEP3;
5441 if (cond2 is <)
5442 adj = STEP2 - 1;
5443 else
5444 adj = STEP2 + 1;
5445 count2 = (adj + N22 - N21) / STEP2;
5446 if (cond1 is <)
5447 adj = STEP1 - 1;
5448 else
5449 adj = STEP1 + 1;
5450 count1 = (adj + N12 - N11) / STEP1;
5451 count = count1 * count2 * count3;
5452 V = 0;
5453 V1 = N11;
5454 V2 = N21;
5455 V3 = N31;
5456 goto L1;
5458 BODY;
5459 V += 1;
5460 V3 += STEP3;
5461 V2 += (V3 cond3 N32) ? 0 : STEP2;
5462 V3 = (V3 cond3 N32) ? V3 : N31;
5463 V1 += (V2 cond2 N22) ? 0 : STEP1;
5464 V2 = (V2 cond2 N22) ? V2 : N21;
5466 if (V < count) goto L0; else goto L2;
5471 static void
5472 expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
5474 tree type, t;
5475 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
5476 gimple_stmt_iterator gsi;
5477 gimple stmt;
5478 bool broken_loop = region->cont == NULL;
5479 edge e, ne;
5480 tree *counts = NULL;
5481 int i;
5482 tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5483 OMP_CLAUSE_SAFELEN);
5484 tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5485 OMP_CLAUSE__SIMDUID_);
5486 tree n2;
5488 type = TREE_TYPE (fd->loop.v);
5489 entry_bb = region->entry;
5490 cont_bb = region->cont;
5491 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5492 gcc_assert (broken_loop
5493 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
5494 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
5495 if (!broken_loop)
5497 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
5498 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5499 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
5500 l2_bb = BRANCH_EDGE (entry_bb)->dest;
5502 else
5504 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
5505 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
5506 l2_bb = single_succ (l1_bb);
5508 exit_bb = region->exit;
5509 l2_dom_bb = NULL;
5511 gsi = gsi_last_bb (entry_bb);
5513 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5514 /* Not needed in SSA form right now. */
5515 gcc_assert (!gimple_in_ssa_p (cfun));
5516 if (fd->collapse > 1)
5518 int first_zero_iter = -1;
5519 basic_block zero_iter_bb = l2_bb;
5521 counts = XALLOCAVEC (tree, fd->collapse);
5522 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5523 zero_iter_bb, first_zero_iter,
5524 l2_dom_bb);
5526 if (l2_dom_bb == NULL)
5527 l2_dom_bb = l1_bb;
5529 n2 = fd->loop.n2;
5530 if (0)
5531 /* Placeholder for gimple_omp_for_combined_into_p () in
5532 the upcoming gomp-4_0-branch merge. */;
5533 else
5535 expand_omp_build_assign (&gsi, fd->loop.v,
5536 fold_convert (type, fd->loop.n1));
5537 if (fd->collapse > 1)
5538 for (i = 0; i < fd->collapse; i++)
5540 tree itype = TREE_TYPE (fd->loops[i].v);
5541 if (POINTER_TYPE_P (itype))
5542 itype = signed_type_for (itype);
5543 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
5544 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
5548 /* Remove the GIMPLE_OMP_FOR statement. */
5549 gsi_remove (&gsi, true);
5551 if (!broken_loop)
5553 /* Code to control the increment goes in the CONT_BB. */
5554 gsi = gsi_last_bb (cont_bb);
5555 stmt = gsi_stmt (gsi);
5556 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5558 if (POINTER_TYPE_P (type))
5559 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
5560 else
5561 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
5562 expand_omp_build_assign (&gsi, fd->loop.v, t);
5564 if (fd->collapse > 1)
5566 i = fd->collapse - 1;
5567 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
5569 t = fold_convert (sizetype, fd->loops[i].step);
5570 t = fold_build_pointer_plus (fd->loops[i].v, t);
5572 else
5574 t = fold_convert (TREE_TYPE (fd->loops[i].v),
5575 fd->loops[i].step);
5576 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
5577 fd->loops[i].v, t);
5579 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
5581 for (i = fd->collapse - 1; i > 0; i--)
5583 tree itype = TREE_TYPE (fd->loops[i].v);
5584 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
5585 if (POINTER_TYPE_P (itype2))
5586 itype2 = signed_type_for (itype2);
5587 t = build3 (COND_EXPR, itype2,
5588 build2 (fd->loops[i].cond_code, boolean_type_node,
5589 fd->loops[i].v,
5590 fold_convert (itype, fd->loops[i].n2)),
5591 build_int_cst (itype2, 0),
5592 fold_convert (itype2, fd->loops[i - 1].step));
5593 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
5594 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
5595 else
5596 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
5597 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
5599 t = build3 (COND_EXPR, itype,
5600 build2 (fd->loops[i].cond_code, boolean_type_node,
5601 fd->loops[i].v,
5602 fold_convert (itype, fd->loops[i].n2)),
5603 fd->loops[i].v,
5604 fold_convert (itype, fd->loops[i].n1));
5605 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
5609 /* Remove GIMPLE_OMP_CONTINUE. */
5610 gsi_remove (&gsi, true);
5613 /* Emit the condition in L1_BB. */
5614 gsi = gsi_start_bb (l1_bb);
5616 t = fold_convert (type, n2);
5617 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5618 false, GSI_CONTINUE_LINKING);
5619 t = build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t);
5620 stmt = gimple_build_cond_empty (t);
5621 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5622 if (walk_tree (gimple_cond_lhs_ptr (stmt), expand_omp_regimplify_p,
5623 NULL, NULL)
5624 || walk_tree (gimple_cond_rhs_ptr (stmt), expand_omp_regimplify_p,
5625 NULL, NULL))
5627 gsi = gsi_for_stmt (stmt);
5628 gimple_regimplify_operands (stmt, &gsi);
5631 /* Remove GIMPLE_OMP_RETURN. */
5632 gsi = gsi_last_bb (exit_bb);
5633 gsi_remove (&gsi, true);
5635 /* Connect the new blocks. */
5636 remove_edge (FALLTHRU_EDGE (entry_bb));
5638 if (!broken_loop)
5640 remove_edge (BRANCH_EDGE (entry_bb));
5641 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
5643 e = BRANCH_EDGE (l1_bb);
5644 ne = FALLTHRU_EDGE (l1_bb);
5645 e->flags = EDGE_TRUE_VALUE;
5647 else
5649 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
5651 ne = single_succ_edge (l1_bb);
5652 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
5655 ne->flags = EDGE_FALSE_VALUE;
5656 e->probability = REG_BR_PROB_BASE * 7 / 8;
5657 ne->probability = REG_BR_PROB_BASE / 8;
5659 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
5660 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
5661 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
5663 if (!broken_loop)
5665 struct loop *loop = alloc_loop ();
5666 loop->header = l1_bb;
5667 loop->latch = e->dest;
5668 add_loop (loop, l1_bb->loop_father);
5669 if (safelen == NULL_TREE)
5670 loop->safelen = INT_MAX;
5671 else
5673 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
5674 if (!host_integerp (safelen, 1)
5675 || (unsigned HOST_WIDE_INT) tree_low_cst (safelen, 1)
5676 > INT_MAX)
5677 loop->safelen = INT_MAX;
5678 else
5679 loop->safelen = tree_low_cst (safelen, 1);
5680 if (loop->safelen == 1)
5681 loop->safelen = 0;
5683 if (simduid)
5685 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
5686 cfun->has_simduid_loops = true;
5688 /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
5689 the loop. */
5690 if ((flag_tree_loop_vectorize
5691 || (!global_options_set.x_flag_tree_loop_vectorize
5692 && !global_options_set.x_flag_tree_vectorize))
5693 && loop->safelen > 1)
5695 loop->force_vect = true;
5696 cfun->has_force_vect_loops = true;
5702 /* Expand the OpenMP loop defined by REGION. */
5704 static void
5705 expand_omp_for (struct omp_region *region)
5707 struct omp_for_data fd;
5708 struct omp_for_data_loop *loops;
5710 loops
5711 = (struct omp_for_data_loop *)
5712 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
5713 * sizeof (struct omp_for_data_loop));
5714 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
5715 region->sched_kind = fd.sched_kind;
5717 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
5718 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
5719 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
5720 if (region->cont)
5722 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
5723 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
5724 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
5726 else
5727 /* If there isn't a continue then this is a degenerate case where
5728 the introduction of abnormal edges during lowering will prevent
5729 original loops from being detected. Fix that up. */
5730 loops_state_set (LOOPS_NEED_FIXUP);
5732 if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_SIMD)
5733 expand_omp_simd (region, &fd);
5734 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
5735 && !fd.have_ordered
5736 && fd.collapse == 1
5737 && region->cont != NULL)
5739 if (fd.chunk_size == NULL)
5740 expand_omp_for_static_nochunk (region, &fd);
5741 else
5742 expand_omp_for_static_chunk (region, &fd);
5744 else
5746 int fn_index, start_ix, next_ix;
5748 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
5749 == GF_OMP_FOR_KIND_FOR);
5750 if (fd.chunk_size == NULL
5751 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
5752 fd.chunk_size = integer_zero_node;
5753 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
5754 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
5755 ? 3 : fd.sched_kind;
5756 fn_index += fd.have_ordered * 4;
5757 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
5758 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
5759 if (fd.iter_type == long_long_unsigned_type_node)
5761 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
5762 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
5763 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
5764 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
5766 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
5767 (enum built_in_function) next_ix);
5770 if (gimple_in_ssa_p (cfun))
5771 update_ssa (TODO_update_ssa_only_virtuals);
5775 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
5777 v = GOMP_sections_start (n);
5779 switch (v)
5781 case 0:
5782 goto L2;
5783 case 1:
5784 section 1;
5785 goto L1;
5786 case 2:
5788 case n:
5790 default:
5791 abort ();
5794 v = GOMP_sections_next ();
5795 goto L0;
5797 reduction;
5799 If this is a combined parallel sections, replace the call to
5800 GOMP_sections_start with call to GOMP_sections_next. */
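/* Illustrative example (not part of the original source): a user-level
   construct such as

	#pragma omp sections
	  {
	    #pragma omp section
	      foo ();
	    #pragma omp section
	      bar ();
	  }

   is expanded into the dispatch loop sketched above, where
   GOMP_sections_start (2) returns the index of the first section this
   thread should run and GOMP_sections_next () returns each subsequent
   index, with 0 meaning there is no more work.  */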
5802 static void
5803 expand_omp_sections (struct omp_region *region)
5805 tree t, u, vin = NULL, vmain, vnext, l2;
5806 vec<tree> label_vec;
5807 unsigned len;
5808 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
5809 gimple_stmt_iterator si, switch_si;
5810 gimple sections_stmt, stmt, cont;
5811 edge_iterator ei;
5812 edge e;
5813 struct omp_region *inner;
5814 unsigned i, casei;
5815 bool exit_reachable = region->cont != NULL;
5817 gcc_assert (region->exit != NULL);
5818 entry_bb = region->entry;
5819 l0_bb = single_succ (entry_bb);
5820 l1_bb = region->cont;
5821 l2_bb = region->exit;
5822 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
5823 l2 = gimple_block_label (l2_bb);
5824 else
5826 /* This can happen if there are reductions. */
5827 len = EDGE_COUNT (l0_bb->succs);
5828 gcc_assert (len > 0);
5829 e = EDGE_SUCC (l0_bb, len - 1);
5830 si = gsi_last_bb (e->dest);
5831 l2 = NULL_TREE;
5832 if (gsi_end_p (si)
5833 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
5834 l2 = gimple_block_label (e->dest);
5835 else
5836 FOR_EACH_EDGE (e, ei, l0_bb->succs)
5838 si = gsi_last_bb (e->dest);
5839 if (gsi_end_p (si)
5840 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
5842 l2 = gimple_block_label (e->dest);
5843 break;
5847 if (exit_reachable)
5848 default_bb = create_empty_bb (l1_bb->prev_bb);
5849 else
5850 default_bb = create_empty_bb (l0_bb);
5852 /* We will build a switch() with enough cases for all the
5853 GIMPLE_OMP_SECTION regions, a '0' case to handle running out of work,
5854 and a default case to abort if something goes wrong. */
5855 len = EDGE_COUNT (l0_bb->succs);
5857 /* Use vec::quick_push on label_vec throughout, since we know the size
5858 in advance. */
5859 label_vec.create (len);
5861 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
5862 GIMPLE_OMP_SECTIONS statement. */
5863 si = gsi_last_bb (entry_bb);
5864 sections_stmt = gsi_stmt (si);
5865 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
5866 vin = gimple_omp_sections_control (sections_stmt);
5867 if (!is_combined_parallel (region))
5869 /* If we are not inside a combined parallel+sections region,
5870 call GOMP_sections_start. */
5871 t = build_int_cst (unsigned_type_node,
5872 exit_reachable ? len - 1 : len);
5873 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
5874 stmt = gimple_build_call (u, 1, t);
5876 else
5878 /* Otherwise, call GOMP_sections_next. */
5879 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
5880 stmt = gimple_build_call (u, 0);
5882 gimple_call_set_lhs (stmt, vin);
5883 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5884 gsi_remove (&si, true);
5886 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
5887 L0_BB. */
5888 switch_si = gsi_last_bb (l0_bb);
5889 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
5890 if (exit_reachable)
5892 cont = last_stmt (l1_bb);
5893 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
5894 vmain = gimple_omp_continue_control_use (cont);
5895 vnext = gimple_omp_continue_control_def (cont);
5897 else
5899 vmain = vin;
5900 vnext = NULL_TREE;
5903 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
5904 label_vec.quick_push (t);
5905 i = 1;
5907 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
5908 for (inner = region->inner, casei = 1;
5909 inner;
5910 inner = inner->next, i++, casei++)
5912 basic_block s_entry_bb, s_exit_bb;
5914 /* Skip optional reduction region. */
5915 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
5917 --i;
5918 --casei;
5919 continue;
5922 s_entry_bb = inner->entry;
5923 s_exit_bb = inner->exit;
5925 t = gimple_block_label (s_entry_bb);
5926 u = build_int_cst (unsigned_type_node, casei);
5927 u = build_case_label (u, NULL, t);
5928 label_vec.quick_push (u);
5930 si = gsi_last_bb (s_entry_bb);
5931 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
5932 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
5933 gsi_remove (&si, true);
5934 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
5936 if (s_exit_bb == NULL)
5937 continue;
5939 si = gsi_last_bb (s_exit_bb);
5940 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
5941 gsi_remove (&si, true);
5943 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
5946 /* Error handling code goes in DEFAULT_BB. */
5947 t = gimple_block_label (default_bb);
5948 u = build_case_label (NULL, NULL, t);
5949 make_edge (l0_bb, default_bb, 0);
5950 if (current_loops)
5951 add_bb_to_loop (default_bb, current_loops->tree_root);
5953 stmt = gimple_build_switch (vmain, u, label_vec);
5954 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
5955 gsi_remove (&switch_si, true);
5956 label_vec.release ();
5958 si = gsi_start_bb (default_bb);
5959 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
5960 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
5962 if (exit_reachable)
5964 tree bfn_decl;
5966 /* Code to get the next section goes in L1_BB. */
5967 si = gsi_last_bb (l1_bb);
5968 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
5970 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
5971 stmt = gimple_build_call (bfn_decl, 0);
5972 gimple_call_set_lhs (stmt, vnext);
5973 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5974 gsi_remove (&si, true);
5976 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
5979 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
5980 si = gsi_last_bb (l2_bb);
5981 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
5982 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
5983 else
5984 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
5985 stmt = gimple_build_call (t, 0);
5986 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5987 gsi_remove (&si, true);
5989 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
5993 /* Expand code for an OpenMP single directive. We've already expanded
5994 much of the code, here we simply place the GOMP_barrier call. */
5996 static void
5997 expand_omp_single (struct omp_region *region)
5999 basic_block entry_bb, exit_bb;
6000 gimple_stmt_iterator si;
6001 bool need_barrier = false;
6003 entry_bb = region->entry;
6004 exit_bb = region->exit;
6006 si = gsi_last_bb (entry_bb);
6007 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
6008 be removed. We need to ensure that the thread that entered the single
6009 does not exit before the data is copied out by the other threads. */
6010 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
6011 OMP_CLAUSE_COPYPRIVATE))
6012 need_barrier = true;
6013 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
6014 gsi_remove (&si, true);
6015 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
6017 si = gsi_last_bb (exit_bb);
6018 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
6019 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
6020 false, GSI_SAME_STMT);
6021 gsi_remove (&si, true);
6022 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
6026 /* Generic expansion for OpenMP synchronization directives: master,
6027 ordered and critical. All we need to do here is remove the entry
6028 and exit markers for REGION. */
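/* Illustrative note (not part of the original source): by this point a
   construct such as

	#pragma omp critical
	  x++;

   has already been rewritten by lower_omp_critical into explicit
   GOMP_critical_start ()/GOMP_critical_end () calls around the body, so
   the only remaining work is to delete the GIMPLE_OMP_CRITICAL and
   GIMPLE_OMP_RETURN marker statements and fix up the edge flags.  */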
6030 static void
6031 expand_omp_synch (struct omp_region *region)
6033 basic_block entry_bb, exit_bb;
6034 gimple_stmt_iterator si;
6036 entry_bb = region->entry;
6037 exit_bb = region->exit;
6039 si = gsi_last_bb (entry_bb);
6040 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
6041 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
6042 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
6043 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
6044 gsi_remove (&si, true);
6045 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
6047 if (exit_bb)
6049 si = gsi_last_bb (exit_bb);
6050 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
6051 gsi_remove (&si, true);
6052 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
6056 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
6057 operation as a normal volatile load. */
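/* Illustrative sketch (not part of the original source): for

	#pragma omp atomic read
	  v = x;

   with a 4-byte X, the GIMPLE_OMP_ATOMIC_LOAD/STORE pair is replaced by
   roughly

	v = __atomic_load_4 (&x, MEMMODEL_RELAXED);

   assuming the target provides the __atomic_load_N builtin for that
   access size.  */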
6059 static bool
6060 expand_omp_atomic_load (basic_block load_bb, tree addr,
6061 tree loaded_val, int index)
6063 enum built_in_function tmpbase;
6064 gimple_stmt_iterator gsi;
6065 basic_block store_bb;
6066 location_t loc;
6067 gimple stmt;
6068 tree decl, call, type, itype;
6070 gsi = gsi_last_bb (load_bb);
6071 stmt = gsi_stmt (gsi);
6072 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
6073 loc = gimple_location (stmt);
6075 /* ??? If the target does not implement atomic_load_optab[mode], and mode
6076 is smaller than word size, then expand_atomic_load assumes that the load
6077 is atomic. We could avoid the builtin entirely in this case. */
6079 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
6080 decl = builtin_decl_explicit (tmpbase);
6081 if (decl == NULL_TREE)
6082 return false;
6084 type = TREE_TYPE (loaded_val);
6085 itype = TREE_TYPE (TREE_TYPE (decl));
6087 call = build_call_expr_loc (loc, decl, 2, addr,
6088 build_int_cst (NULL, MEMMODEL_RELAXED));
6089 if (!useless_type_conversion_p (type, itype))
6090 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
6091 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
6093 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
6094 gsi_remove (&gsi, true);
6096 store_bb = single_succ (load_bb);
6097 gsi = gsi_last_bb (store_bb);
6098 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
6099 gsi_remove (&gsi, true);
6101 if (gimple_in_ssa_p (cfun))
6102 update_ssa (TODO_update_ssa_no_phi);
6104 return true;
6107 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
6108 operation as a normal volatile store. */
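/* Illustrative sketch (not part of the original source): for

	#pragma omp atomic write
	  x = expr;

   with a 4-byte X, the construct becomes roughly

	__atomic_store_4 (&x, expr, MEMMODEL_RELAXED);

   while a capture form that still needs the previous value uses
   __atomic_exchange_4 instead.  */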
6110 static bool
6111 expand_omp_atomic_store (basic_block load_bb, tree addr,
6112 tree loaded_val, tree stored_val, int index)
6114 enum built_in_function tmpbase;
6115 gimple_stmt_iterator gsi;
6116 basic_block store_bb = single_succ (load_bb);
6117 location_t loc;
6118 gimple stmt;
6119 tree decl, call, type, itype;
6120 enum machine_mode imode;
6121 bool exchange;
6123 gsi = gsi_last_bb (load_bb);
6124 stmt = gsi_stmt (gsi);
6125 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
6127 /* If the load value is needed, then this isn't a store but an exchange. */
6128 exchange = gimple_omp_atomic_need_value_p (stmt);
6130 gsi = gsi_last_bb (store_bb);
6131 stmt = gsi_stmt (gsi);
6132 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
6133 loc = gimple_location (stmt);
6135 /* ??? If the target does not implement atomic_store_optab[mode], and mode
6136 is smaller than word size, then expand_atomic_store assumes that the store
6137 is atomic. We could avoid the builtin entirely in this case. */
6139 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
6140 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
6141 decl = builtin_decl_explicit (tmpbase);
6142 if (decl == NULL_TREE)
6143 return false;
6145 type = TREE_TYPE (stored_val);
6147 /* Dig out the type of the function's second argument. */
6148 itype = TREE_TYPE (decl);
6149 itype = TYPE_ARG_TYPES (itype);
6150 itype = TREE_CHAIN (itype);
6151 itype = TREE_VALUE (itype);
6152 imode = TYPE_MODE (itype);
6154 if (exchange && !can_atomic_exchange_p (imode, true))
6155 return false;
6157 if (!useless_type_conversion_p (itype, type))
6158 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
6159 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
6160 build_int_cst (NULL, MEMMODEL_RELAXED));
6161 if (exchange)
6163 if (!useless_type_conversion_p (type, itype))
6164 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
6165 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
6168 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
6169 gsi_remove (&gsi, true);
6171 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
6172 gsi = gsi_last_bb (load_bb);
6173 gsi_remove (&gsi, true);
6175 if (gimple_in_ssa_p (cfun))
6176 update_ssa (TODO_update_ssa_no_phi);
6178 return true;
6181 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
6182 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
6183 size of the data type, and thus usable to find the index of the builtin
6184 decl. Returns false if the expression is not of the proper form. */
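/* Illustrative sketch (not part of the original source):

	#pragma omp atomic
	  x += 1;

   matches the pattern below and, for a 4-byte X, becomes roughly

	__atomic_fetch_add_4 (&x, 1, MEMMODEL_RELAXED);

   a capture of the updated value selects the __atomic_add_fetch_N
   variant instead.  */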
6186 static bool
6187 expand_omp_atomic_fetch_op (basic_block load_bb,
6188 tree addr, tree loaded_val,
6189 tree stored_val, int index)
6191 enum built_in_function oldbase, newbase, tmpbase;
6192 tree decl, itype, call;
6193 tree lhs, rhs;
6194 basic_block store_bb = single_succ (load_bb);
6195 gimple_stmt_iterator gsi;
6196 gimple stmt;
6197 location_t loc;
6198 enum tree_code code;
6199 bool need_old, need_new;
6200 enum machine_mode imode;
6202 /* We expect to find the following sequences:
6204 load_bb:
6205 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
6207 store_bb:
6208 val = tmp OP something; (or: something OP tmp)
6209 GIMPLE_OMP_ATOMIC_STORE (val)
6211 ???FIXME: Allow a more flexible sequence.
6212 Perhaps use data flow to pick the statements.
6216 gsi = gsi_after_labels (store_bb);
6217 stmt = gsi_stmt (gsi);
6218 loc = gimple_location (stmt);
6219 if (!is_gimple_assign (stmt))
6220 return false;
6221 gsi_next (&gsi);
6222 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
6223 return false;
6224 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
6225 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
6226 gcc_checking_assert (!need_old || !need_new);
6228 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
6229 return false;
6231 /* Check for one of the supported fetch-op operations. */
6232 code = gimple_assign_rhs_code (stmt);
6233 switch (code)
6235 case PLUS_EXPR:
6236 case POINTER_PLUS_EXPR:
6237 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
6238 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
6239 break;
6240 case MINUS_EXPR:
6241 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
6242 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
6243 break;
6244 case BIT_AND_EXPR:
6245 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
6246 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
6247 break;
6248 case BIT_IOR_EXPR:
6249 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
6250 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
6251 break;
6252 case BIT_XOR_EXPR:
6253 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
6254 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
6255 break;
6256 default:
6257 return false;
6260 /* Make sure the expression is of the proper form. */
6261 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
6262 rhs = gimple_assign_rhs2 (stmt);
6263 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
6264 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
6265 rhs = gimple_assign_rhs1 (stmt);
6266 else
6267 return false;
6269 tmpbase = ((enum built_in_function)
6270 ((need_new ? newbase : oldbase) + index + 1));
6271 decl = builtin_decl_explicit (tmpbase);
6272 if (decl == NULL_TREE)
6273 return false;
6274 itype = TREE_TYPE (TREE_TYPE (decl));
6275 imode = TYPE_MODE (itype);
6277 /* We could test all of the various optabs involved, but the fact of the
6278 matter is that (with the exception of i486 vs i586 and xadd) all targets
6279 that support any atomic operation optab also implement compare-and-swap.
6280 Let optabs.c take care of expanding any compare-and-swap loop. */
6281 if (!can_compare_and_swap_p (imode, true))
6282 return false;
6284 gsi = gsi_last_bb (load_bb);
6285 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
6287 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
6288 It only requires that the operation happen atomically. Thus we can
6289 use the RELAXED memory model. */
6290 call = build_call_expr_loc (loc, decl, 3, addr,
6291 fold_convert_loc (loc, itype, rhs),
6292 build_int_cst (NULL, MEMMODEL_RELAXED));
6294 if (need_old || need_new)
6296 lhs = need_old ? loaded_val : stored_val;
6297 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
6298 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
6300 else
6301 call = fold_convert_loc (loc, void_type_node, call);
6302 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
6303 gsi_remove (&gsi, true);
6305 gsi = gsi_last_bb (store_bb);
6306 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
6307 gsi_remove (&gsi, true);
6308 gsi = gsi_last_bb (store_bb);
6309 gsi_remove (&gsi, true);
6311 if (gimple_in_ssa_p (cfun))
6312 update_ssa (TODO_update_ssa_no_phi);
6314 return true;
6317 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
6319 oldval = *addr;
6320 repeat:
6321 newval = rhs; // with oldval replacing *addr in rhs
6322 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
6323 if (oldval != newval)
6324 goto repeat;
6326 INDEX is log2 of the size of the data type, and thus usable to find the
6327 index of the builtin decl. */
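/* Illustrative sketch (not part of the original source): a floating-point
   update such as

	#pragma omp atomic
	  d += 1.0;

   has no fetch-op builtin, so it is expanded into the compare-and-swap
   loop above: the double is view-converted to an 8-byte integer and
   __sync_val_compare_and_swap_8 is retried until the swap succeeds.  */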
6329 static bool
6330 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
6331 tree addr, tree loaded_val, tree stored_val,
6332 int index)
6334 tree loadedi, storedi, initial, new_storedi, old_vali;
6335 tree type, itype, cmpxchg, iaddr;
6336 gimple_stmt_iterator si;
6337 basic_block loop_header = single_succ (load_bb);
6338 gimple phi, stmt;
6339 edge e;
6340 enum built_in_function fncode;
6342 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
6343 order to use the RELAXED memory model effectively. */
6344 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
6345 + index + 1);
6346 cmpxchg = builtin_decl_explicit (fncode);
6347 if (cmpxchg == NULL_TREE)
6348 return false;
6349 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
6350 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
6352 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
6353 return false;
6355 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
6356 si = gsi_last_bb (load_bb);
6357 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
6359 /* For floating-point values, we'll need to view-convert them to integers
6360 so that we can perform the atomic compare and swap. Simplify the
6361 following code by always setting up the "i"ntegral variables. */
6362 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
6364 tree iaddr_val;
6366 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
6367 true), NULL);
6368 iaddr_val
6369 = force_gimple_operand_gsi (&si,
6370 fold_convert (TREE_TYPE (iaddr), addr),
6371 false, NULL_TREE, true, GSI_SAME_STMT);
6372 stmt = gimple_build_assign (iaddr, iaddr_val);
6373 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6374 loadedi = create_tmp_var (itype, NULL);
6375 if (gimple_in_ssa_p (cfun))
6376 loadedi = make_ssa_name (loadedi, NULL);
6378 else
6380 iaddr = addr;
6381 loadedi = loaded_val;
6384 initial
6385 = force_gimple_operand_gsi (&si,
6386 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
6387 iaddr,
6388 build_int_cst (TREE_TYPE (iaddr), 0)),
6389 true, NULL_TREE, true, GSI_SAME_STMT);
6391 /* Move the value to the LOADEDI temporary. */
6392 if (gimple_in_ssa_p (cfun))
6394 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
6395 phi = create_phi_node (loadedi, loop_header);
6396 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
6397 initial);
6399 else
6400 gsi_insert_before (&si,
6401 gimple_build_assign (loadedi, initial),
6402 GSI_SAME_STMT);
6403 if (loadedi != loaded_val)
6405 gimple_stmt_iterator gsi2;
6406 tree x;
6408 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
6409 gsi2 = gsi_start_bb (loop_header);
6410 if (gimple_in_ssa_p (cfun))
6412 gimple stmt;
6413 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
6414 true, GSI_SAME_STMT);
6415 stmt = gimple_build_assign (loaded_val, x);
6416 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
6418 else
6420 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
6421 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
6422 true, GSI_SAME_STMT);
6425 gsi_remove (&si, true);
6427 si = gsi_last_bb (store_bb);
6428 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
6430 if (iaddr == addr)
6431 storedi = stored_val;
6432 else
6433 storedi =
6434 force_gimple_operand_gsi (&si,
6435 build1 (VIEW_CONVERT_EXPR, itype,
6436 stored_val), true, NULL_TREE, true,
6437 GSI_SAME_STMT);
6439 /* Build the compare&swap statement. */
6440 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
6441 new_storedi = force_gimple_operand_gsi (&si,
6442 fold_convert (TREE_TYPE (loadedi),
6443 new_storedi),
6444 true, NULL_TREE,
6445 true, GSI_SAME_STMT);
6447 if (gimple_in_ssa_p (cfun))
6448 old_vali = loadedi;
6449 else
6451 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
6452 stmt = gimple_build_assign (old_vali, loadedi);
6453 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6455 stmt = gimple_build_assign (loadedi, new_storedi);
6456 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6459 /* Note that we always perform the comparison as an integer, even for
6460 floating point. This allows the atomic operation to properly
6461 succeed even with NaNs and -0.0. */
6462 stmt = gimple_build_cond_empty
6463 (build2 (NE_EXPR, boolean_type_node,
6464 new_storedi, old_vali));
6465 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6467 /* Update cfg. */
6468 e = single_succ_edge (store_bb);
6469 e->flags &= ~EDGE_FALLTHRU;
6470 e->flags |= EDGE_FALSE_VALUE;
6472 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
6474 /* Copy the new value to loadedi (we already did that before the condition
6475 if we are not in SSA). */
6476 if (gimple_in_ssa_p (cfun))
6478 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
6479 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
6482 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
6483 gsi_remove (&si, true);
6485 struct loop *loop = alloc_loop ();
6486 loop->header = loop_header;
6487 loop->latch = store_bb;
6488 add_loop (loop, loop_header->loop_father);
6490 if (gimple_in_ssa_p (cfun))
6491 update_ssa (TODO_update_ssa_no_phi);
6493 return true;
6496 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
6498 GOMP_atomic_start ();
6499 *addr = rhs;
6500 GOMP_atomic_end ();
6502 The result is not globally atomic, but works so long as all parallel
6503 references are within #pragma omp atomic directives. According to
6504 responses received from omp@openmp.org, this appears to be within the
6505 spec, which makes sense, since that's how several other compilers
6506 handle this situation as well.
6507 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
6508 expanding. STORED_VAL is the operand of the matching
6509 GIMPLE_OMP_ATOMIC_STORE.
6511 We replace
6512 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
6513 loaded_val = *addr;
6515 and replace
6516 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
6517 *addr = stored_val;  */
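/* Illustrative sketch (not part of the original source): on a target with
   no usable compare-and-swap for the access size, e.g. a 16-byte long
   double update

	#pragma omp atomic
	  ld *= 2.0L;

   falls back to

	GOMP_atomic_start ();
	ld = ld * 2.0L;
	GOMP_atomic_end ();

   serialized through libgomp's single global mutex.  */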
6520 static bool
6521 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
6522 tree addr, tree loaded_val, tree stored_val)
6524 gimple_stmt_iterator si;
6525 gimple stmt;
6526 tree t;
6528 si = gsi_last_bb (load_bb);
6529 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
6531 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
6532 t = build_call_expr (t, 0);
6533 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
6535 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
6536 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6537 gsi_remove (&si, true);
6539 si = gsi_last_bb (store_bb);
6540 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
6542 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
6543 stored_val);
6544 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6546 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
6547 t = build_call_expr (t, 0);
6548 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
6549 gsi_remove (&si, true);
6551 if (gimple_in_ssa_p (cfun))
6552 update_ssa (TODO_update_ssa_no_phi);
6553 return true;
6556 /* Expand an GIMPLE_OMP_ATOMIC statement. We try to expand
6557 using expand_omp_atomic_fetch_op. If it failed, we try to
6558 call expand_omp_atomic_pipeline, and if it fails too, the
6559 ultimate fallback is wrapping the operation in a mutex
6560 (expand_omp_atomic_mutex). REGION is the atomic region built
6561 by build_omp_regions_1(). */
6563 static void
6564 expand_omp_atomic (struct omp_region *region)
6566 basic_block load_bb = region->entry, store_bb = region->exit;
6567 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
6568 tree loaded_val = gimple_omp_atomic_load_lhs (load);
6569 tree addr = gimple_omp_atomic_load_rhs (load);
6570 tree stored_val = gimple_omp_atomic_store_val (store);
6571 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
6572 HOST_WIDE_INT index;
6574 /* Make sure the type is one of the supported sizes. */
6575 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
6576 index = exact_log2 (index);
6577 if (index >= 0 && index <= 4)
6579 unsigned int align = TYPE_ALIGN_UNIT (type);
6581 /* __sync builtins require strict data alignment. */
6582 if (exact_log2 (align) >= index)
6584 /* Atomic load. */
6585 if (loaded_val == stored_val
6586 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
6587 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
6588 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
6589 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
6590 return;
6592 /* Atomic store. */
6593 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
6594 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
6595 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
6596 && store_bb == single_succ (load_bb)
6597 && first_stmt (store_bb) == store
6598 && expand_omp_atomic_store (load_bb, addr, loaded_val,
6599 stored_val, index))
6600 return;
6602 /* When possible, use specialized atomic update functions. */
6603 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
6604 && store_bb == single_succ (load_bb)
6605 && expand_omp_atomic_fetch_op (load_bb, addr,
6606 loaded_val, stored_val, index))
6607 return;
6609 /* If we don't have specialized __sync builtins, try and implement
6610 as a compare and swap loop. */
6611 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
6612 loaded_val, stored_val, index))
6613 return;
6617 /* The ultimate fallback is wrapping the operation in a mutex. */
6618 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
6622 /* Expand the parallel region tree rooted at REGION. Expansion
6623 proceeds in depth-first order. Innermost regions are expanded
6624 first. This way, parallel regions that require a new function to
6625 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
6626 internal dependencies in their body. */
6628 static void
6629 expand_omp (struct omp_region *region)
6631 while (region)
6633 location_t saved_location;
6635 /* First, determine whether this is a combined parallel+workshare
6636 region. */
6637 if (region->type == GIMPLE_OMP_PARALLEL)
6638 determine_parallel_type (region);
6640 if (region->inner)
6641 expand_omp (region->inner);
6643 saved_location = input_location;
6644 if (gimple_has_location (last_stmt (region->entry)))
6645 input_location = gimple_location (last_stmt (region->entry));
6647 switch (region->type)
6649 case GIMPLE_OMP_PARALLEL:
6650 case GIMPLE_OMP_TASK:
6651 expand_omp_taskreg (region);
6652 break;
6654 case GIMPLE_OMP_FOR:
6655 expand_omp_for (region);
6656 break;
6658 case GIMPLE_OMP_SECTIONS:
6659 expand_omp_sections (region);
6660 break;
6662 case GIMPLE_OMP_SECTION:
6663 /* Individual omp sections are handled together with their
6664 parent GIMPLE_OMP_SECTIONS region. */
6665 break;
6667 case GIMPLE_OMP_SINGLE:
6668 expand_omp_single (region);
6669 break;
6671 case GIMPLE_OMP_MASTER:
6672 case GIMPLE_OMP_ORDERED:
6673 case GIMPLE_OMP_CRITICAL:
6674 expand_omp_synch (region);
6675 break;
6677 case GIMPLE_OMP_ATOMIC_LOAD:
6678 expand_omp_atomic (region);
6679 break;
6681 default:
6682 gcc_unreachable ();
6685 input_location = saved_location;
6686 region = region->next;
6691 /* Helper for build_omp_regions. Scan the dominator tree starting at
6692 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
6693 true, the function ends once a single tree is built (otherwise, the
6694 whole forest of OMP constructs may be built). */
6696 static void
6697 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
6698 bool single_tree)
6700 gimple_stmt_iterator gsi;
6701 gimple stmt;
6702 basic_block son;
6704 gsi = gsi_last_bb (bb);
6705 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
6707 struct omp_region *region;
6708 enum gimple_code code;
6710 stmt = gsi_stmt (gsi);
6711 code = gimple_code (stmt);
6712 if (code == GIMPLE_OMP_RETURN)
6714 /* STMT is the return point out of region PARENT. Mark it
6715 as the exit point and make PARENT the immediately
6716 enclosing region. */
6717 gcc_assert (parent);
6718 region = parent;
6719 region->exit = bb;
6720 parent = parent->outer;
6722 else if (code == GIMPLE_OMP_ATOMIC_STORE)
6724 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
6725 GIMPLE_OMP_RETURN, but matches with
6726 GIMPLE_OMP_ATOMIC_LOAD. */
6727 gcc_assert (parent);
6728 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
6729 region = parent;
6730 region->exit = bb;
6731 parent = parent->outer;
6734 else if (code == GIMPLE_OMP_CONTINUE)
6736 gcc_assert (parent);
6737 parent->cont = bb;
6739 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
6741 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
6742 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
6745 else
6747 /* Otherwise, this directive becomes the parent for a new
6748 region. */
6749 region = new_omp_region (bb, code, parent);
6750 parent = region;
6754 if (single_tree && !parent)
6755 return;
6757 for (son = first_dom_son (CDI_DOMINATORS, bb);
6758 son;
6759 son = next_dom_son (CDI_DOMINATORS, son))
6760 build_omp_regions_1 (son, parent, single_tree);
6763 /* Builds the tree of OMP regions rooted at ROOT, storing it to
6764 root_omp_region. */
6766 static void
6767 build_omp_regions_root (basic_block root)
6769 gcc_assert (root_omp_region == NULL);
6770 build_omp_regions_1 (root, NULL, true);
6771 gcc_assert (root_omp_region != NULL);
6774 /* Expands omp construct (and its subconstructs) starting in HEAD. */
6776 void
6777 omp_expand_local (basic_block head)
6779 build_omp_regions_root (head);
6780 if (dump_file && (dump_flags & TDF_DETAILS))
6782 fprintf (dump_file, "\nOMP region tree\n\n");
6783 dump_omp_region (dump_file, root_omp_region, 0);
6784 fprintf (dump_file, "\n");
6787 remove_exit_barriers (root_omp_region);
6788 expand_omp (root_omp_region);
6790 free_omp_regions ();
6793 /* Scan the CFG and build a tree of OMP regions, storing it in
6794 root_omp_region. */
6796 static void
6797 build_omp_regions (void)
6799 gcc_assert (root_omp_region == NULL);
6800 calculate_dominance_info (CDI_DOMINATORS);
6801 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
6804 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
6806 static unsigned int
6807 execute_expand_omp (void)
6809 build_omp_regions ();
6811 if (!root_omp_region)
6812 return 0;
6814 if (dump_file)
6816 fprintf (dump_file, "\nOMP region tree\n\n");
6817 dump_omp_region (dump_file, root_omp_region, 0);
6818 fprintf (dump_file, "\n");
6821 remove_exit_barriers (root_omp_region);
6823 expand_omp (root_omp_region);
6825 cleanup_tree_cfg ();
6827 free_omp_regions ();
6829 return 0;
6832 /* OMP expansion -- the default pass, run before creation of SSA form. */
6834 static bool
6835 gate_expand_omp (void)
6837 return (flag_openmp != 0 && !seen_error ());
6840 namespace {
6842 const pass_data pass_data_expand_omp =
6844 GIMPLE_PASS, /* type */
6845 "ompexp", /* name */
6846 OPTGROUP_NONE, /* optinfo_flags */
6847 true, /* has_gate */
6848 true, /* has_execute */
6849 TV_NONE, /* tv_id */
6850 PROP_gimple_any, /* properties_required */
6851 0, /* properties_provided */
6852 0, /* properties_destroyed */
6853 0, /* todo_flags_start */
6854 0, /* todo_flags_finish */
6857 class pass_expand_omp : public gimple_opt_pass
6859 public:
6860 pass_expand_omp(gcc::context *ctxt)
6861 : gimple_opt_pass(pass_data_expand_omp, ctxt)
6864 /* opt_pass methods: */
6865 bool gate () { return gate_expand_omp (); }
6866 unsigned int execute () { return execute_expand_omp (); }
6868 }; // class pass_expand_omp
6870 } // anon namespace
6872 gimple_opt_pass *
6873 make_pass_expand_omp (gcc::context *ctxt)
6875 return new pass_expand_omp (ctxt);
6878 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
6880 /* Lower the OpenMP sections directive in the current statement in GSI_P.
6881 CTX is the enclosing OMP context for the current statement. */
6883 static void
6884 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6886 tree block, control;
6887 gimple_stmt_iterator tgsi;
6888 gimple stmt, new_stmt, bind, t;
6889 gimple_seq ilist, dlist, olist, new_body;
6890 struct gimplify_ctx gctx;
6892 stmt = gsi_stmt (*gsi_p);
6894 push_gimplify_context (&gctx);
6896 dlist = NULL;
6897 ilist = NULL;
6898 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
6899 &ilist, &dlist, ctx);
6901 new_body = gimple_omp_body (stmt);
6902 gimple_omp_set_body (stmt, NULL);
6903 tgsi = gsi_start (new_body);
6904 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
6906 omp_context *sctx;
6907 gimple sec_start;
6909 sec_start = gsi_stmt (tgsi);
6910 sctx = maybe_lookup_ctx (sec_start);
6911 gcc_assert (sctx);
6913 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
6914 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
6915 GSI_CONTINUE_LINKING);
6916 gimple_omp_set_body (sec_start, NULL);
6918 if (gsi_one_before_end_p (tgsi))
6920 gimple_seq l = NULL;
6921 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
6922 &l, ctx);
6923 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
6924 gimple_omp_section_set_last (sec_start);
6927 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
6928 GSI_CONTINUE_LINKING);
6931 block = make_node (BLOCK);
6932 bind = gimple_build_bind (NULL, new_body, block);
6934 olist = NULL;
6935 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
6937 block = make_node (BLOCK);
6938 new_stmt = gimple_build_bind (NULL, NULL, block);
6939 gsi_replace (gsi_p, new_stmt, true);
6941 pop_gimplify_context (new_stmt);
6942 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6943 BLOCK_VARS (block) = gimple_bind_vars (bind);
6944 if (BLOCK_VARS (block))
6945 TREE_USED (block) = 1;
6947 new_body = NULL;
6948 gimple_seq_add_seq (&new_body, ilist);
6949 gimple_seq_add_stmt (&new_body, stmt);
6950 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
6951 gimple_seq_add_stmt (&new_body, bind);
6953 control = create_tmp_var (unsigned_type_node, ".section");
6954 t = gimple_build_omp_continue (control, control);
6955 gimple_omp_sections_set_control (stmt, control);
6956 gimple_seq_add_stmt (&new_body, t);
6958 gimple_seq_add_seq (&new_body, olist);
6959 gimple_seq_add_seq (&new_body, dlist);
6961 new_body = maybe_catch_exception (new_body);
6963 t = gimple_build_omp_return
6964 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
6965 OMP_CLAUSE_NOWAIT));
6966 gimple_seq_add_stmt (&new_body, t);
6968 gimple_bind_set_body (new_stmt, new_body);
6972 /* A subroutine of lower_omp_single. Expand the simple form of
6973 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
6975 if (GOMP_single_start ())
6976 BODY;
6977 [ GOMP_barrier (); ] -> unless 'nowait' is present.
6979 FIXME. It may be better to delay expanding the logic of this until
6980 pass_expand_omp. The expanded logic may make the job more difficult
6981 for a synchronization analysis pass. */
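/* Illustrative example (not part of the original source):

	#pragma omp single
	  init ();

   lowers to roughly

	if (GOMP_single_start ())
	  init ();
	GOMP_barrier ();

   with the trailing barrier omitted when a 'nowait' clause is present.  */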
6983 static void
6984 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
6986 location_t loc = gimple_location (single_stmt);
6987 tree tlabel = create_artificial_label (loc);
6988 tree flabel = create_artificial_label (loc);
6989 gimple call, cond;
6990 tree lhs, decl;
6992 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
6993 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
6994 call = gimple_build_call (decl, 0);
6995 gimple_call_set_lhs (call, lhs);
6996 gimple_seq_add_stmt (pre_p, call);
6998 cond = gimple_build_cond (EQ_EXPR, lhs,
6999 fold_convert_loc (loc, TREE_TYPE (lhs),
7000 boolean_true_node),
7001 tlabel, flabel);
7002 gimple_seq_add_stmt (pre_p, cond);
7003 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
7004 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
7005 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
7009 /* A subroutine of lower_omp_single. Expand the simple form of
7010 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
7012 #pragma omp single copyprivate (a, b, c)
7014 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
7017 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
7019 BODY;
7020 copyout.a = a;
7021 copyout.b = b;
7022 copyout.c = c;
7023 GOMP_single_copy_end (&copyout);
7025 else
7027 a = copyout_p->a;
7028 b = copyout_p->b;
7029 c = copyout_p->c;
7031 GOMP_barrier ();
7034 FIXME. It may be better to delay expanding the logic of this until
7035 pass_expand_omp. The expanded logic may make the job more difficult
7036 for a synchronization analysis pass. */
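/* Illustrative example (not part of the original source):

	int a;
	#pragma omp single copyprivate (a)
	  a = compute ();

   follows the scheme above: the thread that executes the region stores A
   into the copyout record and calls GOMP_single_copy_end (&copyout),
   while every other thread copies A back from the pointer returned by
   GOMP_single_copy_start ().  */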
7038 static void
7039 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
7041 tree ptr_type, t, l0, l1, l2, bfn_decl;
7042 gimple_seq copyin_seq;
7043 location_t loc = gimple_location (single_stmt);
7045 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
7047 ptr_type = build_pointer_type (ctx->record_type);
7048 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
7050 l0 = create_artificial_label (loc);
7051 l1 = create_artificial_label (loc);
7052 l2 = create_artificial_label (loc);
7054 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
7055 t = build_call_expr_loc (loc, bfn_decl, 0);
7056 t = fold_convert_loc (loc, ptr_type, t);
7057 gimplify_assign (ctx->receiver_decl, t, pre_p);
7059 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
7060 build_int_cst (ptr_type, 0));
7061 t = build3 (COND_EXPR, void_type_node, t,
7062 build_and_jump (&l0), build_and_jump (&l1));
7063 gimplify_and_add (t, pre_p);
7065 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
7067 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
7069 copyin_seq = NULL;
7070 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
7071 &copyin_seq, ctx);
7073 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
7074 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
7075 t = build_call_expr_loc (loc, bfn_decl, 1, t);
7076 gimplify_and_add (t, pre_p);
7078 t = build_and_jump (&l2);
7079 gimplify_and_add (t, pre_p);
7081 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
7083 gimple_seq_add_seq (pre_p, copyin_seq);
7085 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
7089 /* Expand code for an OpenMP single directive. */
7091 static void
7092 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7094 tree block;
7095 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
7096 gimple_seq bind_body, dlist;
7097 struct gimplify_ctx gctx;
7099 push_gimplify_context (&gctx);
7101 block = make_node (BLOCK);
7102 bind = gimple_build_bind (NULL, NULL, block);
7103 gsi_replace (gsi_p, bind, true);
7104 bind_body = NULL;
7105 dlist = NULL;
7106 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
7107 &bind_body, &dlist, ctx);
7108 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
7110 gimple_seq_add_stmt (&bind_body, single_stmt);
7112 if (ctx->record_type)
7113 lower_omp_single_copy (single_stmt, &bind_body, ctx);
7114 else
7115 lower_omp_single_simple (single_stmt, &bind_body);
7117 gimple_omp_set_body (single_stmt, NULL);
7119 gimple_seq_add_seq (&bind_body, dlist);
7121 bind_body = maybe_catch_exception (bind_body);
7123 t = gimple_build_omp_return
7124 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
7125 OMP_CLAUSE_NOWAIT));
7126 gimple_seq_add_stmt (&bind_body, t);
7127 gimple_bind_set_body (bind, bind_body);
7129 pop_gimplify_context (bind);
7131 gimple_bind_append_vars (bind, ctx->block_vars);
7132 BLOCK_VARS (block) = ctx->block_vars;
7133 if (BLOCK_VARS (block))
7134 TREE_USED (block) = 1;
7138 /* Expand code for an OpenMP master directive. */
7140 static void
7141 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7143 tree block, lab = NULL, x, bfn_decl;
7144 gimple stmt = gsi_stmt (*gsi_p), bind;
7145 location_t loc = gimple_location (stmt);
7146 gimple_seq tseq;
7147 struct gimplify_ctx gctx;
7149 push_gimplify_context (&gctx);
7151 block = make_node (BLOCK);
7152 bind = gimple_build_bind (NULL, NULL, block);
7153 gsi_replace (gsi_p, bind, true);
7154 gimple_bind_add_stmt (bind, stmt);
7156 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
7157 x = build_call_expr_loc (loc, bfn_decl, 0);
7158 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
7159 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
7160 tseq = NULL;
7161 gimplify_and_add (x, &tseq);
7162 gimple_bind_add_seq (bind, tseq);
7164 lower_omp (gimple_omp_body_ptr (stmt), ctx);
7165 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
7166 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
7167 gimple_omp_set_body (stmt, NULL);
7169 gimple_bind_add_stmt (bind, gimple_build_label (lab));
7171 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
7173 pop_gimplify_context (bind);
7175 gimple_bind_append_vars (bind, ctx->block_vars);
7176 BLOCK_VARS (block) = ctx->block_vars;
7180 /* Expand code for an OpenMP ordered directive. */
7182 static void
7183 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7185 tree block;
7186 gimple stmt = gsi_stmt (*gsi_p), bind, x;
7187 struct gimplify_ctx gctx;
7189 push_gimplify_context (&gctx);
7191 block = make_node (BLOCK);
7192 bind = gimple_build_bind (NULL, NULL, block);
7193 gsi_replace (gsi_p, bind, true);
7194 gimple_bind_add_stmt (bind, stmt);
7196 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
7197 0);
7198 gimple_bind_add_stmt (bind, x);
7200 lower_omp (gimple_omp_body_ptr (stmt), ctx);
7201 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
7202 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
7203 gimple_omp_set_body (stmt, NULL);
7205 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
7206 gimple_bind_add_stmt (bind, x);
7208 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
7210 pop_gimplify_context (bind);
7212 gimple_bind_append_vars (bind, ctx->block_vars);
7213 BLOCK_VARS (block) = gimple_bind_vars (bind);
7217 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
7218 substitution of a couple of function calls. But the NAMED case
7219 requires that languages coordinate a symbol name. It is therefore
7220 best put here in common code. */
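/* Illustrative example (not part of the original source):

	#pragma omp critical (update)
	  x++;

   is lowered to roughly

	GOMP_critical_name_start (&.gomp_critical_user_update);
	x++;
	GOMP_critical_name_end (&.gomp_critical_user_update);

   where .gomp_critical_user_update is the common symbol created below;
   an unnamed critical uses GOMP_critical_start/end instead.  */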
7222 static GTY((param1_is (tree), param2_is (tree)))
7223 splay_tree critical_name_mutexes;
7225 static void
7226 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7228 tree block;
7229 tree name, lock, unlock;
7230 gimple stmt = gsi_stmt (*gsi_p), bind;
7231 location_t loc = gimple_location (stmt);
7232 gimple_seq tbody;
7233 struct gimplify_ctx gctx;
7235 name = gimple_omp_critical_name (stmt);
7236 if (name)
7238 tree decl;
7239 splay_tree_node n;
7241 if (!critical_name_mutexes)
7242 critical_name_mutexes
7243 = splay_tree_new_ggc (splay_tree_compare_pointers,
7244 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
7245 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
7247 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
7248 if (n == NULL)
7250 char *new_str;
7252 decl = create_tmp_var_raw (ptr_type_node, NULL);
7254 new_str = ACONCAT ((".gomp_critical_user_",
7255 IDENTIFIER_POINTER (name), NULL));
7256 DECL_NAME (decl) = get_identifier (new_str);
7257 TREE_PUBLIC (decl) = 1;
7258 TREE_STATIC (decl) = 1;
7259 DECL_COMMON (decl) = 1;
7260 DECL_ARTIFICIAL (decl) = 1;
7261 DECL_IGNORED_P (decl) = 1;
7262 varpool_finalize_decl (decl);
7264 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
7265 (splay_tree_value) decl);
7267 else
7268 decl = (tree) n->value;
7270 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
7271 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
7273 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
7274 unlock = build_call_expr_loc (loc, unlock, 1,
7275 build_fold_addr_expr_loc (loc, decl));
7277 else
7279 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
7280 lock = build_call_expr_loc (loc, lock, 0);
7282 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
7283 unlock = build_call_expr_loc (loc, unlock, 0);
7286 push_gimplify_context (&gctx);
7288 block = make_node (BLOCK);
7289 bind = gimple_build_bind (NULL, NULL, block);
7290 gsi_replace (gsi_p, bind, true);
7291 gimple_bind_add_stmt (bind, stmt);
7293 tbody = gimple_bind_body (bind);
7294 gimplify_and_add (lock, &tbody);
7295 gimple_bind_set_body (bind, tbody);
7297 lower_omp (gimple_omp_body_ptr (stmt), ctx);
7298 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
7299 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
7300 gimple_omp_set_body (stmt, NULL);
7302 tbody = gimple_bind_body (bind);
7303 gimplify_and_add (unlock, &tbody);
7304 gimple_bind_set_body (bind, tbody);
7306 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
7308 pop_gimplify_context (bind);
7309 gimple_bind_append_vars (bind, ctx->block_vars);
7310 BLOCK_VARS (block) = gimple_bind_vars (bind);
7314 /* A subroutine of lower_omp_for. Generate code to emit the predicate
7315 for a lastprivate clause. Given a loop control predicate of (V
7316 cond N2), we gate the clause on (!(V cond N2)). The lowered form
7317 is appended to *DLIST, iterator initialization is appended to
7318 *BODY_P. */
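/* Illustrative sketch (not part of the original source): for

	#pragma omp for lastprivate (x)
	for (i = 0; i < n; i++)
	  x = f (i);

   the lastprivate copy-out is gated on !(i < n) (or on i == n when the
   step is known to be +1 or -1, see below), so only the thread that
   executed the final iteration writes X back to the shared variable.  */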
7320 static void
7321 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
7322 gimple_seq *dlist, struct omp_context *ctx)
7324 tree clauses, cond, vinit;
7325 enum tree_code cond_code;
7326 gimple_seq stmts;
7328 cond_code = fd->loop.cond_code;
7329 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
7331 /* When possible, use a strict equality expression. This can let VRP
7332 type optimizations deduce the value and remove a copy. */
7333 if (host_integerp (fd->loop.step, 0))
7335 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
7336 if (step == 1 || step == -1)
7337 cond_code = EQ_EXPR;
7340 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
7342 clauses = gimple_omp_for_clauses (fd->for_stmt);
7343 stmts = NULL;
7344 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
7345 if (!gimple_seq_empty_p (stmts))
7347 gimple_seq_add_seq (&stmts, *dlist);
7348 *dlist = stmts;
7350 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
7351 vinit = fd->loop.n1;
7352 if (cond_code == EQ_EXPR
7353 && host_integerp (fd->loop.n2, 0)
7354 && ! integer_zerop (fd->loop.n2))
7355 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
7356 else
7357 vinit = unshare_expr (vinit);
7359 /* Initialize the iterator variable, so that threads that don't execute
7360 any iterations don't execute the lastprivate clauses by accident. */
7361 gimplify_assign (fd->loop.v, vinit, body_p);
7366 /* Lower code for an OpenMP loop directive. */
7368 static void
7369 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7371 tree *rhs_p, block;
7372 struct omp_for_data fd;
7373 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
7374 gimple_seq omp_for_body, body, dlist;
7375 size_t i;
7376 struct gimplify_ctx gctx;
7378 push_gimplify_context (&gctx);
7380 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
7382 block = make_node (BLOCK);
7383 new_stmt = gimple_build_bind (NULL, NULL, block);
7384 /* Replace at gsi right away, so that 'stmt' is no longer a member
7385 of a sequence, as we're going to add it to a different
7386 one below. */
7387 gsi_replace (gsi_p, new_stmt, true);
7389 /* Move declaration of temporaries in the loop body before we make
7390 it go away. */
7391 omp_for_body = gimple_omp_body (stmt);
7392 if (!gimple_seq_empty_p (omp_for_body)
7393 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
7395 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
7396 gimple_bind_append_vars (new_stmt, vars);
7399 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
7400 dlist = NULL;
7401 body = NULL;
7402 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
7403 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
7405 lower_omp (gimple_omp_body_ptr (stmt), ctx);
7407 /* Lower the header expressions. At this point, we can assume that
7408 the header is of the form:
7410 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
7412 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
7413 using the .omp_data_s mapping, if needed. */
7414 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
7416 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
7417 if (!is_gimple_min_invariant (*rhs_p))
7418 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
7420 rhs_p = gimple_omp_for_final_ptr (stmt, i);
7421 if (!is_gimple_min_invariant (*rhs_p))
7422 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
7424 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
7425 if (!is_gimple_min_invariant (*rhs_p))
7426 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
7429 /* Once lowered, extract the bounds and clauses. */
7430 extract_omp_for_data (stmt, &fd, NULL);
7432 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
7434 gimple_seq_add_stmt (&body, stmt);
7435 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
7437 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
7438 fd.loop.v));
7440 /* After the loop, add exit clauses. */
7441 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
7442 gimple_seq_add_seq (&body, dlist);
7444 body = maybe_catch_exception (body);
7446 /* Region exit marker goes at the end of the loop body. */
7447 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
7449 pop_gimplify_context (new_stmt);
7451 gimple_bind_append_vars (new_stmt, ctx->block_vars);
7452 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
7453 if (BLOCK_VARS (block))
7454 TREE_USED (block) = 1;
7456 gimple_bind_set_body (new_stmt, body);
7457 gimple_omp_set_body (stmt, NULL);
7458 gimple_omp_for_set_pre_body (stmt, NULL);
7461 /* Callback for walk_stmts. Check if the current statement only contains
7462 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
7464 static tree
7465 check_combined_parallel (gimple_stmt_iterator *gsi_p,
7466 bool *handled_ops_p,
7467 struct walk_stmt_info *wi)
7469 int *info = (int *) wi->info;
7470 gimple stmt = gsi_stmt (*gsi_p);
7472 *handled_ops_p = true;
7473 switch (gimple_code (stmt))
7475 WALK_SUBSTMTS;
7477 case GIMPLE_OMP_FOR:
7478 case GIMPLE_OMP_SECTIONS:
7479 *info = *info == 0 ? 1 : -1;
7480 break;
7481 default:
7482 *info = -1;
7483 break;
7485 return NULL;
7488 struct omp_taskcopy_context
7490 /* This field must be at the beginning, as we do "inheritance": Some
7491 callback functions for tree-inline.c (e.g., omp_copy_decl)
7492 receive a copy_body_data pointer that is up-casted to an
7493 omp_context pointer. */
7494 copy_body_data cb;
7495 omp_context *ctx;
7498 static tree
7499 task_copyfn_copy_decl (tree var, copy_body_data *cb)
7501 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
7503 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
7504 return create_tmp_var (TREE_TYPE (var), NULL);
7506 return var;
7509 static tree
7510 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
7512 tree name, new_fields = NULL, type, f;
7514 type = lang_hooks.types.make_type (RECORD_TYPE);
7515 name = DECL_NAME (TYPE_NAME (orig_type));
7516 name = build_decl (gimple_location (tcctx->ctx->stmt),
7517 TYPE_DECL, name, type);
7518 TYPE_NAME (type) = name;
7520 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
7522 tree new_f = copy_node (f);
7523 DECL_CONTEXT (new_f) = type;
7524 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
7525 TREE_CHAIN (new_f) = new_fields;
7526 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
7527 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
7528 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
7529 &tcctx->cb, NULL);
7530 new_fields = new_f;
7531 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
7533 TYPE_FIELDS (type) = nreverse (new_fields);
7534 layout_type (type);
7535 return type;
7538 /* Create task copyfn. */
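/* Hedged summary of the function below (my reading of the code, not a
   libgomp spec): the generated copy function receives two record
   pointers, ARG for the task's own data block and SARG for the block
   filled in by the encountering thread, and copy-constructs the
   firstprivate fields from *SARG into *ARG, also fixing up shared-var
   pointers and VLA fields so they refer into the task's block.  It is
   only emitted when a plain bitwise copy of the sender block would not
   be enough.  */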
7540 static void
7541 create_task_copyfn (gimple task_stmt, omp_context *ctx)
7543 struct function *child_cfun;
7544 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
7545 tree record_type, srecord_type, bind, list;
7546 bool record_needs_remap = false, srecord_needs_remap = false;
7547 splay_tree_node n;
7548 struct omp_taskcopy_context tcctx;
7549 struct gimplify_ctx gctx;
7550 location_t loc = gimple_location (task_stmt);
7552 child_fn = gimple_omp_task_copy_fn (task_stmt);
7553 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
7554 gcc_assert (child_cfun->cfg == NULL);
7555 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
7557 /* Reset DECL_CONTEXT on function arguments. */
7558 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
7559 DECL_CONTEXT (t) = child_fn;
7561 /* Populate the function. */
7562 push_gimplify_context (&gctx);
7563 push_cfun (child_cfun);
7565 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
7566 TREE_SIDE_EFFECTS (bind) = 1;
7567 list = NULL;
7568 DECL_SAVED_TREE (child_fn) = bind;
7569 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
7571 /* Remap src and dst argument types if needed. */
7572 record_type = ctx->record_type;
7573 srecord_type = ctx->srecord_type;
7574 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
7575 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
7577 record_needs_remap = true;
7578 break;
7580 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
7581 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
7583 srecord_needs_remap = true;
7584 break;
7587 if (record_needs_remap || srecord_needs_remap)
7589 memset (&tcctx, '\0', sizeof (tcctx));
7590 tcctx.cb.src_fn = ctx->cb.src_fn;
7591 tcctx.cb.dst_fn = child_fn;
7592 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
7593 gcc_checking_assert (tcctx.cb.src_node);
7594 tcctx.cb.dst_node = tcctx.cb.src_node;
7595 tcctx.cb.src_cfun = ctx->cb.src_cfun;
7596 tcctx.cb.copy_decl = task_copyfn_copy_decl;
7597 tcctx.cb.eh_lp_nr = 0;
7598 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
7599 tcctx.cb.decl_map = pointer_map_create ();
7600 tcctx.ctx = ctx;
7602 if (record_needs_remap)
7603 record_type = task_copyfn_remap_type (&tcctx, record_type);
7604 if (srecord_needs_remap)
7605 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
7607 else
7608 tcctx.cb.decl_map = NULL;
7610 arg = DECL_ARGUMENTS (child_fn);
7611 TREE_TYPE (arg) = build_pointer_type (record_type);
7612 sarg = DECL_CHAIN (arg);
7613 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
7615 /* First pass: initialize temporaries used in record_type and srecord_type
7616 sizes and field offsets. */
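  /* Illustration (hypothetical user code): for "int n; int vla[n];" made
     firstprivate on a task, the remapped VLA field's size expression
     refers to a fresh temporary standing in for N; the loop below loads
     such temporaries from the sender block so the later field copies
     compute the right sizes and offsets.  */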
7617 if (tcctx.cb.decl_map)
7618 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
7619 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
7621 tree *p;
7623 decl = OMP_CLAUSE_DECL (c);
7624 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
7625 if (p == NULL)
7626 continue;
7627 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
7628 sf = (tree) n->value;
7629 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
7630 src = build_simple_mem_ref_loc (loc, sarg);
7631 src = omp_build_component_ref (src, sf);
7632 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
7633 append_to_statement_list (t, &list);
7636 /* Second pass: copy shared var pointers and copy construct non-VLA
7637 firstprivate vars. */
7638 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
7639 switch (OMP_CLAUSE_CODE (c))
7641 case OMP_CLAUSE_SHARED:
7642 decl = OMP_CLAUSE_DECL (c);
7643 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
7644 if (n == NULL)
7645 break;
7646 f = (tree) n->value;
7647 if (tcctx.cb.decl_map)
7648 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
7649 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
7650 sf = (tree) n->value;
7651 if (tcctx.cb.decl_map)
7652 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
7653 src = build_simple_mem_ref_loc (loc, sarg);
7654 src = omp_build_component_ref (src, sf);
7655 dst = build_simple_mem_ref_loc (loc, arg);
7656 dst = omp_build_component_ref (dst, f);
7657 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
7658 append_to_statement_list (t, &list);
7659 break;
7660 case OMP_CLAUSE_FIRSTPRIVATE:
7661 decl = OMP_CLAUSE_DECL (c);
7662 if (is_variable_sized (decl))
7663 break;
7664 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
7665 if (n == NULL)
7666 break;
7667 f = (tree) n->value;
7668 if (tcctx.cb.decl_map)
7669 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
7670 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
7671 if (n != NULL)
7673 sf = (tree) n->value;
7674 if (tcctx.cb.decl_map)
7675 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
7676 src = build_simple_mem_ref_loc (loc, sarg);
7677 src = omp_build_component_ref (src, sf);
7678 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
7679 src = build_simple_mem_ref_loc (loc, src);
7681 else
7682 src = decl;
7683 dst = build_simple_mem_ref_loc (loc, arg);
7684 dst = omp_build_component_ref (dst, f);
7685 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
7686 append_to_statement_list (t, &list);
7687 break;
7688 case OMP_CLAUSE_PRIVATE:
7689 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
7690 break;
7691 decl = OMP_CLAUSE_DECL (c);
7692 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
7693 f = (tree) n->value;
7694 if (tcctx.cb.decl_map)
7695 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
7696 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
7697 if (n != NULL)
7699 sf = (tree) n->value;
7700 if (tcctx.cb.decl_map)
7701 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
7702 src = build_simple_mem_ref_loc (loc, sarg);
7703 src = omp_build_component_ref (src, sf);
7704 if (use_pointer_for_field (decl, NULL))
7705 src = build_simple_mem_ref_loc (loc, src);
7707 else
7708 src = decl;
7709 dst = build_simple_mem_ref_loc (loc, arg);
7710 dst = omp_build_component_ref (dst, f);
7711 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
7712 append_to_statement_list (t, &list);
7713 break;
7714 default:
7715 break;
7718 /* Last pass: handle VLA firstprivates. */
7719 if (tcctx.cb.decl_map)
7720 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
7721 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
7723 tree ind, ptr, df;
7725 decl = OMP_CLAUSE_DECL (c);
7726 if (!is_variable_sized (decl))
7727 continue;
7728 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
7729 if (n == NULL)
7730 continue;
7731 f = (tree) n->value;
7732 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
7733 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
7734 ind = DECL_VALUE_EXPR (decl);
7735 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
7736 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
7737 n = splay_tree_lookup (ctx->sfield_map,
7738 (splay_tree_key) TREE_OPERAND (ind, 0));
7739 sf = (tree) n->value;
7740 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
7741 src = build_simple_mem_ref_loc (loc, sarg);
7742 src = omp_build_component_ref (src, sf);
7743 src = build_simple_mem_ref_loc (loc, src);
7744 dst = build_simple_mem_ref_loc (loc, arg);
7745 dst = omp_build_component_ref (dst, f);
7746 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
7747 append_to_statement_list (t, &list);
7748 n = splay_tree_lookup (ctx->field_map,
7749 (splay_tree_key) TREE_OPERAND (ind, 0));
7750 df = (tree) n->value;
7751 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
7752 ptr = build_simple_mem_ref_loc (loc, arg);
7753 ptr = omp_build_component_ref (ptr, df);
7754 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
7755 build_fold_addr_expr_loc (loc, dst));
7756 append_to_statement_list (t, &list);
7759 t = build1 (RETURN_EXPR, void_type_node, NULL);
7760 append_to_statement_list (t, &list);
7762 if (tcctx.cb.decl_map)
7763 pointer_map_destroy (tcctx.cb.decl_map);
7764 pop_gimplify_context (NULL);
7765 BIND_EXPR_BODY (bind) = list;
7766 pop_cfun ();
7769 /* Lower the OpenMP parallel or task directive in the current statement
7770 in GSI_P. CTX holds context information for the directive. */
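/* Rough sketch of the lowered shape (illustrative, not literal dump
   output):

       <ilist: sender setup, e.g. .omp_data_o.x = x>
       #pragma omp parallel|task [data_arg = .omp_data_o]
         .omp_data_i = &.omp_data_o    <- receiver fixup, if a record exists
         <lowered clause setup, body, clause teardown>
         OMP_RETURN
       <olist: copy-back from the record, if any>

   all wrapped in a fresh GIMPLE_BIND that replaces the original
   statement at GSI_P.  */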
7772 static void
7773 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7775 tree clauses;
7776 tree child_fn, t;
7777 gimple stmt = gsi_stmt (*gsi_p);
7778 gimple par_bind, bind;
7779 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
7780 struct gimplify_ctx gctx;
7781 location_t loc = gimple_location (stmt);
7783 clauses = gimple_omp_taskreg_clauses (stmt);
7784 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
7785 par_body = gimple_bind_body (par_bind);
7786 child_fn = ctx->cb.dst_fn;
7787 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
7788 && !gimple_omp_parallel_combined_p (stmt))
7790 struct walk_stmt_info wi;
7791 int ws_num = 0;
7793 memset (&wi, 0, sizeof (wi));
7794 wi.info = &ws_num;
7795 wi.val_only = true;
7796 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
7797 if (ws_num == 1)
7798 gimple_omp_parallel_set_combined_p (stmt, true);
7800 if (ctx->srecord_type)
7801 create_task_copyfn (stmt, ctx);
7803 push_gimplify_context (&gctx);
7805 par_olist = NULL;
7806 par_ilist = NULL;
7807 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
7808 lower_omp (&par_body, ctx);
7809 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
7810 lower_reduction_clauses (clauses, &par_olist, ctx);
7812 /* Declare all the variables created by mapping and the variables
7813 declared in the scope of the parallel body. */
7814 record_vars_into (ctx->block_vars, child_fn);
7815 record_vars_into (gimple_bind_vars (par_bind), child_fn);
7817 if (ctx->record_type)
7819 ctx->sender_decl
7820 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
7821 : ctx->record_type, ".omp_data_o");
7822 DECL_NAMELESS (ctx->sender_decl) = 1;
7823 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
7824 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
7827 olist = NULL;
7828 ilist = NULL;
7829 lower_send_clauses (clauses, &ilist, &olist, ctx);
7830 lower_send_shared_vars (&ilist, &olist, ctx);
7832 /* Once all the expansions are done, sequence all the different
7833 fragments inside gimple_omp_body. */
7835 new_body = NULL;
7837 if (ctx->record_type)
7839 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
7840 /* fixup_child_record_type might have changed receiver_decl's type. */
7841 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
7842 gimple_seq_add_stmt (&new_body,
7843 gimple_build_assign (ctx->receiver_decl, t));
7846 gimple_seq_add_seq (&new_body, par_ilist);
7847 gimple_seq_add_seq (&new_body, par_body);
7848 gimple_seq_add_seq (&new_body, par_olist);
7849 new_body = maybe_catch_exception (new_body);
7850 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
7851 gimple_omp_set_body (stmt, new_body);
7853 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
7854 gsi_replace (gsi_p, bind, true);
7855 gimple_bind_add_seq (bind, ilist);
7856 gimple_bind_add_stmt (bind, stmt);
7857 gimple_bind_add_seq (bind, olist);
7859 pop_gimplify_context (NULL);
7862 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
7863 regimplified. If DATA is non-NULL, lower_omp_1 is outside
7864 of OpenMP context, but with task_shared_vars set. */
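/* Example of why regimplification is needed (variable names are
   illustrative): once a shared variable V has been rewritten to live in
   the data block, V may carry DECL_VALUE_EXPR == (*.omp_data_i).v, so a
   statement such as "tmp = V + 1" is no longer valid GIMPLE after the
   value expression is substituted; similarly &V for a privatized global
   may have lost TREE_CONSTANT, hence the
   recompute_tree_invariant_for_addr_expr call below.  */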
7866 static tree
7867 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
7868 void *data)
7870 tree t = *tp;
7872 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
7873 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
7874 return t;
7876 if (task_shared_vars
7877 && DECL_P (t)
7878 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
7879 return t;
7881 /* If a global variable has been privatized, TREE_CONSTANT on
7882 ADDR_EXPR might be wrong. */
7883 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
7884 recompute_tree_invariant_for_addr_expr (t);
7886 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
7887 return NULL_TREE;
7890 static void
7891 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7893 gimple stmt = gsi_stmt (*gsi_p);
7894 struct walk_stmt_info wi;
7896 if (gimple_has_location (stmt))
7897 input_location = gimple_location (stmt);
7899 if (task_shared_vars)
7900 memset (&wi, '\0', sizeof (wi));
7902 /* If we have issued syntax errors, avoid doing any heavy lifting.
7903 Just replace the OpenMP directives with a NOP to avoid
7904 confusing RTL expansion. */
7905 if (seen_error () && is_gimple_omp (stmt))
7907 gsi_replace (gsi_p, gimple_build_nop (), true);
7908 return;
7911 switch (gimple_code (stmt))
7913 case GIMPLE_COND:
7914 if ((ctx || task_shared_vars)
7915 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
7916 ctx ? NULL : &wi, NULL)
7917 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
7918 ctx ? NULL : &wi, NULL)))
7919 gimple_regimplify_operands (stmt, gsi_p);
7920 break;
7921 case GIMPLE_CATCH:
7922 lower_omp (gimple_catch_handler_ptr (stmt), ctx);
7923 break;
7924 case GIMPLE_EH_FILTER:
7925 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
7926 break;
7927 case GIMPLE_TRY:
7928 lower_omp (gimple_try_eval_ptr (stmt), ctx);
7929 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
7930 break;
7931 case GIMPLE_TRANSACTION:
7932 lower_omp (gimple_transaction_body_ptr (stmt), ctx);
7933 break;
7934 case GIMPLE_BIND:
7935 lower_omp (gimple_bind_body_ptr (stmt), ctx);
7936 break;
7937 case GIMPLE_OMP_PARALLEL:
7938 case GIMPLE_OMP_TASK:
7939 ctx = maybe_lookup_ctx (stmt);
7940 lower_omp_taskreg (gsi_p, ctx);
7941 break;
7942 case GIMPLE_OMP_FOR:
7943 ctx = maybe_lookup_ctx (stmt);
7944 gcc_assert (ctx);
7945 lower_omp_for (gsi_p, ctx);
7946 break;
7947 case GIMPLE_OMP_SECTIONS:
7948 ctx = maybe_lookup_ctx (stmt);
7949 gcc_assert (ctx);
7950 lower_omp_sections (gsi_p, ctx);
7951 break;
7952 case GIMPLE_OMP_SINGLE:
7953 ctx = maybe_lookup_ctx (stmt);
7954 gcc_assert (ctx);
7955 lower_omp_single (gsi_p, ctx);
7956 break;
7957 case GIMPLE_OMP_MASTER:
7958 ctx = maybe_lookup_ctx (stmt);
7959 gcc_assert (ctx);
7960 lower_omp_master (gsi_p, ctx);
7961 break;
7962 case GIMPLE_OMP_ORDERED:
7963 ctx = maybe_lookup_ctx (stmt);
7964 gcc_assert (ctx);
7965 lower_omp_ordered (gsi_p, ctx);
7966 break;
7967 case GIMPLE_OMP_CRITICAL:
7968 ctx = maybe_lookup_ctx (stmt);
7969 gcc_assert (ctx);
7970 lower_omp_critical (gsi_p, ctx);
7971 break;
7972 case GIMPLE_OMP_ATOMIC_LOAD:
7973 if ((ctx || task_shared_vars)
7974 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
7975 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
7976 gimple_regimplify_operands (stmt, gsi_p);
7977 break;
7978 default:
7979 if ((ctx || task_shared_vars)
7980 && walk_gimple_op (stmt, lower_omp_regimplify_p,
7981 ctx ? NULL : &wi))
7982 gimple_regimplify_operands (stmt, gsi_p);
7983 break;
7987 static void
7988 lower_omp (gimple_seq *body, omp_context *ctx)
7990 location_t saved_location = input_location;
7991 gimple_stmt_iterator gsi;
7992 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
7993 lower_omp_1 (&gsi, ctx);
7994 input_location = saved_location;
7997 /* Main entry point. */
7999 static unsigned int
8000 execute_lower_omp (void)
8002 gimple_seq body;
8004 /* This pass always runs, to provide PROP_gimple_lomp.
8005 But there is nothing to do unless -fopenmp is given. */
8006 if (flag_openmp == 0)
8007 return 0;
8009 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
8010 delete_omp_context);
8012 body = gimple_body (current_function_decl);
8013 scan_omp (&body, NULL);
8014 gcc_assert (taskreg_nesting_level == 0);
8016 if (all_contexts->root)
8018 struct gimplify_ctx gctx;
8020 if (task_shared_vars)
8021 push_gimplify_context (&gctx);
8022 lower_omp (&body, NULL);
8023 if (task_shared_vars)
8024 pop_gimplify_context (NULL);
8027 if (all_contexts)
8029 splay_tree_delete (all_contexts);
8030 all_contexts = NULL;
8032 BITMAP_FREE (task_shared_vars);
8033 return 0;
8036 namespace {
8038 const pass_data pass_data_lower_omp =
8040 GIMPLE_PASS, /* type */
8041 "omplower", /* name */
8042 OPTGROUP_NONE, /* optinfo_flags */
8043 false, /* has_gate */
8044 true, /* has_execute */
8045 TV_NONE, /* tv_id */
8046 PROP_gimple_any, /* properties_required */
8047 PROP_gimple_lomp, /* properties_provided */
8048 0, /* properties_destroyed */
8049 0, /* todo_flags_start */
8050 0, /* todo_flags_finish */
8053 class pass_lower_omp : public gimple_opt_pass
8055 public:
8056 pass_lower_omp(gcc::context *ctxt)
8057 : gimple_opt_pass(pass_data_lower_omp, ctxt)
8060 /* opt_pass methods: */
8061 unsigned int execute () { return execute_lower_omp (); }
8063 }; // class pass_lower_omp
8065 } // anon namespace
8067 gimple_opt_pass *
8068 make_pass_lower_omp (gcc::context *ctxt)
8070 return new pass_lower_omp (ctxt);
8073 /* The following is a utility to diagnose OpenMP structured block violations.
8074 It is not part of the "omplower" pass, as that's invoked too late. It
8075 should be invoked by the respective front ends after gimplification. */
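/* Example of what gets diagnosed (hypothetical user code):

       #pragma omp parallel
       {
         if (err)
           goto out;          <- branches out of the structured block
       }
     out:
       handle_error ();

   diagnose_sb_1 records that label OUT belongs to the outermost (NULL)
   context; diagnose_sb_2 then sees the GIMPLE_GOTO inside the parallel,
   whose destination has a different context, and diagnose_sb_0 reports
   "invalid branch to/from an OpenMP structured block".  */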
8077 static splay_tree all_labels;
8079 /* Check for mismatched contexts and generate an error if needed. Return
8080 true if an error is detected. */
8082 static bool
8083 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
8084 gimple branch_ctx, gimple label_ctx)
8086 if (label_ctx == branch_ctx)
8087 return false;
8091 Previously we kept track of the label's entire context in diagnose_sb_[12]
8092 so we could traverse it and issue a correct "exit" or "enter" error
8093 message upon a structured block violation.
8095 We built the context by building a list with tree_cons'ing, but there is
8096 no easy counterpart in gimple tuples. It seems like far too much work
8097 for issuing exit/enter error messages. If someone really misses the
8098 distinct error message... patches welcome.
8101 #if 0
8102   /* Try to avoid confusing the user by producing an error message
8103 with correct "exit" or "enter" verbiage. We prefer "exit"
8104 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
8105 if (branch_ctx == NULL)
8106 exit_p = false;
8107 else
8109 while (label_ctx)
8111 if (TREE_VALUE (label_ctx) == branch_ctx)
8113 exit_p = false;
8114 break;
8116 label_ctx = TREE_CHAIN (label_ctx);
8120 if (exit_p)
8121 error ("invalid exit from OpenMP structured block");
8122 else
8123 error ("invalid entry to OpenMP structured block");
8124 #endif
8126 /* If it's obvious we have an invalid entry, be specific about the error. */
8127 if (branch_ctx == NULL)
8128 error ("invalid entry to OpenMP structured block");
8129 else
8130 /* Otherwise, be vague and lazy, but efficient. */
8131 error ("invalid branch to/from an OpenMP structured block");
8133 gsi_replace (gsi_p, gimple_build_nop (), false);
8134 return true;
8137 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
8138 where each label is found. */
8140 static tree
8141 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
8142 struct walk_stmt_info *wi)
8144 gimple context = (gimple) wi->info;
8145 gimple inner_context;
8146 gimple stmt = gsi_stmt (*gsi_p);
8148 *handled_ops_p = true;
8150 switch (gimple_code (stmt))
8152 WALK_SUBSTMTS;
8154 case GIMPLE_OMP_PARALLEL:
8155 case GIMPLE_OMP_TASK:
8156 case GIMPLE_OMP_SECTIONS:
8157 case GIMPLE_OMP_SINGLE:
8158 case GIMPLE_OMP_SECTION:
8159 case GIMPLE_OMP_MASTER:
8160 case GIMPLE_OMP_ORDERED:
8161 case GIMPLE_OMP_CRITICAL:
8162 /* The minimal context here is just the current OMP construct. */
8163 inner_context = stmt;
8164 wi->info = inner_context;
8165 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
8166 wi->info = context;
8167 break;
8169 case GIMPLE_OMP_FOR:
8170 inner_context = stmt;
8171 wi->info = inner_context;
8172 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
8173 walk them. */
8174 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
8175 diagnose_sb_1, NULL, wi);
8176 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
8177 wi->info = context;
8178 break;
8180 case GIMPLE_LABEL:
8181 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
8182 (splay_tree_value) context);
8183 break;
8185 default:
8186 break;
8189 return NULL_TREE;
8192 /* Pass 2: Check each branch and see if its context differs from that of
8193 the destination label's context. */
8195 static tree
8196 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
8197 struct walk_stmt_info *wi)
8199 gimple context = (gimple) wi->info;
8200 splay_tree_node n;
8201 gimple stmt = gsi_stmt (*gsi_p);
8203 *handled_ops_p = true;
8205 switch (gimple_code (stmt))
8207 WALK_SUBSTMTS;
8209 case GIMPLE_OMP_PARALLEL:
8210 case GIMPLE_OMP_TASK:
8211 case GIMPLE_OMP_SECTIONS:
8212 case GIMPLE_OMP_SINGLE:
8213 case GIMPLE_OMP_SECTION:
8214 case GIMPLE_OMP_MASTER:
8215 case GIMPLE_OMP_ORDERED:
8216 case GIMPLE_OMP_CRITICAL:
8217 wi->info = stmt;
8218 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
8219 wi->info = context;
8220 break;
8222 case GIMPLE_OMP_FOR:
8223 wi->info = stmt;
8224 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
8225 walk them. */
8226 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
8227 diagnose_sb_2, NULL, wi);
8228 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
8229 wi->info = context;
8230 break;
8232 case GIMPLE_COND:
8234 tree lab = gimple_cond_true_label (stmt);
8235 if (lab)
8237 n = splay_tree_lookup (all_labels,
8238 (splay_tree_key) lab);
8239 diagnose_sb_0 (gsi_p, context,
8240 n ? (gimple) n->value : NULL);
8242 lab = gimple_cond_false_label (stmt);
8243 if (lab)
8245 n = splay_tree_lookup (all_labels,
8246 (splay_tree_key) lab);
8247 diagnose_sb_0 (gsi_p, context,
8248 n ? (gimple) n->value : NULL);
8251 break;
8253 case GIMPLE_GOTO:
8255 tree lab = gimple_goto_dest (stmt);
8256 if (TREE_CODE (lab) != LABEL_DECL)
8257 break;
8259 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
8260 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
8262 break;
8264 case GIMPLE_SWITCH:
8266 unsigned int i;
8267 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
8269 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
8270 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
8271 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
8272 break;
8275 break;
8277 case GIMPLE_RETURN:
8278 diagnose_sb_0 (gsi_p, context, NULL);
8279 break;
8281 default:
8282 break;
8285 return NULL_TREE;
8288 static unsigned int
8289 diagnose_omp_structured_block_errors (void)
8291 struct walk_stmt_info wi;
8292 gimple_seq body = gimple_body (current_function_decl);
8294 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
8296 memset (&wi, 0, sizeof (wi));
8297 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
8299 memset (&wi, 0, sizeof (wi));
8300 wi.want_locations = true;
8301 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
8303 gimple_set_body (current_function_decl, body);
8305 splay_tree_delete (all_labels);
8306 all_labels = NULL;
8308 return 0;
8311 static bool
8312 gate_diagnose_omp_blocks (void)
8314 return flag_openmp != 0;
8317 namespace {
8319 const pass_data pass_data_diagnose_omp_blocks =
8321 GIMPLE_PASS, /* type */
8322 "*diagnose_omp_blocks", /* name */
8323 OPTGROUP_NONE, /* optinfo_flags */
8324 true, /* has_gate */
8325 true, /* has_execute */
8326 TV_NONE, /* tv_id */
8327 PROP_gimple_any, /* properties_required */
8328 0, /* properties_provided */
8329 0, /* properties_destroyed */
8330 0, /* todo_flags_start */
8331 0, /* todo_flags_finish */
8334 class pass_diagnose_omp_blocks : public gimple_opt_pass
8336 public:
8337 pass_diagnose_omp_blocks(gcc::context *ctxt)
8338 : gimple_opt_pass(pass_data_diagnose_omp_blocks, ctxt)
8341 /* opt_pass methods: */
8342 bool gate () { return gate_diagnose_omp_blocks (); }
8343 unsigned int execute () {
8344 return diagnose_omp_structured_block_errors ();
8347 }; // class pass_diagnose_omp_blocks
8349 } // anon namespace
8351 gimple_opt_pass *
8352 make_pass_diagnose_omp_blocks (gcc::context *ctxt)
8354 return new pass_diagnose_omp_blocks (ctxt);
8357 #include "gt-omp-low.h"