/* Lowering pass for OMP directives.  Converts OMP directives into explicit
   calls to the runtime library (libgomp), data marshalling to implement data
   sharing and copying clauses, offloading to accelerators, and more.

   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005-2016 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "memmodel.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "expmed.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "cgraph.h"
#include "pretty-print.h"
#include "diagnostic-core.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "flags.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "except.h"
#include "splay-tree.h"
#include "cfgloop.h"
#include "common/common-target.h"
#include "omp-low.h"
#include "gimple-low.h"
#include "tree-cfgcleanup.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "tree-nested.h"
#include "tree-eh.h"
#include "cilk.h"
#include "context.h"
#include "lto-section-names.h"
#include "gomp-constants.h"
#include "gimple-pretty-print.h"
#include "symbol-summary.h"
#include "hsa.h"
#include "params.h"
/* Lowering of OMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for regions which are then moved to a new
   function, to be invoked by the thread library, or offloaded.  */
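
/* Illustrative sketch (editorial, not part of the original sources): a
   directive such as

	#pragma omp parallel shared (x)
	  x = foo ();

   is scanned in the first phase, the shared variable X becomes a field of
   the .omp_data_s record, and pass_expand_omp later outlines the body into
   a child function roughly of the form

	void bar._omp_fn.0 (struct .omp_data_s *.omp_data_i)
	{
	  .omp_data_i->x = foo ();
	}

   which libgomp invokes on every thread of the team.  The names shown
   here are only for illustration.  */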
/* OMP region information.  Every parallel and workshare
   directive is enclosed between two markers, the OMP_* directive
   and a corresponding GIMPLE_OMP_RETURN statement.  */

struct omp_region
{
  /* The enclosing region.  */
  struct omp_region *outer;

  /* First child region.  */
  struct omp_region *inner;

  /* Next peer region.  */
  struct omp_region *next;

  /* Block containing the omp directive as its last stmt.  */
  basic_block entry;

  /* Block containing the GIMPLE_OMP_RETURN as its last stmt.  */
  basic_block exit;

  /* Block containing the GIMPLE_OMP_CONTINUE as its last stmt.  */
  basic_block cont;

  /* If this is a combined parallel+workshare region, this is a list
     of additional arguments needed by the combined parallel+workshare
     library call.  */
  vec<tree, va_gc> *ws_args;

  /* The code for the omp directive of this region.  */
  enum gimple_code type;

  /* Schedule kind, only used for GIMPLE_OMP_FOR type regions.  */
  enum omp_clause_schedule_kind sched_kind;

  /* Schedule modifiers.  */
  unsigned char sched_modifiers;

  /* True if this is a combined parallel+workshare region.  */
  bool is_combined_parallel;

  /* The ordered stmt if type is GIMPLE_OMP_ORDERED and it has
     a depend clause.  */
  gomp_ordered *ord_stmt;
};
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple *stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* Label to which GOMP_cancel{,llation_point} and explicit and implicit
     barriers should jump to during omplower pass.  */
  tree cancel_label;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;

  /* True if this construct can be cancelled.  */
  bool cancellable;
};
/* A structure holding the elements of:
   for (V = N1; V cond N2; V += STEP) [...] */

struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gomp_for *for_stmt;
  tree pre, iter_type;
  int collapse;
  int ordered;
  bool have_nowait, have_ordered, simd_schedule;
  unsigned char sched_modifiers;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};
/* Describe the OpenACC looping structure of a function.  The entire
   function is held in a 'NULL' loop.  */

struct oacc_loop
{
  oacc_loop *parent;   /* Containing loop.  */

  oacc_loop *child;    /* First inner loop.  */

  oacc_loop *sibling;  /* Next loop within same parent.  */

  location_t loc;      /* Location of the loop start.  */

  gcall *marker;       /* Initial head marker.  */

  gcall *heads[GOMP_DIM_MAX];  /* Head marker functions.  */
  gcall *tails[GOMP_DIM_MAX];  /* Tail marker functions.  */

  tree routine;        /* Pseudo-loop enclosing a routine.  */

  unsigned mask;       /* Partitioning mask.  */
  unsigned inner;      /* Partitioning of inner loops.  */
  unsigned flags;      /* Partitioning flags.  */
  unsigned ifns;       /* Contained loop abstraction functions.  */
  tree chunk_size;     /* Chunk size.  */
  gcall *head_end;     /* Final marker of head sequence.  */
};
/* Flags for an OpenACC loop.  */

enum oacc_loop_flags {
  OLF_SEQ         = 1u << 0,  /* Explicitly sequential  */
  OLF_AUTO        = 1u << 1,  /* Compiler chooses axes.  */
  OLF_INDEPENDENT = 1u << 2,  /* Iterations are known independent.  */
  OLF_GANG_STATIC = 1u << 3,  /* Gang partitioning is static (has op). */

  /* Explicitly specified loop axes.  */
  OLF_DIM_BASE = 4,
  OLF_DIM_GANG   = 1u << (OLF_DIM_BASE + GOMP_DIM_GANG),
  OLF_DIM_WORKER = 1u << (OLF_DIM_BASE + GOMP_DIM_WORKER),
  OLF_DIM_VECTOR = 1u << (OLF_DIM_BASE + GOMP_DIM_VECTOR),

  OLF_MAX = OLF_DIM_BASE + GOMP_DIM_MAX
};
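
/* For illustration (editorial note): a "#pragma acc loop gang vector"
   would be represented by OLF_DIM_GANG | OLF_DIM_VECTOR in the flags
   word, while "#pragma acc loop seq" carries just OLF_SEQ;
   OLF_GANG_STATIC additionally records that a gang (static:N) operand
   is present.  */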
static splay_tree all_contexts;
static int taskreg_nesting_level;
static int target_nesting_level;
static struct omp_region *root_omp_region;
static bitmap task_shared_vars;
static vec<omp_context *> taskreg_contexts;
static bool omp_any_child_fn_dumped;

static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);
static gphi *find_phi_with_arg_on_edge (tree, edge);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;
/* Return true if CTX corresponds to an oacc parallel region.  */

static bool
is_oacc_parallel (omp_context *ctx)
{
  enum gimple_code outer_type = gimple_code (ctx->stmt);
  return ((outer_type == GIMPLE_OMP_TARGET)
	  && (gimple_omp_target_kind (ctx->stmt)
	      == GF_OMP_TARGET_KIND_OACC_PARALLEL));
}

/* Return true if CTX corresponds to an oacc kernels region.  */

static bool
is_oacc_kernels (omp_context *ctx)
{
  enum gimple_code outer_type = gimple_code (ctx->stmt);
  return ((outer_type == GIMPLE_OMP_TARGET)
	  && (gimple_omp_target_kind (ctx->stmt)
	      == GF_OMP_TARGET_KIND_OACC_KERNELS));
}
/* If DECL is the artificial dummy VAR_DECL created for non-static
   data member privatization, return the underlying "this" parameter,
   otherwise return NULL.  */

tree
omp_member_access_dummy_var (tree decl)
{
  if (!VAR_P (decl)
      || !DECL_ARTIFICIAL (decl)
      || !DECL_IGNORED_P (decl)
      || !DECL_HAS_VALUE_EXPR_P (decl)
      || !lang_hooks.decls.omp_disregard_value_expr (decl, false))
    return NULL_TREE;

  tree v = DECL_VALUE_EXPR (decl);
  if (TREE_CODE (v) != COMPONENT_REF)
    return NULL_TREE;

  while (1)
    switch (TREE_CODE (v))
      {
      case COMPONENT_REF:
      case MEM_REF:
      case INDIRECT_REF:
      CASE_CONVERT:
      case POINTER_PLUS_EXPR:
	v = TREE_OPERAND (v, 0);
	continue;
      case PARM_DECL:
	if (DECL_CONTEXT (v) == current_function_decl
	    && DECL_ARTIFICIAL (v)
	    && TREE_CODE (TREE_TYPE (v)) == POINTER_TYPE)
	  return v;
	return NULL_TREE;
      default:
	return NULL_TREE;
      }
}
/* Helper for unshare_and_remap, called through walk_tree.  */

static tree
unshare_and_remap_1 (tree *tp, int *walk_subtrees, void *data)
{
  tree *pair = (tree *) data;
  if (*tp == pair[0])
    {
      *tp = unshare_expr (pair[1]);
      *walk_subtrees = 0;
    }
  else if (IS_TYPE_OR_DECL_P (*tp))
    *walk_subtrees = 0;
  return NULL_TREE;
}

/* Return unshare_expr (X) with all occurrences of FROM
   replaced with TO.  */

static tree
unshare_and_remap (tree x, tree from, tree to)
{
  tree pair[2] = { from, to };
  x = unshare_expr (x);
  walk_tree (&x, unshare_and_remap_1, pair, NULL);
  return x;
}

/* Holds offload tables with decls.  */
vec<tree, va_gc> *offload_funcs, *offload_vars;
/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
/* Find an OMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}

/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}

/* Return true if CTX is for an omp taskloop.  */

static inline bool
is_taskloop_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_TASKLOOP;
}

/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return is_parallel_ctx (ctx) || is_task_ctx (ctx);
}

/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}
/* Adjust *COND_CODE and *N2 so that the former is either LT_EXPR or
   GT_EXPR.  */

static void
adjust_for_condition (location_t loc, enum tree_code *cond_code, tree *n2)
{
  switch (*cond_code)
    {
    case LT_EXPR:
    case GT_EXPR:
    case NE_EXPR:
      break;
    case LE_EXPR:
      if (POINTER_TYPE_P (TREE_TYPE (*n2)))
	*n2 = fold_build_pointer_plus_hwi_loc (loc, *n2, 1);
      else
	*n2 = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (*n2), *n2,
			       build_int_cst (TREE_TYPE (*n2), 1));
      *cond_code = LT_EXPR;
      break;
    case GE_EXPR:
      if (POINTER_TYPE_P (TREE_TYPE (*n2)))
	*n2 = fold_build_pointer_plus_hwi_loc (loc, *n2, -1);
      else
	*n2 = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (*n2), *n2,
			       build_int_cst (TREE_TYPE (*n2), 1));
      *cond_code = GT_EXPR;
      break;
    default:
      gcc_unreachable ();
    }
}
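
/* For example (editorial note): a source condition "i <= n" is rewritten
   here as "i < n + 1" and "i >= n" as "i > n - 1", so the iteration-count
   computations below only ever have to handle LT_EXPR, GT_EXPR and
   NE_EXPR.  */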
/* Return the looping step from INCR, extracted from the step of a gimple omp
   for statement.  */

static tree
get_omp_for_step_from_incr (location_t loc, tree incr)
{
  tree step;
  switch (TREE_CODE (incr))
    {
    case PLUS_EXPR:
      step = TREE_OPERAND (incr, 1);
      break;
    case POINTER_PLUS_EXPR:
      step = fold_convert (ssizetype, TREE_OPERAND (incr, 1));
      break;
    case MINUS_EXPR:
      step = TREE_OPERAND (incr, 1);
      step = fold_build1_loc (loc, NEGATE_EXPR, TREE_TYPE (step), step);
      break;
    default:
      gcc_unreachable ();
    }
  return step;
}
/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gomp_for *for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
  bool distribute = gimple_omp_for_kind (for_stmt)
		    == GF_OMP_FOR_KIND_DISTRIBUTE;
  bool taskloop = gimple_omp_for_kind (for_stmt)
		  == GF_OMP_FOR_KIND_TASKLOOP;
  tree iterv, countv;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  if (gimple_omp_for_collapse (for_stmt) > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = distribute || simd;
  fd->have_ordered = false;
  fd->collapse = 1;
  fd->ordered = 0;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->sched_modifiers = 0;
  fd->chunk_size = NULL_TREE;
  fd->simd_schedule = false;
  if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_CILKFOR;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	if (OMP_CLAUSE_ORDERED_EXPR (t))
	  fd->ordered = tree_to_shwi (OMP_CLAUSE_ORDERED_EXPR (t));
	break;
      case OMP_CLAUSE_SCHEDULE:
	gcc_assert (!distribute && !taskloop);
	fd->sched_kind
	  = (enum omp_clause_schedule_kind)
	    (OMP_CLAUSE_SCHEDULE_KIND (t) & OMP_CLAUSE_SCHEDULE_MASK);
	fd->sched_modifiers = (OMP_CLAUSE_SCHEDULE_KIND (t)
			       & ~OMP_CLAUSE_SCHEDULE_MASK);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	fd->simd_schedule = OMP_CLAUSE_SCHEDULE_SIMD (t);
	break;
      case OMP_CLAUSE_DIST_SCHEDULE:
	gcc_assert (distribute);
	fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	fd->collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (t));
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
	break;
      default:
	break;
      }
  if (fd->ordered && fd->collapse == 1 && loops != NULL)
    {
      fd->loops = loops;
      iterv = NULL_TREE;
      countv = NULL_TREE;
      collapse_iter = &iterv;
      collapse_count = &countv;
    }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (taskloop)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_RUNTIME;
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  int cnt = fd->ordered ? fd->ordered : fd->collapse;
  for (i = 0; i < cnt; i++)
    {
      if (i == 0 && fd->collapse == 1 && (fd->ordered == 0 || loops == NULL))
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      gcc_assert (loop->cond_code != NE_EXPR
		  || gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_CILKSIMD
		  || gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_CILKFOR);
      adjust_for_condition (loc, &loop->cond_code, &loop->n2);

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      loop->step = get_omp_for_step_from_incr (loc, t);

      if (simd
	  || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
	      && !fd->have_ordered))
	{
	  if (fd->collapse == 1)
	    iter_type = TREE_TYPE (loop->v);
	  else if (i == 0
		   || TYPE_PRECISION (iter_type)
		      < TYPE_PRECISION (TREE_TYPE (loop->v)))
	    iter_type
	      = build_nonstandard_integer_type
		  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
	}
      else if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (i >= fd->collapse)
	continue;

      if (collapse_count && *collapse_count == NULL)
	{
	  t = fold_binary (loop->cond_code, boolean_type_node,
			   fold_convert (TREE_TYPE (loop->v), loop->n1),
			   fold_convert (TREE_TYPE (loop->v), loop->n2));
	  if (t && integer_zerop (t))
	    count = build_zero_cst (long_long_unsigned_type_node);
	  else if ((i == 0 || count != NULL_TREE)
		   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		   && TREE_CONSTANT (loop->n1)
		   && TREE_CONSTANT (loop->n2)
		   && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
				   PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc, itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
					 MULT_EXPR, long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else if (count && !integer_zerop (count))
	    count = NULL_TREE;
	}
    }

  if (count
      && !simd
      && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered))
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1 || (fd->ordered && loops))
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
  else if (loops)
    loops[0] = fd->loop;
}
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple *ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (as_a <gomp_for *> (ws_stmt), &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}
static int omp_max_vf (void);

/* Adjust CHUNK_SIZE from SCHEDULE clause, depending on simd modifier
   presence (SIMD_SCHEDULE).  */

static tree
omp_adjust_chunk_size (tree chunk_size, bool simd_schedule)
{
  if (!simd_schedule)
    return chunk_size;

  int vf = omp_max_vf ();
  if (vf == 1)
    return chunk_size;

  tree type = TREE_TYPE (chunk_size);
  chunk_size = fold_build2 (PLUS_EXPR, type, chunk_size,
			    build_int_cst (type, vf - 1));
  return fold_build2 (BIT_AND_EXPR, type, chunk_size,
		      build_int_cst (type, -vf));
}
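
/* Worked example (editorial note): with a maximum vectorization factor
   of 8, a chunk size of 10 becomes (10 + 7) & -8 = 16, i.e. the chunk
   size is rounded up to the next multiple of the vectorization factor so
   that each chunk vectorizes evenly.  */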
/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static vec<tree, va_gc> *
get_ws_args_for (gimple *par_stmt, gimple *ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gomp_for *for_stmt = dyn_cast <gomp_for *> (ws_stmt))
    {
      struct omp_for_data fd;
      tree n1, n2;

      extract_omp_for_data (for_stmt, &fd, NULL);
      n1 = fd.loop.n1;
      n2 = fd.loop.n2;

      if (gimple_omp_for_combined_into_p (for_stmt))
	{
	  tree innerc
	    = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
			       OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n1 = OMP_CLAUSE_DECL (innerc);
	  innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n2 = OMP_CLAUSE_DECL (innerc);
	}

      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  t = omp_adjust_chunk_size (t, fd.simd_schedule);
	  ws_args->quick_push (t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple *par_stmt = last_stmt (par_entry_bb);
      gimple *ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || ((OMP_CLAUSE_SCHEDULE_KIND (c) & OMP_CLAUSE_SCHEDULE_MASK)
		  == OMP_CLAUSE_SCHEDULE_STATIC)
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
    }
}
/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Return the type of a decl.  If the decl is reference type,
   return its base type.  */
static inline tree
get_base_type (tree decl)
{
  tree type = TREE_TYPE (decl);
  if (is_reference (decl))
    type = TREE_TYPE (type);
  return type;
}

/* Lookup variables.  The "maybe" form allows the variable not to have
   been entered; otherwise we assert that the variable must have been
   entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (const_cast<tree> (var));
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (splay_tree_key key, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map, key);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  return lookup_sfield ((splay_tree_key) var, ctx);
}

static inline tree
maybe_lookup_field (splay_tree_key key, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, key);
  return n ? (tree) n->value : NULL_TREE;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  return maybe_lookup_field ((splay_tree_key) var, ctx);
}
/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl))
      || TYPE_ATOMIC (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      gcc_assert (!is_gimple_omp_oacc (shared_ctx->stmt));

      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* lower_send_shared_vars only uses copy-in, but not copy-out
	 for these.  */
      if (TREE_READONLY (decl)
	  || ((TREE_CODE (decl) == RESULT_DECL
	       || TREE_CODE (decl) == PARM_DECL)
	      && DECL_BY_REFERENCE (decl)))
	return false;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out.  As tasks can be
	 deferred or executed in a different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer) && !omp_member_access_dummy_var (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}
/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  /* If VAR is listed in task_shared_vars, it means it wasn't
     originally addressable and is addressable only because the task
     needs to take its address.  But privatized copies of that var don't
     need to have their address taken.  */
  if (TREE_ADDRESSABLE (var)
      && task_shared_vars
      && bitmap_bit_p (task_shared_vars, DECL_UID (var)))
    TREE_ADDRESSABLE (copy) = 0;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */
static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;
  return ret;
}
/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  TREE_THIS_NOTRAP (x) = 1;
  x = omp_build_component_ref (x, field);
  if (by_ref)
    {
      x = build_simple_mem_ref (x);
      TREE_THIS_NOTRAP (x) = 1;
    }

  return x;
}
/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx, bool lastprivate = false)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx, lastprivate);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	   && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
    {
      /* #pragma omp simd isn't a worksharing construct, and can reference
	 even private vars in its linear etc. clauses.  */
      x = NULL_TREE;
      if (ctx->outer && is_taskreg_ctx (ctx))
	x = lookup_decl (var, ctx->outer);
      else if (ctx->outer)
	x = maybe_lookup_decl_in_outer_ctx (var, ctx);
      if (x == NULL_TREE)
	x = var;
    }
  else if (lastprivate && is_taskloop_ctx (ctx))
    {
      gcc_assert (ctx->outer);
      splay_tree_node n
	= splay_tree_lookup (ctx->outer->field_map,
			     (splay_tree_key) &DECL_UID (var));
      if (n == NULL)
	{
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx->outer)))
	    x = var;
	  else
	    x = lookup_decl (var, ctx->outer);
	}
      else
	{
	  tree field = (tree) n->value;
	  /* If the receiver record type was remapped in the child function,
	     remap the field into the new record type.  */
	  x = maybe_lookup_field (field, ctx->outer);
	  if (x != NULL)
	    field = x;

	  x = build_simple_mem_ref (ctx->outer->receiver_decl);
	  x = omp_build_component_ref (x, field);
	  if (use_pointer_for_field (var, ctx->outer))
	    x = build_simple_mem_ref (x);
	}
    }
  else if (ctx->outer)
    {
      omp_context *outer = ctx->outer;
      if (gimple_code (outer->stmt) == GIMPLE_OMP_GRID_BODY)
	{
	  outer = outer->outer;
	  gcc_assert (outer
		      && gimple_code (outer->stmt) != GIMPLE_OMP_GRID_BODY);
	}
      x = lookup_decl (var, outer);
    }
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else if (omp_member_access_dummy_var (var))
    x = var;
  else
    gcc_unreachable ();

  if (x == var)
    {
      tree t = omp_member_access_dummy_var (var);
      if (t)
	{
	  x = DECL_VALUE_EXPR (var);
	  tree o = maybe_lookup_decl_in_outer_ctx (t, ctx);
	  if (o != t)
	    x = unshare_and_remap (x, t, o);
	  else
	    x = unshare_expr (x);
	}
    }

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}
/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (splay_tree_key key, omp_context *ctx)
{
  tree field = lookup_sfield (key, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  return build_sender_ref ((splay_tree_key) var, ctx);
}
/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  If
   BASE_POINTERS_RESTRICT, declare the field with restrict.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx,
		   bool base_pointers_restrict = false)
{
  tree field, type, sfield = NULL_TREE;
  splay_tree_key key = (splay_tree_key) var;

  if ((mask & 8) != 0)
    {
      key = (splay_tree_key) &DECL_UID (var);
      gcc_checking_assert (key != (splay_tree_key) var);
    }
  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, key));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, key));
  gcc_assert ((mask & 3) == 3
	      || !is_gimple_omp_oacc (ctx->stmt));

  type = TREE_TYPE (var);
  /* Prevent redeclaring the var in the split-off function with a restrict
     pointer type.  Note that we only clear type itself, restrict qualifiers in
     the pointed-to type will be ignored by points-to analysis.  */
  if (POINTER_TYPE_P (type)
      && TYPE_RESTRICT (type))
    type = build_qualified_type (type, TYPE_QUALS (type) & ~TYPE_QUAL_RESTRICT);

  if (mask & 4)
    {
      gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
      type = build_pointer_type (build_pointer_type (type));
    }
  else if (by_ref)
    {
      type = build_pointer_type (type);
      if (base_pointers_restrict)
	type = build_qualified_type (type, TYPE_QUAL_RESTRICT);
    }
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      SET_DECL_ALIGN (field, DECL_ALIGN (var));
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    SET_DECL_ALIGN (field, TYPE_ALIGN (type));

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  SET_DECL_ALIGN (sfield, DECL_ALIGN (field));
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (t),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, key, (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, key, (splay_tree_value) sfield);
}
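
/* Editorial note, inferred from the uses above rather than stated in the
   original sources: MASK appears to act as a bit set, where bit 0 installs
   the field in the receiver record (ctx->field_map / record_type), bit 1
   installs it in the sender record (ctx->sfield_map / srecord_type), bit 2
   turns an array decl into a pointer-to-pointer field, and bit 3 keys the
   splay tree on &DECL_UID (var) instead of the decl itself.  */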
static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}
/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}
/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}
/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}
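
/* Example output (editorial, illustrative only): for a parallel region
   whose entry is bb 2 and exit is bb 7, enclosing a workshare loop with
   entry bb 3, continue bb 5 and exit bb 6, debug_all_omp_regions would
   print something like

     bb 2: gimple_omp_parallel
	 bb 3: gimple_omp_for
	 bb 5: GIMPLE_OMP_CONTINUE
	 bb 6: GIMPLE_OMP_RETURN
     bb 7: GIMPLE_OMP_RETURN

   where the indentation reflects region nesting and the basic block
   numbers are made up for the example.  */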
/* Create a new parallel region of type TYPE starting at basic block BB
   inside region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}
/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple *stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node::get (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = new hash_map<tree, tree>;

  return ctx;
}
static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gomp_task *task_stmt)
{
  struct function *child_cfun;
  tree child_fn;
  gimple_seq seq = NULL, new_seq;
  gbind *bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;

  push_cfun (child_cfun);
  bind = gimplify_body (child_fn, false);
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();

  /* Inform the callgraph about the new function.  */
  cgraph_node *node = cgraph_node::get_create (child_fn);
  node->parallelized_function = 1;
  cgraph_node::add_new_function (child_fn, false);
}
/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  delete ctx->cb.decl_map;

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (as_a <gomp_task *> (ctx->stmt));

  XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  if (!ctx->receiver_decl)
    return;
  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  /* In a target region we never modify any of the pointers in *.omp_data_i,
     so attempt to help the optimizers.  */
  if (is_gimple_omp_offloaded (ctx->stmt))
    type = build_qualified_type (type, TYPE_QUAL_CONST);

  TREE_TYPE (ctx->receiver_decl)
    = build_qualified_type (build_reference_type (type), TYPE_QUAL_RESTRICT);
}
/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  If BASE_POINTERS_RESTRICT, install var field with
   restrict.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx,
		      bool base_pointers_restrict = false)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    {
	      /* Global variables don't need to be copied,
		 the receiver side will use them directly.  */
	      tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
	      if (is_global_var (odecl))
		break;
	      insert_decl_map (&ctx->cb, decl, odecl);
	      break;
	    }
	  gcc_assert (is_taskreg_ctx (ctx));
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
	    {
	      use_pointer_for_field (decl, ctx);
	      break;
	    }
	  by_ref = use_pointer_for_field (decl, NULL);
	  if ((! TREE_READONLY (decl) && !OMP_CLAUSE_SHARED_READONLY (c))
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      by_ref = use_pointer_for_field (decl, ctx);
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && TREE_CODE (decl) == MEM_REF)
	    {
	      tree t = TREE_OPERAND (decl, 0);
	      if (TREE_CODE (t) == POINTER_PLUS_EXPR)
		t = TREE_OPERAND (t, 0);
	      if (TREE_CODE (t) == INDIRECT_REF
		  || TREE_CODE (t) == ADDR_EXPR)
		t = TREE_OPERAND (t, 0);
	      install_var_local (t, ctx);
	      if (is_taskreg_ctx (ctx)
		  && !is_global_var (maybe_lookup_decl_in_outer_ctx (t, ctx))
		  && !is_variable_sized (t))
		{
		  by_ref = use_pointer_for_field (t, ctx);
		  install_var_field (t, by_ref, 3, ctx);
		}
	      break;
	    }
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
	       || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IS_DEVICE_PTR)
	      && is_gimple_omp_offloaded (ctx->stmt))
	    {
	      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
		install_var_field (decl, !is_reference (decl), 3, ctx);
	      else if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
		install_var_field (decl, true, 3, ctx);
	      else
		install_var_field (decl, false, 3, ctx);
	    }
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_USE_DEVICE_PTR:
	  decl = OMP_CLAUSE_DECL (c);
	  if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
	    install_var_field (decl, true, 3, ctx);
	  else
	    install_var_field (decl, false, 3, ctx);
	  if (DECL_SIZE (decl)
	      && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
	    {
	      tree decl2 = DECL_VALUE_EXPR (decl);
	      gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
	      decl2 = TREE_OPERAND (decl2, 0);
	      gcc_assert (DECL_P (decl2));
	      install_var_local (decl2, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_IS_DEVICE_PTR:
	  decl = OMP_CLAUSE_DECL (c);
	  goto do_private;

	case OMP_CLAUSE__LOOPTEMP_:
	  gcc_assert (is_taskreg_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  install_var_field (decl, false, 3, ctx);
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE_PRIORITY:
	case OMP_CLAUSE_GRAINSIZE:
	case OMP_CLAUSE_NUM_TASKS:
	case OMP_CLAUSE__CILK_FOR_COUNT_:
	case OMP_CLAUSE_NUM_GANGS:
	case OMP_CLAUSE_NUM_WORKERS:
	case OMP_CLAUSE_VECTOR_LENGTH:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE_MAP:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
	  decl = OMP_CLAUSE_DECL (c);
	  /* Global variables with "omp declare target" attribute
	     don't need to be copied, the receiver side will use them
2059 directly. However, global variables with "omp declare target link"
2060 attribute need to be copied. */
2061 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
2062 && DECL_P (decl)
2063 && ((OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER
2064 && (OMP_CLAUSE_MAP_KIND (c)
2065 != GOMP_MAP_FIRSTPRIVATE_REFERENCE))
2066 || TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
2067 && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
2068 && varpool_node::get_create (decl)->offloadable
2069 && !lookup_attribute ("omp declare target link",
2070 DECL_ATTRIBUTES (decl)))
2071 break;
2072 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
2073 && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER)
2075 /* Ignore GOMP_MAP_POINTER kind for arrays in regions that are
2076 not offloaded; there is nothing to map for those. */
2077 if (!is_gimple_omp_offloaded (ctx->stmt)
2078 && !POINTER_TYPE_P (TREE_TYPE (decl))
2079 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
2080 break;
2082 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
2083 && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
2084 || (OMP_CLAUSE_MAP_KIND (c)
2085 == GOMP_MAP_FIRSTPRIVATE_REFERENCE)))
2087 if (TREE_CODE (decl) == COMPONENT_REF
2088 || (TREE_CODE (decl) == INDIRECT_REF
2089 && TREE_CODE (TREE_OPERAND (decl, 0)) == COMPONENT_REF
2090 && (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
2091 == REFERENCE_TYPE)))
2092 break;
2093 if (DECL_SIZE (decl)
2094 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
2096 tree decl2 = DECL_VALUE_EXPR (decl);
2097 gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
2098 decl2 = TREE_OPERAND (decl2, 0);
2099 gcc_assert (DECL_P (decl2));
2100 install_var_local (decl2, ctx);
2102 install_var_local (decl, ctx);
2103 break;
2105 if (DECL_P (decl))
2107 if (DECL_SIZE (decl)
2108 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
2110 tree decl2 = DECL_VALUE_EXPR (decl);
2111 gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
2112 decl2 = TREE_OPERAND (decl2, 0);
2113 gcc_assert (DECL_P (decl2));
2114 install_var_field (decl2, true, 3, ctx);
2115 install_var_local (decl2, ctx);
2116 install_var_local (decl, ctx);
2118 else
2120 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
2121 && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
2122 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
2123 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
2124 install_var_field (decl, true, 7, ctx);
2125 else
2126 install_var_field (decl, true, 3, ctx,
2127 base_pointers_restrict);
2128 if (is_gimple_omp_offloaded (ctx->stmt)
2129 && !OMP_CLAUSE_MAP_IN_REDUCTION (c))
2130 install_var_local (decl, ctx);
2133 else
2135 tree base = get_base_address (decl);
2136 tree nc = OMP_CLAUSE_CHAIN (c);
2137 if (DECL_P (base)
2138 && nc != NULL_TREE
2139 && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
2140 && OMP_CLAUSE_DECL (nc) == base
2141 && OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_POINTER
2142 && integer_zerop (OMP_CLAUSE_SIZE (nc)))
2144 OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
2145 OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
2147 else
2149 if (ctx->outer)
2151 scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
2152 decl = OMP_CLAUSE_DECL (c);
2154 gcc_assert (!splay_tree_lookup (ctx->field_map,
2155 (splay_tree_key) decl));
2156 tree field
2157 = build_decl (OMP_CLAUSE_LOCATION (c),
2158 FIELD_DECL, NULL_TREE, ptr_type_node);
2159 SET_DECL_ALIGN (field, TYPE_ALIGN (ptr_type_node));
2160 insert_field_into_struct (ctx->record_type, field);
2161 splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
2162 (splay_tree_value) field);
2165 break;
2167 case OMP_CLAUSE__GRIDDIM_:
2168 if (ctx->outer)
2170 scan_omp_op (&OMP_CLAUSE__GRIDDIM__SIZE (c), ctx->outer);
2171 scan_omp_op (&OMP_CLAUSE__GRIDDIM__GROUP (c), ctx->outer);
2173 break;
2175 case OMP_CLAUSE_NOWAIT:
2176 case OMP_CLAUSE_ORDERED:
2177 case OMP_CLAUSE_COLLAPSE:
2178 case OMP_CLAUSE_UNTIED:
2179 case OMP_CLAUSE_MERGEABLE:
2180 case OMP_CLAUSE_PROC_BIND:
2181 case OMP_CLAUSE_SAFELEN:
2182 case OMP_CLAUSE_SIMDLEN:
2183 case OMP_CLAUSE_THREADS:
2184 case OMP_CLAUSE_SIMD:
2185 case OMP_CLAUSE_NOGROUP:
2186 case OMP_CLAUSE_DEFAULTMAP:
2187 case OMP_CLAUSE_ASYNC:
2188 case OMP_CLAUSE_WAIT:
2189 case OMP_CLAUSE_GANG:
2190 case OMP_CLAUSE_WORKER:
2191 case OMP_CLAUSE_VECTOR:
2192 case OMP_CLAUSE_INDEPENDENT:
2193 case OMP_CLAUSE_AUTO:
2194 case OMP_CLAUSE_SEQ:
2195 break;
2197 case OMP_CLAUSE_ALIGNED:
2198 decl = OMP_CLAUSE_DECL (c);
2199 if (is_global_var (decl)
2200 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
2201 install_var_local (decl, ctx);
2202 break;
2204 case OMP_CLAUSE_TILE:
2205 case OMP_CLAUSE__CACHE_:
2206 default:
2207 gcc_unreachable ();
2211 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2213 switch (OMP_CLAUSE_CODE (c))
2215 case OMP_CLAUSE_LASTPRIVATE:
2216 /* Let the corresponding firstprivate clause create
2217 the variable. */
2218 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2219 scan_array_reductions = true;
2220 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2221 break;
2222 /* FALLTHRU */
2224 case OMP_CLAUSE_FIRSTPRIVATE:
2225 case OMP_CLAUSE_PRIVATE:
2226 case OMP_CLAUSE_LINEAR:
2227 case OMP_CLAUSE_IS_DEVICE_PTR:
2228 decl = OMP_CLAUSE_DECL (c);
2229 if (is_variable_sized (decl))
2231 if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
2232 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IS_DEVICE_PTR)
2233 && is_gimple_omp_offloaded (ctx->stmt))
2235 tree decl2 = DECL_VALUE_EXPR (decl);
2236 gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
2237 decl2 = TREE_OPERAND (decl2, 0);
2238 gcc_assert (DECL_P (decl2));
2239 install_var_local (decl2, ctx);
2240 fixup_remapped_decl (decl2, ctx, false);
2242 install_var_local (decl, ctx);
2244 fixup_remapped_decl (decl, ctx,
2245 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
2246 && OMP_CLAUSE_PRIVATE_DEBUG (c));
2247 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
2248 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
2249 scan_array_reductions = true;
2250 break;
2252 case OMP_CLAUSE_REDUCTION:
2253 decl = OMP_CLAUSE_DECL (c);
2254 if (TREE_CODE (decl) != MEM_REF)
2256 if (is_variable_sized (decl))
2257 install_var_local (decl, ctx);
2258 fixup_remapped_decl (decl, ctx, false);
2260 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2261 scan_array_reductions = true;
2262 break;
2264 case OMP_CLAUSE_SHARED:
2265 /* Ignore shared directives in teams construct. */
2266 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
2267 break;
2268 decl = OMP_CLAUSE_DECL (c);
2269 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
2270 break;
2271 if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
2273 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl,
2274 ctx->outer)))
2275 break;
2276 bool by_ref = use_pointer_for_field (decl, ctx);
2277 install_var_field (decl, by_ref, 11, ctx);
2278 break;
2280 fixup_remapped_decl (decl, ctx, false);
2281 break;
2283 case OMP_CLAUSE_MAP:
2284 if (!is_gimple_omp_offloaded (ctx->stmt))
2285 break;
2286 decl = OMP_CLAUSE_DECL (c);
2287 if (DECL_P (decl)
2288 && ((OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER
2289 && (OMP_CLAUSE_MAP_KIND (c)
2290 != GOMP_MAP_FIRSTPRIVATE_REFERENCE))
2291 || TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
2292 && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
2293 && varpool_node::get_create (decl)->offloadable)
2294 break;
2295 if (DECL_P (decl))
2297 if ((OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
2298 || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER)
2299 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
2300 && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
2302 tree new_decl = lookup_decl (decl, ctx);
2303 TREE_TYPE (new_decl)
2304 = remap_type (TREE_TYPE (decl), &ctx->cb);
2306 else if (DECL_SIZE (decl)
2307 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
2309 tree decl2 = DECL_VALUE_EXPR (decl);
2310 gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
2311 decl2 = TREE_OPERAND (decl2, 0);
2312 gcc_assert (DECL_P (decl2));
2313 fixup_remapped_decl (decl2, ctx, false);
2314 fixup_remapped_decl (decl, ctx, true);
2316 else
2317 fixup_remapped_decl (decl, ctx, false);
2319 break;
2321 case OMP_CLAUSE_COPYPRIVATE:
2322 case OMP_CLAUSE_COPYIN:
2323 case OMP_CLAUSE_DEFAULT:
2324 case OMP_CLAUSE_IF:
2325 case OMP_CLAUSE_NUM_THREADS:
2326 case OMP_CLAUSE_NUM_TEAMS:
2327 case OMP_CLAUSE_THREAD_LIMIT:
2328 case OMP_CLAUSE_DEVICE:
2329 case OMP_CLAUSE_SCHEDULE:
2330 case OMP_CLAUSE_DIST_SCHEDULE:
2331 case OMP_CLAUSE_NOWAIT:
2332 case OMP_CLAUSE_ORDERED:
2333 case OMP_CLAUSE_COLLAPSE:
2334 case OMP_CLAUSE_UNTIED:
2335 case OMP_CLAUSE_FINAL:
2336 case OMP_CLAUSE_MERGEABLE:
2337 case OMP_CLAUSE_PROC_BIND:
2338 case OMP_CLAUSE_SAFELEN:
2339 case OMP_CLAUSE_SIMDLEN:
2340 case OMP_CLAUSE_ALIGNED:
2341 case OMP_CLAUSE_DEPEND:
2342 case OMP_CLAUSE__LOOPTEMP_:
2343 case OMP_CLAUSE_TO:
2344 case OMP_CLAUSE_FROM:
2345 case OMP_CLAUSE_PRIORITY:
2346 case OMP_CLAUSE_GRAINSIZE:
2347 case OMP_CLAUSE_NUM_TASKS:
2348 case OMP_CLAUSE_THREADS:
2349 case OMP_CLAUSE_SIMD:
2350 case OMP_CLAUSE_NOGROUP:
2351 case OMP_CLAUSE_DEFAULTMAP:
2352 case OMP_CLAUSE_USE_DEVICE_PTR:
2353 case OMP_CLAUSE__CILK_FOR_COUNT_:
2354 case OMP_CLAUSE_ASYNC:
2355 case OMP_CLAUSE_WAIT:
2356 case OMP_CLAUSE_NUM_GANGS:
2357 case OMP_CLAUSE_NUM_WORKERS:
2358 case OMP_CLAUSE_VECTOR_LENGTH:
2359 case OMP_CLAUSE_GANG:
2360 case OMP_CLAUSE_WORKER:
2361 case OMP_CLAUSE_VECTOR:
2362 case OMP_CLAUSE_INDEPENDENT:
2363 case OMP_CLAUSE_AUTO:
2364 case OMP_CLAUSE_SEQ:
2365 case OMP_CLAUSE__GRIDDIM_:
2366 break;
2368 case OMP_CLAUSE_TILE:
2369 case OMP_CLAUSE__CACHE_:
2370 default:
2371 gcc_unreachable ();
2375 gcc_checking_assert (!scan_array_reductions
2376 || !is_gimple_omp_oacc (ctx->stmt));
2377 if (scan_array_reductions)
2379 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2380 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
2381 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2383 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2384 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2386 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
2387 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2388 scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2389 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
2390 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
2391 scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
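/* Illustrative example of the scanning above (hypothetical user code, not
   taken from the original sources):

     int x = 0;
     #pragma omp parallel shared(x) firstprivate(n)
     x += n;

   The SHARED clause installs a field for x in the sender/receiver record
   (by reference when use_pointer_for_field so decides) plus a local
   remapping via install_var_local; the FIRSTPRIVATE clause likewise gets
   a field and a private local copy in the child context.  */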
2395 /* Create a new name for the omp child function. Returns an identifier. If
2396 IS_CILK_FOR is true, then the suffix for the child function is
2397 "_cilk_for_fn". */
2399 static tree
2400 create_omp_child_function_name (bool task_copy, bool is_cilk_for)
2402 if (is_cilk_for)
2403 return clone_function_name (current_function_decl, "_cilk_for_fn");
2404 return clone_function_name (current_function_decl,
2405 task_copy ? "_omp_cpyfn" : "_omp_fn");
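/* Note (an assumption about the resulting names, not verified here):
   clone_function_name appends the suffix plus a counter, so for a
   containing function foo the outlined children are typically named
   along the lines of foo._omp_fn.0, foo._omp_cpyfn.1 or
   foo._cilk_for_fn.2.  */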
2408 /* Return the type of the induction variable for the _Cilk_for child
2409 function, and the type for the __high and __low variables, based on TYPE. */
2411 static tree
2412 cilk_for_check_loop_diff_type (tree type)
2414 if (TYPE_PRECISION (type) <= TYPE_PRECISION (uint32_type_node))
2416 if (TYPE_UNSIGNED (type))
2417 return uint32_type_node;
2418 else
2419 return integer_type_node;
2421 else
2423 if (TYPE_UNSIGNED (type))
2424 return uint64_type_node;
2425 else
2426 return long_long_integer_type_node;
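/* A minimal sketch of the mapping above (illustrative only, assuming
   32-bit int and 64-bit long long):

     TYPE                  returned loop-difference type
     int                   integer_type_node
     unsigned int          uint32_type_node
     long long             long_long_integer_type_node
     unsigned long long    uint64_type_node  */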
2430 /* Build a decl for the omp child function. It will not contain a body
2431 yet, just the bare decl. */
2433 static void
2434 create_omp_child_function (omp_context *ctx, bool task_copy)
2436 tree decl, type, name, t;
2438 tree cilk_for_count
2439 = (flag_cilkplus && gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
2440 ? find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2441 OMP_CLAUSE__CILK_FOR_COUNT_) : NULL_TREE;
2442 tree cilk_var_type = NULL_TREE;
2444 name = create_omp_child_function_name (task_copy,
2445 cilk_for_count != NULL_TREE);
2446 if (task_copy)
2447 type = build_function_type_list (void_type_node, ptr_type_node,
2448 ptr_type_node, NULL_TREE);
2449 else if (cilk_for_count)
2451 type = TREE_TYPE (OMP_CLAUSE_OPERAND (cilk_for_count, 0));
2452 cilk_var_type = cilk_for_check_loop_diff_type (type);
2453 type = build_function_type_list (void_type_node, ptr_type_node,
2454 cilk_var_type, cilk_var_type, NULL_TREE);
2456 else
2457 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
2459 decl = build_decl (gimple_location (ctx->stmt), FUNCTION_DECL, name, type);
2461 gcc_checking_assert (!is_gimple_omp_oacc (ctx->stmt)
2462 || !task_copy);
2463 if (!task_copy)
2464 ctx->cb.dst_fn = decl;
2465 else
2466 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
2468 TREE_STATIC (decl) = 1;
2469 TREE_USED (decl) = 1;
2470 DECL_ARTIFICIAL (decl) = 1;
2471 DECL_IGNORED_P (decl) = 0;
2472 TREE_PUBLIC (decl) = 0;
2473 DECL_UNINLINABLE (decl) = 1;
2474 DECL_EXTERNAL (decl) = 0;
2475 DECL_CONTEXT (decl) = NULL_TREE;
2476 DECL_INITIAL (decl) = make_node (BLOCK);
2477 BLOCK_SUPERCONTEXT (DECL_INITIAL (decl)) = decl;
2478 if (cgraph_node::get (current_function_decl)->offloadable)
2479 cgraph_node::get_create (decl)->offloadable = 1;
2480 else
2482 omp_context *octx;
2483 for (octx = ctx; octx; octx = octx->outer)
2484 if (is_gimple_omp_offloaded (octx->stmt))
2486 cgraph_node::get_create (decl)->offloadable = 1;
2487 if (ENABLE_OFFLOADING)
2488 g->have_offload = true;
2490 break;
2494 if (cgraph_node::get_create (decl)->offloadable
2495 && !lookup_attribute ("omp declare target",
2496 DECL_ATTRIBUTES (current_function_decl)))
2497 DECL_ATTRIBUTES (decl)
2498 = tree_cons (get_identifier ("omp target entrypoint"),
2499 NULL_TREE, DECL_ATTRIBUTES (decl));
2501 t = build_decl (DECL_SOURCE_LOCATION (decl),
2502 RESULT_DECL, NULL_TREE, void_type_node);
2503 DECL_ARTIFICIAL (t) = 1;
2504 DECL_IGNORED_P (t) = 1;
2505 DECL_CONTEXT (t) = decl;
2506 DECL_RESULT (decl) = t;
2508 /* _Cilk_for's child function requires two extra parameters called
2509 __low and __high that are set by the Cilk runtime when it calls this
2510 function. */
2511 if (cilk_for_count)
2513 t = build_decl (DECL_SOURCE_LOCATION (decl),
2514 PARM_DECL, get_identifier ("__high"), cilk_var_type);
2515 DECL_ARTIFICIAL (t) = 1;
2516 DECL_NAMELESS (t) = 1;
2517 DECL_ARG_TYPE (t) = ptr_type_node;
2518 DECL_CONTEXT (t) = current_function_decl;
2519 TREE_USED (t) = 1;
2520 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2521 DECL_ARGUMENTS (decl) = t;
2523 t = build_decl (DECL_SOURCE_LOCATION (decl),
2524 PARM_DECL, get_identifier ("__low"), cilk_var_type);
2525 DECL_ARTIFICIAL (t) = 1;
2526 DECL_NAMELESS (t) = 1;
2527 DECL_ARG_TYPE (t) = ptr_type_node;
2528 DECL_CONTEXT (t) = current_function_decl;
2529 TREE_USED (t) = 1;
2530 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2531 DECL_ARGUMENTS (decl) = t;
2534 tree data_name = get_identifier (".omp_data_i");
2535 t = build_decl (DECL_SOURCE_LOCATION (decl), PARM_DECL, data_name,
2536 ptr_type_node);
2537 DECL_ARTIFICIAL (t) = 1;
2538 DECL_NAMELESS (t) = 1;
2539 DECL_ARG_TYPE (t) = ptr_type_node;
2540 DECL_CONTEXT (t) = current_function_decl;
2541 TREE_USED (t) = 1;
2542 TREE_READONLY (t) = 1;
2543 if (cilk_for_count)
2544 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2545 DECL_ARGUMENTS (decl) = t;
2546 if (!task_copy)
2547 ctx->receiver_decl = t;
2548 else
2550 t = build_decl (DECL_SOURCE_LOCATION (decl),
2551 PARM_DECL, get_identifier (".omp_data_o"),
2552 ptr_type_node);
2553 DECL_ARTIFICIAL (t) = 1;
2554 DECL_NAMELESS (t) = 1;
2555 DECL_ARG_TYPE (t) = ptr_type_node;
2556 DECL_CONTEXT (t) = current_function_decl;
2557 TREE_USED (t) = 1;
2558 TREE_ADDRESSABLE (t) = 1;
2559 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2560 DECL_ARGUMENTS (decl) = t;
2563 /* Allocate memory for the function structure. The call to
2564 allocate_struct_function clobbers CFUN, so we need to restore
2565 it afterward. */
2566 push_struct_function (decl);
2567 cfun->function_end_locus = gimple_location (ctx->stmt);
2568 init_tree_ssa (cfun);
2569 pop_cfun ();
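/* Illustrative sketch of the decls built above (hypothetical names, not
   from the original sources):

     parallel/task body:  void foo._omp_fn.0 (void *.omp_data_i);
     task copy function:  void foo._omp_cpyfn.1 (void *.omp_data_o,
                                                 void *.omp_data_i);
     _Cilk_for child:     void foo._cilk_for_fn.2 (void *.omp_data_i,
                                                   T __low, T __high);

   where T is the type chosen by cilk_for_check_loop_diff_type.  */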
2572 /* Callback for walk_gimple_seq. Check if a combined parallel
2573 contains a GIMPLE_OMP_FOR marked gimple_omp_for_combined_into_p. */
2575 static tree
2576 find_combined_for (gimple_stmt_iterator *gsi_p,
2577 bool *handled_ops_p,
2578 struct walk_stmt_info *wi)
2580 gimple *stmt = gsi_stmt (*gsi_p);
2582 *handled_ops_p = true;
2583 switch (gimple_code (stmt))
2585 WALK_SUBSTMTS;
2587 case GIMPLE_OMP_FOR:
2588 if (gimple_omp_for_combined_into_p (stmt)
2589 && gimple_omp_for_kind (stmt)
2590 == *(const enum gf_mask *) (wi->info))
2592 wi->info = stmt;
2593 return integer_zero_node;
2595 break;
2596 default:
2597 break;
2599 return NULL;
2602 /* Add _LOOPTEMP_ clauses on OpenMP parallel or task. */
2604 static void
2605 add_taskreg_looptemp_clauses (enum gf_mask msk, gimple *stmt,
2606 omp_context *outer_ctx)
2608 struct walk_stmt_info wi;
2610 memset (&wi, 0, sizeof (wi));
2611 wi.val_only = true;
2612 wi.info = (void *) &msk;
2613 walk_gimple_seq (gimple_omp_body (stmt), find_combined_for, NULL, &wi);
2614 if (wi.info != (void *) &msk)
2616 gomp_for *for_stmt = as_a <gomp_for *> ((gimple *) wi.info);
2617 struct omp_for_data fd;
2618 extract_omp_for_data (for_stmt, &fd, NULL);
2619 /* We need two temporaries with fd.loop.v type (istart/iend)
2620 and then (fd.collapse - 1) temporaries with the same
2621 type for the count2 ... countN-1 vars if they are not constant. */
2622 size_t count = 2, i;
2623 tree type = fd.iter_type;
2624 if (fd.collapse > 1
2625 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
2627 count += fd.collapse - 1;
2628 /* If there are lastprivate clauses on the inner
2629 GIMPLE_OMP_FOR, add one more temporary for the total number
2630 of iterations (the product of count1 ... countN-1). */
2631 if (find_omp_clause (gimple_omp_for_clauses (for_stmt),
2632 OMP_CLAUSE_LASTPRIVATE))
2633 count++;
2634 else if (msk == GF_OMP_FOR_KIND_FOR
2635 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
2636 OMP_CLAUSE_LASTPRIVATE))
2637 count++;
2639 for (i = 0; i < count; i++)
2641 tree temp = create_tmp_var (type);
2642 tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
2643 insert_decl_map (&outer_ctx->cb, temp, temp);
2644 OMP_CLAUSE_DECL (c) = temp;
2645 OMP_CLAUSE_CHAIN (c) = gimple_omp_taskreg_clauses (stmt);
2646 gimple_omp_taskreg_set_clauses (stmt, c);
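/* Illustrative example (hypothetical source, not from the original code):
   for a combined construct such as

     #pragma omp parallel for
     for (i = 0; i < n; i++)
       body (i);

   the walk above finds the inner GIMPLE_OMP_FOR and attaches two
   _LOOPTEMP_ clauses (the istart/iend temporaries) to the enclosing
   parallel; collapsed loops with a non-constant iteration count and
   lastprivate clauses add further temporaries, as counted above.  */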
2651 /* Scan an OpenMP parallel directive. */
2653 static void
2654 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2656 omp_context *ctx;
2657 tree name;
2658 gomp_parallel *stmt = as_a <gomp_parallel *> (gsi_stmt (*gsi));
2660 /* Ignore parallel directives with empty bodies, unless there
2661 are copyin clauses. */
2662 if (optimize > 0
2663 && empty_body_p (gimple_omp_body (stmt))
2664 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
2665 OMP_CLAUSE_COPYIN) == NULL)
2667 gsi_replace (gsi, gimple_build_nop (), false);
2668 return;
2671 if (gimple_omp_parallel_combined_p (stmt))
2672 add_taskreg_looptemp_clauses (GF_OMP_FOR_KIND_FOR, stmt, outer_ctx);
2674 ctx = new_omp_context (stmt, outer_ctx);
2675 taskreg_contexts.safe_push (ctx);
2676 if (taskreg_nesting_level > 1)
2677 ctx->is_nested = true;
2678 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2679 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2680 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2681 name = create_tmp_var_name (".omp_data_s");
2682 name = build_decl (gimple_location (stmt),
2683 TYPE_DECL, name, ctx->record_type);
2684 DECL_ARTIFICIAL (name) = 1;
2685 DECL_NAMELESS (name) = 1;
2686 TYPE_NAME (ctx->record_type) = name;
2687 TYPE_ARTIFICIAL (ctx->record_type) = 1;
2688 if (!gimple_omp_parallel_grid_phony (stmt))
2690 create_omp_child_function (ctx, false);
2691 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
2694 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
2695 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2697 if (TYPE_FIELDS (ctx->record_type) == NULL)
2698 ctx->record_type = ctx->receiver_decl = NULL;
2701 /* Scan an OpenMP task directive. */
2703 static void
2704 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2706 omp_context *ctx;
2707 tree name, t;
2708 gomp_task *stmt = as_a <gomp_task *> (gsi_stmt (*gsi));
2710 /* Ignore task directives with empty bodies. */
2711 if (optimize > 0
2712 && empty_body_p (gimple_omp_body (stmt)))
2714 gsi_replace (gsi, gimple_build_nop (), false);
2715 return;
2718 if (gimple_omp_task_taskloop_p (stmt))
2719 add_taskreg_looptemp_clauses (GF_OMP_FOR_KIND_TASKLOOP, stmt, outer_ctx);
2721 ctx = new_omp_context (stmt, outer_ctx);
2722 taskreg_contexts.safe_push (ctx);
2723 if (taskreg_nesting_level > 1)
2724 ctx->is_nested = true;
2725 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2726 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2727 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2728 name = create_tmp_var_name (".omp_data_s");
2729 name = build_decl (gimple_location (stmt),
2730 TYPE_DECL, name, ctx->record_type);
2731 DECL_ARTIFICIAL (name) = 1;
2732 DECL_NAMELESS (name) = 1;
2733 TYPE_NAME (ctx->record_type) = name;
2734 TYPE_ARTIFICIAL (ctx->record_type) = 1;
2735 create_omp_child_function (ctx, false);
2736 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
2738 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
2740 if (ctx->srecord_type)
2742 name = create_tmp_var_name (".omp_data_a");
2743 name = build_decl (gimple_location (stmt),
2744 TYPE_DECL, name, ctx->srecord_type);
2745 DECL_ARTIFICIAL (name) = 1;
2746 DECL_NAMELESS (name) = 1;
2747 TYPE_NAME (ctx->srecord_type) = name;
2748 TYPE_ARTIFICIAL (ctx->srecord_type) = 1;
2749 create_omp_child_function (ctx, true);
2752 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2754 if (TYPE_FIELDS (ctx->record_type) == NULL)
2756 ctx->record_type = ctx->receiver_decl = NULL;
2757 t = build_int_cst (long_integer_type_node, 0);
2758 gimple_omp_task_set_arg_size (stmt, t);
2759 t = build_int_cst (long_integer_type_node, 1);
2760 gimple_omp_task_set_arg_align (stmt, t);
2765 /* If any decls have been made addressable during scan_omp,
2766 adjust their fields if needed, and lay out the record types
2767 of parallel/task constructs. */
2769 static void
2770 finish_taskreg_scan (omp_context *ctx)
2772 if (ctx->record_type == NULL_TREE)
2773 return;
2775 /* If any task_shared_vars were needed, verify for all
2776 OMP_CLAUSE_SHARED clauses on GIMPLE_OMP_{PARALLEL,TASK}
2777 statements whether use_pointer_for_field has changed
2778 because of that. If it has, update the field types now. */
2779 if (task_shared_vars)
2781 tree c;
2783 for (c = gimple_omp_taskreg_clauses (ctx->stmt);
2784 c; c = OMP_CLAUSE_CHAIN (c))
2785 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
2786 && !OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
2788 tree decl = OMP_CLAUSE_DECL (c);
2790 /* Global variables don't need to be copied,
2791 the receiver side will use them directly. */
2792 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
2793 continue;
2794 if (!bitmap_bit_p (task_shared_vars, DECL_UID (decl))
2795 || !use_pointer_for_field (decl, ctx))
2796 continue;
2797 tree field = lookup_field (decl, ctx);
2798 if (TREE_CODE (TREE_TYPE (field)) == POINTER_TYPE
2799 && TREE_TYPE (TREE_TYPE (field)) == TREE_TYPE (decl))
2800 continue;
2801 TREE_TYPE (field) = build_pointer_type (TREE_TYPE (decl));
2802 TREE_THIS_VOLATILE (field) = 0;
2803 DECL_USER_ALIGN (field) = 0;
2804 SET_DECL_ALIGN (field, TYPE_ALIGN (TREE_TYPE (field)));
2805 if (TYPE_ALIGN (ctx->record_type) < DECL_ALIGN (field))
2806 SET_TYPE_ALIGN (ctx->record_type, DECL_ALIGN (field));
2807 if (ctx->srecord_type)
2809 tree sfield = lookup_sfield (decl, ctx);
2810 TREE_TYPE (sfield) = TREE_TYPE (field);
2811 TREE_THIS_VOLATILE (sfield) = 0;
2812 DECL_USER_ALIGN (sfield) = 0;
2813 SET_DECL_ALIGN (sfield, DECL_ALIGN (field));
2814 if (TYPE_ALIGN (ctx->srecord_type) < DECL_ALIGN (sfield))
2815 SET_TYPE_ALIGN (ctx->srecord_type, DECL_ALIGN (sfield));
2820 if (gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
2822 layout_type (ctx->record_type);
2823 fixup_child_record_type (ctx);
2825 else
2827 location_t loc = gimple_location (ctx->stmt);
2828 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
2829 /* Move VLA fields to the end. */
2830 p = &TYPE_FIELDS (ctx->record_type);
2831 while (*p)
2832 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
2833 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
2835 *q = *p;
2836 *p = TREE_CHAIN (*p);
2837 TREE_CHAIN (*q) = NULL_TREE;
2838 q = &TREE_CHAIN (*q);
2840 else
2841 p = &DECL_CHAIN (*p);
2842 *p = vla_fields;
2843 if (gimple_omp_task_taskloop_p (ctx->stmt))
2845 /* Move the fields corresponding to the first and second _looptemp_
2846 clauses to the front. These are filled in by GOMP_taskloop
2847 and thus need to be in specific positions. */
2848 tree c1 = gimple_omp_task_clauses (ctx->stmt);
2849 c1 = find_omp_clause (c1, OMP_CLAUSE__LOOPTEMP_);
2850 tree c2 = find_omp_clause (OMP_CLAUSE_CHAIN (c1),
2851 OMP_CLAUSE__LOOPTEMP_);
2852 tree f1 = lookup_field (OMP_CLAUSE_DECL (c1), ctx);
2853 tree f2 = lookup_field (OMP_CLAUSE_DECL (c2), ctx);
2854 p = &TYPE_FIELDS (ctx->record_type);
2855 while (*p)
2856 if (*p == f1 || *p == f2)
2857 *p = DECL_CHAIN (*p);
2858 else
2859 p = &DECL_CHAIN (*p);
2860 DECL_CHAIN (f1) = f2;
2861 DECL_CHAIN (f2) = TYPE_FIELDS (ctx->record_type);
2862 TYPE_FIELDS (ctx->record_type) = f1;
2863 if (ctx->srecord_type)
2865 f1 = lookup_sfield (OMP_CLAUSE_DECL (c1), ctx);
2866 f2 = lookup_sfield (OMP_CLAUSE_DECL (c2), ctx);
2867 p = &TYPE_FIELDS (ctx->srecord_type);
2868 while (*p)
2869 if (*p == f1 || *p == f2)
2870 *p = DECL_CHAIN (*p);
2871 else
2872 p = &DECL_CHAIN (*p);
2873 DECL_CHAIN (f1) = f2;
2874 DECL_CHAIN (f2) = TYPE_FIELDS (ctx->srecord_type);
2875 TYPE_FIELDS (ctx->srecord_type) = f1;
2878 layout_type (ctx->record_type);
2879 fixup_child_record_type (ctx);
2880 if (ctx->srecord_type)
2881 layout_type (ctx->srecord_type);
2882 tree t = fold_convert_loc (loc, long_integer_type_node,
2883 TYPE_SIZE_UNIT (ctx->record_type));
2884 gimple_omp_task_set_arg_size (ctx->stmt, t);
2885 t = build_int_cst (long_integer_type_node,
2886 TYPE_ALIGN_UNIT (ctx->record_type));
2887 gimple_omp_task_set_arg_align (ctx->stmt, t);
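/* Illustrative note (an assumption about the resulting layout, not taken
   from the original sources): for

     #pragma omp taskloop
     for (i = 0; i < n; i++)
       body (i);

   the two _looptemp_ fields are placed first in the record, variable
   length fields are moved to the end, and the record's size and
   alignment are stored back on the task statement as arg_size and
   arg_align, matching what GOMP_taskloop expects.  */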
2891 /* Find the enclosing offload context. */
2893 static omp_context *
2894 enclosing_target_ctx (omp_context *ctx)
2896 for (; ctx; ctx = ctx->outer)
2897 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET)
2898 break;
2900 return ctx;
2903 /* Return true if ctx is part of an oacc kernels region. */
2905 static bool
2906 ctx_in_oacc_kernels_region (omp_context *ctx)
2908 for (;ctx != NULL; ctx = ctx->outer)
2910 gimple *stmt = ctx->stmt;
2911 if (gimple_code (stmt) == GIMPLE_OMP_TARGET
2912 && gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_OACC_KERNELS)
2913 return true;
2916 return false;
2919 /* Check the parallelism clauses inside a kernels region.
2920 Until kernels handling moves to use the same loop indirection
2921 scheme as parallel, we need to do this checking early. */
2923 static unsigned
2924 check_oacc_kernel_gwv (gomp_for *stmt, omp_context *ctx)
2926 bool checking = true;
2927 unsigned outer_mask = 0;
2928 unsigned this_mask = 0;
2929 bool has_seq = false, has_auto = false;
2931 if (ctx->outer)
2932 outer_mask = check_oacc_kernel_gwv (NULL, ctx->outer);
2933 if (!stmt)
2935 checking = false;
2936 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR)
2937 return outer_mask;
2938 stmt = as_a <gomp_for *> (ctx->stmt);
2941 for (tree c = gimple_omp_for_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
2943 switch (OMP_CLAUSE_CODE (c))
2945 case OMP_CLAUSE_GANG:
2946 this_mask |= GOMP_DIM_MASK (GOMP_DIM_GANG);
2947 break;
2948 case OMP_CLAUSE_WORKER:
2949 this_mask |= GOMP_DIM_MASK (GOMP_DIM_WORKER);
2950 break;
2951 case OMP_CLAUSE_VECTOR:
2952 this_mask |= GOMP_DIM_MASK (GOMP_DIM_VECTOR);
2953 break;
2954 case OMP_CLAUSE_SEQ:
2955 has_seq = true;
2956 break;
2957 case OMP_CLAUSE_AUTO:
2958 has_auto = true;
2959 break;
2960 default:
2961 break;
2965 if (checking)
2967 if (has_seq && (this_mask || has_auto))
2968 error_at (gimple_location (stmt), "%<seq%> overrides other"
2969 " OpenACC loop specifiers");
2970 else if (has_auto && this_mask)
2971 error_at (gimple_location (stmt), "%<auto%> conflicts with other"
2972 " OpenACC loop specifiers");
2974 if (this_mask & outer_mask)
2975 error_at (gimple_location (stmt), "inner loop uses same"
2976 " OpenACC parallelism as containing loop");
2979 return outer_mask | this_mask;
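/* Illustrative example of the diagnostics above (hypothetical source):

     #pragma acc kernels
     {
       #pragma acc loop gang
       for (i = 0; i < n; i++)
         #pragma acc loop gang
         for (j = 0; j < m; j++)
           body (i, j);
     }

   Here the inner loop repeats the gang level already claimed by the
   containing loop, so the "inner loop uses same OpenACC parallelism as
   containing loop" error is emitted.  */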
2982 /* Scan a GIMPLE_OMP_FOR. */
2984 static void
2985 scan_omp_for (gomp_for *stmt, omp_context *outer_ctx)
2987 omp_context *ctx;
2988 size_t i;
2989 tree clauses = gimple_omp_for_clauses (stmt);
2991 ctx = new_omp_context (stmt, outer_ctx);
2993 if (is_gimple_omp_oacc (stmt))
2995 omp_context *tgt = enclosing_target_ctx (outer_ctx);
2997 if (!tgt || is_oacc_parallel (tgt))
2998 for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
3000 char const *check = NULL;
3002 switch (OMP_CLAUSE_CODE (c))
3004 case OMP_CLAUSE_GANG:
3005 check = "gang";
3006 break;
3008 case OMP_CLAUSE_WORKER:
3009 check = "worker";
3010 break;
3012 case OMP_CLAUSE_VECTOR:
3013 check = "vector";
3014 break;
3016 default:
3017 break;
3020 if (check && OMP_CLAUSE_OPERAND (c, 0))
3021 error_at (gimple_location (stmt),
3022 "argument not permitted on %qs clause in"
3023 " OpenACC %<parallel%>", check);
3026 if (tgt && is_oacc_kernels (tgt))
3028 /* Strip out reductions, as they are not handled yet. */
3029 tree *prev_ptr = &clauses;
3031 while (tree probe = *prev_ptr)
3033 tree *next_ptr = &OMP_CLAUSE_CHAIN (probe);
3035 if (OMP_CLAUSE_CODE (probe) == OMP_CLAUSE_REDUCTION)
3036 *prev_ptr = *next_ptr;
3037 else
3038 prev_ptr = next_ptr;
3041 gimple_omp_for_set_clauses (stmt, clauses);
3042 check_oacc_kernel_gwv (stmt, ctx);
3046 scan_sharing_clauses (clauses, ctx);
3048 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
3049 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
3051 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
3052 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
3053 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
3054 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
3056 scan_omp (gimple_omp_body_ptr (stmt), ctx);
3059 /* Scan an OpenMP sections directive. */
3061 static void
3062 scan_omp_sections (gomp_sections *stmt, omp_context *outer_ctx)
3064 omp_context *ctx;
3066 ctx = new_omp_context (stmt, outer_ctx);
3067 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
3068 scan_omp (gimple_omp_body_ptr (stmt), ctx);
3071 /* Scan an OpenMP single directive. */
3073 static void
3074 scan_omp_single (gomp_single *stmt, omp_context *outer_ctx)
3076 omp_context *ctx;
3077 tree name;
3079 ctx = new_omp_context (stmt, outer_ctx);
3080 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
3081 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
3082 name = create_tmp_var_name (".omp_copy_s");
3083 name = build_decl (gimple_location (stmt),
3084 TYPE_DECL, name, ctx->record_type);
3085 TYPE_NAME (ctx->record_type) = name;
3087 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
3088 scan_omp (gimple_omp_body_ptr (stmt), ctx);
3090 if (TYPE_FIELDS (ctx->record_type) == NULL)
3091 ctx->record_type = NULL;
3092 else
3093 layout_type (ctx->record_type);
3096 /* Return true if the CLAUSES of an omp target guarantee that the base pointers
3097 used in the corresponding offloaded function are restrict. */
3099 static bool
3100 omp_target_base_pointers_restrict_p (tree clauses)
3102 /* The analysis relies on the GOMP_MAP_FORCE_* mapping kinds, which are only
3103 used by OpenACC. */
3104 if (flag_openacc == 0)
3105 return false;
3107 /* I. Basic example:
3109 void foo (void)
3111 unsigned int a[2], b[2];
3113 #pragma acc kernels \
3114 copyout (a) \
3115 copyout (b)
3117 a[0] = 0;
3118 b[0] = 1;
3122 After gimplification, we have:
3124 #pragma omp target oacc_kernels \
3125 map(force_from:a [len: 8]) \
3126 map(force_from:b [len: 8])
3128 a[0] = 0;
3129 b[0] = 1;
3132 Because both mappings have the force prefix, we know that they will be
3133 allocated when calling the corresponding offloaded function, which means we
3134 can mark the base pointers for a and b in the offloaded function as
3135 restrict. */
3137 tree c;
3138 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
3140 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP)
3141 return false;
3143 switch (OMP_CLAUSE_MAP_KIND (c))
3145 case GOMP_MAP_FORCE_ALLOC:
3146 case GOMP_MAP_FORCE_TO:
3147 case GOMP_MAP_FORCE_FROM:
3148 case GOMP_MAP_FORCE_TOFROM:
3149 break;
3150 default:
3151 return false;
3155 return true;
3158 /* Scan a GIMPLE_OMP_TARGET. */
3160 static void
3161 scan_omp_target (gomp_target *stmt, omp_context *outer_ctx)
3163 omp_context *ctx;
3164 tree name;
3165 bool offloaded = is_gimple_omp_offloaded (stmt);
3166 tree clauses = gimple_omp_target_clauses (stmt);
3168 ctx = new_omp_context (stmt, outer_ctx);
3169 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
3170 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
3171 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
3172 name = create_tmp_var_name (".omp_data_t");
3173 name = build_decl (gimple_location (stmt),
3174 TYPE_DECL, name, ctx->record_type);
3175 DECL_ARTIFICIAL (name) = 1;
3176 DECL_NAMELESS (name) = 1;
3177 TYPE_NAME (ctx->record_type) = name;
3178 TYPE_ARTIFICIAL (ctx->record_type) = 1;
3180 bool base_pointers_restrict = false;
3181 if (offloaded)
3183 create_omp_child_function (ctx, false);
3184 gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
3186 base_pointers_restrict = omp_target_base_pointers_restrict_p (clauses);
3187 if (base_pointers_restrict
3188 && dump_file && (dump_flags & TDF_DETAILS))
3189 fprintf (dump_file,
3190 "Base pointers in offloaded function are restrict\n");
3193 scan_sharing_clauses (clauses, ctx, base_pointers_restrict);
3194 scan_omp (gimple_omp_body_ptr (stmt), ctx);
3196 if (TYPE_FIELDS (ctx->record_type) == NULL)
3197 ctx->record_type = ctx->receiver_decl = NULL;
3198 else
3200 TYPE_FIELDS (ctx->record_type)
3201 = nreverse (TYPE_FIELDS (ctx->record_type));
3202 if (flag_checking)
3204 unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
3205 for (tree field = TYPE_FIELDS (ctx->record_type);
3206 field;
3207 field = DECL_CHAIN (field))
3208 gcc_assert (DECL_ALIGN (field) == align);
3210 layout_type (ctx->record_type);
3211 if (offloaded)
3212 fixup_child_record_type (ctx);
3216 /* Scan an OpenMP teams directive. */
3218 static void
3219 scan_omp_teams (gomp_teams *stmt, omp_context *outer_ctx)
3221 omp_context *ctx = new_omp_context (stmt, outer_ctx);
3222 scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
3223 scan_omp (gimple_omp_body_ptr (stmt), ctx);
3226 /* Check nesting restrictions. */
3227 static bool
3228 check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
3230 tree c;
3232 if (ctx && gimple_code (ctx->stmt) == GIMPLE_OMP_GRID_BODY)
3233 /* GRID_BODY is an artificial construct, nesting rules will be checked in
3234 the original copy of its contents. */
3235 return true;
3237 /* No nesting of non-OpenACC STMT (that is, an OpenMP one, or a GOMP builtin)
3238 inside an OpenACC CTX. */
3239 if (!(is_gimple_omp (stmt)
3240 && is_gimple_omp_oacc (stmt))
3241 /* Except for atomic codes that we share with OpenMP. */
3242 && !(gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD
3243 || gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE))
3245 if (get_oacc_fn_attrib (cfun->decl) != NULL)
3247 error_at (gimple_location (stmt),
3248 "non-OpenACC construct inside of OpenACC routine");
3249 return false;
3251 else
3252 for (omp_context *octx = ctx; octx != NULL; octx = octx->outer)
3253 if (is_gimple_omp (octx->stmt)
3254 && is_gimple_omp_oacc (octx->stmt))
3256 error_at (gimple_location (stmt),
3257 "non-OpenACC construct inside of OpenACC region");
3258 return false;
3262 if (ctx != NULL)
3264 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3265 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
3267 c = NULL_TREE;
3268 if (gimple_code (stmt) == GIMPLE_OMP_ORDERED)
3270 c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
3271 if (find_omp_clause (c, OMP_CLAUSE_SIMD))
3273 if (find_omp_clause (c, OMP_CLAUSE_THREADS)
3274 && (ctx->outer == NULL
3275 || !gimple_omp_for_combined_into_p (ctx->stmt)
3276 || gimple_code (ctx->outer->stmt) != GIMPLE_OMP_FOR
3277 || (gimple_omp_for_kind (ctx->outer->stmt)
3278 != GF_OMP_FOR_KIND_FOR)
3279 || !gimple_omp_for_combined_p (ctx->outer->stmt)))
3281 error_at (gimple_location (stmt),
3282 "%<ordered simd threads%> must be closely "
3283 "nested inside of %<for simd%> region");
3284 return false;
3286 return true;
3289 error_at (gimple_location (stmt),
3290 "OpenMP constructs other than %<#pragma omp ordered simd%>"
3291 " may not be nested inside %<simd%> region");
3292 return false;
3294 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3296 if ((gimple_code (stmt) != GIMPLE_OMP_FOR
3297 || (gimple_omp_for_kind (stmt)
3298 != GF_OMP_FOR_KIND_DISTRIBUTE))
3299 && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
3301 error_at (gimple_location (stmt),
3302 "only %<distribute%> or %<parallel%> regions are "
3303 "allowed to be strictly nested inside %<teams%> "
3304 "region");
3305 return false;
3309 switch (gimple_code (stmt))
3311 case GIMPLE_OMP_FOR:
3312 if (gimple_omp_for_kind (stmt) & GF_OMP_FOR_SIMD)
3313 return true;
3314 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
3316 if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
3318 error_at (gimple_location (stmt),
3319 "%<distribute%> region must be strictly nested "
3320 "inside %<teams%> construct");
3321 return false;
3323 return true;
3325 /* We split taskloop into task and nested taskloop in it. */
3326 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_TASKLOOP)
3327 return true;
3328 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
3330 bool ok = false;
3332 if (ctx)
3333 switch (gimple_code (ctx->stmt))
3335 case GIMPLE_OMP_FOR:
3336 ok = (gimple_omp_for_kind (ctx->stmt)
3337 == GF_OMP_FOR_KIND_OACC_LOOP);
3338 break;
3340 case GIMPLE_OMP_TARGET:
3341 switch (gimple_omp_target_kind (ctx->stmt))
3343 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
3344 case GF_OMP_TARGET_KIND_OACC_KERNELS:
3345 ok = true;
3346 break;
3348 default:
3349 break;
3352 default:
3353 break;
3355 else if (get_oacc_fn_attrib (current_function_decl))
3356 ok = true;
3357 if (!ok)
3359 error_at (gimple_location (stmt),
3360 "OpenACC loop directive must be associated with"
3361 " an OpenACC compute region");
3362 return false;
3365 /* FALLTHRU */
3366 case GIMPLE_CALL:
3367 if (is_gimple_call (stmt)
3368 && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
3369 == BUILT_IN_GOMP_CANCEL
3370 || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
3371 == BUILT_IN_GOMP_CANCELLATION_POINT))
3373 const char *bad = NULL;
3374 const char *kind = NULL;
3375 const char *construct
3376 = (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
3377 == BUILT_IN_GOMP_CANCEL)
3378 ? "#pragma omp cancel"
3379 : "#pragma omp cancellation point";
3380 if (ctx == NULL)
3382 error_at (gimple_location (stmt), "orphaned %qs construct",
3383 construct);
3384 return false;
3386 switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
3387 ? tree_to_shwi (gimple_call_arg (stmt, 0))
3388 : 0)
3390 case 1:
3391 if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
3392 bad = "#pragma omp parallel";
3393 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
3394 == BUILT_IN_GOMP_CANCEL
3395 && !integer_zerop (gimple_call_arg (stmt, 1)))
3396 ctx->cancellable = true;
3397 kind = "parallel";
3398 break;
3399 case 2:
3400 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
3401 || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
3402 bad = "#pragma omp for";
3403 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
3404 == BUILT_IN_GOMP_CANCEL
3405 && !integer_zerop (gimple_call_arg (stmt, 1)))
3407 ctx->cancellable = true;
3408 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3409 OMP_CLAUSE_NOWAIT))
3410 warning_at (gimple_location (stmt), 0,
3411 "%<#pragma omp cancel for%> inside "
3412 "%<nowait%> for construct");
3413 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3414 OMP_CLAUSE_ORDERED))
3415 warning_at (gimple_location (stmt), 0,
3416 "%<#pragma omp cancel for%> inside "
3417 "%<ordered%> for construct");
3419 kind = "for";
3420 break;
3421 case 4:
3422 if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
3423 && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
3424 bad = "#pragma omp sections";
3425 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
3426 == BUILT_IN_GOMP_CANCEL
3427 && !integer_zerop (gimple_call_arg (stmt, 1)))
3429 if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
3431 ctx->cancellable = true;
3432 if (find_omp_clause (gimple_omp_sections_clauses
3433 (ctx->stmt),
3434 OMP_CLAUSE_NOWAIT))
3435 warning_at (gimple_location (stmt), 0,
3436 "%<#pragma omp cancel sections%> inside "
3437 "%<nowait%> sections construct");
3439 else
3441 gcc_assert (ctx->outer
3442 && gimple_code (ctx->outer->stmt)
3443 == GIMPLE_OMP_SECTIONS);
3444 ctx->outer->cancellable = true;
3445 if (find_omp_clause (gimple_omp_sections_clauses
3446 (ctx->outer->stmt),
3447 OMP_CLAUSE_NOWAIT))
3448 warning_at (gimple_location (stmt), 0,
3449 "%<#pragma omp cancel sections%> inside "
3450 "%<nowait%> sections construct");
3453 kind = "sections";
3454 break;
3455 case 8:
3456 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
3457 bad = "#pragma omp task";
3458 else
3460 for (omp_context *octx = ctx->outer;
3461 octx; octx = octx->outer)
3463 switch (gimple_code (octx->stmt))
3465 case GIMPLE_OMP_TASKGROUP:
3466 break;
3467 case GIMPLE_OMP_TARGET:
3468 if (gimple_omp_target_kind (octx->stmt)
3469 != GF_OMP_TARGET_KIND_REGION)
3470 continue;
3471 /* FALLTHRU */
3472 case GIMPLE_OMP_PARALLEL:
3473 case GIMPLE_OMP_TEAMS:
3474 error_at (gimple_location (stmt),
3475 "%<%s taskgroup%> construct not closely "
3476 "nested inside of %<taskgroup%> region",
3477 construct);
3478 return false;
3479 default:
3480 continue;
3482 break;
3484 ctx->cancellable = true;
3486 kind = "taskgroup";
3487 break;
3488 default:
3489 error_at (gimple_location (stmt), "invalid arguments");
3490 return false;
3492 if (bad)
3494 error_at (gimple_location (stmt),
3495 "%<%s %s%> construct not closely nested inside of %qs",
3496 construct, kind, bad);
3497 return false;
3500 /* FALLTHRU */
3501 case GIMPLE_OMP_SECTIONS:
3502 case GIMPLE_OMP_SINGLE:
3503 for (; ctx != NULL; ctx = ctx->outer)
3504 switch (gimple_code (ctx->stmt))
3506 case GIMPLE_OMP_FOR:
3507 if (gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR
3508 && gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_TASKLOOP)
3509 break;
3510 /* FALLTHRU */
3511 case GIMPLE_OMP_SECTIONS:
3512 case GIMPLE_OMP_SINGLE:
3513 case GIMPLE_OMP_ORDERED:
3514 case GIMPLE_OMP_MASTER:
3515 case GIMPLE_OMP_TASK:
3516 case GIMPLE_OMP_CRITICAL:
3517 if (is_gimple_call (stmt))
3519 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
3520 != BUILT_IN_GOMP_BARRIER)
3521 return true;
3522 error_at (gimple_location (stmt),
3523 "barrier region may not be closely nested inside "
3524 "of work-sharing, %<critical%>, %<ordered%>, "
3525 "%<master%>, explicit %<task%> or %<taskloop%> "
3526 "region");
3527 return false;
3529 error_at (gimple_location (stmt),
3530 "work-sharing region may not be closely nested inside "
3531 "of work-sharing, %<critical%>, %<ordered%>, "
3532 "%<master%>, explicit %<task%> or %<taskloop%> region");
3533 return false;
3534 case GIMPLE_OMP_PARALLEL:
3535 case GIMPLE_OMP_TEAMS:
3536 return true;
3537 case GIMPLE_OMP_TARGET:
3538 if (gimple_omp_target_kind (ctx->stmt)
3539 == GF_OMP_TARGET_KIND_REGION)
3540 return true;
3541 break;
3542 default:
3543 break;
3545 break;
3546 case GIMPLE_OMP_MASTER:
3547 for (; ctx != NULL; ctx = ctx->outer)
3548 switch (gimple_code (ctx->stmt))
3550 case GIMPLE_OMP_FOR:
3551 if (gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR
3552 && gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_TASKLOOP)
3553 break;
3554 /* FALLTHRU */
3555 case GIMPLE_OMP_SECTIONS:
3556 case GIMPLE_OMP_SINGLE:
3557 case GIMPLE_OMP_TASK:
3558 error_at (gimple_location (stmt),
3559 "%<master%> region may not be closely nested inside "
3560 "of work-sharing, explicit %<task%> or %<taskloop%> "
3561 "region");
3562 return false;
3563 case GIMPLE_OMP_PARALLEL:
3564 case GIMPLE_OMP_TEAMS:
3565 return true;
3566 case GIMPLE_OMP_TARGET:
3567 if (gimple_omp_target_kind (ctx->stmt)
3568 == GF_OMP_TARGET_KIND_REGION)
3569 return true;
3570 break;
3571 default:
3572 break;
3574 break;
3575 case GIMPLE_OMP_TASK:
3576 for (c = gimple_omp_task_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
3577 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
3578 && (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE
3579 || OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK))
3581 enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
3582 error_at (OMP_CLAUSE_LOCATION (c),
3583 "%<depend(%s)%> is only allowed in %<omp ordered%>",
3584 kind == OMP_CLAUSE_DEPEND_SOURCE ? "source" : "sink");
3585 return false;
3587 break;
3588 case GIMPLE_OMP_ORDERED:
3589 for (c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
3590 c; c = OMP_CLAUSE_CHAIN (c))
3592 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
3594 gcc_assert (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_THREADS
3595 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SIMD);
3596 continue;
3598 enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
3599 if (kind == OMP_CLAUSE_DEPEND_SOURCE
3600 || kind == OMP_CLAUSE_DEPEND_SINK)
3602 tree oclause;
3603 /* Look for containing ordered(N) loop. */
3604 if (ctx == NULL
3605 || gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
3606 || (oclause
3607 = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3608 OMP_CLAUSE_ORDERED)) == NULL_TREE)
3610 error_at (OMP_CLAUSE_LOCATION (c),
3611 "%<ordered%> construct with %<depend%> clause "
3612 "must be closely nested inside an %<ordered%> "
3613 "loop");
3614 return false;
3616 else if (OMP_CLAUSE_ORDERED_EXPR (oclause) == NULL_TREE)
3618 error_at (OMP_CLAUSE_LOCATION (c),
3619 "%<ordered%> construct with %<depend%> clause "
3620 "must be closely nested inside a loop with "
3621 "%<ordered%> clause with a parameter");
3622 return false;
3625 else
3627 error_at (OMP_CLAUSE_LOCATION (c),
3628 "invalid depend kind in omp %<ordered%> %<depend%>");
3629 return false;
3632 c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
3633 if (find_omp_clause (c, OMP_CLAUSE_SIMD))
3635 /* An ordered simd must be closely nested inside a simd region,
3636 and a simd region must not contain constructs other than
3637 ordered simd; therefore an ordered simd is either orphaned,
3638 or ctx->stmt must be a simd. The latter case has already been
3639 handled earlier. */
3640 if (ctx != NULL)
3642 error_at (gimple_location (stmt),
3643 "%<ordered%> %<simd%> must be closely nested inside "
3644 "%<simd%> region");
3645 return false;
3648 for (; ctx != NULL; ctx = ctx->outer)
3649 switch (gimple_code (ctx->stmt))
3651 case GIMPLE_OMP_CRITICAL:
3652 case GIMPLE_OMP_TASK:
3653 case GIMPLE_OMP_ORDERED:
3654 ordered_in_taskloop:
3655 error_at (gimple_location (stmt),
3656 "%<ordered%> region may not be closely nested inside "
3657 "of %<critical%>, %<ordered%>, explicit %<task%> or "
3658 "%<taskloop%> region");
3659 return false;
3660 case GIMPLE_OMP_FOR:
3661 if (gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_TASKLOOP)
3662 goto ordered_in_taskloop;
3663 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3664 OMP_CLAUSE_ORDERED) == NULL)
3666 error_at (gimple_location (stmt),
3667 "%<ordered%> region must be closely nested inside "
3668 "a loop region with an %<ordered%> clause");
3669 return false;
3671 return true;
3672 case GIMPLE_OMP_TARGET:
3673 if (gimple_omp_target_kind (ctx->stmt)
3674 != GF_OMP_TARGET_KIND_REGION)
3675 break;
3676 /* FALLTHRU */
3677 case GIMPLE_OMP_PARALLEL:
3678 case GIMPLE_OMP_TEAMS:
3679 error_at (gimple_location (stmt),
3680 "%<ordered%> region must be closely nested inside "
3681 "a loop region with an %<ordered%> clause");
3682 return false;
3683 default:
3684 break;
3686 break;
3687 case GIMPLE_OMP_CRITICAL:
3689 tree this_stmt_name
3690 = gimple_omp_critical_name (as_a <gomp_critical *> (stmt));
3691 for (; ctx != NULL; ctx = ctx->outer)
3692 if (gomp_critical *other_crit
3693 = dyn_cast <gomp_critical *> (ctx->stmt))
3694 if (this_stmt_name == gimple_omp_critical_name (other_crit))
3696 error_at (gimple_location (stmt),
3697 "%<critical%> region may not be nested inside "
3698 "a %<critical%> region with the same name");
3699 return false;
3702 break;
3703 case GIMPLE_OMP_TEAMS:
3704 if (ctx == NULL
3705 || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
3706 || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
3708 error_at (gimple_location (stmt),
3709 "%<teams%> construct not closely nested inside of "
3710 "%<target%> construct");
3711 return false;
3713 break;
3714 case GIMPLE_OMP_TARGET:
3715 for (c = gimple_omp_target_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
3716 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
3717 && (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE
3718 || OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK))
3720 enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
3721 error_at (OMP_CLAUSE_LOCATION (c),
3722 "%<depend(%s)%> is only allowed in %<omp ordered%>",
3723 kind == OMP_CLAUSE_DEPEND_SOURCE ? "source" : "sink");
3724 return false;
3726 if (is_gimple_omp_offloaded (stmt)
3727 && get_oacc_fn_attrib (cfun->decl) != NULL)
3729 error_at (gimple_location (stmt),
3730 "OpenACC region inside of OpenACC routine, nested "
3731 "parallelism not supported yet");
3732 return false;
3734 for (; ctx != NULL; ctx = ctx->outer)
3736 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
3738 if (is_gimple_omp (stmt)
3739 && is_gimple_omp_oacc (stmt)
3740 && is_gimple_omp (ctx->stmt))
3742 error_at (gimple_location (stmt),
3743 "OpenACC construct inside of non-OpenACC region");
3744 return false;
3746 continue;
3749 const char *stmt_name, *ctx_stmt_name;
3750 switch (gimple_omp_target_kind (stmt))
3752 case GF_OMP_TARGET_KIND_REGION: stmt_name = "target"; break;
3753 case GF_OMP_TARGET_KIND_DATA: stmt_name = "target data"; break;
3754 case GF_OMP_TARGET_KIND_UPDATE: stmt_name = "target update"; break;
3755 case GF_OMP_TARGET_KIND_ENTER_DATA:
3756 stmt_name = "target enter data"; break;
3757 case GF_OMP_TARGET_KIND_EXIT_DATA:
3758 stmt_name = "target exit data"; break;
3759 case GF_OMP_TARGET_KIND_OACC_PARALLEL: stmt_name = "parallel"; break;
3760 case GF_OMP_TARGET_KIND_OACC_KERNELS: stmt_name = "kernels"; break;
3761 case GF_OMP_TARGET_KIND_OACC_DATA: stmt_name = "data"; break;
3762 case GF_OMP_TARGET_KIND_OACC_UPDATE: stmt_name = "update"; break;
3763 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
3764 stmt_name = "enter/exit data"; break;
3765 case GF_OMP_TARGET_KIND_OACC_HOST_DATA: stmt_name = "host_data";
3766 break;
3767 default: gcc_unreachable ();
3769 switch (gimple_omp_target_kind (ctx->stmt))
3771 case GF_OMP_TARGET_KIND_REGION: ctx_stmt_name = "target"; break;
3772 case GF_OMP_TARGET_KIND_DATA: ctx_stmt_name = "target data"; break;
3773 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
3774 ctx_stmt_name = "parallel"; break;
3775 case GF_OMP_TARGET_KIND_OACC_KERNELS:
3776 ctx_stmt_name = "kernels"; break;
3777 case GF_OMP_TARGET_KIND_OACC_DATA: ctx_stmt_name = "data"; break;
3778 case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
3779 ctx_stmt_name = "host_data"; break;
3780 default: gcc_unreachable ();
3783 /* OpenACC/OpenMP mismatch? */
3784 if (is_gimple_omp_oacc (stmt)
3785 != is_gimple_omp_oacc (ctx->stmt))
3787 error_at (gimple_location (stmt),
3788 "%s %qs construct inside of %s %qs region",
3789 (is_gimple_omp_oacc (stmt)
3790 ? "OpenACC" : "OpenMP"), stmt_name,
3791 (is_gimple_omp_oacc (ctx->stmt)
3792 ? "OpenACC" : "OpenMP"), ctx_stmt_name);
3793 return false;
3795 if (is_gimple_omp_offloaded (ctx->stmt))
3797 /* No GIMPLE_OMP_TARGET inside offloaded OpenACC CTX. */
3798 if (is_gimple_omp_oacc (ctx->stmt))
3800 error_at (gimple_location (stmt),
3801 "%qs construct inside of %qs region",
3802 stmt_name, ctx_stmt_name);
3803 return false;
3805 else
3807 warning_at (gimple_location (stmt), 0,
3808 "%qs construct inside of %qs region",
3809 stmt_name, ctx_stmt_name);
3813 break;
3814 default:
3815 break;
3817 return true;
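/* Illustrative example of a nesting violation rejected above
   (hypothetical source, not from the original code):

     #pragma omp critical
     {
       #pragma omp barrier
     }

   The barrier is a BUILT_IN_GOMP_BARRIER call, and the checks above
   report that a barrier region may not be closely nested inside of a
   work-sharing, critical, ordered, master, explicit task or taskloop
   region.  */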
3821 /* Helper function for scan_omp.
3823 Callback for walk_tree, and for operands in walk_gimple_stmt, used to
3824 scan for OMP directives in TP. */
3826 static tree
3827 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
3829 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
3830 omp_context *ctx = (omp_context *) wi->info;
3831 tree t = *tp;
3833 switch (TREE_CODE (t))
3835 case VAR_DECL:
3836 case PARM_DECL:
3837 case LABEL_DECL:
3838 case RESULT_DECL:
3839 if (ctx)
3841 tree repl = remap_decl (t, &ctx->cb);
3842 gcc_checking_assert (TREE_CODE (repl) != ERROR_MARK);
3843 *tp = repl;
3845 break;
3847 default:
3848 if (ctx && TYPE_P (t))
3849 *tp = remap_type (t, &ctx->cb);
3850 else if (!DECL_P (t))
3852 *walk_subtrees = 1;
3853 if (ctx)
3855 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
3856 if (tem != TREE_TYPE (t))
3858 if (TREE_CODE (t) == INTEGER_CST)
3859 *tp = wide_int_to_tree (tem, t);
3860 else
3861 TREE_TYPE (t) = tem;
3865 break;
3868 return NULL_TREE;
3871 /* Return true if FNDECL is a setjmp or a longjmp. */
3873 static bool
3874 setjmp_or_longjmp_p (const_tree fndecl)
3876 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
3877 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
3878 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
3879 return true;
3881 tree declname = DECL_NAME (fndecl);
3882 if (!declname)
3883 return false;
3884 const char *name = IDENTIFIER_POINTER (declname);
3885 return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
3889 /* Helper function for scan_omp.
3891 Callback for walk_gimple_stmt used to scan for OMP directives in
3892 the current statement in GSI. */
3894 static tree
3895 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
3896 struct walk_stmt_info *wi)
3898 gimple *stmt = gsi_stmt (*gsi);
3899 omp_context *ctx = (omp_context *) wi->info;
3901 if (gimple_has_location (stmt))
3902 input_location = gimple_location (stmt);
3904 /* Check the nesting restrictions. */
3905 bool remove = false;
3906 if (is_gimple_omp (stmt))
3907 remove = !check_omp_nesting_restrictions (stmt, ctx);
3908 else if (is_gimple_call (stmt))
3910 tree fndecl = gimple_call_fndecl (stmt);
3911 if (fndecl)
3913 if (setjmp_or_longjmp_p (fndecl)
3914 && ctx
3915 && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3916 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
3918 remove = true;
3919 error_at (gimple_location (stmt),
3920 "setjmp/longjmp inside simd construct");
3922 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3923 switch (DECL_FUNCTION_CODE (fndecl))
3925 case BUILT_IN_GOMP_BARRIER:
3926 case BUILT_IN_GOMP_CANCEL:
3927 case BUILT_IN_GOMP_CANCELLATION_POINT:
3928 case BUILT_IN_GOMP_TASKYIELD:
3929 case BUILT_IN_GOMP_TASKWAIT:
3930 case BUILT_IN_GOMP_TASKGROUP_START:
3931 case BUILT_IN_GOMP_TASKGROUP_END:
3932 remove = !check_omp_nesting_restrictions (stmt, ctx);
3933 break;
3934 default:
3935 break;
3939 if (remove)
3941 stmt = gimple_build_nop ();
3942 gsi_replace (gsi, stmt, false);
3945 *handled_ops_p = true;
3947 switch (gimple_code (stmt))
3949 case GIMPLE_OMP_PARALLEL:
3950 taskreg_nesting_level++;
3951 scan_omp_parallel (gsi, ctx);
3952 taskreg_nesting_level--;
3953 break;
3955 case GIMPLE_OMP_TASK:
3956 taskreg_nesting_level++;
3957 scan_omp_task (gsi, ctx);
3958 taskreg_nesting_level--;
3959 break;
3961 case GIMPLE_OMP_FOR:
3962 scan_omp_for (as_a <gomp_for *> (stmt), ctx);
3963 break;
3965 case GIMPLE_OMP_SECTIONS:
3966 scan_omp_sections (as_a <gomp_sections *> (stmt), ctx);
3967 break;
3969 case GIMPLE_OMP_SINGLE:
3970 scan_omp_single (as_a <gomp_single *> (stmt), ctx);
3971 break;
3973 case GIMPLE_OMP_SECTION:
3974 case GIMPLE_OMP_MASTER:
3975 case GIMPLE_OMP_TASKGROUP:
3976 case GIMPLE_OMP_ORDERED:
3977 case GIMPLE_OMP_CRITICAL:
3978 case GIMPLE_OMP_GRID_BODY:
3979 ctx = new_omp_context (stmt, ctx);
3980 scan_omp (gimple_omp_body_ptr (stmt), ctx);
3981 break;
3983 case GIMPLE_OMP_TARGET:
3984 scan_omp_target (as_a <gomp_target *> (stmt), ctx);
3985 break;
3987 case GIMPLE_OMP_TEAMS:
3988 scan_omp_teams (as_a <gomp_teams *> (stmt), ctx);
3989 break;
3991 case GIMPLE_BIND:
3993 tree var;
3995 *handled_ops_p = false;
3996 if (ctx)
3997 for (var = gimple_bind_vars (as_a <gbind *> (stmt));
3998 var ;
3999 var = DECL_CHAIN (var))
4000 insert_decl_map (&ctx->cb, var, var);
4002 break;
4003 default:
4004 *handled_ops_p = false;
4005 break;
4008 return NULL_TREE;
4012 /* Scan all the statements starting at the current statement. CTX
4013 contains context information about the OMP directives and
4014 clauses found during the scan. */
4016 static void
4017 scan_omp (gimple_seq *body_p, omp_context *ctx)
4019 location_t saved_location;
4020 struct walk_stmt_info wi;
4022 memset (&wi, 0, sizeof (wi));
4023 wi.info = ctx;
4024 wi.want_locations = true;
4026 saved_location = input_location;
4027 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
4028 input_location = saved_location;
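/* A minimal sketch of the gimple-walking pattern scan_omp relies on
   (illustrative only; count_omp_stmts and count_stmt_cb are
   hypothetical helpers, not part of this file):

     static tree
     count_stmt_cb (gimple_stmt_iterator *gsi, bool *handled_ops_p,
                    struct walk_stmt_info *wi)
     {
       if (is_gimple_omp (gsi_stmt (*gsi)))
         ++*(unsigned *) wi->info;
       *handled_ops_p = false;   // keep walking operands and bodies
       return NULL_TREE;
     }

     static unsigned
     count_omp_stmts (gimple_seq *body_p)
     {
       unsigned n = 0;
       struct walk_stmt_info wi;
       memset (&wi, 0, sizeof (wi));
       wi.info = &n;
       walk_gimple_seq_mod (body_p, count_stmt_cb, NULL, &wi);
       return n;
     }
*/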
4031 /* Re-gimplification and code generation routines. */
4033 /* Build a call to GOMP_barrier. */
4035 static gimple *
4036 build_omp_barrier (tree lhs)
4038 tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
4039 : BUILT_IN_GOMP_BARRIER);
4040 gcall *g = gimple_build_call (fndecl, 0);
4041 if (lhs)
4042 gimple_call_set_lhs (g, lhs);
4043 return g;
4046 /* If a context was created for STMT when it was scanned, return it. */
4048 static omp_context *
4049 maybe_lookup_ctx (gimple *stmt)
4051 splay_tree_node n;
4052 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
4053 return n ? (omp_context *) n->value : NULL;
4057 /* Find the mapping for DECL in CTX or the immediately enclosing
4058 context that has a mapping for DECL.
4060 If CTX is a nested parallel directive, we may have to use the decl
4061 mappings created in CTX's parent context. Suppose that we have the
4062 following parallel nesting (variable UIDs shown for clarity):
4064 iD.1562 = 0;
4065 #omp parallel shared(iD.1562) -> outer parallel
4066 iD.1562 = iD.1562 + 1;
4068 #omp parallel shared (iD.1562) -> inner parallel
4069 iD.1562 = iD.1562 - 1;
4071 Each parallel structure will create a distinct .omp_data_s structure
4072 for copying iD.1562 in/out of the directive:
4074 outer parallel .omp_data_s.1.i -> iD.1562
4075 inner parallel .omp_data_s.2.i -> iD.1562
4077 A shared variable mapping will produce a copy-out operation before
4078 the parallel directive and a copy-in operation after it. So, in
4079 this case we would have:
4081 iD.1562 = 0;
4082 .omp_data_o.1.i = iD.1562;
4083 #omp parallel shared(iD.1562) -> outer parallel
4084 .omp_data_i.1 = &.omp_data_o.1
4085 .omp_data_i.1->i = .omp_data_i.1->i + 1;
4087 .omp_data_o.2.i = iD.1562; -> **
4088 #omp parallel shared(iD.1562) -> inner parallel
4089 .omp_data_i.2 = &.omp_data_o.2
4090 .omp_data_i.2->i = .omp_data_i.2->i - 1;
4093 ** This is a problem. The symbol iD.1562 cannot be referenced
4094 inside the body of the outer parallel region. But since we are
4095 emitting this copy operation while expanding the inner parallel
4096 directive, we need to access the CTX structure of the outer
4097 parallel directive to get the correct mapping:
4099 .omp_data_o.2.i = .omp_data_i.1->i
4101 Since there may be other workshare or parallel directives enclosing
4102 the parallel directive, it may be necessary to walk up the context
4103 parent chain. This is not a problem in general because nested
4104 parallelism happens only rarely. */
4106 static tree
4107 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
4109 tree t;
4110 omp_context *up;
4112 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
4113 t = maybe_lookup_decl (decl, up);
4115 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
4117 return t ? t : decl;
4121 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
4122 in outer contexts. */
4124 static tree
4125 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
4127 tree t = NULL;
4128 omp_context *up;
4130 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
4131 t = maybe_lookup_decl (decl, up);
4133 return t ? t : decl;
4137 /* Construct the initialization value for reduction operation OP. */
4139 tree
4140 omp_reduction_init_op (location_t loc, enum tree_code op, tree type)
4142 switch (op)
4144 case PLUS_EXPR:
4145 case MINUS_EXPR:
4146 case BIT_IOR_EXPR:
4147 case BIT_XOR_EXPR:
4148 case TRUTH_OR_EXPR:
4149 case TRUTH_ORIF_EXPR:
4150 case TRUTH_XOR_EXPR:
4151 case NE_EXPR:
4152 return build_zero_cst (type);
4154 case MULT_EXPR:
4155 case TRUTH_AND_EXPR:
4156 case TRUTH_ANDIF_EXPR:
4157 case EQ_EXPR:
4158 return fold_convert_loc (loc, type, integer_one_node);
4160 case BIT_AND_EXPR:
4161 return fold_convert_loc (loc, type, integer_minus_one_node);
4163 case MAX_EXPR:
4164 if (SCALAR_FLOAT_TYPE_P (type))
4166 REAL_VALUE_TYPE max, min;
4167 if (HONOR_INFINITIES (type))
4169 real_inf (&max);
4170 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
4172 else
4173 real_maxval (&min, 1, TYPE_MODE (type));
4174 return build_real (type, min);
4176 else if (POINTER_TYPE_P (type))
4178 wide_int min
4179 = wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
4180 return wide_int_to_tree (type, min);
4182 else
4184 gcc_assert (INTEGRAL_TYPE_P (type));
4185 return TYPE_MIN_VALUE (type);
4188 case MIN_EXPR:
4189 if (SCALAR_FLOAT_TYPE_P (type))
4191 REAL_VALUE_TYPE max;
4192 if (HONOR_INFINITIES (type))
4193 real_inf (&max);
4194 else
4195 real_maxval (&max, 0, TYPE_MODE (type));
4196 return build_real (type, max);
4198 else if (POINTER_TYPE_P (type))
4200 wide_int max
4201 = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
4202 return wide_int_to_tree (type, max);
4204 else
4206 gcc_assert (INTEGRAL_TYPE_P (type));
4207 return TYPE_MAX_VALUE (type);
4210 default:
4211 gcc_unreachable ();
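/* For illustration, the identity values chosen above for an "int"
   reduction variable (hypothetical user-level view):

     reduction (+:x)    x starts at 0
     reduction (*:x)    x starts at 1
     reduction (&:x)    x starts at ~0 (all bits set)
     reduction (|:x)    x starts at 0
     reduction (^:x)    x starts at 0
     reduction (max:x)  x starts at INT_MIN
     reduction (min:x)  x starts at INT_MAX

   so that merging any thread's partial result into a freshly
   initialized private copy leaves that result unchanged.  */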
4215 /* Construct the initialization value for reduction CLAUSE. */
4217 tree
4218 omp_reduction_init (tree clause, tree type)
4220 return omp_reduction_init_op (OMP_CLAUSE_LOCATION (clause),
4221 OMP_CLAUSE_REDUCTION_CODE (clause), type);
4224 /* Return alignment to be assumed for var in CLAUSE, which should be
4225 OMP_CLAUSE_ALIGNED. */
4227 static tree
4228 omp_clause_aligned_alignment (tree clause)
4230 if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
4231 return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
4233 /* Otherwise return implementation defined alignment. */
4234 unsigned int al = 1;
4235 machine_mode mode, vmode;
4236 int vs = targetm.vectorize.autovectorize_vector_sizes ();
4237 if (vs)
4238 vs = 1 << floor_log2 (vs);
4239 static enum mode_class classes[]
4240 = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
4241 for (int i = 0; i < 4; i += 2)
4242 for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
4243 mode != VOIDmode;
4244 mode = GET_MODE_WIDER_MODE (mode))
4246 vmode = targetm.vectorize.preferred_simd_mode (mode);
4247 if (GET_MODE_CLASS (vmode) != classes[i + 1])
4248 continue;
4249 while (vs
4250 && GET_MODE_SIZE (vmode) < vs
4251 && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
4252 vmode = GET_MODE_2XWIDER_MODE (vmode);
4254 tree type = lang_hooks.types.type_for_mode (mode, 1);
4255 if (type == NULL_TREE || TYPE_MODE (type) != mode)
4256 continue;
4257 type = build_vector_type (type, GET_MODE_SIZE (vmode)
4258 / GET_MODE_SIZE (mode));
4259 if (TYPE_MODE (type) != vmode)
4260 continue;
4261 if (TYPE_ALIGN_UNIT (type) > al)
4262 al = TYPE_ALIGN_UNIT (type);
4264 return build_int_cst (integer_type_node, al);
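/* A rough illustration of how this alignment is used (hypothetical
   user code and lowered form; the value 32 is only an example and
   depends on the target's preferred SIMD modes):

     void f (float *p)
     {
       #pragma omp simd aligned (p)
       for (int i = 0; i < 1024; i++)
         p[i] *= 2.0f;
     }

   With no explicit alignment on the clause, omp_clause_aligned_alignment
   picks the widest vector alignment the target prefers, and the
   OMP_CLAUSE_ALIGNED handling in lower_rec_input_clauses then emits
   roughly

     p = __builtin_assume_aligned (p, 32);

   so the vectorizer may use aligned accesses.  */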
4267 /* Return maximum possible vectorization factor for the target. */
4269 static int
4270 omp_max_vf (void)
4272 if (!optimize
4273 || optimize_debug
4274 || !flag_tree_loop_optimize
4275 || (!flag_tree_loop_vectorize
4276 && (global_options_set.x_flag_tree_loop_vectorize
4277 || global_options_set.x_flag_tree_vectorize)))
4278 return 1;
4280 int vs = targetm.vectorize.autovectorize_vector_sizes ();
4281 if (vs)
4283 vs = 1 << floor_log2 (vs);
4284 return vs;
4286 machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
4287 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
4288 return GET_MODE_NUNITS (vqimode);
4289 return 1;
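/* For example (hypothetical target, for illustration only): if the
   autovectorize_vector_sizes hook advertises 16- and 32-byte vectors
   (mask 0x30), then floor_log2 (0x30) == 5 and omp_max_vf returns
   1 << 5 == 32, enough lanes for 32 one-byte elements, the largest
   factor the vectorizer could ever pick for this target.  */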
4292 /* Helper function of lower_rec_input_clauses, used for #pragma omp simd
4293 privatization. */
4295 static bool
4296 lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
4297 tree &idx, tree &lane, tree &ivar, tree &lvar)
4299 if (max_vf == 0)
4301 max_vf = omp_max_vf ();
4302 if (max_vf > 1)
4304 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
4305 OMP_CLAUSE_SAFELEN);
4306 if (c
4307 && (TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) != INTEGER_CST
4308 || tree_int_cst_sgn (OMP_CLAUSE_SAFELEN_EXPR (c)) != 1))
4309 max_vf = 1;
4310 else if (c && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
4311 max_vf) == -1)
4312 max_vf = tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c));
4314 if (max_vf > 1)
4316 idx = create_tmp_var (unsigned_type_node);
4317 lane = create_tmp_var (unsigned_type_node);
4320 if (max_vf == 1)
4321 return false;
4323 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
4324 tree avar = create_tmp_var_raw (atype);
4325 if (TREE_ADDRESSABLE (new_var))
4326 TREE_ADDRESSABLE (avar) = 1;
4327 DECL_ATTRIBUTES (avar)
4328 = tree_cons (get_identifier ("omp simd array"), NULL,
4329 DECL_ATTRIBUTES (avar));
4330 gimple_add_tmp_var (avar);
4331 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
4332 NULL_TREE, NULL_TREE);
4333 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
4334 NULL_TREE, NULL_TREE);
4335 if (DECL_P (new_var))
4337 SET_DECL_VALUE_EXPR (new_var, lvar);
4338 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
4340 return true;
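/* A sketch of the privatization set up above (hypothetical names;
   the real array and lane variables are compiler-generated):

     #pragma omp simd private (x)
     for (i = 0; i < n; i++)
       { x = a[i]; b[i] = x * x; }

   conceptually becomes

     float x_arr[MAX_VF];              // carries the internal "omp simd array" attribute
     lane = GOMP_SIMD_LANE (simduid);  // IFN_GOMP_SIMD_LANE, emitted below
     for (i = 0; i < n; i++)
       { x_arr[lane] = a[i]; b[i] = x_arr[lane] * x_arr[lane]; }

   Each SIMD lane owns one array element; if the loop is vectorized
   the array becomes a vector, and if it is not, LANE stays 0 and only
   the first element is used.  */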
4343 /* Helper function of lower_rec_input_clauses. For a reference
4344 in a simd reduction, create the underlying variable it will point to. */
4346 static void
4347 handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
4349 tree z = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard)));
4350 if (TREE_CONSTANT (z))
4352 z = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_vard)),
4353 get_name (new_vard));
4354 gimple_add_tmp_var (z);
4355 TREE_ADDRESSABLE (z) = 1;
4356 z = build_fold_addr_expr_loc (loc, z);
4357 gimplify_assign (new_vard, z, ilist);
4361 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
4362 from the receiver (aka child) side and initializers for REFERENCE_TYPE
4363 private variables. Initialization statements go in ILIST, while calls
4364 to destructors go in DLIST. */
4366 static void
4367 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
4368 omp_context *ctx, struct omp_for_data *fd)
4370 tree c, dtor, copyin_seq, x, ptr;
4371 bool copyin_by_ref = false;
4372 bool lastprivate_firstprivate = false;
4373 bool reduction_omp_orig_ref = false;
4374 int pass;
4375 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
4376 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD);
4377 int max_vf = 0;
4378 tree lane = NULL_TREE, idx = NULL_TREE;
4379 tree ivar = NULL_TREE, lvar = NULL_TREE;
4380 gimple_seq llist[2] = { NULL, NULL };
4382 copyin_seq = NULL;
4384 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
4385 with data sharing clauses referencing variable sized vars. That
4386 is unnecessarily hard to support and very unlikely to result in
4387 vectorized code anyway. */
4388 if (is_simd)
4389 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4390 switch (OMP_CLAUSE_CODE (c))
4392 case OMP_CLAUSE_LINEAR:
4393 if (OMP_CLAUSE_LINEAR_ARRAY (c))
4394 max_vf = 1;
4395 /* FALLTHRU */
4396 case OMP_CLAUSE_PRIVATE:
4397 case OMP_CLAUSE_FIRSTPRIVATE:
4398 case OMP_CLAUSE_LASTPRIVATE:
4399 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
4400 max_vf = 1;
4401 break;
4402 case OMP_CLAUSE_REDUCTION:
4403 if (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF
4404 || is_variable_sized (OMP_CLAUSE_DECL (c)))
4405 max_vf = 1;
4406 break;
4407 default:
4408 continue;
4411 /* Do all the fixed sized types in the first pass, and the variable sized
4412 types in the second pass. This makes sure that the scalar arguments to
4413 the variable sized types are processed before we use them in the
4414 variable sized operations. */
4415 for (pass = 0; pass < 2; ++pass)
4417 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4419 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
4420 tree var, new_var;
4421 bool by_ref;
4422 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4424 switch (c_kind)
4426 case OMP_CLAUSE_PRIVATE:
4427 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
4428 continue;
4429 break;
4430 case OMP_CLAUSE_SHARED:
4431 /* Ignore shared directives in teams construct. */
4432 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
4433 continue;
4434 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
4436 gcc_assert (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c)
4437 || is_global_var (OMP_CLAUSE_DECL (c)));
4438 continue;
4440 case OMP_CLAUSE_FIRSTPRIVATE:
4441 case OMP_CLAUSE_COPYIN:
4442 break;
4443 case OMP_CLAUSE_LINEAR:
4444 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c)
4445 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
4446 lastprivate_firstprivate = true;
4447 break;
4448 case OMP_CLAUSE_REDUCTION:
4449 if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
4450 reduction_omp_orig_ref = true;
4451 break;
4452 case OMP_CLAUSE__LOOPTEMP_:
4453 /* Handle _looptemp_ clauses only on parallel/task. */
4454 if (fd)
4455 continue;
4456 break;
4457 case OMP_CLAUSE_LASTPRIVATE:
4458 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
4460 lastprivate_firstprivate = true;
4461 if (pass != 0 || is_taskloop_ctx (ctx))
4462 continue;
4464 /* Even without a corresponding firstprivate, if the
4465 decl is a Fortran allocatable, it needs an outer var
4466 reference. */
4467 else if (pass == 0
4468 && lang_hooks.decls.omp_private_outer_ref
4469 (OMP_CLAUSE_DECL (c)))
4470 lastprivate_firstprivate = true;
4471 break;
4472 case OMP_CLAUSE_ALIGNED:
4473 if (pass == 0)
4474 continue;
4475 var = OMP_CLAUSE_DECL (c);
4476 if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
4477 && !is_global_var (var))
4479 new_var = maybe_lookup_decl (var, ctx);
4480 if (new_var == NULL_TREE)
4481 new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
4482 x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
4483 tree alarg = omp_clause_aligned_alignment (c);
4484 alarg = fold_convert_loc (clause_loc, size_type_node, alarg);
4485 x = build_call_expr_loc (clause_loc, x, 2, new_var, alarg);
4486 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
4487 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
4488 gimplify_and_add (x, ilist);
4490 else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
4491 && is_global_var (var))
4493 tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
4494 new_var = lookup_decl (var, ctx);
4495 t = maybe_lookup_decl_in_outer_ctx (var, ctx);
4496 t = build_fold_addr_expr_loc (clause_loc, t);
4497 t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
4498 tree alarg = omp_clause_aligned_alignment (c);
4499 alarg = fold_convert_loc (clause_loc, size_type_node, alarg);
4500 t = build_call_expr_loc (clause_loc, t2, 2, t, alarg);
4501 t = fold_convert_loc (clause_loc, ptype, t);
4502 x = create_tmp_var (ptype);
4503 t = build2 (MODIFY_EXPR, ptype, x, t);
4504 gimplify_and_add (t, ilist);
4505 t = build_simple_mem_ref_loc (clause_loc, x);
4506 SET_DECL_VALUE_EXPR (new_var, t);
4507 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
4509 continue;
4510 default:
4511 continue;
4514 new_var = var = OMP_CLAUSE_DECL (c);
4515 if (c_kind == OMP_CLAUSE_REDUCTION && TREE_CODE (var) == MEM_REF)
4517 var = TREE_OPERAND (var, 0);
4518 if (TREE_CODE (var) == POINTER_PLUS_EXPR)
4519 var = TREE_OPERAND (var, 0);
4520 if (TREE_CODE (var) == INDIRECT_REF
4521 || TREE_CODE (var) == ADDR_EXPR)
4522 var = TREE_OPERAND (var, 0);
4523 if (is_variable_sized (var))
4525 gcc_assert (DECL_HAS_VALUE_EXPR_P (var));
4526 var = DECL_VALUE_EXPR (var);
4527 gcc_assert (TREE_CODE (var) == INDIRECT_REF);
4528 var = TREE_OPERAND (var, 0);
4529 gcc_assert (DECL_P (var));
4531 new_var = var;
4533 if (c_kind != OMP_CLAUSE_COPYIN)
4534 new_var = lookup_decl (var, ctx);
4536 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
4538 if (pass != 0)
4539 continue;
4541 /* C/C++ array section reductions. */
4542 else if (c_kind == OMP_CLAUSE_REDUCTION
4543 && var != OMP_CLAUSE_DECL (c))
4545 if (pass == 0)
4546 continue;
4548 tree bias = TREE_OPERAND (OMP_CLAUSE_DECL (c), 1);
4549 tree orig_var = TREE_OPERAND (OMP_CLAUSE_DECL (c), 0);
4550 if (TREE_CODE (orig_var) == POINTER_PLUS_EXPR)
4552 tree b = TREE_OPERAND (orig_var, 1);
4553 b = maybe_lookup_decl (b, ctx);
4554 if (b == NULL)
4556 b = TREE_OPERAND (orig_var, 1);
4557 b = maybe_lookup_decl_in_outer_ctx (b, ctx);
4559 if (integer_zerop (bias))
4560 bias = b;
4561 else
4563 bias = fold_convert_loc (clause_loc,
4564 TREE_TYPE (b), bias);
4565 bias = fold_build2_loc (clause_loc, PLUS_EXPR,
4566 TREE_TYPE (b), b, bias);
4568 orig_var = TREE_OPERAND (orig_var, 0);
4570 if (TREE_CODE (orig_var) == INDIRECT_REF
4571 || TREE_CODE (orig_var) == ADDR_EXPR)
4572 orig_var = TREE_OPERAND (orig_var, 0);
4573 tree d = OMP_CLAUSE_DECL (c);
4574 tree type = TREE_TYPE (d);
4575 gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
4576 tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
4577 const char *name = get_name (orig_var);
4578 if (TREE_CONSTANT (v))
4580 x = create_tmp_var_raw (type, name);
4581 gimple_add_tmp_var (x);
4582 TREE_ADDRESSABLE (x) = 1;
4583 x = build_fold_addr_expr_loc (clause_loc, x);
4585 else
4587 tree atmp
4588 = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
4589 tree t = maybe_lookup_decl (v, ctx);
4590 if (t)
4591 v = t;
4592 else
4593 v = maybe_lookup_decl_in_outer_ctx (v, ctx);
4594 gimplify_expr (&v, ilist, NULL, is_gimple_val, fb_rvalue);
4595 t = fold_build2_loc (clause_loc, PLUS_EXPR,
4596 TREE_TYPE (v), v,
4597 build_int_cst (TREE_TYPE (v), 1));
4598 t = fold_build2_loc (clause_loc, MULT_EXPR,
4599 TREE_TYPE (v), t,
4600 TYPE_SIZE_UNIT (TREE_TYPE (type)));
4601 tree al = size_int (TYPE_ALIGN (TREE_TYPE (type)));
4602 x = build_call_expr_loc (clause_loc, atmp, 2, t, al);
4605 tree ptype = build_pointer_type (TREE_TYPE (type));
4606 x = fold_convert_loc (clause_loc, ptype, x);
4607 tree y = create_tmp_var (ptype, name);
4608 gimplify_assign (y, x, ilist);
4609 x = y;
4610 tree yb = y;
4612 if (!integer_zerop (bias))
4614 bias = fold_convert_loc (clause_loc, pointer_sized_int_node,
4615 bias);
4616 yb = fold_convert_loc (clause_loc, pointer_sized_int_node,
4618 yb = fold_build2_loc (clause_loc, MINUS_EXPR,
4619 pointer_sized_int_node, yb, bias);
4620 x = fold_convert_loc (clause_loc, TREE_TYPE (x), yb);
4621 yb = create_tmp_var (ptype, name);
4622 gimplify_assign (yb, x, ilist);
4623 x = yb;
4626 d = TREE_OPERAND (d, 0);
4627 if (TREE_CODE (d) == POINTER_PLUS_EXPR)
4628 d = TREE_OPERAND (d, 0);
4629 if (TREE_CODE (d) == ADDR_EXPR)
4631 if (orig_var != var)
4633 gcc_assert (is_variable_sized (orig_var));
4634 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var),
4636 gimplify_assign (new_var, x, ilist);
4637 tree new_orig_var = lookup_decl (orig_var, ctx);
4638 tree t = build_fold_indirect_ref (new_var);
4639 DECL_IGNORED_P (new_var) = 0;
4640 TREE_THIS_NOTRAP (t) = 1;
4641 SET_DECL_VALUE_EXPR (new_orig_var, t);
4642 DECL_HAS_VALUE_EXPR_P (new_orig_var) = 1;
4644 else
4646 x = build2 (MEM_REF, TREE_TYPE (new_var), x,
4647 build_int_cst (ptype, 0));
4648 SET_DECL_VALUE_EXPR (new_var, x);
4649 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
4652 else
4654 gcc_assert (orig_var == var);
4655 if (TREE_CODE (d) == INDIRECT_REF)
4657 x = create_tmp_var (ptype, name);
4658 TREE_ADDRESSABLE (x) = 1;
4659 gimplify_assign (x, yb, ilist);
4660 x = build_fold_addr_expr_loc (clause_loc, x);
4662 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
4663 gimplify_assign (new_var, x, ilist);
4665 tree y1 = create_tmp_var (ptype, NULL);
4666 gimplify_assign (y1, y, ilist);
4667 tree i2 = NULL_TREE, y2 = NULL_TREE;
4668 tree body2 = NULL_TREE, end2 = NULL_TREE;
4669 tree y3 = NULL_TREE, y4 = NULL_TREE;
4670 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) || is_simd)
4672 y2 = create_tmp_var (ptype, NULL);
4673 gimplify_assign (y2, y, ilist);
4674 tree ref = build_outer_var_ref (var, ctx);
4675 /* For references, build_outer_var_ref already performs this. */
4676 if (TREE_CODE (d) == INDIRECT_REF)
4677 gcc_assert (is_reference (var));
4678 else if (TREE_CODE (d) == ADDR_EXPR)
4679 ref = build_fold_addr_expr (ref);
4680 else if (is_reference (var))
4681 ref = build_fold_addr_expr (ref);
4682 ref = fold_convert_loc (clause_loc, ptype, ref);
4683 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
4684 && OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
4686 y3 = create_tmp_var (ptype, NULL);
4687 gimplify_assign (y3, unshare_expr (ref), ilist);
4689 if (is_simd)
4691 y4 = create_tmp_var (ptype, NULL);
4692 gimplify_assign (y4, ref, dlist);
4695 tree i = create_tmp_var (TREE_TYPE (v), NULL);
4696 gimplify_assign (i, build_int_cst (TREE_TYPE (v), 0), ilist);
4697 tree body = create_artificial_label (UNKNOWN_LOCATION);
4698 tree end = create_artificial_label (UNKNOWN_LOCATION);
4699 gimple_seq_add_stmt (ilist, gimple_build_label (body));
4700 if (y2)
4702 i2 = create_tmp_var (TREE_TYPE (v), NULL);
4703 gimplify_assign (i2, build_int_cst (TREE_TYPE (v), 0), dlist);
4704 body2 = create_artificial_label (UNKNOWN_LOCATION);
4705 end2 = create_artificial_label (UNKNOWN_LOCATION);
4706 gimple_seq_add_stmt (dlist, gimple_build_label (body2));
4708 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4710 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
4711 tree decl_placeholder
4712 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c);
4713 SET_DECL_VALUE_EXPR (decl_placeholder,
4714 build_simple_mem_ref (y1));
4715 DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 1;
4716 SET_DECL_VALUE_EXPR (placeholder,
4717 y3 ? build_simple_mem_ref (y3)
4718 : error_mark_node);
4719 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
4720 x = lang_hooks.decls.omp_clause_default_ctor
4721 (c, build_simple_mem_ref (y1),
4722 y3 ? build_simple_mem_ref (y3) : NULL_TREE);
4723 if (x)
4724 gimplify_and_add (x, ilist);
4725 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
4727 gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
4728 lower_omp (&tseq, ctx);
4729 gimple_seq_add_seq (ilist, tseq);
4731 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
4732 if (is_simd)
4734 SET_DECL_VALUE_EXPR (decl_placeholder,
4735 build_simple_mem_ref (y2));
4736 SET_DECL_VALUE_EXPR (placeholder,
4737 build_simple_mem_ref (y4));
4738 gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
4739 lower_omp (&tseq, ctx);
4740 gimple_seq_add_seq (dlist, tseq);
4741 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
4743 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
4744 DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 0;
4745 x = lang_hooks.decls.omp_clause_dtor
4746 (c, build_simple_mem_ref (y2));
4747 if (x)
4749 gimple_seq tseq = NULL;
4750 dtor = x;
4751 gimplify_stmt (&dtor, &tseq);
4752 gimple_seq_add_seq (dlist, tseq);
4755 else
4757 x = omp_reduction_init (c, TREE_TYPE (type));
4758 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
4760 /* reduction(-:var) sums up the partial results, so it
4761 acts identically to reduction(+:var). */
4762 if (code == MINUS_EXPR)
4763 code = PLUS_EXPR;
4765 gimplify_assign (build_simple_mem_ref (y1), x, ilist);
4766 if (is_simd)
4768 x = build2 (code, TREE_TYPE (type),
4769 build_simple_mem_ref (y4),
4770 build_simple_mem_ref (y2));
4771 gimplify_assign (build_simple_mem_ref (y4), x, dlist);
4774 gimple *g
4775 = gimple_build_assign (y1, POINTER_PLUS_EXPR, y1,
4776 TYPE_SIZE_UNIT (TREE_TYPE (type)));
4777 gimple_seq_add_stmt (ilist, g);
4778 if (y3)
4780 g = gimple_build_assign (y3, POINTER_PLUS_EXPR, y3,
4781 TYPE_SIZE_UNIT (TREE_TYPE (type)));
4782 gimple_seq_add_stmt (ilist, g);
4784 g = gimple_build_assign (i, PLUS_EXPR, i,
4785 build_int_cst (TREE_TYPE (i), 1));
4786 gimple_seq_add_stmt (ilist, g);
4787 g = gimple_build_cond (LE_EXPR, i, v, body, end);
4788 gimple_seq_add_stmt (ilist, g);
4789 gimple_seq_add_stmt (ilist, gimple_build_label (end));
4790 if (y2)
4792 g = gimple_build_assign (y2, POINTER_PLUS_EXPR, y2,
4793 TYPE_SIZE_UNIT (TREE_TYPE (type)));
4794 gimple_seq_add_stmt (dlist, g);
4795 if (y4)
4797 g = gimple_build_assign
4798 (y4, POINTER_PLUS_EXPR, y4,
4799 TYPE_SIZE_UNIT (TREE_TYPE (type)));
4800 gimple_seq_add_stmt (dlist, g);
4802 g = gimple_build_assign (i2, PLUS_EXPR, i2,
4803 build_int_cst (TREE_TYPE (i2), 1));
4804 gimple_seq_add_stmt (dlist, g);
4805 g = gimple_build_cond (LE_EXPR, i2, v, body2, end2);
4806 gimple_seq_add_stmt (dlist, g);
4807 gimple_seq_add_stmt (dlist, gimple_build_label (end2));
4809 continue;
4811 else if (is_variable_sized (var))
4813 /* For variable sized types, we need to allocate the
4814 actual storage here. Call alloca and store the
4815 result in the pointer decl that we created elsewhere. */
4816 if (pass == 0)
4817 continue;
4819 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
4821 gcall *stmt;
4822 tree tmp, atmp;
4824 ptr = DECL_VALUE_EXPR (new_var);
4825 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
4826 ptr = TREE_OPERAND (ptr, 0);
4827 gcc_assert (DECL_P (ptr));
4828 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
4830 /* void *tmp = __builtin_alloca */
4831 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
4832 stmt = gimple_build_call (atmp, 2, x,
4833 size_int (DECL_ALIGN (var)));
4834 tmp = create_tmp_var_raw (ptr_type_node);
4835 gimple_add_tmp_var (tmp);
4836 gimple_call_set_lhs (stmt, tmp);
4838 gimple_seq_add_stmt (ilist, stmt);
4840 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
4841 gimplify_assign (ptr, x, ilist);
4844 else if (is_reference (var))
4846 /* For references that are being privatized for Fortran,
4847 allocate new backing storage for the new pointer
4848 variable. This allows us to avoid changing all the
4849 code that expects a pointer to something that expects
4850 a direct variable. */
4851 if (pass == 0)
4852 continue;
4854 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
4855 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
4857 x = build_receiver_ref (var, false, ctx);
4858 x = build_fold_addr_expr_loc (clause_loc, x);
4860 else if (TREE_CONSTANT (x))
4862 /* For reduction in SIMD loop, defer adding the
4863 initialization of the reference, because if we decide
4864 to use SIMD array for it, the initialization could cause
4865 expansion ICE. */
4866 if (c_kind == OMP_CLAUSE_REDUCTION && is_simd)
4867 x = NULL_TREE;
4868 else
4870 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
4871 get_name (var));
4872 gimple_add_tmp_var (x);
4873 TREE_ADDRESSABLE (x) = 1;
4874 x = build_fold_addr_expr_loc (clause_loc, x);
4877 else
4879 tree atmp
4880 = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
4881 tree rtype = TREE_TYPE (TREE_TYPE (new_var));
4882 tree al = size_int (TYPE_ALIGN (rtype));
4883 x = build_call_expr_loc (clause_loc, atmp, 2, x, al);
4886 if (x)
4888 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
4889 gimplify_assign (new_var, x, ilist);
4892 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4894 else if (c_kind == OMP_CLAUSE_REDUCTION
4895 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4897 if (pass == 0)
4898 continue;
4900 else if (pass != 0)
4901 continue;
4903 switch (OMP_CLAUSE_CODE (c))
4905 case OMP_CLAUSE_SHARED:
4906 /* Ignore shared directives in teams construct. */
4907 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
4908 continue;
4909 /* Shared global vars are just accessed directly. */
4910 if (is_global_var (new_var))
4911 break;
4912 /* For taskloop firstprivate/lastprivate, represented
4913 as firstprivate and shared clause on the task, new_var
4914 is the firstprivate var. */
4915 if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
4916 break;
4917 /* Set up the DECL_VALUE_EXPR for shared variables now. This
4918 needs to be delayed until after fixup_child_record_type so
4919 that we get the correct type during the dereference. */
4920 by_ref = use_pointer_for_field (var, ctx);
4921 x = build_receiver_ref (var, by_ref, ctx);
4922 SET_DECL_VALUE_EXPR (new_var, x);
4923 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
4925 /* ??? If VAR is not passed by reference, and the variable
4926 hasn't been initialized yet, then we'll get a warning for
4927 the store into the omp_data_s structure. Ideally, we'd be
4928 able to notice this and not store anything at all, but
4929 we're generating code too early. Suppress the warning. */
4930 if (!by_ref)
4931 TREE_NO_WARNING (var) = 1;
4932 break;
4934 case OMP_CLAUSE_LASTPRIVATE:
4935 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
4936 break;
4937 /* FALLTHRU */
4939 case OMP_CLAUSE_PRIVATE:
4940 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
4941 x = build_outer_var_ref (var, ctx);
4942 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
4944 if (is_task_ctx (ctx))
4945 x = build_receiver_ref (var, false, ctx);
4946 else
4947 x = build_outer_var_ref (var, ctx);
4949 else
4950 x = NULL;
4951 do_private:
4952 tree nx;
4953 nx = lang_hooks.decls.omp_clause_default_ctor
4954 (c, unshare_expr (new_var), x);
4955 if (is_simd)
4957 tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
4958 if ((TREE_ADDRESSABLE (new_var) || nx || y
4959 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
4960 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
4961 idx, lane, ivar, lvar))
4963 if (nx)
4964 x = lang_hooks.decls.omp_clause_default_ctor
4965 (c, unshare_expr (ivar), x);
4966 if (nx && x)
4967 gimplify_and_add (x, &llist[0]);
4968 if (y)
4970 y = lang_hooks.decls.omp_clause_dtor (c, ivar);
4971 if (y)
4973 gimple_seq tseq = NULL;
4975 dtor = y;
4976 gimplify_stmt (&dtor, &tseq);
4977 gimple_seq_add_seq (&llist[1], tseq);
4980 break;
4983 if (nx)
4984 gimplify_and_add (nx, ilist);
4985 /* FALLTHRU */
4987 do_dtor:
4988 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
4989 if (x)
4991 gimple_seq tseq = NULL;
4993 dtor = x;
4994 gimplify_stmt (&dtor, &tseq);
4995 gimple_seq_add_seq (dlist, tseq);
4997 break;
4999 case OMP_CLAUSE_LINEAR:
5000 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
5001 goto do_firstprivate;
5002 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
5003 x = NULL;
5004 else
5005 x = build_outer_var_ref (var, ctx);
5006 goto do_private;
5008 case OMP_CLAUSE_FIRSTPRIVATE:
5009 if (is_task_ctx (ctx))
5011 if (is_reference (var) || is_variable_sized (var))
5012 goto do_dtor;
5013 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
5014 ctx))
5015 || use_pointer_for_field (var, NULL))
5017 x = build_receiver_ref (var, false, ctx);
5018 SET_DECL_VALUE_EXPR (new_var, x);
5019 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
5020 goto do_dtor;
5023 do_firstprivate:
5024 x = build_outer_var_ref (var, ctx);
5025 if (is_simd)
5027 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
5028 && gimple_omp_for_combined_into_p (ctx->stmt))
5030 tree t = OMP_CLAUSE_LINEAR_STEP (c);
5031 tree stept = TREE_TYPE (t);
5032 tree ct = find_omp_clause (clauses,
5033 OMP_CLAUSE__LOOPTEMP_);
5034 gcc_assert (ct);
5035 tree l = OMP_CLAUSE_DECL (ct);
5036 tree n1 = fd->loop.n1;
5037 tree step = fd->loop.step;
5038 tree itype = TREE_TYPE (l);
5039 if (POINTER_TYPE_P (itype))
5040 itype = signed_type_for (itype);
5041 l = fold_build2 (MINUS_EXPR, itype, l, n1);
5042 if (TYPE_UNSIGNED (itype)
5043 && fd->loop.cond_code == GT_EXPR)
5044 l = fold_build2 (TRUNC_DIV_EXPR, itype,
5045 fold_build1 (NEGATE_EXPR, itype, l),
5046 fold_build1 (NEGATE_EXPR,
5047 itype, step));
5048 else
5049 l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
5050 t = fold_build2 (MULT_EXPR, stept,
5051 fold_convert (stept, l), t);
5053 if (OMP_CLAUSE_LINEAR_ARRAY (c))
5055 x = lang_hooks.decls.omp_clause_linear_ctor
5056 (c, new_var, x, t);
5057 gimplify_and_add (x, ilist);
5058 goto do_dtor;
5061 if (POINTER_TYPE_P (TREE_TYPE (x)))
5062 x = fold_build2 (POINTER_PLUS_EXPR,
5063 TREE_TYPE (x), x, t);
5064 else
5065 x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
5068 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
5069 || TREE_ADDRESSABLE (new_var))
5070 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
5071 idx, lane, ivar, lvar))
5073 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
5075 tree iv = create_tmp_var (TREE_TYPE (new_var));
5076 x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
5077 gimplify_and_add (x, ilist);
5078 gimple_stmt_iterator gsi
5079 = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
5080 gassign *g
5081 = gimple_build_assign (unshare_expr (lvar), iv);
5082 gsi_insert_before_without_update (&gsi, g,
5083 GSI_SAME_STMT);
5084 tree t = OMP_CLAUSE_LINEAR_STEP (c);
5085 enum tree_code code = PLUS_EXPR;
5086 if (POINTER_TYPE_P (TREE_TYPE (new_var)))
5087 code = POINTER_PLUS_EXPR;
5088 g = gimple_build_assign (iv, code, iv, t);
5089 gsi_insert_before_without_update (&gsi, g,
5090 GSI_SAME_STMT);
5091 break;
5093 x = lang_hooks.decls.omp_clause_copy_ctor
5094 (c, unshare_expr (ivar), x);
5095 gimplify_and_add (x, &llist[0]);
5096 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
5097 if (x)
5099 gimple_seq tseq = NULL;
5101 dtor = x;
5102 gimplify_stmt (&dtor, &tseq);
5103 gimple_seq_add_seq (&llist[1], tseq);
5105 break;
5108 x = lang_hooks.decls.omp_clause_copy_ctor
5109 (c, unshare_expr (new_var), x);
5110 gimplify_and_add (x, ilist);
5111 goto do_dtor;
5113 case OMP_CLAUSE__LOOPTEMP_:
5114 gcc_assert (is_taskreg_ctx (ctx));
5115 x = build_outer_var_ref (var, ctx);
5116 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
5117 gimplify_and_add (x, ilist);
5118 break;
5120 case OMP_CLAUSE_COPYIN:
5121 by_ref = use_pointer_for_field (var, NULL);
5122 x = build_receiver_ref (var, by_ref, ctx);
5123 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
5124 append_to_statement_list (x, &copyin_seq);
5125 copyin_by_ref |= by_ref;
5126 break;
5128 case OMP_CLAUSE_REDUCTION:
5129 /* OpenACC reductions are initialized using the
5130 GOACC_REDUCTION internal function. */
5131 if (is_gimple_omp_oacc (ctx->stmt))
5132 break;
5133 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
5135 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
5136 gimple *tseq;
5137 x = build_outer_var_ref (var, ctx);
5139 if (is_reference (var)
5140 && !useless_type_conversion_p (TREE_TYPE (placeholder),
5141 TREE_TYPE (x)))
5142 x = build_fold_addr_expr_loc (clause_loc, x);
5143 SET_DECL_VALUE_EXPR (placeholder, x);
5144 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
5145 tree new_vard = new_var;
5146 if (is_reference (var))
5148 gcc_assert (TREE_CODE (new_var) == MEM_REF);
5149 new_vard = TREE_OPERAND (new_var, 0);
5150 gcc_assert (DECL_P (new_vard));
5152 if (is_simd
5153 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
5154 idx, lane, ivar, lvar))
5156 if (new_vard == new_var)
5158 gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
5159 SET_DECL_VALUE_EXPR (new_var, ivar);
5161 else
5163 SET_DECL_VALUE_EXPR (new_vard,
5164 build_fold_addr_expr (ivar));
5165 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
5167 x = lang_hooks.decls.omp_clause_default_ctor
5168 (c, unshare_expr (ivar),
5169 build_outer_var_ref (var, ctx));
5170 if (x)
5171 gimplify_and_add (x, &llist[0]);
5172 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
5174 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
5175 lower_omp (&tseq, ctx);
5176 gimple_seq_add_seq (&llist[0], tseq);
5178 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
5179 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
5180 lower_omp (&tseq, ctx);
5181 gimple_seq_add_seq (&llist[1], tseq);
5182 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
5183 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
5184 if (new_vard == new_var)
5185 SET_DECL_VALUE_EXPR (new_var, lvar);
5186 else
5187 SET_DECL_VALUE_EXPR (new_vard,
5188 build_fold_addr_expr (lvar));
5189 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
5190 if (x)
5192 tseq = NULL;
5193 dtor = x;
5194 gimplify_stmt (&dtor, &tseq);
5195 gimple_seq_add_seq (&llist[1], tseq);
5197 break;
5199 /* If this is a reference to a constant-size reduction var
5200 with a placeholder, we haven't emitted its initializer
5201 yet, because that is undesirable if SIMD arrays are used.
5202 But if they aren't used, we need to emit the deferred
5203 initialization now. */
5204 else if (is_reference (var) && is_simd)
5205 handle_simd_reference (clause_loc, new_vard, ilist);
5206 x = lang_hooks.decls.omp_clause_default_ctor
5207 (c, unshare_expr (new_var),
5208 build_outer_var_ref (var, ctx));
5209 if (x)
5210 gimplify_and_add (x, ilist);
5211 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
5213 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
5214 lower_omp (&tseq, ctx);
5215 gimple_seq_add_seq (ilist, tseq);
5217 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
5218 if (is_simd)
5220 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
5221 lower_omp (&tseq, ctx);
5222 gimple_seq_add_seq (dlist, tseq);
5223 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
5225 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
5226 goto do_dtor;
5228 else
5230 x = omp_reduction_init (c, TREE_TYPE (new_var));
5231 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
5232 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
5234 /* reduction(-:var) sums up the partial results, so it
5235 acts identically to reduction(+:var). */
5236 if (code == MINUS_EXPR)
5237 code = PLUS_EXPR;
5239 tree new_vard = new_var;
5240 if (is_simd && is_reference (var))
5242 gcc_assert (TREE_CODE (new_var) == MEM_REF);
5243 new_vard = TREE_OPERAND (new_var, 0);
5244 gcc_assert (DECL_P (new_vard));
5246 if (is_simd
5247 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
5248 idx, lane, ivar, lvar))
5250 tree ref = build_outer_var_ref (var, ctx);
5252 gimplify_assign (unshare_expr (ivar), x, &llist[0]);
5254 x = build2 (code, TREE_TYPE (ref), ref, ivar);
5255 ref = build_outer_var_ref (var, ctx);
5256 gimplify_assign (ref, x, &llist[1]);
5258 if (new_vard != new_var)
5260 SET_DECL_VALUE_EXPR (new_vard,
5261 build_fold_addr_expr (lvar));
5262 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
5265 else
5267 if (is_reference (var) && is_simd)
5268 handle_simd_reference (clause_loc, new_vard, ilist);
5269 gimplify_assign (new_var, x, ilist);
5270 if (is_simd)
5272 tree ref = build_outer_var_ref (var, ctx);
5274 x = build2 (code, TREE_TYPE (ref), ref, new_var);
5275 ref = build_outer_var_ref (var, ctx);
5276 gimplify_assign (ref, x, dlist);
5280 break;
5282 default:
5283 gcc_unreachable ();
5288 if (lane)
5290 tree uid = create_tmp_var (ptr_type_node, "simduid");
5291 /* Don't want uninit warnings on simduid; it is always uninitialized,
5292 since we use it only for its DECL_UID, never for its value. */
5293 TREE_NO_WARNING (uid) = 1;
5294 gimple *g
5295 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
5296 gimple_call_set_lhs (g, lane);
5297 gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
5298 gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
5299 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
5300 OMP_CLAUSE__SIMDUID__DECL (c) = uid;
5301 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
5302 gimple_omp_for_set_clauses (ctx->stmt, c);
5303 g = gimple_build_assign (lane, INTEGER_CST,
5304 build_int_cst (unsigned_type_node, 0));
5305 gimple_seq_add_stmt (ilist, g);
5306 for (int i = 0; i < 2; i++)
5307 if (llist[i])
5309 tree vf = create_tmp_var (unsigned_type_node);
5310 g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
5311 gimple_call_set_lhs (g, vf);
5312 gimple_seq *seq = i == 0 ? ilist : dlist;
5313 gimple_seq_add_stmt (seq, g);
5314 tree t = build_int_cst (unsigned_type_node, 0);
5315 g = gimple_build_assign (idx, INTEGER_CST, t);
5316 gimple_seq_add_stmt (seq, g);
5317 tree body = create_artificial_label (UNKNOWN_LOCATION);
5318 tree header = create_artificial_label (UNKNOWN_LOCATION);
5319 tree end = create_artificial_label (UNKNOWN_LOCATION);
5320 gimple_seq_add_stmt (seq, gimple_build_goto (header));
5321 gimple_seq_add_stmt (seq, gimple_build_label (body));
5322 gimple_seq_add_seq (seq, llist[i]);
5323 t = build_int_cst (unsigned_type_node, 1);
5324 g = gimple_build_assign (idx, PLUS_EXPR, idx, t);
5325 gimple_seq_add_stmt (seq, g);
5326 gimple_seq_add_stmt (seq, gimple_build_label (header));
5327 g = gimple_build_cond (LT_EXPR, idx, vf, body, end);
5328 gimple_seq_add_stmt (seq, g);
5329 gimple_seq_add_stmt (seq, gimple_build_label (end));
5333 /* The copyin sequence is not to be executed by the main thread, since
5334 that would result in self-copies. Such a self-copy may be harmless
5335 for scalars, but it certainly is not for a user-defined C++ operator=. */
5336 if (copyin_seq)
5338 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
5340 x = build2 (NE_EXPR, boolean_type_node, x,
5341 build_int_cst (TREE_TYPE (x), 0));
5342 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
5343 gimplify_and_add (x, ilist);
5346 /* If any copyin variable is passed by reference, we must ensure the
5347 master thread doesn't modify it before it is copied over in all
5348 threads. Similarly for variables in both firstprivate and
5349 lastprivate clauses we need to ensure the lastprivate copying
5350 happens after firstprivate copying in all threads. And similarly
5351 for UDRs if initializer expression refers to omp_orig. */
5352 if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref)
5354 /* Don't add any barrier for #pragma omp simd or
5355 #pragma omp distribute. */
5356 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
5357 || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
5358 gimple_seq_add_stmt (ilist, build_omp_barrier (NULL_TREE));
5361 /* If max_vf is non-zero, then we can use only a vectorization factor
5362 up to the max_vf we chose. So stick it into the safelen clause. */
5363 if (max_vf)
5365 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
5366 OMP_CLAUSE_SAFELEN);
5367 if (c == NULL_TREE
5368 || (TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) == INTEGER_CST
5369 && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
5370 max_vf) == 1))
5372 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
5373 OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
5374 max_vf);
5375 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
5376 gimple_omp_for_set_clauses (ctx->stmt, c);
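/* Tying the above together (hypothetical example): in

     void f (int n, int m, float *a)
     {
       float tmp[m];
       #pragma omp simd private (tmp)
       for (int i = 0; i < n; i++)
         { tmp[0] = a[i]; a[i] = tmp[0] * 2; }
     }

   the variable-length TMP forces max_vf to 1 in the clause scan at
   the top of this function, and the code just above then appends an
   implicit safelen(1) clause, so the vectorizer simply leaves the
   loop alone rather than trying to privatize a variable sized var
   per lane.  */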
5382 /* Generate code to implement the LASTPRIVATE clauses. This is used for
5383 both parallel and workshare constructs. PREDICATE may be NULL if it's
5384 always true. */
5386 static void
5387 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
5388 omp_context *ctx)
5390 tree x, c, label = NULL, orig_clauses = clauses;
5391 bool par_clauses = false;
5392 tree simduid = NULL, lastlane = NULL;
5394 /* Early exit if there are no lastprivate or linear clauses. */
5395 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
5396 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
5397 || (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
5398 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
5399 break;
5400 if (clauses == NULL)
5402 /* If this was a workshare clause, see if it had been combined
5403 with its parallel. In that case, look for the clauses on the
5404 parallel statement itself. */
5405 if (is_parallel_ctx (ctx))
5406 return;
5408 ctx = ctx->outer;
5409 if (ctx == NULL || !is_parallel_ctx (ctx))
5410 return;
5412 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
5413 OMP_CLAUSE_LASTPRIVATE);
5414 if (clauses == NULL)
5415 return;
5416 par_clauses = true;
5419 if (predicate)
5421 gcond *stmt;
5422 tree label_true, arm1, arm2;
5424 label = create_artificial_label (UNKNOWN_LOCATION);
5425 label_true = create_artificial_label (UNKNOWN_LOCATION);
5426 arm1 = TREE_OPERAND (predicate, 0);
5427 arm2 = TREE_OPERAND (predicate, 1);
5428 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
5429 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
5430 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
5431 label_true, label);
5432 gimple_seq_add_stmt (stmt_list, stmt);
5433 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
5436 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
5437 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
5439 simduid = find_omp_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
5440 if (simduid)
5441 simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
5444 for (c = clauses; c ;)
5446 tree var, new_var;
5447 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
5449 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
5450 || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
5451 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
5453 var = OMP_CLAUSE_DECL (c);
5454 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
5455 && OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)
5456 && is_taskloop_ctx (ctx))
5458 gcc_checking_assert (ctx->outer && is_task_ctx (ctx->outer));
5459 new_var = lookup_decl (var, ctx->outer);
5461 else
5463 new_var = lookup_decl (var, ctx);
5464 /* Avoid uninitialized warnings for lastprivate and
5465 for linear iterators. */
5466 if (predicate
5467 && (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
5468 || OMP_CLAUSE_LINEAR_NO_COPYIN (c)))
5469 TREE_NO_WARNING (new_var) = 1;
5472 if (simduid && DECL_HAS_VALUE_EXPR_P (new_var))
5474 tree val = DECL_VALUE_EXPR (new_var);
5475 if (TREE_CODE (val) == ARRAY_REF
5476 && VAR_P (TREE_OPERAND (val, 0))
5477 && lookup_attribute ("omp simd array",
5478 DECL_ATTRIBUTES (TREE_OPERAND (val,
5479 0))))
5481 if (lastlane == NULL)
5483 lastlane = create_tmp_var (unsigned_type_node);
5484 gcall *g
5485 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
5486 2, simduid,
5487 TREE_OPERAND (val, 1));
5488 gimple_call_set_lhs (g, lastlane);
5489 gimple_seq_add_stmt (stmt_list, g);
5491 new_var = build4 (ARRAY_REF, TREE_TYPE (val),
5492 TREE_OPERAND (val, 0), lastlane,
5493 NULL_TREE, NULL_TREE);
5497 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
5498 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
5500 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
5501 gimple_seq_add_seq (stmt_list,
5502 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
5503 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
5505 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
5506 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
5508 lower_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
5509 gimple_seq_add_seq (stmt_list,
5510 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
5511 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) = NULL;
5514 x = NULL_TREE;
5515 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
5516 && OMP_CLAUSE_LASTPRIVATE_TASKLOOP_IV (c))
5518 gcc_checking_assert (is_taskloop_ctx (ctx));
5519 tree ovar = maybe_lookup_decl_in_outer_ctx (var,
5520 ctx->outer->outer);
5521 if (is_global_var (ovar))
5522 x = ovar;
5524 if (!x)
5525 x = build_outer_var_ref (var, ctx, true);
5526 if (is_reference (var))
5527 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
5528 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
5529 gimplify_and_add (x, stmt_list);
5531 c = OMP_CLAUSE_CHAIN (c);
5532 if (c == NULL && !par_clauses)
5534 /* If this was a workshare clause, see if it had been combined
5535 with its parallel. In that case, continue looking for the
5536 clauses also on the parallel statement itself. */
5537 if (is_parallel_ctx (ctx))
5538 break;
5540 ctx = ctx->outer;
5541 if (ctx == NULL || !is_parallel_ctx (ctx))
5542 break;
5544 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
5545 OMP_CLAUSE_LASTPRIVATE);
5546 par_clauses = true;
5550 if (label)
5551 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
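/* A sketch of the result (hypothetical user code and simplified
   lowered form):

     #pragma omp for lastprivate (x)
     for (i = 0; i < n; i++)
       x = f (i);

   Every thread works on its private copy of X; the copy-out built
   above is guarded by PREDICATE, supplied by the caller, so that only
   the thread which executed the sequentially last iteration performs
   roughly

     if (i == n)
       x_original = x_private;

   For simd loops the value is instead read from the "omp simd array"
   element selected by the GOMP_SIMD_LAST_LANE internal call.  */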
5554 /* Lower the OpenACC reductions of CLAUSES for compute axis LEVEL
5555 (which might be a placeholder). INNER is true if this is an inner
5556 axis of a multi-axis loop. FORK and JOIN are (optional) fork and
5557 join markers. Generate the before-loop forking sequence in
5558 FORK_SEQ and the after-loop joining sequence to JOIN_SEQ. The
5559 general form of these sequences is
5561 GOACC_REDUCTION_SETUP
5562 GOACC_FORK
5563 GOACC_REDUCTION_INIT
5565 GOACC_REDUCTION_FINI
5566 GOACC_JOIN
5567 GOACC_REDUCTION_TEARDOWN. */
5569 static void
5570 lower_oacc_reductions (location_t loc, tree clauses, tree level, bool inner,
5571 gcall *fork, gcall *join, gimple_seq *fork_seq,
5572 gimple_seq *join_seq, omp_context *ctx)
5574 gimple_seq before_fork = NULL;
5575 gimple_seq after_fork = NULL;
5576 gimple_seq before_join = NULL;
5577 gimple_seq after_join = NULL;
5578 tree init_code = NULL_TREE, fini_code = NULL_TREE,
5579 setup_code = NULL_TREE, teardown_code = NULL_TREE;
5580 unsigned offset = 0;
5582 for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
5583 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
5585 tree orig = OMP_CLAUSE_DECL (c);
5586 tree var = maybe_lookup_decl (orig, ctx);
5587 tree ref_to_res = NULL_TREE;
5588 tree incoming, outgoing, v1, v2, v3;
5589 bool is_private = false;
5591 enum tree_code rcode = OMP_CLAUSE_REDUCTION_CODE (c);
5592 if (rcode == MINUS_EXPR)
5593 rcode = PLUS_EXPR;
5594 else if (rcode == TRUTH_ANDIF_EXPR)
5595 rcode = BIT_AND_EXPR;
5596 else if (rcode == TRUTH_ORIF_EXPR)
5597 rcode = BIT_IOR_EXPR;
5598 tree op = build_int_cst (unsigned_type_node, rcode);
5600 if (!var)
5601 var = orig;
5603 incoming = outgoing = var;
5605 if (!inner)
5607 /* See if an outer construct also reduces this variable. */
5608 omp_context *outer = ctx;
5610 while (omp_context *probe = outer->outer)
5612 enum gimple_code type = gimple_code (probe->stmt);
5613 tree cls;
5615 switch (type)
5617 case GIMPLE_OMP_FOR:
5618 cls = gimple_omp_for_clauses (probe->stmt);
5619 break;
5621 case GIMPLE_OMP_TARGET:
5622 if (gimple_omp_target_kind (probe->stmt)
5623 != GF_OMP_TARGET_KIND_OACC_PARALLEL)
5624 goto do_lookup;
5626 cls = gimple_omp_target_clauses (probe->stmt);
5627 break;
5629 default:
5630 goto do_lookup;
5633 outer = probe;
5634 for (; cls; cls = OMP_CLAUSE_CHAIN (cls))
5635 if (OMP_CLAUSE_CODE (cls) == OMP_CLAUSE_REDUCTION
5636 && orig == OMP_CLAUSE_DECL (cls))
5638 incoming = outgoing = lookup_decl (orig, probe);
5639 goto has_outer_reduction;
5641 else if ((OMP_CLAUSE_CODE (cls) == OMP_CLAUSE_FIRSTPRIVATE
5642 || OMP_CLAUSE_CODE (cls) == OMP_CLAUSE_PRIVATE)
5643 && orig == OMP_CLAUSE_DECL (cls))
5645 is_private = true;
5646 goto do_lookup;
5650 do_lookup:
5651 /* This is the outermost construct with this reduction,
5652 see if there's a mapping for it. */
5653 if (gimple_code (outer->stmt) == GIMPLE_OMP_TARGET
5654 && maybe_lookup_field (orig, outer) && !is_private)
5656 ref_to_res = build_receiver_ref (orig, false, outer);
5657 if (is_reference (orig))
5658 ref_to_res = build_simple_mem_ref (ref_to_res);
5660 tree type = TREE_TYPE (var);
5661 if (POINTER_TYPE_P (type))
5662 type = TREE_TYPE (type);
5664 outgoing = var;
5665 incoming = omp_reduction_init_op (loc, rcode, type);
5667 else
5669 /* Try to look at enclosing contexts for reduction var,
5670 use original if no mapping found. */
5671 tree t = NULL_TREE;
5672 omp_context *c = ctx->outer;
5673 while (c && !t)
5675 t = maybe_lookup_decl (orig, c);
5676 c = c->outer;
5678 incoming = outgoing = (t ? t : orig);
5681 has_outer_reduction:;
5684 if (!ref_to_res)
5685 ref_to_res = integer_zero_node;
5687 if (is_reference (orig))
5689 tree type = TREE_TYPE (var);
5690 const char *id = IDENTIFIER_POINTER (DECL_NAME (var));
5692 if (!inner)
5694 tree x = create_tmp_var (TREE_TYPE (type), id);
5695 gimplify_assign (var, build_fold_addr_expr (x), fork_seq);
5698 v1 = create_tmp_var (type, id);
5699 v2 = create_tmp_var (type, id);
5700 v3 = create_tmp_var (type, id);
5702 gimplify_assign (v1, var, fork_seq);
5703 gimplify_assign (v2, var, fork_seq);
5704 gimplify_assign (v3, var, fork_seq);
5706 var = build_simple_mem_ref (var);
5707 v1 = build_simple_mem_ref (v1);
5708 v2 = build_simple_mem_ref (v2);
5709 v3 = build_simple_mem_ref (v3);
5710 outgoing = build_simple_mem_ref (outgoing);
5712 if (!TREE_CONSTANT (incoming))
5713 incoming = build_simple_mem_ref (incoming);
5715 else
5716 v1 = v2 = v3 = var;
5718 /* Determine position in reduction buffer, which may be used
5719 by target. */
5720 enum machine_mode mode = TYPE_MODE (TREE_TYPE (var));
5721 unsigned align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
5722 offset = (offset + align - 1) & ~(align - 1);
5723 tree off = build_int_cst (sizetype, offset);
5724 offset += GET_MODE_SIZE (mode);
5726 if (!init_code)
5728 init_code = build_int_cst (integer_type_node,
5729 IFN_GOACC_REDUCTION_INIT);
5730 fini_code = build_int_cst (integer_type_node,
5731 IFN_GOACC_REDUCTION_FINI);
5732 setup_code = build_int_cst (integer_type_node,
5733 IFN_GOACC_REDUCTION_SETUP);
5734 teardown_code = build_int_cst (integer_type_node,
5735 IFN_GOACC_REDUCTION_TEARDOWN);
5738 tree setup_call
5739 = build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
5740 TREE_TYPE (var), 6, setup_code,
5741 unshare_expr (ref_to_res),
5742 incoming, level, op, off);
5743 tree init_call
5744 = build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
5745 TREE_TYPE (var), 6, init_code,
5746 unshare_expr (ref_to_res),
5747 v1, level, op, off);
5748 tree fini_call
5749 = build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
5750 TREE_TYPE (var), 6, fini_code,
5751 unshare_expr (ref_to_res),
5752 v2, level, op, off);
5753 tree teardown_call
5754 = build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
5755 TREE_TYPE (var), 6, teardown_code,
5756 ref_to_res, v3, level, op, off);
5758 gimplify_assign (v1, setup_call, &before_fork);
5759 gimplify_assign (v2, init_call, &after_fork);
5760 gimplify_assign (v3, fini_call, &before_join);
5761 gimplify_assign (outgoing, teardown_call, &after_join);
5764 /* Now stitch things together. */
5765 gimple_seq_add_seq (fork_seq, before_fork);
5766 if (fork)
5767 gimple_seq_add_stmt (fork_seq, fork);
5768 gimple_seq_add_seq (fork_seq, after_fork);
5770 gimple_seq_add_seq (join_seq, before_join);
5771 if (join)
5772 gimple_seq_add_stmt (join_seq, join);
5773 gimple_seq_add_seq (join_seq, after_join);
5776 /* Generate code to implement the REDUCTION clauses. */
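/* For example, a lone

       #pragma omp parallel reduction (+:sum)

   clause is implemented as a single atomic update of the shared variable,
   roughly

       #pragma omp atomic
       sum_shared = sum_shared + sum_private;

   whereas multiple reduction clauses, array reductions and user defined
   reductions instead merge their partial results between GOMP_atomic_start ()
   and GOMP_atomic_end () calls.  (Illustrative sketch only; the variable
   names are made up.)  */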
5778 static void
5779 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
5781 gimple_seq sub_seq = NULL;
5782 gimple *stmt;
5783 tree x, c;
5784 int count = 0;
5786 /* OpenACC loop reductions are handled elsewhere. */
5787 if (is_gimple_omp_oacc (ctx->stmt))
5788 return;
5790 /* SIMD reductions are handled in lower_rec_input_clauses. */
5791 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
5792 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
5793 return;
5795 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
5796 update in that case, otherwise use a lock. */
5797 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
5798 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
5800 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
5801 || TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF)
5803 /* Never use OMP_ATOMIC for array reductions or UDRs. */
5804 count = -1;
5805 break;
5807 count++;
5810 if (count == 0)
5811 return;
5813 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
5815 tree var, ref, new_var, orig_var;
5816 enum tree_code code;
5817 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
5819 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
5820 continue;
5822 orig_var = var = OMP_CLAUSE_DECL (c);
5823 if (TREE_CODE (var) == MEM_REF)
5825 var = TREE_OPERAND (var, 0);
5826 if (TREE_CODE (var) == POINTER_PLUS_EXPR)
5827 var = TREE_OPERAND (var, 0);
5828 if (TREE_CODE (var) == INDIRECT_REF
5829 || TREE_CODE (var) == ADDR_EXPR)
5830 var = TREE_OPERAND (var, 0);
5831 orig_var = var;
5832 if (is_variable_sized (var))
5834 gcc_assert (DECL_HAS_VALUE_EXPR_P (var));
5835 var = DECL_VALUE_EXPR (var);
5836 gcc_assert (TREE_CODE (var) == INDIRECT_REF);
5837 var = TREE_OPERAND (var, 0);
5838 gcc_assert (DECL_P (var));
5841 new_var = lookup_decl (var, ctx);
5842 if (var == OMP_CLAUSE_DECL (c) && is_reference (var))
5843 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
5844 ref = build_outer_var_ref (var, ctx);
5845 code = OMP_CLAUSE_REDUCTION_CODE (c);
5847 /* reduction(-:var) sums up the partial results, so it acts
5848 identically to reduction(+:var). */
5849 if (code == MINUS_EXPR)
5850 code = PLUS_EXPR;
5852 if (count == 1)
5854 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
5856 addr = save_expr (addr);
5857 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
5858 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
5859 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
5860 gimplify_and_add (x, stmt_seqp);
5861 return;
5863 else if (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF)
5865 tree d = OMP_CLAUSE_DECL (c);
5866 tree type = TREE_TYPE (d);
5867 tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
5868 tree i = create_tmp_var (TREE_TYPE (v), NULL);
5869 tree ptype = build_pointer_type (TREE_TYPE (type));
5870 tree bias = TREE_OPERAND (d, 1);
5871 d = TREE_OPERAND (d, 0);
5872 if (TREE_CODE (d) == POINTER_PLUS_EXPR)
5874 tree b = TREE_OPERAND (d, 1);
5875 b = maybe_lookup_decl (b, ctx);
5876 if (b == NULL)
5878 b = TREE_OPERAND (d, 1);
5879 b = maybe_lookup_decl_in_outer_ctx (b, ctx);
5881 if (integer_zerop (bias))
5882 bias = b;
5883 else
5885 bias = fold_convert_loc (clause_loc, TREE_TYPE (b), bias);
5886 bias = fold_build2_loc (clause_loc, PLUS_EXPR,
5887 TREE_TYPE (b), b, bias);
5889 d = TREE_OPERAND (d, 0);
5891 /* For ref, build_outer_var_ref already performs this dereference, so
5892 only new_var needs one. */
5893 if (TREE_CODE (d) == INDIRECT_REF)
5895 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
5896 gcc_assert (is_reference (var) && var == orig_var);
5898 else if (TREE_CODE (d) == ADDR_EXPR)
5900 if (orig_var == var)
5902 new_var = build_fold_addr_expr (new_var);
5903 ref = build_fold_addr_expr (ref);
5906 else
5908 gcc_assert (orig_var == var);
5909 if (is_reference (var))
5910 ref = build_fold_addr_expr (ref);
5912 if (DECL_P (v))
5914 tree t = maybe_lookup_decl (v, ctx);
5915 if (t)
5916 v = t;
5917 else
5918 v = maybe_lookup_decl_in_outer_ctx (v, ctx);
5919 gimplify_expr (&v, stmt_seqp, NULL, is_gimple_val, fb_rvalue);
5921 if (!integer_zerop (bias))
5923 bias = fold_convert_loc (clause_loc, sizetype, bias);
5924 new_var = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
5925 TREE_TYPE (new_var), new_var,
5926 unshare_expr (bias));
5927 ref = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
5928 TREE_TYPE (ref), ref, bias);
5930 new_var = fold_convert_loc (clause_loc, ptype, new_var);
5931 ref = fold_convert_loc (clause_loc, ptype, ref);
5932 tree m = create_tmp_var (ptype, NULL);
5933 gimplify_assign (m, new_var, stmt_seqp);
5934 new_var = m;
5935 m = create_tmp_var (ptype, NULL);
5936 gimplify_assign (m, ref, stmt_seqp);
5937 ref = m;
5938 gimplify_assign (i, build_int_cst (TREE_TYPE (v), 0), stmt_seqp);
5939 tree body = create_artificial_label (UNKNOWN_LOCATION);
5940 tree end = create_artificial_label (UNKNOWN_LOCATION);
5941 gimple_seq_add_stmt (&sub_seq, gimple_build_label (body));
5942 tree priv = build_simple_mem_ref_loc (clause_loc, new_var);
5943 tree out = build_simple_mem_ref_loc (clause_loc, ref);
5944 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
5946 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
5947 tree decl_placeholder
5948 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c);
5949 SET_DECL_VALUE_EXPR (placeholder, out);
5950 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
5951 SET_DECL_VALUE_EXPR (decl_placeholder, priv);
5952 DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 1;
5953 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
5954 gimple_seq_add_seq (&sub_seq,
5955 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
5956 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
5957 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
5958 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = NULL;
5960 else
5962 x = build2 (code, TREE_TYPE (out), out, priv);
5963 out = unshare_expr (out);
5964 gimplify_assign (out, x, &sub_seq);
5966 gimple *g = gimple_build_assign (new_var, POINTER_PLUS_EXPR, new_var,
5967 TYPE_SIZE_UNIT (TREE_TYPE (type)));
5968 gimple_seq_add_stmt (&sub_seq, g);
5969 g = gimple_build_assign (ref, POINTER_PLUS_EXPR, ref,
5970 TYPE_SIZE_UNIT (TREE_TYPE (type)));
5971 gimple_seq_add_stmt (&sub_seq, g);
5972 g = gimple_build_assign (i, PLUS_EXPR, i,
5973 build_int_cst (TREE_TYPE (i), 1));
5974 gimple_seq_add_stmt (&sub_seq, g);
5975 g = gimple_build_cond (LE_EXPR, i, v, body, end);
5976 gimple_seq_add_stmt (&sub_seq, g);
5977 gimple_seq_add_stmt (&sub_seq, gimple_build_label (end));
5979 else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
5981 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
5983 if (is_reference (var)
5984 && !useless_type_conversion_p (TREE_TYPE (placeholder),
5985 TREE_TYPE (ref)))
5986 ref = build_fold_addr_expr_loc (clause_loc, ref);
5987 SET_DECL_VALUE_EXPR (placeholder, ref);
5988 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
5989 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
5990 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
5991 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
5992 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
5994 else
5996 x = build2 (code, TREE_TYPE (ref), ref, new_var);
5997 ref = build_outer_var_ref (var, ctx);
5998 gimplify_assign (ref, x, &sub_seq);
6002 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
6004 gimple_seq_add_stmt (stmt_seqp, stmt);
6006 gimple_seq_add_seq (stmt_seqp, sub_seq);
6008 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
6010 gimple_seq_add_stmt (stmt_seqp, stmt);
6014 /* Generate code to implement the COPYPRIVATE clauses. */
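/* For each copyprivate variable, the thread that executed the construct
   stores the value (or its address, when the field is passed by reference)
   into the sender record; those stores go to SLIST.  Every other thread
   then loads the value back out of the record it received; those loads go
   to RLIST.  Schematically:

       SLIST:  sender.x = x;        (or sender.x = &x)
       RLIST:  x = receiver.x;

   where "sender" and "receiver" stand for the construct's data records;
   the names are illustrative only.  */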
6016 static void
6017 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
6018 omp_context *ctx)
6020 tree c;
6022 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
6024 tree var, new_var, ref, x;
6025 bool by_ref;
6026 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
6028 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
6029 continue;
6031 var = OMP_CLAUSE_DECL (c);
6032 by_ref = use_pointer_for_field (var, NULL);
6034 ref = build_sender_ref (var, ctx);
6035 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
6036 if (by_ref)
6038 x = build_fold_addr_expr_loc (clause_loc, new_var);
6039 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
6041 gimplify_assign (ref, x, slist);
6043 ref = build_receiver_ref (var, false, ctx);
6044 if (by_ref)
6046 ref = fold_convert_loc (clause_loc,
6047 build_pointer_type (TREE_TYPE (new_var)),
6048 ref);
6049 ref = build_fold_indirect_ref_loc (clause_loc, ref);
6051 if (is_reference (var))
6053 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
6054 ref = build_simple_mem_ref_loc (clause_loc, ref);
6055 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
6057 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
6058 gimplify_and_add (x, rlist);
6063 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
6064 and REDUCTION from the sender (aka parent) side. */
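/* For example, for

       #pragma omp parallel firstprivate (a) lastprivate (b)

   ILIST receives, roughly,

       .omp_data_o.a = a;

   before the region is entered, and OLIST receives

       b = .omp_data_o.b;

   after it completes.  Addresses are sent instead of values whenever
   use_pointer_for_field returns true.  (Field names are illustrative.)  */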
6066 static void
6067 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
6068 omp_context *ctx)
6070 tree c, t;
6071 int ignored_looptemp = 0;
6072 bool is_taskloop = false;
6074 /* For taskloop, ignore the first two _looptemp_ clauses; those are
6075 initialized by GOMP_taskloop. */
6076 if (is_task_ctx (ctx) && gimple_omp_task_taskloop_p (ctx->stmt))
6078 ignored_looptemp = 2;
6079 is_taskloop = true;
6082 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
6084 tree val, ref, x, var;
6085 bool by_ref, do_in = false, do_out = false;
6086 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
6088 switch (OMP_CLAUSE_CODE (c))
6090 case OMP_CLAUSE_PRIVATE:
6091 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6092 break;
6093 continue;
6094 case OMP_CLAUSE_FIRSTPRIVATE:
6095 case OMP_CLAUSE_COPYIN:
6096 case OMP_CLAUSE_LASTPRIVATE:
6097 case OMP_CLAUSE_REDUCTION:
6098 break;
6099 case OMP_CLAUSE_SHARED:
6100 if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
6101 break;
6102 continue;
6103 case OMP_CLAUSE__LOOPTEMP_:
6104 if (ignored_looptemp)
6106 ignored_looptemp--;
6107 continue;
6109 break;
6110 default:
6111 continue;
6114 val = OMP_CLAUSE_DECL (c);
6115 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
6116 && TREE_CODE (val) == MEM_REF)
6118 val = TREE_OPERAND (val, 0);
6119 if (TREE_CODE (val) == POINTER_PLUS_EXPR)
6120 val = TREE_OPERAND (val, 0);
6121 if (TREE_CODE (val) == INDIRECT_REF
6122 || TREE_CODE (val) == ADDR_EXPR)
6123 val = TREE_OPERAND (val, 0);
6124 if (is_variable_sized (val))
6125 continue;
6128 /* For OMP_CLAUSE_SHARED_FIRSTPRIVATE, look beyond the
6129 outer taskloop region. */
6130 omp_context *ctx_for_o = ctx;
6131 if (is_taskloop
6132 && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
6133 && OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
6134 ctx_for_o = ctx->outer;
6136 var = lookup_decl_in_outer_ctx (val, ctx_for_o);
6138 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
6139 && is_global_var (var))
6140 continue;
6142 t = omp_member_access_dummy_var (var);
6143 if (t)
6145 var = DECL_VALUE_EXPR (var);
6146 tree o = maybe_lookup_decl_in_outer_ctx (t, ctx_for_o);
6147 if (o != t)
6148 var = unshare_and_remap (var, t, o);
6149 else
6150 var = unshare_expr (var);
6153 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)
6155 /* Handle taskloop firstprivate/lastprivate, where the
6156 lastprivate on GIMPLE_OMP_TASK is represented as
6157 OMP_CLAUSE_SHARED_FIRSTPRIVATE. */
6158 tree f = lookup_sfield ((splay_tree_key) &DECL_UID (val), ctx);
6159 x = omp_build_component_ref (ctx->sender_decl, f);
6160 if (use_pointer_for_field (val, ctx))
6161 var = build_fold_addr_expr (var);
6162 gimplify_assign (x, var, ilist);
6163 DECL_ABSTRACT_ORIGIN (f) = NULL;
6164 continue;
6167 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
6168 || val == OMP_CLAUSE_DECL (c))
6169 && is_variable_sized (val))
6170 continue;
6171 by_ref = use_pointer_for_field (val, NULL);
6173 switch (OMP_CLAUSE_CODE (c))
6175 case OMP_CLAUSE_FIRSTPRIVATE:
6176 if (OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (c)
6177 && !by_ref
6178 && is_task_ctx (ctx))
6179 TREE_NO_WARNING (var) = 1;
6180 do_in = true;
6181 break;
6183 case OMP_CLAUSE_PRIVATE:
6184 case OMP_CLAUSE_COPYIN:
6185 case OMP_CLAUSE__LOOPTEMP_:
6186 do_in = true;
6187 break;
6189 case OMP_CLAUSE_LASTPRIVATE:
6190 if (by_ref || is_reference (val))
6192 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
6193 continue;
6194 do_in = true;
6196 else
6198 do_out = true;
6199 if (lang_hooks.decls.omp_private_outer_ref (val))
6200 do_in = true;
6202 break;
6204 case OMP_CLAUSE_REDUCTION:
6205 do_in = true;
6206 if (val == OMP_CLAUSE_DECL (c))
6207 do_out = !(by_ref || is_reference (val));
6208 else
6209 by_ref = TREE_CODE (TREE_TYPE (val)) == ARRAY_TYPE;
6210 break;
6212 default:
6213 gcc_unreachable ();
6216 if (do_in)
6218 ref = build_sender_ref (val, ctx);
6219 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
6220 gimplify_assign (ref, x, ilist);
6221 if (is_task_ctx (ctx))
6222 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
6225 if (do_out)
6227 ref = build_sender_ref (val, ctx);
6228 gimplify_assign (var, ref, olist);
6233 /* Generate code to implement SHARED from the sender (aka parent)
6234 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
6235 list things that got automatically shared. */
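/* Instead, the fields of the sender record are walked directly: for each
   implicitly shared variable V that was remapped in the child, ILIST
   receives either

       .omp_data_o.v = &v;        (when passed by reference)

   or

       .omp_data_o.v = v;

   and, in the by-value case, OLIST receives the copy-back

       v = .omp_data_o.v;

   unless V is read-only or a by-reference RESULT_DECL/PARM_DECL.  (Field
   names are illustrative.)  */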
6237 static void
6238 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
6240 tree var, ovar, nvar, t, f, x, record_type;
6242 if (ctx->record_type == NULL)
6243 return;
6245 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
6246 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
6248 ovar = DECL_ABSTRACT_ORIGIN (f);
6249 if (!ovar || TREE_CODE (ovar) == FIELD_DECL)
6250 continue;
6252 nvar = maybe_lookup_decl (ovar, ctx);
6253 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
6254 continue;
6256 /* If CTX is a nested parallel directive. Find the immediately
6257 enclosing parallel or workshare construct that contains a
6258 mapping for OVAR. */
6259 var = lookup_decl_in_outer_ctx (ovar, ctx);
6261 t = omp_member_access_dummy_var (var);
6262 if (t)
6264 var = DECL_VALUE_EXPR (var);
6265 tree o = maybe_lookup_decl_in_outer_ctx (t, ctx);
6266 if (o != t)
6267 var = unshare_and_remap (var, t, o);
6268 else
6269 var = unshare_expr (var);
6272 if (use_pointer_for_field (ovar, ctx))
6274 x = build_sender_ref (ovar, ctx);
6275 var = build_fold_addr_expr (var);
6276 gimplify_assign (x, var, ilist);
6278 else
6280 x = build_sender_ref (ovar, ctx);
6281 gimplify_assign (x, var, ilist);
6283 if (!TREE_READONLY (var)
6284 /* We don't need to receive a new reference to a result
6285 or parm decl. In fact we may not store to it as we will
6286 invalidate any pending RSO and generate wrong gimple
6287 during inlining. */
6288 && !((TREE_CODE (var) == RESULT_DECL
6289 || TREE_CODE (var) == PARM_DECL)
6290 && DECL_BY_REFERENCE (var)))
6292 x = build_sender_ref (ovar, ctx);
6293 gimplify_assign (var, x, olist);
6299 /* Emit an OpenACC head marker call, encapsulating the partitioning and
6300 other information that must be processed by the target compiler.
6301 Return the maximum number of dimensions the associated loop might
6302 be partitioned over. */
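/* The marker takes the form of an IFN_UNIQUE internal-function call,
   roughly

       .data_dep = IFN_UNIQUE (OACC_HEAD_MARK, .data_dep, LEVELS, TAG
                               [, GANG_STATIC_EXPR]);

   where TAG packs the OLF_* partitioning flags derived from the clauses.
   (Schematic only; the first operand is really the integer value of
   IFN_UNIQUE_OACC_HEAD_MARK.)  */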
6304 static unsigned
6305 lower_oacc_head_mark (location_t loc, tree ddvar, tree clauses,
6306 gimple_seq *seq, omp_context *ctx)
6308 unsigned levels = 0;
6309 unsigned tag = 0;
6310 tree gang_static = NULL_TREE;
6311 auto_vec<tree, 5> args;
6313 args.quick_push (build_int_cst
6314 (integer_type_node, IFN_UNIQUE_OACC_HEAD_MARK));
6315 args.quick_push (ddvar);
6316 for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
6318 switch (OMP_CLAUSE_CODE (c))
6320 case OMP_CLAUSE_GANG:
6321 tag |= OLF_DIM_GANG;
6322 gang_static = OMP_CLAUSE_GANG_STATIC_EXPR (c);
6323 /* static:* is represented by -1, and we can ignore it, as
6324 scheduling is always static. */
6325 if (gang_static && integer_minus_onep (gang_static))
6326 gang_static = NULL_TREE;
6327 levels++;
6328 break;
6330 case OMP_CLAUSE_WORKER:
6331 tag |= OLF_DIM_WORKER;
6332 levels++;
6333 break;
6335 case OMP_CLAUSE_VECTOR:
6336 tag |= OLF_DIM_VECTOR;
6337 levels++;
6338 break;
6340 case OMP_CLAUSE_SEQ:
6341 tag |= OLF_SEQ;
6342 break;
6344 case OMP_CLAUSE_AUTO:
6345 tag |= OLF_AUTO;
6346 break;
6348 case OMP_CLAUSE_INDEPENDENT:
6349 tag |= OLF_INDEPENDENT;
6350 break;
6352 default:
6353 continue;
6357 if (gang_static)
6359 if (DECL_P (gang_static))
6360 gang_static = build_outer_var_ref (gang_static, ctx);
6361 tag |= OLF_GANG_STATIC;
6364 /* In a parallel region, loops are implicitly INDEPENDENT. */
6365 omp_context *tgt = enclosing_target_ctx (ctx);
6366 if (!tgt || is_oacc_parallel (tgt))
6367 tag |= OLF_INDEPENDENT;
6369 /* A loop lacking SEQ, GANG, WORKER and/or VECTOR is implicitly AUTO. */
6370 if (!(tag & (((GOMP_DIM_MASK (GOMP_DIM_MAX) - 1) << OLF_DIM_BASE)
6371 | OLF_SEQ)))
6372 tag |= OLF_AUTO;
6374 /* Ensure at least one level. */
6375 if (!levels)
6376 levels++;
6378 args.quick_push (build_int_cst (integer_type_node, levels));
6379 args.quick_push (build_int_cst (integer_type_node, tag));
6380 if (gang_static)
6381 args.quick_push (gang_static);
6383 gcall *call = gimple_build_call_internal_vec (IFN_UNIQUE, args);
6384 gimple_set_location (call, loc);
6385 gimple_set_lhs (call, ddvar);
6386 gimple_seq_add_stmt (seq, call);
6388 return levels;
6391 /* Emit an OpenACC loop head or tail marker to SEQ.  TOFOLLOW, if non-NULL,
6392 is the partitioning level of the enclosed region. */
6394 static void
6395 lower_oacc_loop_marker (location_t loc, tree ddvar, bool head,
6396 tree tofollow, gimple_seq *seq)
6398 int marker_kind = (head ? IFN_UNIQUE_OACC_HEAD_MARK
6399 : IFN_UNIQUE_OACC_TAIL_MARK);
6400 tree marker = build_int_cst (integer_type_node, marker_kind);
6401 int nargs = 2 + (tofollow != NULL_TREE);
6402 gcall *call = gimple_build_call_internal (IFN_UNIQUE, nargs,
6403 marker, ddvar, tofollow);
6404 gimple_set_location (call, loc);
6405 gimple_set_lhs (call, ddvar);
6406 gimple_seq_add_stmt (seq, call);
6409 /* Generate the before and after OpenACC loop sequences. CLAUSES are
6410 the loop clauses, from which we extract reductions. Initialize
6411 HEAD and TAIL. */
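/* Each partitioning level contributes one fork/join pair: HEAD accumulates,
   outermost level first, that level's head marker, reduction setup code,
   IFN_UNIQUE OACC_FORK call and reduction init code, while TAIL
   accumulates, innermost level first, the tail marker, reduction fini code,
   OACC_JOIN call and teardown code.  Both sequences are closed off with a
   final marker.  (Rough description; see lower_oacc_reductions for the
   exact placement of the reduction pieces.)  */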
6413 static void
6414 lower_oacc_head_tail (location_t loc, tree clauses,
6415 gimple_seq *head, gimple_seq *tail, omp_context *ctx)
6417 bool inner = false;
6418 tree ddvar = create_tmp_var (integer_type_node, ".data_dep");
6419 gimple_seq_add_stmt (head, gimple_build_assign (ddvar, integer_zero_node));
6421 unsigned count = lower_oacc_head_mark (loc, ddvar, clauses, head, ctx);
6422 tree fork_kind = build_int_cst (unsigned_type_node, IFN_UNIQUE_OACC_FORK);
6423 tree join_kind = build_int_cst (unsigned_type_node, IFN_UNIQUE_OACC_JOIN);
6425 gcc_assert (count);
6426 for (unsigned done = 1; count; count--, done++)
6428 gimple_seq fork_seq = NULL;
6429 gimple_seq join_seq = NULL;
6431 tree place = build_int_cst (integer_type_node, -1);
6432 gcall *fork = gimple_build_call_internal (IFN_UNIQUE, 3,
6433 fork_kind, ddvar, place);
6434 gimple_set_location (fork, loc);
6435 gimple_set_lhs (fork, ddvar);
6437 gcall *join = gimple_build_call_internal (IFN_UNIQUE, 3,
6438 join_kind, ddvar, place);
6439 gimple_set_location (join, loc);
6440 gimple_set_lhs (join, ddvar);
6442 /* Mark the beginning of this level sequence. */
6443 if (inner)
6444 lower_oacc_loop_marker (loc, ddvar, true,
6445 build_int_cst (integer_type_node, count),
6446 &fork_seq);
6447 lower_oacc_loop_marker (loc, ddvar, false,
6448 build_int_cst (integer_type_node, done),
6449 &join_seq);
6451 lower_oacc_reductions (loc, clauses, place, inner,
6452 fork, join, &fork_seq, &join_seq, ctx);
6454 /* Append this level to head. */
6455 gimple_seq_add_seq (head, fork_seq);
6456 /* Prepend it to tail. */
6457 gimple_seq_add_seq (&join_seq, *tail);
6458 *tail = join_seq;
6460 inner = true;
6463 /* Mark the end of the sequence. */
6464 lower_oacc_loop_marker (loc, ddvar, true, NULL_TREE, head);
6465 lower_oacc_loop_marker (loc, ddvar, false, NULL_TREE, tail);
6468 /* A convenience function to build an empty GIMPLE_COND with just the
6469 condition. */
6471 static gcond *
6472 gimple_build_cond_empty (tree cond)
6474 enum tree_code pred_code;
6475 tree lhs, rhs;
6477 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
6478 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
6481 /* Return true if a parallel REGION is within a declare target function or
6482 within a target region and is not a part of a gridified target. */
6484 static bool
6485 parallel_needs_hsa_kernel_p (struct omp_region *region)
6487 bool indirect = false;
6488 for (region = region->outer; region; region = region->outer)
6490 if (region->type == GIMPLE_OMP_PARALLEL)
6491 indirect = true;
6492 else if (region->type == GIMPLE_OMP_TARGET)
6494 gomp_target *tgt_stmt
6495 = as_a <gomp_target *> (last_stmt (region->entry));
6497 if (find_omp_clause (gimple_omp_target_clauses (tgt_stmt),
6498 OMP_CLAUSE__GRIDDIM_))
6499 return indirect;
6500 else
6501 return true;
6505 if (lookup_attribute ("omp declare target",
6506 DECL_ATTRIBUTES (current_function_decl)))
6507 return true;
6509 return false;
6512 static void expand_omp_build_assign (gimple_stmt_iterator *, tree, tree,
6513 bool = false);
6515 /* Build the function calls to GOMP_parallel_start etc to actually
6516 generate the parallel operation. REGION is the parallel region
6517 being expanded. BB is the block where the code is to be inserted. WS_ARGS
6518 will be set if this is a call to a combined parallel+workshare
6519 construct, it contains the list of additional arguments needed by
6520 the workshare construct. */
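/* For instance,

       #pragma omp parallel num_threads (4)

   expands, roughly, into

       GOMP_parallel (child_fn, &.omp_data_o, 4, 0);

   whereas a combined parallel loop or parallel sections region calls one of
   the GOMP_parallel_loop_* or GOMP_parallel_sections entry points with the
   WS_ARGS spliced in between the thread count and the flags.
   (Illustrative; the actual thread count and flags depend on the IF,
   NUM_THREADS and PROC_BIND clauses.)  */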
6522 static void
6523 expand_parallel_call (struct omp_region *region, basic_block bb,
6524 gomp_parallel *entry_stmt,
6525 vec<tree, va_gc> *ws_args)
6527 tree t, t1, t2, val, cond, c, clauses, flags;
6528 gimple_stmt_iterator gsi;
6529 gimple *stmt;
6530 enum built_in_function start_ix;
6531 int start_ix2;
6532 location_t clause_loc;
6533 vec<tree, va_gc> *args;
6535 clauses = gimple_omp_parallel_clauses (entry_stmt);
6537 /* Determine what flavor of GOMP_parallel we will be
6538 emitting. */
6539 start_ix = BUILT_IN_GOMP_PARALLEL;
6540 if (is_combined_parallel (region))
6542 switch (region->inner->type)
6544 case GIMPLE_OMP_FOR:
6545 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
6546 switch (region->inner->sched_kind)
6548 case OMP_CLAUSE_SCHEDULE_RUNTIME:
6549 start_ix2 = 3;
6550 break;
6551 case OMP_CLAUSE_SCHEDULE_DYNAMIC:
6552 case OMP_CLAUSE_SCHEDULE_GUIDED:
6553 if (region->inner->sched_modifiers
6554 & OMP_CLAUSE_SCHEDULE_NONMONOTONIC)
6556 start_ix2 = 3 + region->inner->sched_kind;
6557 break;
6559 /* FALLTHRU */
6560 default:
6561 start_ix2 = region->inner->sched_kind;
6562 break;
6564 start_ix2 += (int) BUILT_IN_GOMP_PARALLEL_LOOP_STATIC;
6565 start_ix = (enum built_in_function) start_ix2;
6566 break;
6567 case GIMPLE_OMP_SECTIONS:
6568 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
6569 break;
6570 default:
6571 gcc_unreachable ();
6575 /* By default, the value of NUM_THREADS is zero (selected at run time)
6576 and there is no conditional. */
6577 cond = NULL_TREE;
6578 val = build_int_cst (unsigned_type_node, 0);
6579 flags = build_int_cst (unsigned_type_node, 0);
6581 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
6582 if (c)
6583 cond = OMP_CLAUSE_IF_EXPR (c);
6585 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
6586 if (c)
6588 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
6589 clause_loc = OMP_CLAUSE_LOCATION (c);
6591 else
6592 clause_loc = gimple_location (entry_stmt);
6594 c = find_omp_clause (clauses, OMP_CLAUSE_PROC_BIND);
6595 if (c)
6596 flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));
6598 /* Ensure 'val' is of the correct type. */
6599 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
6601 /* If we found the clause 'if (cond)', build either
6602 (cond != 0) or (cond ? val : 1u). */
6603 if (cond)
6605 cond = gimple_boolify (cond);
6607 if (integer_zerop (val))
6608 val = fold_build2_loc (clause_loc,
6609 EQ_EXPR, unsigned_type_node, cond,
6610 build_int_cst (TREE_TYPE (cond), 0));
6611 else
6613 basic_block cond_bb, then_bb, else_bb;
6614 edge e, e_then, e_else;
6615 tree tmp_then, tmp_else, tmp_join, tmp_var;
6617 tmp_var = create_tmp_var (TREE_TYPE (val));
6618 if (gimple_in_ssa_p (cfun))
6620 tmp_then = make_ssa_name (tmp_var);
6621 tmp_else = make_ssa_name (tmp_var);
6622 tmp_join = make_ssa_name (tmp_var);
6624 else
6626 tmp_then = tmp_var;
6627 tmp_else = tmp_var;
6628 tmp_join = tmp_var;
6631 e = split_block_after_labels (bb);
6632 cond_bb = e->src;
6633 bb = e->dest;
6634 remove_edge (e);
6636 then_bb = create_empty_bb (cond_bb);
6637 else_bb = create_empty_bb (then_bb);
6638 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
6639 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
6641 stmt = gimple_build_cond_empty (cond);
6642 gsi = gsi_start_bb (cond_bb);
6643 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6645 gsi = gsi_start_bb (then_bb);
6646 expand_omp_build_assign (&gsi, tmp_then, val, true);
6648 gsi = gsi_start_bb (else_bb);
6649 expand_omp_build_assign (&gsi, tmp_else,
6650 build_int_cst (unsigned_type_node, 1),
6651 true);
6653 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
6654 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
6655 add_bb_to_loop (then_bb, cond_bb->loop_father);
6656 add_bb_to_loop (else_bb, cond_bb->loop_father);
6657 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
6658 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
6660 if (gimple_in_ssa_p (cfun))
6662 gphi *phi = create_phi_node (tmp_join, bb);
6663 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
6664 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
6667 val = tmp_join;
6670 gsi = gsi_start_bb (bb);
6671 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
6672 false, GSI_CONTINUE_LINKING);
6675 gsi = gsi_last_bb (bb);
6676 t = gimple_omp_parallel_data_arg (entry_stmt);
6677 if (t == NULL)
6678 t1 = null_pointer_node;
6679 else
6680 t1 = build_fold_addr_expr (t);
6681 tree child_fndecl = gimple_omp_parallel_child_fn (entry_stmt);
6682 t2 = build_fold_addr_expr (child_fndecl);
6684 vec_alloc (args, 4 + vec_safe_length (ws_args));
6685 args->quick_push (t2);
6686 args->quick_push (t1);
6687 args->quick_push (val);
6688 if (ws_args)
6689 args->splice (*ws_args);
6690 args->quick_push (flags);
6692 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
6693 builtin_decl_explicit (start_ix), args);
6695 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6696 false, GSI_CONTINUE_LINKING);
6698 if (hsa_gen_requested_p ()
6699 && parallel_needs_hsa_kernel_p (region))
6701 cgraph_node *child_cnode = cgraph_node::get (child_fndecl);
6702 hsa_register_kernel (child_cnode);
6706 /* Insert a function call whose name is FUNC_NAME with the information from
6707 ENTRY_STMT into the basic_block BB. */
6709 static void
6710 expand_cilk_for_call (basic_block bb, gomp_parallel *entry_stmt,
6711 vec <tree, va_gc> *ws_args)
6713 tree t, t1, t2;
6714 gimple_stmt_iterator gsi;
6715 vec <tree, va_gc> *args;
6717 gcc_assert (vec_safe_length (ws_args) == 2);
6718 tree func_name = (*ws_args)[0];
6719 tree grain = (*ws_args)[1];
6721 tree clauses = gimple_omp_parallel_clauses (entry_stmt);
6722 tree count = find_omp_clause (clauses, OMP_CLAUSE__CILK_FOR_COUNT_);
6723 gcc_assert (count != NULL_TREE);
6724 count = OMP_CLAUSE_OPERAND (count, 0);
6726 gsi = gsi_last_bb (bb);
6727 t = gimple_omp_parallel_data_arg (entry_stmt);
6728 if (t == NULL)
6729 t1 = null_pointer_node;
6730 else
6731 t1 = build_fold_addr_expr (t);
6732 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
6734 vec_alloc (args, 4);
6735 args->quick_push (t2);
6736 args->quick_push (t1);
6737 args->quick_push (count);
6738 args->quick_push (grain);
6739 t = build_call_expr_loc_vec (UNKNOWN_LOCATION, func_name, args);
6741 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false,
6742 GSI_CONTINUE_LINKING);
6745 /* Build the function call to GOMP_task to actually
6746 generate the task operation. BB is the block where the code is to be inserted. */
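/* A plain task becomes, roughly,

       GOMP_task (child_fn, &.omp_data_o, copy_fn, arg_size, arg_align,
                  if_cond, flags, depend, priority);

   while a taskloop instead calls GOMP_taskloop or GOMP_taskloop_ull,
   passing the num_tasks/grainsize value and the start, end and step of the
   iteration space.  (Sketch; the argument names above are descriptive, not
   the actual decls.)  */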
6748 static void
6749 expand_task_call (struct omp_region *region, basic_block bb,
6750 gomp_task *entry_stmt)
6752 tree t1, t2, t3;
6753 gimple_stmt_iterator gsi;
6754 location_t loc = gimple_location (entry_stmt);
6756 tree clauses = gimple_omp_task_clauses (entry_stmt);
6758 tree ifc = find_omp_clause (clauses, OMP_CLAUSE_IF);
6759 tree untied = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
6760 tree mergeable = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
6761 tree depend = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
6762 tree finalc = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
6763 tree priority = find_omp_clause (clauses, OMP_CLAUSE_PRIORITY);
6765 unsigned int iflags
6766 = (untied ? GOMP_TASK_FLAG_UNTIED : 0)
6767 | (mergeable ? GOMP_TASK_FLAG_MERGEABLE : 0)
6768 | (depend ? GOMP_TASK_FLAG_DEPEND : 0);
6770 bool taskloop_p = gimple_omp_task_taskloop_p (entry_stmt);
6771 tree startvar = NULL_TREE, endvar = NULL_TREE, step = NULL_TREE;
6772 tree num_tasks = NULL_TREE;
6773 bool ull = false;
6774 if (taskloop_p)
6776 gimple *g = last_stmt (region->outer->entry);
6777 gcc_assert (gimple_code (g) == GIMPLE_OMP_FOR
6778 && gimple_omp_for_kind (g) == GF_OMP_FOR_KIND_TASKLOOP);
6779 struct omp_for_data fd;
6780 extract_omp_for_data (as_a <gomp_for *> (g), &fd, NULL);
6781 startvar = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6782 endvar = find_omp_clause (OMP_CLAUSE_CHAIN (startvar),
6783 OMP_CLAUSE__LOOPTEMP_);
6784 startvar = OMP_CLAUSE_DECL (startvar);
6785 endvar = OMP_CLAUSE_DECL (endvar);
6786 step = fold_convert_loc (loc, fd.iter_type, fd.loop.step);
6787 if (fd.loop.cond_code == LT_EXPR)
6788 iflags |= GOMP_TASK_FLAG_UP;
6789 tree tclauses = gimple_omp_for_clauses (g);
6790 num_tasks = find_omp_clause (tclauses, OMP_CLAUSE_NUM_TASKS);
6791 if (num_tasks)
6792 num_tasks = OMP_CLAUSE_NUM_TASKS_EXPR (num_tasks);
6793 else
6795 num_tasks = find_omp_clause (tclauses, OMP_CLAUSE_GRAINSIZE);
6796 if (num_tasks)
6798 iflags |= GOMP_TASK_FLAG_GRAINSIZE;
6799 num_tasks = OMP_CLAUSE_GRAINSIZE_EXPR (num_tasks);
6801 else
6802 num_tasks = integer_zero_node;
6804 num_tasks = fold_convert_loc (loc, long_integer_type_node, num_tasks);
6805 if (ifc == NULL_TREE)
6806 iflags |= GOMP_TASK_FLAG_IF;
6807 if (find_omp_clause (tclauses, OMP_CLAUSE_NOGROUP))
6808 iflags |= GOMP_TASK_FLAG_NOGROUP;
6809 ull = fd.iter_type == long_long_unsigned_type_node;
6811 else if (priority)
6812 iflags |= GOMP_TASK_FLAG_PRIORITY;
6814 tree flags = build_int_cst (unsigned_type_node, iflags);
6816 tree cond = boolean_true_node;
6817 if (ifc)
6819 if (taskloop_p)
6821 tree t = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc));
6822 t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t,
6823 build_int_cst (unsigned_type_node,
6824 GOMP_TASK_FLAG_IF),
6825 build_int_cst (unsigned_type_node, 0));
6826 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node,
6827 flags, t);
6829 else
6830 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc));
6833 if (finalc)
6835 tree t = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (finalc));
6836 t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t,
6837 build_int_cst (unsigned_type_node,
6838 GOMP_TASK_FLAG_FINAL),
6839 build_int_cst (unsigned_type_node, 0));
6840 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, t);
6842 if (depend)
6843 depend = OMP_CLAUSE_DECL (depend);
6844 else
6845 depend = build_int_cst (ptr_type_node, 0);
6846 if (priority)
6847 priority = fold_convert (integer_type_node,
6848 OMP_CLAUSE_PRIORITY_EXPR (priority));
6849 else
6850 priority = integer_zero_node;
6852 gsi = gsi_last_bb (bb);
6853 tree t = gimple_omp_task_data_arg (entry_stmt);
6854 if (t == NULL)
6855 t2 = null_pointer_node;
6856 else
6857 t2 = build_fold_addr_expr_loc (loc, t);
6858 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
6859 t = gimple_omp_task_copy_fn (entry_stmt);
6860 if (t == NULL)
6861 t3 = null_pointer_node;
6862 else
6863 t3 = build_fold_addr_expr_loc (loc, t);
6865 if (taskloop_p)
6866 t = build_call_expr (ull
6867 ? builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP_ULL)
6868 : builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP),
6869 11, t1, t2, t3,
6870 gimple_omp_task_arg_size (entry_stmt),
6871 gimple_omp_task_arg_align (entry_stmt), flags,
6872 num_tasks, priority, startvar, endvar, step);
6873 else
6874 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
6875 9, t1, t2, t3,
6876 gimple_omp_task_arg_size (entry_stmt),
6877 gimple_omp_task_arg_align (entry_stmt), cond, flags,
6878 depend, priority);
6880 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6881 false, GSI_CONTINUE_LINKING);
6885 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
6886 catch handler and return it. This prevents programs from violating the
6887 structured block semantics with throws. */
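/* The result is, schematically,

       try
         {
           BODY
         }
       catch
         {
           <eh_must_not_throw (terminate-like function)>
         }

   where the function is lang_hooks.eh_protect_cleanup_actions () if the
   language provides one, and __builtin_trap otherwise.  */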
6889 static gimple_seq
6890 maybe_catch_exception (gimple_seq body)
6892 gimple *g;
6893 tree decl;
6895 if (!flag_exceptions)
6896 return body;
6898 if (lang_hooks.eh_protect_cleanup_actions != NULL)
6899 decl = lang_hooks.eh_protect_cleanup_actions ();
6900 else
6901 decl = builtin_decl_explicit (BUILT_IN_TRAP);
6903 g = gimple_build_eh_must_not_throw (decl);
6904 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
6905 GIMPLE_TRY_CATCH);
6907 return gimple_seq_alloc_with_stmt (g);
6910 /* Chain all the DECLs in V by their DECL_CHAIN fields. */
6912 static tree
6913 vec2chain (vec<tree, va_gc> *v)
6915 tree chain = NULL_TREE, t;
6916 unsigned ix;
6918 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
6920 DECL_CHAIN (t) = chain;
6921 chain = t;
6924 return chain;
6928 /* Remove barriers in REGION->EXIT's block. Note that this is only
6929 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
6930 is an implicit barrier, any barrier that a workshare inside the
6931 GIMPLE_OMP_PARALLEL left at the end of the GIMPLE_OMP_PARALLEL region can
6932 now be removed. */
6934 static void
6935 remove_exit_barrier (struct omp_region *region)
6937 gimple_stmt_iterator gsi;
6938 basic_block exit_bb;
6939 edge_iterator ei;
6940 edge e;
6941 gimple *stmt;
6942 int any_addressable_vars = -1;
6944 exit_bb = region->exit;
6946 /* If the parallel region doesn't return, we don't have REGION->EXIT
6947 block at all. */
6948 if (! exit_bb)
6949 return;
6951 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
6952 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
6953 statements that can appear in between are extremely limited -- no
6954 memory operations at all. Here, we allow nothing at all, so the
6955 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
6956 gsi = gsi_last_bb (exit_bb);
6957 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
6958 gsi_prev (&gsi);
6959 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
6960 return;
6962 FOR_EACH_EDGE (e, ei, exit_bb->preds)
6964 gsi = gsi_last_bb (e->src);
6965 if (gsi_end_p (gsi))
6966 continue;
6967 stmt = gsi_stmt (gsi);
6968 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
6969 && !gimple_omp_return_nowait_p (stmt))
6971 /* OpenMP 3.0 tasks unfortunately prevent this optimization
6972 in many cases. If there could be tasks queued, the barrier
6973 might be needed to let the tasks run before some local
6974 variable of the parallel that the task uses as shared
6975 runs out of scope. The task can be spawned either
6976 from within current function (this would be easy to check)
6977 or from some function it calls and gets passed an address
6978 of such a variable. */
6979 if (any_addressable_vars < 0)
6981 gomp_parallel *parallel_stmt
6982 = as_a <gomp_parallel *> (last_stmt (region->entry));
6983 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
6984 tree local_decls, block, decl;
6985 unsigned ix;
6987 any_addressable_vars = 0;
6988 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
6989 if (TREE_ADDRESSABLE (decl))
6991 any_addressable_vars = 1;
6992 break;
6994 for (block = gimple_block (stmt);
6995 !any_addressable_vars
6996 && block
6997 && TREE_CODE (block) == BLOCK;
6998 block = BLOCK_SUPERCONTEXT (block))
7000 for (local_decls = BLOCK_VARS (block);
7001 local_decls;
7002 local_decls = DECL_CHAIN (local_decls))
7003 if (TREE_ADDRESSABLE (local_decls))
7005 any_addressable_vars = 1;
7006 break;
7008 if (block == gimple_block (parallel_stmt))
7009 break;
7012 if (!any_addressable_vars)
7013 gimple_omp_return_set_nowait (stmt);
7018 static void
7019 remove_exit_barriers (struct omp_region *region)
7021 if (region->type == GIMPLE_OMP_PARALLEL)
7022 remove_exit_barrier (region);
7024 if (region->inner)
7026 region = region->inner;
7027 remove_exit_barriers (region);
7028 while (region->next)
7030 region = region->next;
7031 remove_exit_barriers (region);
7036 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
7037 calls. These can't be declared as const functions, but
7038 within one parallel body they are constant, so they can be
7039 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
7040 which are declared const. Similarly for task body, except
7041 that in untied task omp_get_thread_num () can change at any task
7042 scheduling point. */
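/* For example, a call

       n = omp_get_num_threads ();

   found inside a parallel body is redirected to the const
   __builtin_omp_get_num_threads (), which later passes can then CSE.  */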
7044 static void
7045 optimize_omp_library_calls (gimple *entry_stmt)
7047 basic_block bb;
7048 gimple_stmt_iterator gsi;
7049 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
7050 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
7051 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
7052 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
7053 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
7054 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
7055 OMP_CLAUSE_UNTIED) != NULL);
7057 FOR_EACH_BB_FN (bb, cfun)
7058 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
7060 gimple *call = gsi_stmt (gsi);
7061 tree decl;
7063 if (is_gimple_call (call)
7064 && (decl = gimple_call_fndecl (call))
7065 && DECL_EXTERNAL (decl)
7066 && TREE_PUBLIC (decl)
7067 && DECL_INITIAL (decl) == NULL)
7069 tree built_in;
7071 if (DECL_NAME (decl) == thr_num_id)
7073 /* In #pragma omp task untied omp_get_thread_num () can change
7074 during the execution of the task region. */
7075 if (untied_task)
7076 continue;
7077 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
7079 else if (DECL_NAME (decl) == num_thr_id)
7080 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
7081 else
7082 continue;
7084 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
7085 || gimple_call_num_args (call) != 0)
7086 continue;
7088 if (flag_exceptions && !TREE_NOTHROW (decl))
7089 continue;
7091 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
7092 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
7093 TREE_TYPE (TREE_TYPE (built_in))))
7094 continue;
7096 gimple_call_set_fndecl (call, built_in);
7101 /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
7102 regimplified. */
7104 static tree
7105 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
7107 tree t = *tp;
7109 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
7110 if (VAR_P (t) && DECL_HAS_VALUE_EXPR_P (t))
7111 return t;
7113 if (TREE_CODE (t) == ADDR_EXPR)
7114 recompute_tree_invariant_for_addr_expr (t);
7116 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
7117 return NULL_TREE;
7120 /* Prepend or append TO = FROM assignment before or after *GSI_P. */
7122 static void
7123 expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from,
7124 bool after)
7126 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
7127 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
7128 !after, after ? GSI_CONTINUE_LINKING
7129 : GSI_SAME_STMT);
7130 gimple *stmt = gimple_build_assign (to, from);
7131 if (after)
7132 gsi_insert_after (gsi_p, stmt, GSI_CONTINUE_LINKING);
7133 else
7134 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
7135 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
7136 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
7138 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
7139 gimple_regimplify_operands (stmt, &gsi);
7143 /* Expand the OpenMP parallel or task directive starting at REGION. */
7145 static void
7146 expand_omp_taskreg (struct omp_region *region)
7148 basic_block entry_bb, exit_bb, new_bb;
7149 struct function *child_cfun;
7150 tree child_fn, block, t;
7151 gimple_stmt_iterator gsi;
7152 gimple *entry_stmt, *stmt;
7153 edge e;
7154 vec<tree, va_gc> *ws_args;
7156 entry_stmt = last_stmt (region->entry);
7157 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
7158 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
7160 entry_bb = region->entry;
7161 if (gimple_code (entry_stmt) == GIMPLE_OMP_TASK)
7162 exit_bb = region->cont;
7163 else
7164 exit_bb = region->exit;
7166 bool is_cilk_for
7167 = (flag_cilkplus
7168 && gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL
7169 && find_omp_clause (gimple_omp_parallel_clauses (entry_stmt),
7170 OMP_CLAUSE__CILK_FOR_COUNT_) != NULL_TREE);
7172 if (is_cilk_for)
7173 /* If it is a _Cilk_for statement, it is modelled *like* a parallel for,
7174 and the inner statement contains the name of the built-in function
7175 and grain. */
7176 ws_args = region->inner->ws_args;
7177 else if (is_combined_parallel (region))
7178 ws_args = region->ws_args;
7179 else
7180 ws_args = NULL;
7182 if (child_cfun->cfg)
7184 /* Due to inlining, it may happen that we have already outlined
7185 the region, in which case all we need to do is make the
7186 sub-graph unreachable and emit the parallel call. */
7187 edge entry_succ_e, exit_succ_e;
7189 entry_succ_e = single_succ_edge (entry_bb);
7191 gsi = gsi_last_bb (entry_bb);
7192 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
7193 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
7194 gsi_remove (&gsi, true);
7196 new_bb = entry_bb;
7197 if (exit_bb)
7199 exit_succ_e = single_succ_edge (exit_bb);
7200 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
7202 remove_edge_and_dominated_blocks (entry_succ_e);
7204 else
7206 unsigned srcidx, dstidx, num;
7208 /* If the parallel region needs data sent from the parent
7209 function, then the very first statement (except possible
7210 tree profile counter updates) of the parallel body
7211 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
7212 &.OMP_DATA_O is passed as an argument to the child function,
7213 we need to replace it with the argument as seen by the child
7214 function.
7216 In most cases, this will end up being the identity assignment
7217 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
7218 a function call that has been inlined, the original PARM_DECL
7219 .OMP_DATA_I may have been converted into a different local
7220 variable. In which case, we need to keep the assignment. */
7221 if (gimple_omp_taskreg_data_arg (entry_stmt))
7223 basic_block entry_succ_bb
7224 = single_succ_p (entry_bb) ? single_succ (entry_bb)
7225 : FALLTHRU_EDGE (entry_bb)->dest;
7226 tree arg;
7227 gimple *parcopy_stmt = NULL;
7229 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
7231 gimple *stmt;
7233 gcc_assert (!gsi_end_p (gsi));
7234 stmt = gsi_stmt (gsi);
7235 if (gimple_code (stmt) != GIMPLE_ASSIGN)
7236 continue;
7238 if (gimple_num_ops (stmt) == 2)
7240 tree arg = gimple_assign_rhs1 (stmt);
7242 /* We're ignoring the subcode because we're
7243 effectively doing a STRIP_NOPS. */
7245 if (TREE_CODE (arg) == ADDR_EXPR
7246 && TREE_OPERAND (arg, 0)
7247 == gimple_omp_taskreg_data_arg (entry_stmt))
7249 parcopy_stmt = stmt;
7250 break;
7255 gcc_assert (parcopy_stmt != NULL);
7256 arg = DECL_ARGUMENTS (child_fn);
7258 if (!gimple_in_ssa_p (cfun))
7260 if (gimple_assign_lhs (parcopy_stmt) == arg)
7261 gsi_remove (&gsi, true);
7262 else
7264 /* ?? Is setting the subcode really necessary ?? */
7265 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
7266 gimple_assign_set_rhs1 (parcopy_stmt, arg);
7269 else
7271 tree lhs = gimple_assign_lhs (parcopy_stmt);
7272 gcc_assert (SSA_NAME_VAR (lhs) == arg);
7273 /* We'd like to set the rhs to the default def in the child_fn,
7274 but it's too early to create ssa names in the child_fn.
7275 Instead, we set the rhs to the parm. In
7276 move_sese_region_to_fn, we introduce a default def for the
7277 parm, map the parm to its default def, and once we encounter
7278 this stmt, replace the parm with the default def. */
7279 gimple_assign_set_rhs1 (parcopy_stmt, arg);
7280 update_stmt (parcopy_stmt);
7284 /* Declare local variables needed in CHILD_CFUN. */
7285 block = DECL_INITIAL (child_fn);
7286 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
7287 /* The gimplifier could record temporaries in parallel/task block
7288 rather than in containing function's local_decls chain,
7289 which would mean cgraph missed finalizing them. Do it now. */
7290 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
7291 if (VAR_P (t) && TREE_STATIC (t) && !DECL_EXTERNAL (t))
7292 varpool_node::finalize_decl (t);
7293 DECL_SAVED_TREE (child_fn) = NULL;
7294 /* We'll create a CFG for child_fn, so no gimple body is needed. */
7295 gimple_set_body (child_fn, NULL);
7296 TREE_USED (block) = 1;
7298 /* Reset DECL_CONTEXT on function arguments. */
7299 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
7300 DECL_CONTEXT (t) = child_fn;
7302 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
7303 so that it can be moved to the child function. */
7304 gsi = gsi_last_bb (entry_bb);
7305 stmt = gsi_stmt (gsi);
7306 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
7307 || gimple_code (stmt) == GIMPLE_OMP_TASK));
7308 e = split_block (entry_bb, stmt);
7309 gsi_remove (&gsi, true);
7310 entry_bb = e->dest;
7311 edge e2 = NULL;
7312 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
7313 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7314 else
7316 e2 = make_edge (e->src, BRANCH_EDGE (entry_bb)->dest, EDGE_ABNORMAL);
7317 gcc_assert (e2->dest == region->exit);
7318 remove_edge (BRANCH_EDGE (entry_bb));
7319 set_immediate_dominator (CDI_DOMINATORS, e2->dest, e->src);
7320 gsi = gsi_last_bb (region->exit);
7321 gcc_assert (!gsi_end_p (gsi)
7322 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
7323 gsi_remove (&gsi, true);
7326 /* Convert GIMPLE_OMP_{RETURN,CONTINUE} into a RETURN_EXPR. */
7327 if (exit_bb)
7329 gsi = gsi_last_bb (exit_bb);
7330 gcc_assert (!gsi_end_p (gsi)
7331 && (gimple_code (gsi_stmt (gsi))
7332 == (e2 ? GIMPLE_OMP_CONTINUE : GIMPLE_OMP_RETURN)));
7333 stmt = gimple_build_return (NULL);
7334 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
7335 gsi_remove (&gsi, true);
7338 /* Move the parallel region into CHILD_CFUN. */
7340 if (gimple_in_ssa_p (cfun))
7342 init_tree_ssa (child_cfun);
7343 init_ssa_operands (child_cfun);
7344 child_cfun->gimple_df->in_ssa_p = true;
7345 block = NULL_TREE;
7347 else
7348 block = gimple_block (entry_stmt);
7350 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
7351 if (exit_bb)
7352 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
7353 if (e2)
7355 basic_block dest_bb = e2->dest;
7356 if (!exit_bb)
7357 make_edge (new_bb, dest_bb, EDGE_FALLTHRU);
7358 remove_edge (e2);
7359 set_immediate_dominator (CDI_DOMINATORS, dest_bb, new_bb);
7361 /* When the OMP expansion process cannot guarantee an up-to-date
7362 loop tree arrange for the child function to fixup loops. */
7363 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
7364 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
7366 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
7367 num = vec_safe_length (child_cfun->local_decls);
7368 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
7370 t = (*child_cfun->local_decls)[srcidx];
7371 if (DECL_CONTEXT (t) == cfun->decl)
7372 continue;
7373 if (srcidx != dstidx)
7374 (*child_cfun->local_decls)[dstidx] = t;
7375 dstidx++;
7377 if (dstidx != num)
7378 vec_safe_truncate (child_cfun->local_decls, dstidx);
7380 /* Inform the callgraph about the new function. */
7381 child_cfun->curr_properties = cfun->curr_properties;
7382 child_cfun->has_simduid_loops |= cfun->has_simduid_loops;
7383 child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops;
7384 cgraph_node *node = cgraph_node::get_create (child_fn);
7385 node->parallelized_function = 1;
7386 cgraph_node::add_new_function (child_fn, true);
7388 bool need_asm = DECL_ASSEMBLER_NAME_SET_P (current_function_decl)
7389 && !DECL_ASSEMBLER_NAME_SET_P (child_fn);
7391 /* Fix the callgraph edges for child_cfun. Those for cfun will be
7392 fixed in a following pass. */
7393 push_cfun (child_cfun);
7394 if (need_asm)
7395 assign_assembler_name_if_neeeded (child_fn);
7397 if (optimize)
7398 optimize_omp_library_calls (entry_stmt);
7399 cgraph_edge::rebuild_edges ();
7401 /* Some EH regions might become dead, see PR34608. If
7402 pass_cleanup_cfg isn't the first pass to happen with the
7403 new child, these dead EH edges might cause problems.
7404 Clean them up now. */
7405 if (flag_exceptions)
7407 basic_block bb;
7408 bool changed = false;
7410 FOR_EACH_BB_FN (bb, cfun)
7411 changed |= gimple_purge_dead_eh_edges (bb);
7412 if (changed)
7413 cleanup_tree_cfg ();
7415 if (gimple_in_ssa_p (cfun))
7416 update_ssa (TODO_update_ssa);
7417 if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
7418 verify_loop_structure ();
7419 pop_cfun ();
7421 if (dump_file && !gimple_in_ssa_p (cfun))
7423 omp_any_child_fn_dumped = true;
7424 dump_function_header (dump_file, child_fn, dump_flags);
7425 dump_function_to_file (child_fn, dump_file, dump_flags);
7429 /* Emit a library call to launch the children threads. */
7430 if (is_cilk_for)
7431 expand_cilk_for_call (new_bb,
7432 as_a <gomp_parallel *> (entry_stmt), ws_args);
7433 else if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
7434 expand_parallel_call (region, new_bb,
7435 as_a <gomp_parallel *> (entry_stmt), ws_args);
7436 else
7437 expand_task_call (region, new_bb, as_a <gomp_task *> (entry_stmt));
7438 if (gimple_in_ssa_p (cfun))
7439 update_ssa (TODO_update_ssa_only_virtuals);
7442 /* Information about members of an OpenACC collapsed loop nest. */
7444 struct oacc_collapse
7446 tree base; /* Base value. */
7447 tree iters; /* Number of steps. */
7448 tree step; /* Step size. */
7451 /* Helper for expand_oacc_for. Determine collapsed loop information.
7452 Fill in COUNTS array. Emit any initialization code before GSI.
7453 Return the calculated outer loop bound of BOUND_TYPE. */
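/* For each member loop the iteration count is computed, roughly, as

       range = e - b;                        (in a signed difference type)
       iters = (range - dir + step) / step;

   where DIR is +1 for an upward loop and -1 otherwise, and the returned
   bound is the product of the per-loop ITERS values.  (Schematic; the
   actual code folds these expressions with the required signed/unsigned
   conversions.)  */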
7455 static tree
7456 expand_oacc_collapse_init (const struct omp_for_data *fd,
7457 gimple_stmt_iterator *gsi,
7458 oacc_collapse *counts, tree bound_type)
7460 tree total = build_int_cst (bound_type, 1);
7461 int ix;
7463 gcc_assert (integer_onep (fd->loop.step));
7464 gcc_assert (integer_zerop (fd->loop.n1));
7466 for (ix = 0; ix != fd->collapse; ix++)
7468 const omp_for_data_loop *loop = &fd->loops[ix];
7470 tree iter_type = TREE_TYPE (loop->v);
7471 tree diff_type = iter_type;
7472 tree plus_type = iter_type;
7474 gcc_assert (loop->cond_code == fd->loop.cond_code);
7476 if (POINTER_TYPE_P (iter_type))
7477 plus_type = sizetype;
7478 if (POINTER_TYPE_P (diff_type) || TYPE_UNSIGNED (diff_type))
7479 diff_type = signed_type_for (diff_type);
7481 tree b = loop->n1;
7482 tree e = loop->n2;
7483 tree s = loop->step;
7484 bool up = loop->cond_code == LT_EXPR;
7485 tree dir = build_int_cst (diff_type, up ? +1 : -1);
7486 bool negating;
7487 tree expr;
7489 b = force_gimple_operand_gsi (gsi, b, true, NULL_TREE,
7490 true, GSI_SAME_STMT);
7491 e = force_gimple_operand_gsi (gsi, e, true, NULL_TREE,
7492 true, GSI_SAME_STMT);
7494 /* Convert the step, avoiding possible unsigned->signed overflow. */
7495 negating = !up && TYPE_UNSIGNED (TREE_TYPE (s));
7496 if (negating)
7497 s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s);
7498 s = fold_convert (diff_type, s);
7499 if (negating)
7500 s = fold_build1 (NEGATE_EXPR, diff_type, s);
7501 s = force_gimple_operand_gsi (gsi, s, true, NULL_TREE,
7502 true, GSI_SAME_STMT);
7504 /* Determine the range, avoiding possible unsigned->signed overflow. */
7505 negating = !up && TYPE_UNSIGNED (iter_type);
7506 expr = fold_build2 (MINUS_EXPR, plus_type,
7507 fold_convert (plus_type, negating ? b : e),
7508 fold_convert (plus_type, negating ? e : b));
7509 expr = fold_convert (diff_type, expr);
7510 if (negating)
7511 expr = fold_build1 (NEGATE_EXPR, diff_type, expr);
7512 tree range = force_gimple_operand_gsi
7513 (gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT);
7515 /* Determine number of iterations. */
7516 expr = fold_build2 (MINUS_EXPR, diff_type, range, dir);
7517 expr = fold_build2 (PLUS_EXPR, diff_type, expr, s);
7518 expr = fold_build2 (TRUNC_DIV_EXPR, diff_type, expr, s);
7520 tree iters = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE,
7521 true, GSI_SAME_STMT);
7523 counts[ix].base = b;
7524 counts[ix].iters = iters;
7525 counts[ix].step = s;
7527 total = fold_build2 (MULT_EXPR, bound_type, total,
7528 fold_convert (bound_type, iters));
7531 return total;
7534 /* Emit initializers for collapsed loop members. IVAR is the outer
7535 loop iteration variable, from which collapsed loop iteration values
7536 are calculated. COUNTS array has been initialized by
7537 expand_oacc_collapse_init. */
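/* E.g. for a collapse(2) nest the member induction variables are recovered
   from the single variable IVAR roughly as

       v1 = base1 + (ivar % iters1) * step1;        (innermost loop)
       ivar = ivar / iters1;
       v0 = base0 + (ivar % iters0) * step0;        (outermost loop)

   i.e. innermost first, using POINTER_PLUS_EXPR for pointer-typed
   iteration variables.  (The names above are illustrative.)  */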
7539 static void
7540 expand_oacc_collapse_vars (const struct omp_for_data *fd,
7541 gimple_stmt_iterator *gsi,
7542 const oacc_collapse *counts, tree ivar)
7544 tree ivar_type = TREE_TYPE (ivar);
7546 /* The most rapidly changing iteration variable is the innermost
7547 one. */
7548 for (int ix = fd->collapse; ix--;)
7550 const omp_for_data_loop *loop = &fd->loops[ix];
7551 const oacc_collapse *collapse = &counts[ix];
7552 tree iter_type = TREE_TYPE (loop->v);
7553 tree diff_type = TREE_TYPE (collapse->step);
7554 tree plus_type = iter_type;
7555 enum tree_code plus_code = PLUS_EXPR;
7556 tree expr;
7558 if (POINTER_TYPE_P (iter_type))
7560 plus_code = POINTER_PLUS_EXPR;
7561 plus_type = sizetype;
7564 expr = fold_build2 (TRUNC_MOD_EXPR, ivar_type, ivar,
7565 fold_convert (ivar_type, collapse->iters));
7566 expr = fold_build2 (MULT_EXPR, diff_type, fold_convert (diff_type, expr),
7567 collapse->step);
7568 expr = fold_build2 (plus_code, iter_type, collapse->base,
7569 fold_convert (plus_type, expr));
7570 expr = force_gimple_operand_gsi (gsi, expr, false, NULL_TREE,
7571 true, GSI_SAME_STMT);
7572 gassign *ass = gimple_build_assign (loop->v, expr);
7573 gsi_insert_before (gsi, ass, GSI_SAME_STMT);
7575 if (ix)
7577 expr = fold_build2 (TRUNC_DIV_EXPR, ivar_type, ivar,
7578 fold_convert (ivar_type, collapse->iters));
7579 ivar = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE,
7580 true, GSI_SAME_STMT);
7586 /* Helper function for expand_omp_{for_*,simd}. If this is the outermost
7587 of the combined collapse > 1 loop constructs, generate code like:
7588 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
7589 if (cond3 is <)
7590 adj = STEP3 - 1;
7591 else
7592 adj = STEP3 + 1;
7593 count3 = (adj + N32 - N31) / STEP3;
7594 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
7595 if (cond2 is <)
7596 adj = STEP2 - 1;
7597 else
7598 adj = STEP2 + 1;
7599 count2 = (adj + N22 - N21) / STEP2;
7600 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
7601 if (cond1 is <)
7602 adj = STEP1 - 1;
7603 else
7604 adj = STEP1 + 1;
7605 count1 = (adj + N12 - N11) / STEP1;
7606 count = count1 * count2 * count3;
7607 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
7608 count = 0;
7609 and set ZERO_ITER_BB to that bb. If this isn't the outermost
7610 of the combined loop constructs, just initialize COUNTS array
7611 from the _looptemp_ clauses. */
7613 /* NOTE: It *could* be better to moosh all of the BBs together,
7614 creating one larger BB with all the computation and the unexpected
7615 jump at the end. I.e.
7617 bool zero3, zero2, zero1, zero;
7619 zero3 = N32 c3 N31;
7620 count3 = (N32 - N31) /[cl] STEP3;
7621 zero2 = N22 c2 N21;
7622 count2 = (N22 - N21) /[cl] STEP2;
7623 zero1 = N12 c1 N11;
7624 count1 = (N12 - N11) /[cl] STEP1;
7625 zero = zero3 || zero2 || zero1;
7626 count = count1 * count2 * count3;
7627 if (__builtin_expect(zero, false)) goto zero_iter_bb;
7629 After all, we expect the zero=false, and thus we expect to have to
7630 evaluate all of the comparison expressions, so short-circuiting
7631 oughtn't be a win. Since the condition isn't protecting a
7632 denominator, we're not concerned about divide-by-zero, so we can
7633 fully evaluate count even if a numerator turned out to be wrong.
7635 It seems like putting this all together would create much better
7636 scheduling opportunities, and less pressure on the chip's branch
7637 predictor. */
7639 static void
7640 expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
7641 basic_block &entry_bb, tree *counts,
7642 basic_block &zero_iter1_bb, int &first_zero_iter1,
7643 basic_block &zero_iter2_bb, int &first_zero_iter2,
7644 basic_block &l2_dom_bb)
7646 tree t, type = TREE_TYPE (fd->loop.v);
7647 edge e, ne;
7648 int i;
7650 /* Collapsed loops need work for expansion into SSA form. */
7651 gcc_assert (!gimple_in_ssa_p (cfun));
7653 if (gimple_omp_for_combined_into_p (fd->for_stmt)
7654 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
7656 gcc_assert (fd->ordered == 0);
 7657       /* The first two _looptemp_ clauses are for istart/iend; counts[0]
 7658          isn't supposed to be handled, as the inner loop doesn't
 7659          use it.  */
7660 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7661 OMP_CLAUSE__LOOPTEMP_);
7662 gcc_assert (innerc);
7663 for (i = 0; i < fd->collapse; i++)
7665 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
7666 OMP_CLAUSE__LOOPTEMP_);
7667 gcc_assert (innerc);
7668 if (i)
7669 counts[i] = OMP_CLAUSE_DECL (innerc);
7670 else
7671 counts[0] = NULL_TREE;
7673 return;
7676 for (i = fd->collapse; i < fd->ordered; i++)
7678 tree itype = TREE_TYPE (fd->loops[i].v);
7679 counts[i] = NULL_TREE;
7680 t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
7681 fold_convert (itype, fd->loops[i].n1),
7682 fold_convert (itype, fd->loops[i].n2));
7683 if (t && integer_zerop (t))
7685 for (i = fd->collapse; i < fd->ordered; i++)
7686 counts[i] = build_int_cst (type, 0);
7687 break;
7690 for (i = 0; i < (fd->ordered ? fd->ordered : fd->collapse); i++)
7692 tree itype = TREE_TYPE (fd->loops[i].v);
7694 if (i >= fd->collapse && counts[i])
7695 continue;
7696 if ((SSA_VAR_P (fd->loop.n2) || i >= fd->collapse)
7697 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
7698 fold_convert (itype, fd->loops[i].n1),
7699 fold_convert (itype, fd->loops[i].n2)))
7700 == NULL_TREE || !integer_onep (t)))
7702 gcond *cond_stmt;
7703 tree n1, n2;
7704 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
7705 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
7706 true, GSI_SAME_STMT);
7707 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
7708 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
7709 true, GSI_SAME_STMT);
7710 cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
7711 NULL_TREE, NULL_TREE);
7712 gsi_insert_before (gsi, cond_stmt, GSI_SAME_STMT);
7713 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
7714 expand_omp_regimplify_p, NULL, NULL)
7715 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
7716 expand_omp_regimplify_p, NULL, NULL))
7718 *gsi = gsi_for_stmt (cond_stmt);
7719 gimple_regimplify_operands (cond_stmt, gsi);
7721 e = split_block (entry_bb, cond_stmt);
7722 basic_block &zero_iter_bb
7723 = i < fd->collapse ? zero_iter1_bb : zero_iter2_bb;
7724 int &first_zero_iter
7725 = i < fd->collapse ? first_zero_iter1 : first_zero_iter2;
7726 if (zero_iter_bb == NULL)
7728 gassign *assign_stmt;
7729 first_zero_iter = i;
7730 zero_iter_bb = create_empty_bb (entry_bb);
7731 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
7732 *gsi = gsi_after_labels (zero_iter_bb);
7733 if (i < fd->collapse)
7734 assign_stmt = gimple_build_assign (fd->loop.n2,
7735 build_zero_cst (type));
7736 else
7738 counts[i] = create_tmp_reg (type, ".count");
7739 assign_stmt
7740 = gimple_build_assign (counts[i], build_zero_cst (type));
7742 gsi_insert_before (gsi, assign_stmt, GSI_SAME_STMT);
7743 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
7744 entry_bb);
7746 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
7747 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
7748 e->flags = EDGE_TRUE_VALUE;
7749 e->probability = REG_BR_PROB_BASE - ne->probability;
7750 if (l2_dom_bb == NULL)
7751 l2_dom_bb = entry_bb;
7752 entry_bb = e->dest;
7753 *gsi = gsi_last_bb (entry_bb);
7756 if (POINTER_TYPE_P (itype))
7757 itype = signed_type_for (itype);
7758 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
7759 ? -1 : 1));
7760 t = fold_build2 (PLUS_EXPR, itype,
7761 fold_convert (itype, fd->loops[i].step), t);
7762 t = fold_build2 (PLUS_EXPR, itype, t,
7763 fold_convert (itype, fd->loops[i].n2));
7764 t = fold_build2 (MINUS_EXPR, itype, t,
7765 fold_convert (itype, fd->loops[i].n1));
 7766       /* ?? We could probably use CEIL_DIV_EXPR instead of
 7767          TRUNC_DIV_EXPR and adjust by hand, unless we can't
 7768          generate the same code in the end because generically we
 7769          don't know that the values involved must be negative for
 7770          GT.  ?? */
7771 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
7772 t = fold_build2 (TRUNC_DIV_EXPR, itype,
7773 fold_build1 (NEGATE_EXPR, itype, t),
7774 fold_build1 (NEGATE_EXPR, itype,
7775 fold_convert (itype,
7776 fd->loops[i].step)));
7777 else
7778 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
7779 fold_convert (itype, fd->loops[i].step));
7780 t = fold_convert (type, t);
7781 if (TREE_CODE (t) == INTEGER_CST)
7782 counts[i] = t;
7783 else
7785 if (i < fd->collapse || i != first_zero_iter2)
7786 counts[i] = create_tmp_reg (type, ".count");
7787 expand_omp_build_assign (gsi, counts[i], t);
7789 if (SSA_VAR_P (fd->loop.n2) && i < fd->collapse)
7791 if (i == 0)
7792 t = counts[0];
7793 else
7794 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
7795 expand_omp_build_assign (gsi, fd->loop.n2, t);
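/* A self-contained sketch of the trip-count arithmetic used above
   (illustrative only; "trip_count" is a made-up helper, not part of
   this pass):

     #include <stdio.h>

     // Iterations of: for (v = n1; v cond n2; v += step), cond in {<, >}.
     static long
     trip_count (long n1, long n2, long step, int cond_is_lt)
     {
       if (cond_is_lt ? n2 <= n1 : n2 >= n1)
         return 0;                           // zero-iteration loop
       long adj = cond_is_lt ? step - 1 : step + 1;
       return (adj + n2 - n1) / step;        // count = (adj + N2 - N1) / STEP
     }

     int
     main (void)
     {
       printf ("%ld\n", trip_count (0, 10, 3, 1));    // 4: v = 0, 3, 6, 9
       printf ("%ld\n", trip_count (10, 0, -3, 0));   // 4: v = 10, 7, 4, 1
       return 0;
     }

   The unsigned GT_EXPR case above negates both operands before the
   division for the same reason: a plain unsigned division of the
   conceptually negative quantities would give the wrong result.  */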
7801 /* Helper function for expand_omp_{for_*,simd}. Generate code like:
7802 T = V;
7803 V3 = N31 + (T % count3) * STEP3;
7804 T = T / count3;
7805 V2 = N21 + (T % count2) * STEP2;
7806 T = T / count2;
7807 V1 = N11 + T * STEP1;
7808 if this loop doesn't have an inner loop construct combined with it.
7809 If it does have an inner loop construct combined with it and the
7810 iteration count isn't known constant, store values from counts array
7811 into its _looptemp_ temporaries instead. */
7813 static void
7814 expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
7815 tree *counts, gimple *inner_stmt, tree startvar)
7817 int i;
7818 if (gimple_omp_for_combined_p (fd->for_stmt))
7820 /* If fd->loop.n2 is constant, then no propagation of the counts
7821 is needed, they are constant. */
7822 if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
7823 return;
7825 tree clauses = gimple_code (inner_stmt) != GIMPLE_OMP_FOR
7826 ? gimple_omp_taskreg_clauses (inner_stmt)
7827 : gimple_omp_for_clauses (inner_stmt);
 7828       /* The first two _looptemp_ clauses are for istart/iend; counts[0]
 7829          isn't supposed to be handled, as the inner loop doesn't
 7830          use it.  */
7831 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
7832 gcc_assert (innerc);
7833 for (i = 0; i < fd->collapse; i++)
7835 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
7836 OMP_CLAUSE__LOOPTEMP_);
7837 gcc_assert (innerc);
7838 if (i)
7840 tree tem = OMP_CLAUSE_DECL (innerc);
7841 tree t = fold_convert (TREE_TYPE (tem), counts[i]);
7842 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
7843 false, GSI_CONTINUE_LINKING);
7844 gassign *stmt = gimple_build_assign (tem, t);
7845 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
7848 return;
7851 tree type = TREE_TYPE (fd->loop.v);
7852 tree tem = create_tmp_reg (type, ".tem");
7853 gassign *stmt = gimple_build_assign (tem, startvar);
7854 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
7856 for (i = fd->collapse - 1; i >= 0; i--)
7858 tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
7859 itype = vtype;
7860 if (POINTER_TYPE_P (vtype))
7861 itype = signed_type_for (vtype);
7862 if (i != 0)
7863 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
7864 else
7865 t = tem;
7866 t = fold_convert (itype, t);
7867 t = fold_build2 (MULT_EXPR, itype, t,
7868 fold_convert (itype, fd->loops[i].step));
7869 if (POINTER_TYPE_P (vtype))
7870 t = fold_build_pointer_plus (fd->loops[i].n1, t);
7871 else
7872 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
7873 t = force_gimple_operand_gsi (gsi, t,
7874 DECL_P (fd->loops[i].v)
7875 && TREE_ADDRESSABLE (fd->loops[i].v),
7876 NULL_TREE, false,
7877 GSI_CONTINUE_LINKING);
7878 stmt = gimple_build_assign (fd->loops[i].v, t);
7879 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
7880 if (i != 0)
7882 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
7883 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
7884 false, GSI_CONTINUE_LINKING);
7885 stmt = gimple_build_assign (tem, t);
7886 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
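/* A runnable sketch of the V1/V2/V3 reconstruction emitted above for a
   collapse(3) nest (illustrative only; all bounds, steps and counts are
   made up):

     #include <stdio.h>

     int
     main (void)
     {
       long count2 = 4, count3 = 5;              // inner trip counts
       long n11 = 0, n21 = 10, n31 = 100;        // lower bounds
       long step1 = 1, step2 = 2, step3 = 3;

       for (long t = 0; t < 3 * count2 * count3; t++)
         {
           long tem = t;
           long v3 = n31 + (tem % count3) * step3;  // V3 = N31 + (T % count3) * STEP3
           tem /= count3;                           // T = T / count3
           long v2 = n21 + (tem % count2) * step2;  // V2 = N21 + (T % count2) * STEP2
           tem /= count2;                           // T = T / count2
           long v1 = n11 + tem * step1;             // V1 = N11 + T * STEP1
           printf ("%ld %ld %ld\n", v1, v2, v3);
         }
       return 0;
     }

   which visits the three loop variables in exactly the order the
   original nest would have visited them.  */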
7892 /* Helper function for expand_omp_for_*. Generate code like:
7893 L10:
7894 V3 += STEP3;
7895 if (V3 cond3 N32) goto BODY_BB; else goto L11;
7896 L11:
7897 V3 = N31;
7898 V2 += STEP2;
7899 if (V2 cond2 N22) goto BODY_BB; else goto L12;
7900 L12:
7901 V2 = N21;
7902 V1 += STEP1;
7903 goto BODY_BB; */
7905 static basic_block
7906 extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
7907 basic_block body_bb)
7909 basic_block last_bb, bb, collapse_bb = NULL;
7910 int i;
7911 gimple_stmt_iterator gsi;
7912 edge e;
7913 tree t;
7914 gimple *stmt;
7916 last_bb = cont_bb;
7917 for (i = fd->collapse - 1; i >= 0; i--)
7919 tree vtype = TREE_TYPE (fd->loops[i].v);
7921 bb = create_empty_bb (last_bb);
7922 add_bb_to_loop (bb, last_bb->loop_father);
7923 gsi = gsi_start_bb (bb);
7925 if (i < fd->collapse - 1)
7927 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
7928 e->probability = REG_BR_PROB_BASE / 8;
7930 t = fd->loops[i + 1].n1;
7931 t = force_gimple_operand_gsi (&gsi, t,
7932 DECL_P (fd->loops[i + 1].v)
7933 && TREE_ADDRESSABLE (fd->loops[i
7934 + 1].v),
7935 NULL_TREE, false,
7936 GSI_CONTINUE_LINKING);
7937 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
7938 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
7940 else
7941 collapse_bb = bb;
7943 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
7945 if (POINTER_TYPE_P (vtype))
7946 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
7947 else
7948 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
7949 t = force_gimple_operand_gsi (&gsi, t,
7950 DECL_P (fd->loops[i].v)
7951 && TREE_ADDRESSABLE (fd->loops[i].v),
7952 NULL_TREE, false, GSI_CONTINUE_LINKING);
7953 stmt = gimple_build_assign (fd->loops[i].v, t);
7954 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
7956 if (i > 0)
7958 t = fd->loops[i].n2;
7959 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7960 false, GSI_CONTINUE_LINKING);
7961 tree v = fd->loops[i].v;
7962 if (DECL_P (v) && TREE_ADDRESSABLE (v))
7963 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
7964 false, GSI_CONTINUE_LINKING);
7965 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
7966 stmt = gimple_build_cond_empty (t);
7967 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
7968 e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
7969 e->probability = REG_BR_PROB_BASE * 7 / 8;
7971 else
7972 make_edge (bb, body_bb, EDGE_FALLTHRU);
7973 last_bb = bb;
7976 return collapse_bb;
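/* A runnable equivalent of the update sequence generated above for a
   collapse(2) nest (illustrative only; bounds and steps are made up, and
   the termination test on the outermost variable is simplified here,
   since in the real expansion that test is done against the chunk end
   elsewhere):

     #include <stdio.h>

     int
     main (void)
     {
       long v1 = 0, v2 = 10;                 // V1 = N11, V2 = N21
       for (;;)
         {
           printf ("%ld %ld\n", v1, v2);     // BODY
           v2 += 2;                          // V2 += STEP2
           if (v2 < 16)                      // if (V2 cond2 N22) goto BODY_BB
             continue;
           v2 = 10;                          // V2 = N21
           v1 += 1;                          // V1 += STEP1
           if (v1 >= 3)                      // simplified outer bound check
             break;
         }
       return 0;
     }
*/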
7980 /* Expand #pragma omp ordered depend(source). */
7982 static void
7983 expand_omp_ordered_source (gimple_stmt_iterator *gsi, struct omp_for_data *fd,
7984 tree *counts, location_t loc)
7986 enum built_in_function source_ix
7987 = fd->iter_type == long_integer_type_node
7988 ? BUILT_IN_GOMP_DOACROSS_POST : BUILT_IN_GOMP_DOACROSS_ULL_POST;
7989 gimple *g
7990 = gimple_build_call (builtin_decl_explicit (source_ix), 1,
7991 build_fold_addr_expr (counts[fd->ordered]));
7992 gimple_set_location (g, loc);
7993 gsi_insert_before (gsi, g, GSI_SAME_STMT);
7996 /* Expand a single depend from #pragma omp ordered depend(sink:...). */
7998 static void
7999 expand_omp_ordered_sink (gimple_stmt_iterator *gsi, struct omp_for_data *fd,
8000 tree *counts, tree c, location_t loc)
8002 auto_vec<tree, 10> args;
8003 enum built_in_function sink_ix
8004 = fd->iter_type == long_integer_type_node
8005 ? BUILT_IN_GOMP_DOACROSS_WAIT : BUILT_IN_GOMP_DOACROSS_ULL_WAIT;
8006 tree t, off, coff = NULL_TREE, deps = OMP_CLAUSE_DECL (c), cond = NULL_TREE;
8007 int i;
8008 gimple_stmt_iterator gsi2 = *gsi;
8009 bool warned_step = false;
8011 for (i = 0; i < fd->ordered; i++)
8013 tree step = NULL_TREE;
8014 off = TREE_PURPOSE (deps);
8015 if (TREE_CODE (off) == TRUNC_DIV_EXPR)
8017 step = TREE_OPERAND (off, 1);
8018 off = TREE_OPERAND (off, 0);
8020 if (!integer_zerop (off))
8022 gcc_assert (fd->loops[i].cond_code == LT_EXPR
8023 || fd->loops[i].cond_code == GT_EXPR);
8024 bool forward = fd->loops[i].cond_code == LT_EXPR;
8025 if (step)
 8027               /* Non-simple Fortran DO loops.  If the step is variable,
 8028                  we don't even know the direction at compile time, so
 8029                  we can't warn.  */
8030 if (TREE_CODE (step) != INTEGER_CST)
8031 break;
8032 forward = tree_int_cst_sgn (step) != -1;
8034 if (forward ^ OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
8035 warning_at (loc, 0, "%<depend(sink)%> clause waiting for "
8036 "lexically later iteration");
8037 break;
8039 deps = TREE_CHAIN (deps);
8041 /* If all offsets corresponding to the collapsed loops are zero,
8042 this depend clause can be ignored. FIXME: but there is still a
8043 flush needed. We need to emit one __sync_synchronize () for it
8044 though (perhaps conditionally)? Solve this together with the
8045 conservative dependence folding optimization.
8046 if (i >= fd->collapse)
8047 return; */
8049 deps = OMP_CLAUSE_DECL (c);
8050 gsi_prev (&gsi2);
8051 edge e1 = split_block (gsi_bb (gsi2), gsi_stmt (gsi2));
8052 edge e2 = split_block_after_labels (e1->dest);
8054 gsi2 = gsi_after_labels (e1->dest);
8055 *gsi = gsi_last_bb (e1->src);
8056 for (i = 0; i < fd->ordered; i++)
8058 tree itype = TREE_TYPE (fd->loops[i].v);
8059 tree step = NULL_TREE;
8060 tree orig_off = NULL_TREE;
8061 if (POINTER_TYPE_P (itype))
8062 itype = sizetype;
8063 if (i)
8064 deps = TREE_CHAIN (deps);
8065 off = TREE_PURPOSE (deps);
8066 if (TREE_CODE (off) == TRUNC_DIV_EXPR)
8068 step = TREE_OPERAND (off, 1);
8069 off = TREE_OPERAND (off, 0);
8070 gcc_assert (fd->loops[i].cond_code == LT_EXPR
8071 && integer_onep (fd->loops[i].step)
8072 && !POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)));
8074 tree s = fold_convert_loc (loc, itype, step ? step : fd->loops[i].step);
8075 if (step)
8077 off = fold_convert_loc (loc, itype, off);
8078 orig_off = off;
8079 off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, s);
8082 if (integer_zerop (off))
8083 t = boolean_true_node;
8084 else
8086 tree a;
8087 tree co = fold_convert_loc (loc, itype, off);
8088 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
8090 if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
8091 co = fold_build1_loc (loc, NEGATE_EXPR, itype, co);
8092 a = fold_build2_loc (loc, POINTER_PLUS_EXPR,
8093 TREE_TYPE (fd->loops[i].v), fd->loops[i].v,
8094 co);
8096 else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
8097 a = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
8098 fd->loops[i].v, co);
8099 else
8100 a = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
8101 fd->loops[i].v, co);
8102 if (step)
8104 tree t1, t2;
8105 if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
8106 t1 = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a,
8107 fd->loops[i].n1);
8108 else
8109 t1 = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
8110 fd->loops[i].n2);
8111 if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
8112 t2 = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
8113 fd->loops[i].n2);
8114 else
8115 t2 = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a,
8116 fd->loops[i].n1);
8117 t = fold_build2_loc (loc, LT_EXPR, boolean_type_node,
8118 step, build_int_cst (TREE_TYPE (step), 0));
8119 if (TREE_CODE (step) != INTEGER_CST)
8121 t1 = unshare_expr (t1);
8122 t1 = force_gimple_operand_gsi (gsi, t1, true, NULL_TREE,
8123 false, GSI_CONTINUE_LINKING);
8124 t2 = unshare_expr (t2);
8125 t2 = force_gimple_operand_gsi (gsi, t2, true, NULL_TREE,
8126 false, GSI_CONTINUE_LINKING);
8128 t = fold_build3_loc (loc, COND_EXPR, boolean_type_node,
8129 t, t2, t1);
8131 else if (fd->loops[i].cond_code == LT_EXPR)
8133 if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
8134 t = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a,
8135 fd->loops[i].n1);
8136 else
8137 t = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
8138 fd->loops[i].n2);
8140 else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
8141 t = fold_build2_loc (loc, GT_EXPR, boolean_type_node, a,
8142 fd->loops[i].n2);
8143 else
8144 t = fold_build2_loc (loc, LE_EXPR, boolean_type_node, a,
8145 fd->loops[i].n1);
8147 if (cond)
8148 cond = fold_build2_loc (loc, BIT_AND_EXPR, boolean_type_node, cond, t);
8149 else
8150 cond = t;
8152 off = fold_convert_loc (loc, itype, off);
8154 if (step
8155 || (fd->loops[i].cond_code == LT_EXPR
8156 ? !integer_onep (fd->loops[i].step)
8157 : !integer_minus_onep (fd->loops[i].step)))
8159 if (step == NULL_TREE
8160 && TYPE_UNSIGNED (itype)
8161 && fd->loops[i].cond_code == GT_EXPR)
8162 t = fold_build2_loc (loc, TRUNC_MOD_EXPR, itype, off,
8163 fold_build1_loc (loc, NEGATE_EXPR, itype,
8164 s));
8165 else
8166 t = fold_build2_loc (loc, TRUNC_MOD_EXPR, itype,
8167 orig_off ? orig_off : off, s);
8168 t = fold_build2_loc (loc, EQ_EXPR, boolean_type_node, t,
8169 build_int_cst (itype, 0));
8170 if (integer_zerop (t) && !warned_step)
8172 warning_at (loc, 0, "%<depend(sink)%> refers to iteration never "
8173 "in the iteration space");
8174 warned_step = true;
8176 cond = fold_build2_loc (loc, BIT_AND_EXPR, boolean_type_node,
8177 cond, t);
8180 if (i <= fd->collapse - 1 && fd->collapse > 1)
8181 t = fd->loop.v;
8182 else if (counts[i])
8183 t = counts[i];
8184 else
8186 t = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
8187 fd->loops[i].v, fd->loops[i].n1);
8188 t = fold_convert_loc (loc, fd->iter_type, t);
8190 if (step)
 8191         /* OFF was already divided by STEP above.  */;
8192 else if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
8193 off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off,
8194 fold_build1_loc (loc, NEGATE_EXPR, itype,
8195 s));
8196 else
8197 off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, s);
8198 if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
8199 off = fold_build1_loc (loc, NEGATE_EXPR, itype, off);
8200 off = fold_convert_loc (loc, fd->iter_type, off);
8201 if (i <= fd->collapse - 1 && fd->collapse > 1)
8203 if (i)
8204 off = fold_build2_loc (loc, PLUS_EXPR, fd->iter_type, coff,
8205 off);
8206 if (i < fd->collapse - 1)
8208 coff = fold_build2_loc (loc, MULT_EXPR, fd->iter_type, off,
8209 counts[i]);
8210 continue;
8213 off = unshare_expr (off);
8214 t = fold_build2_loc (loc, PLUS_EXPR, fd->iter_type, t, off);
8215 t = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
8216 true, GSI_SAME_STMT);
8217 args.safe_push (t);
8219 gimple *g = gimple_build_call_vec (builtin_decl_explicit (sink_ix), args);
8220 gimple_set_location (g, loc);
8221 gsi_insert_before (&gsi2, g, GSI_SAME_STMT);
8223 cond = unshare_expr (cond);
8224 cond = force_gimple_operand_gsi (gsi, cond, true, NULL_TREE, false,
8225 GSI_CONTINUE_LINKING);
8226 gsi_insert_after (gsi, gimple_build_cond_empty (cond), GSI_NEW_STMT);
8227 edge e3 = make_edge (e1->src, e2->dest, EDGE_FALSE_VALUE);
8228 e3->probability = REG_BR_PROB_BASE / 8;
8229 e1->probability = REG_BR_PROB_BASE - e3->probability;
8230 e1->flags = EDGE_TRUE_VALUE;
8231 set_immediate_dominator (CDI_DOMINATORS, e2->dest, e1->src);
8233 *gsi = gsi_after_labels (e2->dest);
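/* For context, the construct being expanded here is an OpenMP 4.5
   doacross loop.  A minimal user-level example (illustrative only):

     void
     scan (int n, double *a)
     {
     #pragma omp for ordered(1)
       for (int i = 1; i < n; i++)
         {
           // work that needs no ordering can go here
     #pragma omp ordered depend(sink: i - 1)
           a[i] += a[i - 1];                 // waits until iteration i-1 posted
     #pragma omp ordered depend(source)
           // more unordered work can follow the post
         }
     }

   The depend(sink: ...) clause is what becomes the GOMP_doacross_wait /
   GOMP_doacross_ull_wait call built by this function; depend(source)
   becomes the post emitted by expand_omp_ordered_source.  */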
8236 /* Expand all #pragma omp ordered depend(source) and
8237 #pragma omp ordered depend(sink:...) constructs in the current
8238 #pragma omp for ordered(n) region. */
8240 static void
8241 expand_omp_ordered_source_sink (struct omp_region *region,
8242 struct omp_for_data *fd, tree *counts,
8243 basic_block cont_bb)
8245 struct omp_region *inner;
8246 int i;
8247 for (i = fd->collapse - 1; i < fd->ordered; i++)
8248 if (i == fd->collapse - 1 && fd->collapse > 1)
8249 counts[i] = NULL_TREE;
8250 else if (i >= fd->collapse && !cont_bb)
8251 counts[i] = build_zero_cst (fd->iter_type);
8252 else if (!POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v))
8253 && integer_onep (fd->loops[i].step))
8254 counts[i] = NULL_TREE;
8255 else
8256 counts[i] = create_tmp_var (fd->iter_type, ".orditer");
8257 tree atype
8258 = build_array_type_nelts (fd->iter_type, fd->ordered - fd->collapse + 1);
8259 counts[fd->ordered] = create_tmp_var (atype, ".orditera");
8260 TREE_ADDRESSABLE (counts[fd->ordered]) = 1;
8262 for (inner = region->inner; inner; inner = inner->next)
8263 if (inner->type == GIMPLE_OMP_ORDERED)
8265 gomp_ordered *ord_stmt = inner->ord_stmt;
8266 gimple_stmt_iterator gsi = gsi_for_stmt (ord_stmt);
8267 location_t loc = gimple_location (ord_stmt);
8268 tree c;
8269 for (c = gimple_omp_ordered_clauses (ord_stmt);
8270 c; c = OMP_CLAUSE_CHAIN (c))
8271 if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE)
8272 break;
8273 if (c)
8274 expand_omp_ordered_source (&gsi, fd, counts, loc);
8275 for (c = gimple_omp_ordered_clauses (ord_stmt);
8276 c; c = OMP_CLAUSE_CHAIN (c))
8277 if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
8278 expand_omp_ordered_sink (&gsi, fd, counts, c, loc);
8279 gsi_remove (&gsi, true);
8283 /* Wrap the body into fd->ordered - fd->collapse loops that aren't
8284 collapsed. */
8286 static basic_block
8287 expand_omp_for_ordered_loops (struct omp_for_data *fd, tree *counts,
8288 basic_block cont_bb, basic_block body_bb,
8289 bool ordered_lastprivate)
8291 if (fd->ordered == fd->collapse)
8292 return cont_bb;
8294 if (!cont_bb)
8296 gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
8297 for (int i = fd->collapse; i < fd->ordered; i++)
8299 tree type = TREE_TYPE (fd->loops[i].v);
8300 tree n1 = fold_convert (type, fd->loops[i].n1);
8301 expand_omp_build_assign (&gsi, fd->loops[i].v, n1);
8302 tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
8303 size_int (i - fd->collapse + 1),
8304 NULL_TREE, NULL_TREE);
8305 expand_omp_build_assign (&gsi, aref, build_zero_cst (fd->iter_type));
8307 return NULL;
8310 for (int i = fd->ordered - 1; i >= fd->collapse; i--)
8312 tree t, type = TREE_TYPE (fd->loops[i].v);
8313 gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
8314 expand_omp_build_assign (&gsi, fd->loops[i].v,
8315 fold_convert (type, fd->loops[i].n1));
8316 if (counts[i])
8317 expand_omp_build_assign (&gsi, counts[i],
8318 build_zero_cst (fd->iter_type));
8319 tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
8320 size_int (i - fd->collapse + 1),
8321 NULL_TREE, NULL_TREE);
8322 expand_omp_build_assign (&gsi, aref, build_zero_cst (fd->iter_type));
8323 if (!gsi_end_p (gsi))
8324 gsi_prev (&gsi);
8325 else
8326 gsi = gsi_last_bb (body_bb);
8327 edge e1 = split_block (body_bb, gsi_stmt (gsi));
8328 basic_block new_body = e1->dest;
8329 if (body_bb == cont_bb)
8330 cont_bb = new_body;
8331 edge e2 = NULL;
8332 basic_block new_header;
8333 if (EDGE_COUNT (cont_bb->preds) > 0)
8335 gsi = gsi_last_bb (cont_bb);
8336 if (POINTER_TYPE_P (type))
8337 t = fold_build_pointer_plus (fd->loops[i].v,
8338 fold_convert (sizetype,
8339 fd->loops[i].step));
8340 else
8341 t = fold_build2 (PLUS_EXPR, type, fd->loops[i].v,
8342 fold_convert (type, fd->loops[i].step));
8343 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
8344 if (counts[i])
8346 t = fold_build2 (PLUS_EXPR, fd->iter_type, counts[i],
8347 build_int_cst (fd->iter_type, 1));
8348 expand_omp_build_assign (&gsi, counts[i], t);
8349 t = counts[i];
8351 else
8353 t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
8354 fd->loops[i].v, fd->loops[i].n1);
8355 t = fold_convert (fd->iter_type, t);
8356 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
8357 true, GSI_SAME_STMT);
8359 aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
8360 size_int (i - fd->collapse + 1),
8361 NULL_TREE, NULL_TREE);
8362 expand_omp_build_assign (&gsi, aref, t);
8363 gsi_prev (&gsi);
8364 e2 = split_block (cont_bb, gsi_stmt (gsi));
8365 new_header = e2->dest;
8367 else
8368 new_header = cont_bb;
8369 gsi = gsi_after_labels (new_header);
8370 tree v = force_gimple_operand_gsi (&gsi, fd->loops[i].v, true, NULL_TREE,
8371 true, GSI_SAME_STMT);
8372 tree n2
8373 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loops[i].n2),
8374 true, NULL_TREE, true, GSI_SAME_STMT);
8375 t = build2 (fd->loops[i].cond_code, boolean_type_node, v, n2);
8376 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_NEW_STMT);
8377 edge e3 = split_block (new_header, gsi_stmt (gsi));
8378 cont_bb = e3->dest;
8379 remove_edge (e1);
8380 make_edge (body_bb, new_header, EDGE_FALLTHRU);
8381 e3->flags = EDGE_FALSE_VALUE;
8382 e3->probability = REG_BR_PROB_BASE / 8;
8383 e1 = make_edge (new_header, new_body, EDGE_TRUE_VALUE);
8384 e1->probability = REG_BR_PROB_BASE - e3->probability;
8386 set_immediate_dominator (CDI_DOMINATORS, new_header, body_bb);
8387 set_immediate_dominator (CDI_DOMINATORS, new_body, new_header);
8389 if (e2)
8391 struct loop *loop = alloc_loop ();
8392 loop->header = new_header;
8393 loop->latch = e2->src;
8394 add_loop (loop, body_bb->loop_father);
 8398   /* If there are any lastprivate clauses and it is possible some loops
 8399      might have zero iterations, ensure all the decls are initialized;
 8400      otherwise we could crash evaluating C++ class iterators with
 8401      lastprivate clauses.  */
8402 bool need_inits = false;
8403 for (int i = fd->collapse; ordered_lastprivate && i < fd->ordered; i++)
8404 if (need_inits)
8406 tree type = TREE_TYPE (fd->loops[i].v);
8407 gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
8408 expand_omp_build_assign (&gsi, fd->loops[i].v,
8409 fold_convert (type, fd->loops[i].n1));
8411 else
8413 tree type = TREE_TYPE (fd->loops[i].v);
8414 tree this_cond = fold_build2 (fd->loops[i].cond_code,
8415 boolean_type_node,
8416 fold_convert (type, fd->loops[i].n1),
8417 fold_convert (type, fd->loops[i].n2));
8418 if (!integer_onep (this_cond))
8419 need_inits = true;
8422 return cont_bb;
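/* Sketch of the shape produced above (illustrative, not generated code):
   for "#pragma omp for ordered(2)" where only the outermost loop is
   distributed, the body ends up wrapped in the second loop by this
   function, roughly

       for (<chunk of the distributed i loop>)
         for (j = n1_2; j cond2 n2_2; j += step2)
           {
             <.orditera array slot for j> = <iteration number of j>;
             BODY (including the ordered post/wait expansions);
           }

   so the doacross calls can read the current iteration vector out of
   the counts[fd->ordered] array.  Everything in angle brackets is a
   placeholder.  */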
8426 /* A subroutine of expand_omp_for. Generate code for a parallel
8427 loop with any schedule. Given parameters:
8429 for (V = N1; V cond N2; V += STEP) BODY;
8431 where COND is "<" or ">", we generate pseudocode
8433 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
8434 if (more) goto L0; else goto L3;
8436 V = istart0;
8437 iend = iend0;
8439 BODY;
8440 V += STEP;
8441 if (V cond iend) goto L1; else goto L2;
8443 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
8446 If this is a combined omp parallel loop, instead of the call to
8447 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
8448 If this is gimple_omp_for_combined_p loop, then instead of assigning
8449 V and iend in L0 we assign the first two _looptemp_ clause decls of the
8450 inner GIMPLE_OMP_FOR and V += STEP; and
8451 if (V cond iend) goto L1; else goto L2; are removed.
8453 For collapsed loops, given parameters:
8454 collapse(3)
8455 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
8456 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
8457 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
8458 BODY;
8460 we generate pseudocode
8462 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
8463 if (cond3 is <)
8464 adj = STEP3 - 1;
8465 else
8466 adj = STEP3 + 1;
8467 count3 = (adj + N32 - N31) / STEP3;
8468 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
8469 if (cond2 is <)
8470 adj = STEP2 - 1;
8471 else
8472 adj = STEP2 + 1;
8473 count2 = (adj + N22 - N21) / STEP2;
8474 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
8475 if (cond1 is <)
8476 adj = STEP1 - 1;
8477 else
8478 adj = STEP1 + 1;
8479 count1 = (adj + N12 - N11) / STEP1;
8480 count = count1 * count2 * count3;
8481 goto Z1;
8483 count = 0;
8485 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
8486 if (more) goto L0; else goto L3;
8488 V = istart0;
8489 T = V;
8490 V3 = N31 + (T % count3) * STEP3;
8491 T = T / count3;
8492 V2 = N21 + (T % count2) * STEP2;
8493 T = T / count2;
8494 V1 = N11 + T * STEP1;
8495 iend = iend0;
8497 BODY;
8498 V += 1;
8499 if (V < iend) goto L10; else goto L2;
8500 L10:
8501 V3 += STEP3;
8502 if (V3 cond3 N32) goto L1; else goto L11;
8503 L11:
8504 V3 = N31;
8505 V2 += STEP2;
8506 if (V2 cond2 N22) goto L1; else goto L12;
8507 L12:
8508 V2 = N21;
8509 V1 += STEP1;
8510 goto L1;
8512 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
8517 static void
8518 expand_omp_for_generic (struct omp_region *region,
8519 struct omp_for_data *fd,
8520 enum built_in_function start_fn,
8521 enum built_in_function next_fn,
8522 gimple *inner_stmt)
8524 tree type, istart0, iend0, iend;
8525 tree t, vmain, vback, bias = NULL_TREE;
8526 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
8527 basic_block l2_bb = NULL, l3_bb = NULL;
8528 gimple_stmt_iterator gsi;
8529 gassign *assign_stmt;
8530 bool in_combined_parallel = is_combined_parallel (region);
8531 bool broken_loop = region->cont == NULL;
8532 edge e, ne;
8533 tree *counts = NULL;
8534 int i;
8535 bool ordered_lastprivate = false;
8537 gcc_assert (!broken_loop || !in_combined_parallel);
8538 gcc_assert (fd->iter_type == long_integer_type_node
8539 || !in_combined_parallel);
8541 entry_bb = region->entry;
8542 cont_bb = region->cont;
8543 collapse_bb = NULL;
8544 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
8545 gcc_assert (broken_loop
8546 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
8547 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
8548 l1_bb = single_succ (l0_bb);
8549 if (!broken_loop)
8551 l2_bb = create_empty_bb (cont_bb);
8552 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb
8553 || (single_succ_edge (BRANCH_EDGE (cont_bb)->dest)->dest
8554 == l1_bb));
8555 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
8557 else
8558 l2_bb = NULL;
8559 l3_bb = BRANCH_EDGE (entry_bb)->dest;
8560 exit_bb = region->exit;
8562 gsi = gsi_last_bb (entry_bb);
8564 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
8565 if (fd->ordered
8566 && find_omp_clause (gimple_omp_for_clauses (gsi_stmt (gsi)),
8567 OMP_CLAUSE_LASTPRIVATE))
8568 ordered_lastprivate = false;
8569 if (fd->collapse > 1 || fd->ordered)
8571 int first_zero_iter1 = -1, first_zero_iter2 = -1;
8572 basic_block zero_iter1_bb = NULL, zero_iter2_bb = NULL, l2_dom_bb = NULL;
8574 counts = XALLOCAVEC (tree, fd->ordered ? fd->ordered + 1 : fd->collapse);
8575 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
8576 zero_iter1_bb, first_zero_iter1,
8577 zero_iter2_bb, first_zero_iter2, l2_dom_bb);
8579 if (zero_iter1_bb)
8581 /* Some counts[i] vars might be uninitialized if
8582 some loop has zero iterations. But the body shouldn't
8583 be executed in that case, so just avoid uninit warnings. */
8584 for (i = first_zero_iter1;
8585 i < (fd->ordered ? fd->ordered : fd->collapse); i++)
8586 if (SSA_VAR_P (counts[i]))
8587 TREE_NO_WARNING (counts[i]) = 1;
8588 gsi_prev (&gsi);
8589 e = split_block (entry_bb, gsi_stmt (gsi));
8590 entry_bb = e->dest;
8591 make_edge (zero_iter1_bb, entry_bb, EDGE_FALLTHRU);
8592 gsi = gsi_last_bb (entry_bb);
8593 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
8594 get_immediate_dominator (CDI_DOMINATORS,
8595 zero_iter1_bb));
8597 if (zero_iter2_bb)
8599 /* Some counts[i] vars might be uninitialized if
8600 some loop has zero iterations. But the body shouldn't
8601 be executed in that case, so just avoid uninit warnings. */
8602 for (i = first_zero_iter2; i < fd->ordered; i++)
8603 if (SSA_VAR_P (counts[i]))
8604 TREE_NO_WARNING (counts[i]) = 1;
8605 if (zero_iter1_bb)
8606 make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU);
8607 else
8609 gsi_prev (&gsi);
8610 e = split_block (entry_bb, gsi_stmt (gsi));
8611 entry_bb = e->dest;
8612 make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU);
8613 gsi = gsi_last_bb (entry_bb);
8614 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
8615 get_immediate_dominator
8616 (CDI_DOMINATORS, zero_iter2_bb));
8619 if (fd->collapse == 1)
8621 counts[0] = fd->loop.n2;
8622 fd->loop = fd->loops[0];
8626 type = TREE_TYPE (fd->loop.v);
8627 istart0 = create_tmp_var (fd->iter_type, ".istart0");
8628 iend0 = create_tmp_var (fd->iter_type, ".iend0");
8629 TREE_ADDRESSABLE (istart0) = 1;
8630 TREE_ADDRESSABLE (iend0) = 1;
8632 /* See if we need to bias by LLONG_MIN. */
8633 if (fd->iter_type == long_long_unsigned_type_node
8634 && TREE_CODE (type) == INTEGER_TYPE
8635 && !TYPE_UNSIGNED (type)
8636 && fd->ordered == 0)
8638 tree n1, n2;
8640 if (fd->loop.cond_code == LT_EXPR)
8642 n1 = fd->loop.n1;
8643 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
8645 else
8647 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
8648 n2 = fd->loop.n1;
8650 if (TREE_CODE (n1) != INTEGER_CST
8651 || TREE_CODE (n2) != INTEGER_CST
8652 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
8653 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
8656 gimple_stmt_iterator gsif = gsi;
8657 gsi_prev (&gsif);
8659 tree arr = NULL_TREE;
8660 if (in_combined_parallel)
8662 gcc_assert (fd->ordered == 0);
8663 /* In a combined parallel loop, emit a call to
8664 GOMP_loop_foo_next. */
8665 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
8666 build_fold_addr_expr (istart0),
8667 build_fold_addr_expr (iend0));
8669 else
8671 tree t0, t1, t2, t3, t4;
8672 /* If this is not a combined parallel loop, emit a call to
8673 GOMP_loop_foo_start in ENTRY_BB. */
8674 t4 = build_fold_addr_expr (iend0);
8675 t3 = build_fold_addr_expr (istart0);
8676 if (fd->ordered)
8678 t0 = build_int_cst (unsigned_type_node,
8679 fd->ordered - fd->collapse + 1);
8680 arr = create_tmp_var (build_array_type_nelts (fd->iter_type,
8681 fd->ordered
8682 - fd->collapse + 1),
8683 ".omp_counts");
8684 DECL_NAMELESS (arr) = 1;
8685 TREE_ADDRESSABLE (arr) = 1;
8686 TREE_STATIC (arr) = 1;
8687 vec<constructor_elt, va_gc> *v;
8688 vec_alloc (v, fd->ordered - fd->collapse + 1);
8689 int idx;
8691 for (idx = 0; idx < fd->ordered - fd->collapse + 1; idx++)
8693 tree c;
8694 if (idx == 0 && fd->collapse > 1)
8695 c = fd->loop.n2;
8696 else
8697 c = counts[idx + fd->collapse - 1];
8698 tree purpose = size_int (idx);
8699 CONSTRUCTOR_APPEND_ELT (v, purpose, c);
8700 if (TREE_CODE (c) != INTEGER_CST)
8701 TREE_STATIC (arr) = 0;
8704 DECL_INITIAL (arr) = build_constructor (TREE_TYPE (arr), v);
8705 if (!TREE_STATIC (arr))
8706 force_gimple_operand_gsi (&gsi, build1 (DECL_EXPR,
8707 void_type_node, arr),
8708 true, NULL_TREE, true, GSI_SAME_STMT);
8709 t1 = build_fold_addr_expr (arr);
8710 t2 = NULL_TREE;
8712 else
8714 t2 = fold_convert (fd->iter_type, fd->loop.step);
8715 t1 = fd->loop.n2;
8716 t0 = fd->loop.n1;
8717 if (gimple_omp_for_combined_into_p (fd->for_stmt))
8719 tree innerc
8720 = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
8721 OMP_CLAUSE__LOOPTEMP_);
8722 gcc_assert (innerc);
8723 t0 = OMP_CLAUSE_DECL (innerc);
8724 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
8725 OMP_CLAUSE__LOOPTEMP_);
8726 gcc_assert (innerc);
8727 t1 = OMP_CLAUSE_DECL (innerc);
8729 if (POINTER_TYPE_P (TREE_TYPE (t0))
8730 && TYPE_PRECISION (TREE_TYPE (t0))
8731 != TYPE_PRECISION (fd->iter_type))
8733 /* Avoid casting pointers to integer of a different size. */
8734 tree itype = signed_type_for (type);
8735 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
8736 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
8738 else
8740 t1 = fold_convert (fd->iter_type, t1);
8741 t0 = fold_convert (fd->iter_type, t0);
8743 if (bias)
8745 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
8746 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
8749 if (fd->iter_type == long_integer_type_node || fd->ordered)
8751 if (fd->chunk_size)
8753 t = fold_convert (fd->iter_type, fd->chunk_size);
8754 t = omp_adjust_chunk_size (t, fd->simd_schedule);
8755 if (fd->ordered)
8756 t = build_call_expr (builtin_decl_explicit (start_fn),
8757 5, t0, t1, t, t3, t4);
8758 else
8759 t = build_call_expr (builtin_decl_explicit (start_fn),
8760 6, t0, t1, t2, t, t3, t4);
8762 else if (fd->ordered)
8763 t = build_call_expr (builtin_decl_explicit (start_fn),
8764 4, t0, t1, t3, t4);
8765 else
8766 t = build_call_expr (builtin_decl_explicit (start_fn),
8767 5, t0, t1, t2, t3, t4);
8769 else
8771 tree t5;
8772 tree c_bool_type;
8773 tree bfn_decl;
 8775       /* The GOMP_loop_ull_*start functions have an additional boolean
 8776          argument: true for < loops, false for > loops.
8777 In Fortran, the C bool type can be different from
8778 boolean_type_node. */
8779 bfn_decl = builtin_decl_explicit (start_fn);
8780 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
8781 t5 = build_int_cst (c_bool_type,
8782 fd->loop.cond_code == LT_EXPR ? 1 : 0);
8783 if (fd->chunk_size)
8785 tree bfn_decl = builtin_decl_explicit (start_fn);
8786 t = fold_convert (fd->iter_type, fd->chunk_size);
8787 t = omp_adjust_chunk_size (t, fd->simd_schedule);
8788 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
8790 else
8791 t = build_call_expr (builtin_decl_explicit (start_fn),
8792 6, t5, t0, t1, t2, t3, t4);
8795 if (TREE_TYPE (t) != boolean_type_node)
8796 t = fold_build2 (NE_EXPR, boolean_type_node,
8797 t, build_int_cst (TREE_TYPE (t), 0));
8798 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
8799 true, GSI_SAME_STMT);
8800 if (arr && !TREE_STATIC (arr))
8802 tree clobber = build_constructor (TREE_TYPE (arr), NULL);
8803 TREE_THIS_VOLATILE (clobber) = 1;
8804 gsi_insert_before (&gsi, gimple_build_assign (arr, clobber),
8805 GSI_SAME_STMT);
8807 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
8809 /* Remove the GIMPLE_OMP_FOR statement. */
8810 gsi_remove (&gsi, true);
8812 if (gsi_end_p (gsif))
8813 gsif = gsi_after_labels (gsi_bb (gsif));
8814 gsi_next (&gsif);
8816 /* Iteration setup for sequential loop goes in L0_BB. */
8817 tree startvar = fd->loop.v;
8818 tree endvar = NULL_TREE;
8820 if (gimple_omp_for_combined_p (fd->for_stmt))
8822 gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
8823 && gimple_omp_for_kind (inner_stmt)
8824 == GF_OMP_FOR_KIND_SIMD);
8825 tree innerc = find_omp_clause (gimple_omp_for_clauses (inner_stmt),
8826 OMP_CLAUSE__LOOPTEMP_);
8827 gcc_assert (innerc);
8828 startvar = OMP_CLAUSE_DECL (innerc);
8829 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
8830 OMP_CLAUSE__LOOPTEMP_);
8831 gcc_assert (innerc);
8832 endvar = OMP_CLAUSE_DECL (innerc);
8835 gsi = gsi_start_bb (l0_bb);
8836 t = istart0;
8837 if (fd->ordered && fd->collapse == 1)
8838 t = fold_build2 (MULT_EXPR, fd->iter_type, t,
8839 fold_convert (fd->iter_type, fd->loop.step));
8840 else if (bias)
8841 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
8842 if (fd->ordered && fd->collapse == 1)
8844 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
8845 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (startvar),
8846 fd->loop.n1, fold_convert (sizetype, t));
8847 else
8849 t = fold_convert (TREE_TYPE (startvar), t);
8850 t = fold_build2 (PLUS_EXPR, TREE_TYPE (startvar),
8851 fd->loop.n1, t);
8854 else
8856 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
8857 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
8858 t = fold_convert (TREE_TYPE (startvar), t);
8860 t = force_gimple_operand_gsi (&gsi, t,
8861 DECL_P (startvar)
8862 && TREE_ADDRESSABLE (startvar),
8863 NULL_TREE, false, GSI_CONTINUE_LINKING);
8864 assign_stmt = gimple_build_assign (startvar, t);
8865 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
8867 t = iend0;
8868 if (fd->ordered && fd->collapse == 1)
8869 t = fold_build2 (MULT_EXPR, fd->iter_type, t,
8870 fold_convert (fd->iter_type, fd->loop.step));
8871 else if (bias)
8872 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
8873 if (fd->ordered && fd->collapse == 1)
8875 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
8876 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (startvar),
8877 fd->loop.n1, fold_convert (sizetype, t));
8878 else
8880 t = fold_convert (TREE_TYPE (startvar), t);
8881 t = fold_build2 (PLUS_EXPR, TREE_TYPE (startvar),
8882 fd->loop.n1, t);
8885 else
8887 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
8888 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
8889 t = fold_convert (TREE_TYPE (startvar), t);
8891 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
8892 false, GSI_CONTINUE_LINKING);
8893 if (endvar)
8895 assign_stmt = gimple_build_assign (endvar, iend);
8896 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
8897 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend)))
8898 assign_stmt = gimple_build_assign (fd->loop.v, iend);
8899 else
8900 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, iend);
8901 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
8903 /* Handle linear clause adjustments. */
8904 tree itercnt = NULL_TREE;
8905 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR)
8906 for (tree c = gimple_omp_for_clauses (fd->for_stmt);
8907 c; c = OMP_CLAUSE_CHAIN (c))
8908 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
8909 && !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
8911 tree d = OMP_CLAUSE_DECL (c);
8912 bool is_ref = is_reference (d);
8913 tree t = d, a, dest;
8914 if (is_ref)
8915 t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t);
8916 tree type = TREE_TYPE (t);
8917 if (POINTER_TYPE_P (type))
8918 type = sizetype;
8919 dest = unshare_expr (t);
8920 tree v = create_tmp_var (TREE_TYPE (t), NULL);
8921 expand_omp_build_assign (&gsif, v, t);
8922 if (itercnt == NULL_TREE)
8924 itercnt = startvar;
8925 tree n1 = fd->loop.n1;
8926 if (POINTER_TYPE_P (TREE_TYPE (itercnt)))
8928 itercnt
8929 = fold_convert (signed_type_for (TREE_TYPE (itercnt)),
8930 itercnt);
8931 n1 = fold_convert (TREE_TYPE (itercnt), n1);
8933 itercnt = fold_build2 (MINUS_EXPR, TREE_TYPE (itercnt),
8934 itercnt, n1);
8935 itercnt = fold_build2 (EXACT_DIV_EXPR, TREE_TYPE (itercnt),
8936 itercnt, fd->loop.step);
8937 itercnt = force_gimple_operand_gsi (&gsi, itercnt, true,
8938 NULL_TREE, false,
8939 GSI_CONTINUE_LINKING);
8941 a = fold_build2 (MULT_EXPR, type,
8942 fold_convert (type, itercnt),
8943 fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c)));
8944 t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR
8945 : POINTER_PLUS_EXPR, TREE_TYPE (t), v, a);
8946 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
8947 false, GSI_CONTINUE_LINKING);
8948 assign_stmt = gimple_build_assign (dest, t);
8949 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
8951 if (fd->collapse > 1)
8952 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
8954 if (fd->ordered)
 8956       /* Until now, the counts array contained the number of iterations
 8957          (or a variable holding it) for the ith loop.  From now on, we
 8958          need those counts only for the collapsed loops, and only from
 8959          the 2nd collapsed loop up to the last one.  Move them one
 8960          element earlier; we'll use counts[fd->collapse - 1] for the
 8961          first source/sink iteration counter and so on, and
 8962          counts[fd->ordered] as the array holding the current counter
 8963          values for depend(source).  */
8964 if (fd->collapse > 1)
8965 memmove (counts, counts + 1, (fd->collapse - 1) * sizeof (counts[0]));
8966 if (broken_loop)
8968 int i;
8969 for (i = fd->collapse; i < fd->ordered; i++)
8971 tree type = TREE_TYPE (fd->loops[i].v);
8972 tree this_cond
8973 = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
8974 fold_convert (type, fd->loops[i].n1),
8975 fold_convert (type, fd->loops[i].n2));
8976 if (!integer_onep (this_cond))
8977 break;
8979 if (i < fd->ordered)
8981 cont_bb
8982 = create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
8983 add_bb_to_loop (cont_bb, l1_bb->loop_father);
8984 gimple_stmt_iterator gsi = gsi_after_labels (cont_bb);
8985 gimple *g = gimple_build_omp_continue (fd->loop.v, fd->loop.v);
8986 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
8987 make_edge (cont_bb, l3_bb, EDGE_FALLTHRU);
8988 make_edge (cont_bb, l1_bb, 0);
8989 l2_bb = create_empty_bb (cont_bb);
8990 broken_loop = false;
8993 expand_omp_ordered_source_sink (region, fd, counts, cont_bb);
8994 cont_bb = expand_omp_for_ordered_loops (fd, counts, cont_bb, l1_bb,
8995 ordered_lastprivate);
8996 if (counts[fd->collapse - 1])
8998 gcc_assert (fd->collapse == 1);
8999 gsi = gsi_last_bb (l0_bb);
9000 expand_omp_build_assign (&gsi, counts[fd->collapse - 1],
9001 istart0, true);
9002 gsi = gsi_last_bb (cont_bb);
9003 t = fold_build2 (PLUS_EXPR, fd->iter_type, counts[fd->collapse - 1],
9004 build_int_cst (fd->iter_type, 1));
9005 expand_omp_build_assign (&gsi, counts[fd->collapse - 1], t);
9006 tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
9007 size_zero_node, NULL_TREE, NULL_TREE);
9008 expand_omp_build_assign (&gsi, aref, counts[fd->collapse - 1]);
9009 t = counts[fd->collapse - 1];
9011 else if (fd->collapse > 1)
9012 t = fd->loop.v;
9013 else
9015 t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[0].v),
9016 fd->loops[0].v, fd->loops[0].n1);
9017 t = fold_convert (fd->iter_type, t);
9019 gsi = gsi_last_bb (l0_bb);
9020 tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
9021 size_zero_node, NULL_TREE, NULL_TREE);
9022 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
9023 false, GSI_CONTINUE_LINKING);
9024 expand_omp_build_assign (&gsi, aref, t, true);
9027 if (!broken_loop)
9029 /* Code to control the increment and predicate for the sequential
9030 loop goes in the CONT_BB. */
9031 gsi = gsi_last_bb (cont_bb);
9032 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
9033 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
9034 vmain = gimple_omp_continue_control_use (cont_stmt);
9035 vback = gimple_omp_continue_control_def (cont_stmt);
9037 if (!gimple_omp_for_combined_p (fd->for_stmt))
9039 if (POINTER_TYPE_P (type))
9040 t = fold_build_pointer_plus (vmain, fd->loop.step);
9041 else
9042 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
9043 t = force_gimple_operand_gsi (&gsi, t,
9044 DECL_P (vback)
9045 && TREE_ADDRESSABLE (vback),
9046 NULL_TREE, true, GSI_SAME_STMT);
9047 assign_stmt = gimple_build_assign (vback, t);
9048 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
9050 if (fd->ordered && counts[fd->collapse - 1] == NULL_TREE)
9052 if (fd->collapse > 1)
9053 t = fd->loop.v;
9054 else
9056 t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[0].v),
9057 fd->loops[0].v, fd->loops[0].n1);
9058 t = fold_convert (fd->iter_type, t);
9060 tree aref = build4 (ARRAY_REF, fd->iter_type,
9061 counts[fd->ordered], size_zero_node,
9062 NULL_TREE, NULL_TREE);
9063 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
9064 true, GSI_SAME_STMT);
9065 expand_omp_build_assign (&gsi, aref, t);
9068 t = build2 (fd->loop.cond_code, boolean_type_node,
9069 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
9070 iend);
9071 gcond *cond_stmt = gimple_build_cond_empty (t);
9072 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
9075 /* Remove GIMPLE_OMP_CONTINUE. */
9076 gsi_remove (&gsi, true);
9078 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
9079 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
9081 /* Emit code to get the next parallel iteration in L2_BB. */
9082 gsi = gsi_start_bb (l2_bb);
9084 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
9085 build_fold_addr_expr (istart0),
9086 build_fold_addr_expr (iend0));
9087 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
9088 false, GSI_CONTINUE_LINKING);
9089 if (TREE_TYPE (t) != boolean_type_node)
9090 t = fold_build2 (NE_EXPR, boolean_type_node,
9091 t, build_int_cst (TREE_TYPE (t), 0));
9092 gcond *cond_stmt = gimple_build_cond_empty (t);
9093 gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
9096 /* Add the loop cleanup function. */
9097 gsi = gsi_last_bb (exit_bb);
9098 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
9099 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
9100 else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
9101 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
9102 else
9103 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
9104 gcall *call_stmt = gimple_build_call (t, 0);
9105 if (gimple_omp_return_lhs (gsi_stmt (gsi)))
9106 gimple_call_set_lhs (call_stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
9107 gsi_insert_after (&gsi, call_stmt, GSI_SAME_STMT);
9108 if (fd->ordered)
9110 tree arr = counts[fd->ordered];
9111 tree clobber = build_constructor (TREE_TYPE (arr), NULL);
9112 TREE_THIS_VOLATILE (clobber) = 1;
9113 gsi_insert_after (&gsi, gimple_build_assign (arr, clobber),
9114 GSI_SAME_STMT);
9116 gsi_remove (&gsi, true);
9118 /* Connect the new blocks. */
9119 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
9120 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
9122 if (!broken_loop)
9124 gimple_seq phis;
9126 e = find_edge (cont_bb, l3_bb);
9127 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
9129 phis = phi_nodes (l3_bb);
9130 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
9132 gimple *phi = gsi_stmt (gsi);
9133 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
9134 PHI_ARG_DEF_FROM_EDGE (phi, e));
9136 remove_edge (e);
9138 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
9139 e = find_edge (cont_bb, l1_bb);
9140 if (e == NULL)
9142 e = BRANCH_EDGE (cont_bb);
9143 gcc_assert (single_succ (e->dest) == l1_bb);
9145 if (gimple_omp_for_combined_p (fd->for_stmt))
9147 remove_edge (e);
9148 e = NULL;
9150 else if (fd->collapse > 1)
9152 remove_edge (e);
9153 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
9155 else
9156 e->flags = EDGE_TRUE_VALUE;
9157 if (e)
9159 e->probability = REG_BR_PROB_BASE * 7 / 8;
9160 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
9162 else
9164 e = find_edge (cont_bb, l2_bb);
9165 e->flags = EDGE_FALLTHRU;
9167 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
9169 if (gimple_in_ssa_p (cfun))
9171 /* Add phis to the outer loop that connect to the phis in the inner,
9172 original loop, and move the loop entry value of the inner phi to
9173 the loop entry value of the outer phi. */
9174 gphi_iterator psi;
9175 for (psi = gsi_start_phis (l3_bb); !gsi_end_p (psi); gsi_next (&psi))
9177 source_location locus;
9178 gphi *nphi;
9179 gphi *exit_phi = psi.phi ();
9181 edge l2_to_l3 = find_edge (l2_bb, l3_bb);
9182 tree exit_res = PHI_ARG_DEF_FROM_EDGE (exit_phi, l2_to_l3);
9184 basic_block latch = BRANCH_EDGE (cont_bb)->dest;
9185 edge latch_to_l1 = find_edge (latch, l1_bb);
9186 gphi *inner_phi
9187 = find_phi_with_arg_on_edge (exit_res, latch_to_l1);
9189 tree t = gimple_phi_result (exit_phi);
9190 tree new_res = copy_ssa_name (t, NULL);
9191 nphi = create_phi_node (new_res, l0_bb);
9193 edge l0_to_l1 = find_edge (l0_bb, l1_bb);
9194 t = PHI_ARG_DEF_FROM_EDGE (inner_phi, l0_to_l1);
9195 locus = gimple_phi_arg_location_from_edge (inner_phi, l0_to_l1);
9196 edge entry_to_l0 = find_edge (entry_bb, l0_bb);
9197 add_phi_arg (nphi, t, entry_to_l0, locus);
9199 edge l2_to_l0 = find_edge (l2_bb, l0_bb);
9200 add_phi_arg (nphi, exit_res, l2_to_l0, UNKNOWN_LOCATION);
9202 add_phi_arg (inner_phi, new_res, l0_to_l1, UNKNOWN_LOCATION);
9206 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
9207 recompute_dominator (CDI_DOMINATORS, l2_bb));
9208 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
9209 recompute_dominator (CDI_DOMINATORS, l3_bb));
9210 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
9211 recompute_dominator (CDI_DOMINATORS, l0_bb));
9212 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
9213 recompute_dominator (CDI_DOMINATORS, l1_bb));
9215 /* We enter expand_omp_for_generic with a loop. This original loop may
9216 have its own loop struct, or it may be part of an outer loop struct
9217 (which may be the fake loop). */
9218 struct loop *outer_loop = entry_bb->loop_father;
9219 bool orig_loop_has_loop_struct = l1_bb->loop_father != outer_loop;
9221 add_bb_to_loop (l2_bb, outer_loop);
9223 /* We've added a new loop around the original loop. Allocate the
9224 corresponding loop struct. */
9225 struct loop *new_loop = alloc_loop ();
9226 new_loop->header = l0_bb;
9227 new_loop->latch = l2_bb;
9228 add_loop (new_loop, outer_loop);
9230 /* Allocate a loop structure for the original loop unless we already
9231 had one. */
9232 if (!orig_loop_has_loop_struct
9233 && !gimple_omp_for_combined_p (fd->for_stmt))
9235 struct loop *orig_loop = alloc_loop ();
9236 orig_loop->header = l1_bb;
9237 /* The loop may have multiple latches. */
9238 add_loop (orig_loop, new_loop);
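/* A host-side sketch of the driver structure this function builds
   (illustrative only; my_loop_start/my_loop_next/my_loop_end are
   stand-ins for the GOMP_loop_*_start/_next/end entry points selected
   through START_FN and NEXT_FN, whose real prototypes live in libgomp):

     #include <stdbool.h>

     extern bool my_loop_start (long n1, long n2, long step, long chunk,
                                long *istart, long *iend);
     extern bool my_loop_next (long *istart, long *iend);
     extern void my_loop_end (void);
     extern void body (long v);

     void
     driver (long n1, long n2, long step, long chunk)
     {
       long istart, iend;
       bool more = my_loop_start (n1, n2, step, chunk, &istart, &iend);
       while (more)                                    // L0
         {
           for (long v = istart; v < iend; v += step)  // L1: BODY; V += STEP
             body (v);
           more = my_loop_next (&istart, &iend);       // L2
         }
       my_loop_end ();                                 // L3: loop cleanup
     }
*/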
9244 /* A subroutine of expand_omp_for. Generate code for a parallel
9245 loop with static schedule and no specified chunk size. Given
9246 parameters:
9248 for (V = N1; V cond N2; V += STEP) BODY;
9250 where COND is "<" or ">", we generate pseudocode
9252 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
9253 if (cond is <)
9254 adj = STEP - 1;
9255 else
9256 adj = STEP + 1;
9257 if ((__typeof (V)) -1 > 0 && cond is >)
9258 n = -(adj + N2 - N1) / -STEP;
9259 else
9260 n = (adj + N2 - N1) / STEP;
9261 q = n / nthreads;
9262 tt = n % nthreads;
9263 if (threadid < tt) goto L3; else goto L4;
9265 tt = 0;
9266 q = q + 1;
9268 s0 = q * threadid + tt;
9269 e0 = s0 + q;
9270 V = s0 * STEP + N1;
9271 if (s0 >= e0) goto L2; else goto L0;
9273 e = e0 * STEP + N1;
9275 BODY;
9276 V += STEP;
9277 if (V cond e) goto L1;
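     For example (illustrative numbers), with n = 10 iterations and
     nthreads = 4: q = 2 and tt = 2, so threads 0 and 1 each take
     q + 1 = 3 iterations (s0/e0 = 0/3 and 3/6), while threads 2 and 3
     take 2 iterations (s0/e0 = 6/8 and 8/10).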
9281 static void
9282 expand_omp_for_static_nochunk (struct omp_region *region,
9283 struct omp_for_data *fd,
9284 gimple *inner_stmt)
9286 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
9287 tree type, itype, vmain, vback;
9288 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
9289 basic_block body_bb, cont_bb, collapse_bb = NULL;
9290 basic_block fin_bb;
9291 gimple_stmt_iterator gsi;
9292 edge ep;
9293 bool broken_loop = region->cont == NULL;
9294 tree *counts = NULL;
9295 tree n1, n2, step;
9297 itype = type = TREE_TYPE (fd->loop.v);
9298 if (POINTER_TYPE_P (type))
9299 itype = signed_type_for (type);
9301 entry_bb = region->entry;
9302 cont_bb = region->cont;
9303 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
9304 fin_bb = BRANCH_EDGE (entry_bb)->dest;
9305 gcc_assert (broken_loop
9306 || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
9307 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
9308 body_bb = single_succ (seq_start_bb);
9309 if (!broken_loop)
9311 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb
9312 || single_succ (BRANCH_EDGE (cont_bb)->dest) == body_bb);
9313 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
9315 exit_bb = region->exit;
9317 /* Iteration space partitioning goes in ENTRY_BB. */
9318 gsi = gsi_last_bb (entry_bb);
9319 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
9321 if (fd->collapse > 1)
9323 int first_zero_iter = -1, dummy = -1;
9324 basic_block l2_dom_bb = NULL, dummy_bb = NULL;
9326 counts = XALLOCAVEC (tree, fd->collapse);
9327 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
9328 fin_bb, first_zero_iter,
9329 dummy_bb, dummy, l2_dom_bb);
9330 t = NULL_TREE;
9332 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
9333 t = integer_one_node;
9334 else
9335 t = fold_binary (fd->loop.cond_code, boolean_type_node,
9336 fold_convert (type, fd->loop.n1),
9337 fold_convert (type, fd->loop.n2));
9338 if (fd->collapse == 1
9339 && TYPE_UNSIGNED (type)
9340 && (t == NULL_TREE || !integer_onep (t)))
9342 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
9343 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
9344 true, GSI_SAME_STMT);
9345 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
9346 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
9347 true, GSI_SAME_STMT);
9348 gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
9349 NULL_TREE, NULL_TREE);
9350 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
9351 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
9352 expand_omp_regimplify_p, NULL, NULL)
9353 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
9354 expand_omp_regimplify_p, NULL, NULL))
9356 gsi = gsi_for_stmt (cond_stmt);
9357 gimple_regimplify_operands (cond_stmt, &gsi);
9359 ep = split_block (entry_bb, cond_stmt);
9360 ep->flags = EDGE_TRUE_VALUE;
9361 entry_bb = ep->dest;
9362 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
9363 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
9364 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
9365 if (gimple_in_ssa_p (cfun))
9367 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
9368 for (gphi_iterator gpi = gsi_start_phis (fin_bb);
9369 !gsi_end_p (gpi); gsi_next (&gpi))
9371 gphi *phi = gpi.phi ();
9372 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
9373 ep, UNKNOWN_LOCATION);
9376 gsi = gsi_last_bb (entry_bb);
9379 switch (gimple_omp_for_kind (fd->for_stmt))
9381 case GF_OMP_FOR_KIND_FOR:
9382 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
9383 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
9384 break;
9385 case GF_OMP_FOR_KIND_DISTRIBUTE:
9386 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS);
9387 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM);
9388 break;
9389 default:
9390 gcc_unreachable ();
9392 nthreads = build_call_expr (nthreads, 0);
9393 nthreads = fold_convert (itype, nthreads);
9394 nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE,
9395 true, GSI_SAME_STMT);
9396 threadid = build_call_expr (threadid, 0);
9397 threadid = fold_convert (itype, threadid);
9398 threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE,
9399 true, GSI_SAME_STMT);
9401 n1 = fd->loop.n1;
9402 n2 = fd->loop.n2;
9403 step = fd->loop.step;
9404 if (gimple_omp_for_combined_into_p (fd->for_stmt))
9406 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
9407 OMP_CLAUSE__LOOPTEMP_);
9408 gcc_assert (innerc);
9409 n1 = OMP_CLAUSE_DECL (innerc);
9410 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
9411 OMP_CLAUSE__LOOPTEMP_);
9412 gcc_assert (innerc);
9413 n2 = OMP_CLAUSE_DECL (innerc);
9415 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
9416 true, NULL_TREE, true, GSI_SAME_STMT);
9417 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
9418 true, NULL_TREE, true, GSI_SAME_STMT);
9419 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
9420 true, NULL_TREE, true, GSI_SAME_STMT);
9422 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
9423 t = fold_build2 (PLUS_EXPR, itype, step, t);
9424 t = fold_build2 (PLUS_EXPR, itype, t, n2);
9425 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
9426 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
9427 t = fold_build2 (TRUNC_DIV_EXPR, itype,
9428 fold_build1 (NEGATE_EXPR, itype, t),
9429 fold_build1 (NEGATE_EXPR, itype, step));
9430 else
9431 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
9432 t = fold_convert (itype, t);
9433 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
9435 q = create_tmp_reg (itype, "q");
9436 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
9437 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
9438 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
9440 tt = create_tmp_reg (itype, "tt");
9441 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
9442 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
9443 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
9445 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
9446 gcond *cond_stmt = gimple_build_cond_empty (t);
9447 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
9449 second_bb = split_block (entry_bb, cond_stmt)->dest;
9450 gsi = gsi_last_bb (second_bb);
9451 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
9453 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
9454 GSI_SAME_STMT);
9455 gassign *assign_stmt
9456 = gimple_build_assign (q, PLUS_EXPR, q, build_int_cst (itype, 1));
9457 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
9459 third_bb = split_block (second_bb, assign_stmt)->dest;
9460 gsi = gsi_last_bb (third_bb);
9461 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
9463 t = build2 (MULT_EXPR, itype, q, threadid);
9464 t = build2 (PLUS_EXPR, itype, t, tt);
9465 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
9467 t = fold_build2 (PLUS_EXPR, itype, s0, q);
9468 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
9470 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
9471 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
9473 /* Remove the GIMPLE_OMP_FOR statement. */
9474 gsi_remove (&gsi, true);
9476 /* Setup code for sequential iteration goes in SEQ_START_BB. */
9477 gsi = gsi_start_bb (seq_start_bb);
9479 tree startvar = fd->loop.v;
9480 tree endvar = NULL_TREE;
9482 if (gimple_omp_for_combined_p (fd->for_stmt))
9484 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
9485 ? gimple_omp_parallel_clauses (inner_stmt)
9486 : gimple_omp_for_clauses (inner_stmt);
9487 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
9488 gcc_assert (innerc);
9489 startvar = OMP_CLAUSE_DECL (innerc);
9490 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
9491 OMP_CLAUSE__LOOPTEMP_);
9492 gcc_assert (innerc);
9493 endvar = OMP_CLAUSE_DECL (innerc);
9494 if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST
9495 && gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
9497 int i;
9498 for (i = 1; i < fd->collapse; i++)
9500 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
9501 OMP_CLAUSE__LOOPTEMP_);
9502 gcc_assert (innerc);
9504 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
9505 OMP_CLAUSE__LOOPTEMP_);
9506 if (innerc)
9508 /* If needed (distribute parallel for with lastprivate),
9509 propagate down the total number of iterations. */
9510 tree t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (innerc)),
9511 fd->loop.n2);
9512 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false,
9513 GSI_CONTINUE_LINKING);
9514 assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t);
9515 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
9519 t = fold_convert (itype, s0);
9520 t = fold_build2 (MULT_EXPR, itype, t, step);
9521 if (POINTER_TYPE_P (type))
9522 t = fold_build_pointer_plus (n1, t);
9523 else
9524 t = fold_build2 (PLUS_EXPR, type, t, n1);
9525 t = fold_convert (TREE_TYPE (startvar), t);
9526 t = force_gimple_operand_gsi (&gsi, t,
9527 DECL_P (startvar)
9528 && TREE_ADDRESSABLE (startvar),
9529 NULL_TREE, false, GSI_CONTINUE_LINKING);
9530 assign_stmt = gimple_build_assign (startvar, t);
9531 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
9533 t = fold_convert (itype, e0);
9534 t = fold_build2 (MULT_EXPR, itype, t, step);
9535 if (POINTER_TYPE_P (type))
9536 t = fold_build_pointer_plus (n1, t);
9537 else
9538 t = fold_build2 (PLUS_EXPR, type, t, n1);
9539 t = fold_convert (TREE_TYPE (startvar), t);
9540 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
9541 false, GSI_CONTINUE_LINKING);
9542 if (endvar)
9544 assign_stmt = gimple_build_assign (endvar, e);
9545 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
9546 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
9547 assign_stmt = gimple_build_assign (fd->loop.v, e);
9548 else
9549 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
9550 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
9552 /* Handle linear clause adjustments. */
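/* For illustration (the clause shown here is an assumed example, not
   taken from the surrounding code): given "#pragma omp for linear(x:2)",
   a thread whose chunk starts at logical iteration s0 must observe x
   already advanced by s0 * 2, so the loop below stores
   x + itercnt * OMP_CLAUSE_LINEAR_STEP back into x.  When this loop is
   combined into an enclosing construct, itercnt additionally folds in
   (n1 - fd->loop.n1) / step so the count stays relative to the original
   loop rather than to the inner chunk.  */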
9553 tree itercnt = NULL_TREE;
9554 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR)
9555 for (tree c = gimple_omp_for_clauses (fd->for_stmt);
9556 c; c = OMP_CLAUSE_CHAIN (c))
9557 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
9558 && !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
9560 tree d = OMP_CLAUSE_DECL (c);
9561 bool is_ref = is_reference (d);
9562 tree t = d, a, dest;
9563 if (is_ref)
9564 t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t);
9565 if (itercnt == NULL_TREE)
9567 if (gimple_omp_for_combined_into_p (fd->for_stmt))
9569 itercnt = fold_build2 (MINUS_EXPR, itype,
9570 fold_convert (itype, n1),
9571 fold_convert (itype, fd->loop.n1));
9572 itercnt = fold_build2 (EXACT_DIV_EXPR, itype, itercnt, step);
9573 itercnt = fold_build2 (PLUS_EXPR, itype, itercnt, s0);
9574 itercnt = force_gimple_operand_gsi (&gsi, itercnt, true,
9575 NULL_TREE, false,
9576 GSI_CONTINUE_LINKING);
9578 else
9579 itercnt = s0;
9581 tree type = TREE_TYPE (t);
9582 if (POINTER_TYPE_P (type))
9583 type = sizetype;
9584 a = fold_build2 (MULT_EXPR, type,
9585 fold_convert (type, itercnt),
9586 fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c)));
9587 dest = unshare_expr (t);
9588 t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR
9589 : POINTER_PLUS_EXPR, TREE_TYPE (t), t, a);
9590 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
9591 false, GSI_CONTINUE_LINKING);
9592 assign_stmt = gimple_build_assign (dest, t);
9593 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
9595 if (fd->collapse > 1)
9596 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
9598 if (!broken_loop)
9600 /* The code controlling the sequential loop replaces the
9601 GIMPLE_OMP_CONTINUE. */
9602 gsi = gsi_last_bb (cont_bb);
9603 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
9604 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
9605 vmain = gimple_omp_continue_control_use (cont_stmt);
9606 vback = gimple_omp_continue_control_def (cont_stmt);
9608 if (!gimple_omp_for_combined_p (fd->for_stmt))
9610 if (POINTER_TYPE_P (type))
9611 t = fold_build_pointer_plus (vmain, step);
9612 else
9613 t = fold_build2 (PLUS_EXPR, type, vmain, step);
9614 t = force_gimple_operand_gsi (&gsi, t,
9615 DECL_P (vback)
9616 && TREE_ADDRESSABLE (vback),
9617 NULL_TREE, true, GSI_SAME_STMT);
9618 assign_stmt = gimple_build_assign (vback, t);
9619 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
9621 t = build2 (fd->loop.cond_code, boolean_type_node,
9622 DECL_P (vback) && TREE_ADDRESSABLE (vback)
9623 ? t : vback, e);
9624 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
9627 /* Remove the GIMPLE_OMP_CONTINUE statement. */
9628 gsi_remove (&gsi, true);
9630 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
9631 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
9634 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
9635 gsi = gsi_last_bb (exit_bb);
9636 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
9638 t = gimple_omp_return_lhs (gsi_stmt (gsi));
9639 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
9641 gsi_remove (&gsi, true);
9643 /* Connect all the blocks. */
9644 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
9645 ep->probability = REG_BR_PROB_BASE / 4 * 3;
9646 ep = find_edge (entry_bb, second_bb);
9647 ep->flags = EDGE_TRUE_VALUE;
9648 ep->probability = REG_BR_PROB_BASE / 4;
9649 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
9650 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
9652 if (!broken_loop)
9654 ep = find_edge (cont_bb, body_bb);
9655 if (ep == NULL)
9657 ep = BRANCH_EDGE (cont_bb);
9658 gcc_assert (single_succ (ep->dest) == body_bb);
9660 if (gimple_omp_for_combined_p (fd->for_stmt))
9662 remove_edge (ep);
9663 ep = NULL;
9665 else if (fd->collapse > 1)
9667 remove_edge (ep);
9668 ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
9670 else
9671 ep->flags = EDGE_TRUE_VALUE;
9672 find_edge (cont_bb, fin_bb)->flags
9673 = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
9676 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
9677 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
9678 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
9680 set_immediate_dominator (CDI_DOMINATORS, body_bb,
9681 recompute_dominator (CDI_DOMINATORS, body_bb));
9682 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
9683 recompute_dominator (CDI_DOMINATORS, fin_bb));
9685 struct loop *loop = body_bb->loop_father;
9686 if (loop != entry_bb->loop_father)
9688 gcc_assert (broken_loop || loop->header == body_bb);
9689 gcc_assert (broken_loop
9690 || loop->latch == region->cont
9691 || single_pred (loop->latch) == region->cont);
9692 return;
9695 if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
9697 loop = alloc_loop ();
9698 loop->header = body_bb;
9699 if (collapse_bb == NULL)
9700 loop->latch = cont_bb;
9701 add_loop (loop, body_bb->loop_father);
9705 /* Return phi in E->DEST with ARG on edge E. */
9707 static gphi *
9708 find_phi_with_arg_on_edge (tree arg, edge e)
9710 basic_block bb = e->dest;
9712 for (gphi_iterator gpi = gsi_start_phis (bb);
9713 !gsi_end_p (gpi);
9714 gsi_next (&gpi))
9716 gphi *phi = gpi.phi ();
9717 if (PHI_ARG_DEF_FROM_EDGE (phi, e) == arg)
9718 return phi;
9721 return NULL;
9724 /* A subroutine of expand_omp_for. Generate code for a parallel
9725 loop with static schedule and a specified chunk size. Given
9726 parameters:
9728 for (V = N1; V cond N2; V += STEP) BODY;
9730 where COND is "<" or ">", we generate pseudocode
9732 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
9733 if (cond is <)
9734 adj = STEP - 1;
9735 else
9736 adj = STEP + 1;
9737 if ((__typeof (V)) -1 > 0 && cond is >)
9738 n = -(adj + N2 - N1) / -STEP;
9739 else
9740 n = (adj + N2 - N1) / STEP;
9741 trip = 0;
9742 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
9743 here so that V is defined
9744 if the loop is not entered
9745 L0:
9746 s0 = (trip * nthreads + threadid) * CHUNK;
9747 e0 = min(s0 + CHUNK, n);
9748 if (s0 < n) goto L1; else goto L4;
9749 L1:
9750 V = s0 * STEP + N1;
9751 e = e0 * STEP + N1;
9752 L2:
9753 BODY;
9754 V += STEP;
9755 if (V cond e) goto L2; else goto L3;
9756 L3:
9757 trip += 1;
9758 goto L0;
9759 L4:
9760 */
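/* A worked example (values invented for illustration): with nthreads = 2,
   CHUNK = 3 and n = 10, trip 0 gives thread 0 the range [0,3) and
   thread 1 the range [3,6); trip 1 gives thread 0 [6,9) and thread 1
   [9,10); on trip 2 both threads compute s0 >= n and leave via L4.
   Chunks are therefore dealt out round-robin, CHUNK logical iterations
   at a time, and V/e are rebuilt from s0/e0 at the start of each chunk.  */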
9762 static void
9763 expand_omp_for_static_chunk (struct omp_region *region,
9764 struct omp_for_data *fd, gimple *inner_stmt)
9766 tree n, s0, e0, e, t;
9767 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
9768 tree type, itype, vmain, vback, vextra;
9769 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
9770 basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
9771 gimple_stmt_iterator gsi;
9772 edge se;
9773 bool broken_loop = region->cont == NULL;
9774 tree *counts = NULL;
9775 tree n1, n2, step;
9777 itype = type = TREE_TYPE (fd->loop.v);
9778 if (POINTER_TYPE_P (type))
9779 itype = signed_type_for (type);
9781 entry_bb = region->entry;
9782 se = split_block (entry_bb, last_stmt (entry_bb));
9783 entry_bb = se->src;
9784 iter_part_bb = se->dest;
9785 cont_bb = region->cont;
9786 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
9787 fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
9788 gcc_assert (broken_loop
9789 || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
9790 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
9791 body_bb = single_succ (seq_start_bb);
9792 if (!broken_loop)
9794 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb
9795 || single_succ (BRANCH_EDGE (cont_bb)->dest) == body_bb);
9796 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
9797 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
9799 exit_bb = region->exit;
9801 /* Trip and adjustment setup goes in ENTRY_BB. */
9802 gsi = gsi_last_bb (entry_bb);
9803 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
9805 if (fd->collapse > 1)
9807 int first_zero_iter = -1, dummy = -1;
9808 basic_block l2_dom_bb = NULL, dummy_bb = NULL;
9810 counts = XALLOCAVEC (tree, fd->collapse);
9811 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
9812 fin_bb, first_zero_iter,
9813 dummy_bb, dummy, l2_dom_bb);
9814 t = NULL_TREE;
9816 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
9817 t = integer_one_node;
9818 else
9819 t = fold_binary (fd->loop.cond_code, boolean_type_node,
9820 fold_convert (type, fd->loop.n1),
9821 fold_convert (type, fd->loop.n2));
9822 if (fd->collapse == 1
9823 && TYPE_UNSIGNED (type)
9824 && (t == NULL_TREE || !integer_onep (t)))
9826 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
9827 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
9828 true, GSI_SAME_STMT);
9829 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
9830 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
9831 true, GSI_SAME_STMT);
9832 gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
9833 NULL_TREE, NULL_TREE);
9834 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
9835 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
9836 expand_omp_regimplify_p, NULL, NULL)
9837 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
9838 expand_omp_regimplify_p, NULL, NULL))
9840 gsi = gsi_for_stmt (cond_stmt);
9841 gimple_regimplify_operands (cond_stmt, &gsi);
9843 se = split_block (entry_bb, cond_stmt);
9844 se->flags = EDGE_TRUE_VALUE;
9845 entry_bb = se->dest;
9846 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
9847 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
9848 se->probability = REG_BR_PROB_BASE / 2000 - 1;
9849 if (gimple_in_ssa_p (cfun))
9851 int dest_idx = find_edge (iter_part_bb, fin_bb)->dest_idx;
9852 for (gphi_iterator gpi = gsi_start_phis (fin_bb);
9853 !gsi_end_p (gpi); gsi_next (&gpi))
9855 gphi *phi = gpi.phi ();
9856 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
9857 se, UNKNOWN_LOCATION);
9860 gsi = gsi_last_bb (entry_bb);
9863 switch (gimple_omp_for_kind (fd->for_stmt))
9865 case GF_OMP_FOR_KIND_FOR:
9866 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
9867 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
9868 break;
9869 case GF_OMP_FOR_KIND_DISTRIBUTE:
9870 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS);
9871 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM);
9872 break;
9873 default:
9874 gcc_unreachable ();
9876 nthreads = build_call_expr (nthreads, 0);
9877 nthreads = fold_convert (itype, nthreads);
9878 nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE,
9879 true, GSI_SAME_STMT);
9880 threadid = build_call_expr (threadid, 0);
9881 threadid = fold_convert (itype, threadid);
9882 threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE,
9883 true, GSI_SAME_STMT);
9885 n1 = fd->loop.n1;
9886 n2 = fd->loop.n2;
9887 step = fd->loop.step;
9888 if (gimple_omp_for_combined_into_p (fd->for_stmt))
9890 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
9891 OMP_CLAUSE__LOOPTEMP_);
9892 gcc_assert (innerc);
9893 n1 = OMP_CLAUSE_DECL (innerc);
9894 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
9895 OMP_CLAUSE__LOOPTEMP_);
9896 gcc_assert (innerc);
9897 n2 = OMP_CLAUSE_DECL (innerc);
9899 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
9900 true, NULL_TREE, true, GSI_SAME_STMT);
9901 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
9902 true, NULL_TREE, true, GSI_SAME_STMT);
9903 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
9904 true, NULL_TREE, true, GSI_SAME_STMT);
9905 tree chunk_size = fold_convert (itype, fd->chunk_size);
9906 chunk_size = omp_adjust_chunk_size (chunk_size, fd->simd_schedule);
9907 chunk_size
9908 = force_gimple_operand_gsi (&gsi, chunk_size, true, NULL_TREE, true,
9909 GSI_SAME_STMT);
9911 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
9912 t = fold_build2 (PLUS_EXPR, itype, step, t);
9913 t = fold_build2 (PLUS_EXPR, itype, t, n2);
9914 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
9915 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
9916 t = fold_build2 (TRUNC_DIV_EXPR, itype,
9917 fold_build1 (NEGATE_EXPR, itype, t),
9918 fold_build1 (NEGATE_EXPR, itype, step));
9919 else
9920 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
9921 t = fold_convert (itype, t);
9922 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
9923 true, GSI_SAME_STMT);
9925 trip_var = create_tmp_reg (itype, ".trip");
9926 if (gimple_in_ssa_p (cfun))
9928 trip_init = make_ssa_name (trip_var);
9929 trip_main = make_ssa_name (trip_var);
9930 trip_back = make_ssa_name (trip_var);
9932 else
9934 trip_init = trip_var;
9935 trip_main = trip_var;
9936 trip_back = trip_var;
9939 gassign *assign_stmt
9940 = gimple_build_assign (trip_init, build_int_cst (itype, 0));
9941 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
9943 t = fold_build2 (MULT_EXPR, itype, threadid, chunk_size);
9944 t = fold_build2 (MULT_EXPR, itype, t, step);
9945 if (POINTER_TYPE_P (type))
9946 t = fold_build_pointer_plus (n1, t);
9947 else
9948 t = fold_build2 (PLUS_EXPR, type, t, n1);
9949 vextra = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
9950 true, GSI_SAME_STMT);
9952 /* Remove the GIMPLE_OMP_FOR. */
9953 gsi_remove (&gsi, true);
9955 gimple_stmt_iterator gsif = gsi;
9957 /* Iteration space partitioning goes in ITER_PART_BB. */
9958 gsi = gsi_last_bb (iter_part_bb);
9960 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
9961 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
9962 t = fold_build2 (MULT_EXPR, itype, t, chunk_size);
9963 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
9964 false, GSI_CONTINUE_LINKING);
9966 t = fold_build2 (PLUS_EXPR, itype, s0, chunk_size);
9967 t = fold_build2 (MIN_EXPR, itype, t, n);
9968 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
9969 false, GSI_CONTINUE_LINKING);
9971 t = build2 (LT_EXPR, boolean_type_node, s0, n);
9972 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
9974 /* Setup code for sequential iteration goes in SEQ_START_BB. */
9975 gsi = gsi_start_bb (seq_start_bb);
9977 tree startvar = fd->loop.v;
9978 tree endvar = NULL_TREE;
9980 if (gimple_omp_for_combined_p (fd->for_stmt))
9982 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
9983 ? gimple_omp_parallel_clauses (inner_stmt)
9984 : gimple_omp_for_clauses (inner_stmt);
9985 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
9986 gcc_assert (innerc);
9987 startvar = OMP_CLAUSE_DECL (innerc);
9988 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
9989 OMP_CLAUSE__LOOPTEMP_);
9990 gcc_assert (innerc);
9991 endvar = OMP_CLAUSE_DECL (innerc);
9992 if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST
9993 && gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
9995 int i;
9996 for (i = 1; i < fd->collapse; i++)
9998 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
9999 OMP_CLAUSE__LOOPTEMP_);
10000 gcc_assert (innerc);
10002 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
10003 OMP_CLAUSE__LOOPTEMP_);
10004 if (innerc)
10006 /* If needed (distribute parallel for with lastprivate),
10007 propagate down the total number of iterations. */
10008 tree t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (innerc)),
10009 fd->loop.n2);
10010 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false,
10011 GSI_CONTINUE_LINKING);
10012 assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t);
10013 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
10018 t = fold_convert (itype, s0);
10019 t = fold_build2 (MULT_EXPR, itype, t, step);
10020 if (POINTER_TYPE_P (type))
10021 t = fold_build_pointer_plus (n1, t);
10022 else
10023 t = fold_build2 (PLUS_EXPR, type, t, n1);
10024 t = fold_convert (TREE_TYPE (startvar), t);
10025 t = force_gimple_operand_gsi (&gsi, t,
10026 DECL_P (startvar)
10027 && TREE_ADDRESSABLE (startvar),
10028 NULL_TREE, false, GSI_CONTINUE_LINKING);
10029 assign_stmt = gimple_build_assign (startvar, t);
10030 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
10032 t = fold_convert (itype, e0);
10033 t = fold_build2 (MULT_EXPR, itype, t, step);
10034 if (POINTER_TYPE_P (type))
10035 t = fold_build_pointer_plus (n1, t);
10036 else
10037 t = fold_build2 (PLUS_EXPR, type, t, n1);
10038 t = fold_convert (TREE_TYPE (startvar), t);
10039 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
10040 false, GSI_CONTINUE_LINKING);
10041 if (endvar)
10043 assign_stmt = gimple_build_assign (endvar, e);
10044 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
10045 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
10046 assign_stmt = gimple_build_assign (fd->loop.v, e);
10047 else
10048 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
10049 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
10051 /* Handle linear clause adjustments. */
10052 tree itercnt = NULL_TREE, itercntbias = NULL_TREE;
10053 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR)
10054 for (tree c = gimple_omp_for_clauses (fd->for_stmt);
10055 c; c = OMP_CLAUSE_CHAIN (c))
10056 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
10057 && !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
10059 tree d = OMP_CLAUSE_DECL (c);
10060 bool is_ref = is_reference (d);
10061 tree t = d, a, dest;
10062 if (is_ref)
10063 t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t);
10064 tree type = TREE_TYPE (t);
10065 if (POINTER_TYPE_P (type))
10066 type = sizetype;
10067 dest = unshare_expr (t);
10068 tree v = create_tmp_var (TREE_TYPE (t), NULL);
10069 expand_omp_build_assign (&gsif, v, t);
10070 if (itercnt == NULL_TREE)
10072 if (gimple_omp_for_combined_into_p (fd->for_stmt))
10074 itercntbias
10075 = fold_build2 (MINUS_EXPR, itype, fold_convert (itype, n1),
10076 fold_convert (itype, fd->loop.n1));
10077 itercntbias = fold_build2 (EXACT_DIV_EXPR, itype,
10078 itercntbias, step);
10079 itercntbias
10080 = force_gimple_operand_gsi (&gsif, itercntbias, true,
10081 NULL_TREE, true,
10082 GSI_SAME_STMT);
10083 itercnt = fold_build2 (PLUS_EXPR, itype, itercntbias, s0);
10084 itercnt = force_gimple_operand_gsi (&gsi, itercnt, true,
10085 NULL_TREE, false,
10086 GSI_CONTINUE_LINKING);
10088 else
10089 itercnt = s0;
10091 a = fold_build2 (MULT_EXPR, type,
10092 fold_convert (type, itercnt),
10093 fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c)));
10094 t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR
10095 : POINTER_PLUS_EXPR, TREE_TYPE (t), v, a);
10096 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
10097 false, GSI_CONTINUE_LINKING);
10098 assign_stmt = gimple_build_assign (dest, t);
10099 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
10101 if (fd->collapse > 1)
10102 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
10104 if (!broken_loop)
10106 /* The code controlling the sequential loop goes in CONT_BB,
10107 replacing the GIMPLE_OMP_CONTINUE. */
10108 gsi = gsi_last_bb (cont_bb);
10109 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
10110 vmain = gimple_omp_continue_control_use (cont_stmt);
10111 vback = gimple_omp_continue_control_def (cont_stmt);
10113 if (!gimple_omp_for_combined_p (fd->for_stmt))
10115 if (POINTER_TYPE_P (type))
10116 t = fold_build_pointer_plus (vmain, step);
10117 else
10118 t = fold_build2 (PLUS_EXPR, type, vmain, step);
10119 if (DECL_P (vback) && TREE_ADDRESSABLE (vback))
10120 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
10121 true, GSI_SAME_STMT);
10122 assign_stmt = gimple_build_assign (vback, t);
10123 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
10125 if (tree_int_cst_equal (fd->chunk_size, integer_one_node))
10126 t = build2 (EQ_EXPR, boolean_type_node,
10127 build_int_cst (itype, 0),
10128 build_int_cst (itype, 1));
10129 else
10130 t = build2 (fd->loop.cond_code, boolean_type_node,
10131 DECL_P (vback) && TREE_ADDRESSABLE (vback)
10132 ? t : vback, e);
10133 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
10136 /* Remove GIMPLE_OMP_CONTINUE. */
10137 gsi_remove (&gsi, true);
10139 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
10140 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
10142 /* Trip update code goes into TRIP_UPDATE_BB. */
10143 gsi = gsi_start_bb (trip_update_bb);
10145 t = build_int_cst (itype, 1);
10146 t = build2 (PLUS_EXPR, itype, trip_main, t);
10147 assign_stmt = gimple_build_assign (trip_back, t);
10148 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
10151 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
10152 gsi = gsi_last_bb (exit_bb);
10153 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
10155 t = gimple_omp_return_lhs (gsi_stmt (gsi));
10156 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
10158 gsi_remove (&gsi, true);
10160 /* Connect the new blocks. */
10161 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
10162 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
10164 if (!broken_loop)
10166 se = find_edge (cont_bb, body_bb);
10167 if (se == NULL)
10169 se = BRANCH_EDGE (cont_bb);
10170 gcc_assert (single_succ (se->dest) == body_bb);
10172 if (gimple_omp_for_combined_p (fd->for_stmt))
10174 remove_edge (se);
10175 se = NULL;
10177 else if (fd->collapse > 1)
10179 remove_edge (se);
10180 se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
10182 else
10183 se->flags = EDGE_TRUE_VALUE;
10184 find_edge (cont_bb, trip_update_bb)->flags
10185 = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
10187 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
10190 if (gimple_in_ssa_p (cfun))
10192 gphi_iterator psi;
10193 gphi *phi;
10194 edge re, ene;
10195 edge_var_map *vm;
10196 size_t i;
10198 gcc_assert (fd->collapse == 1 && !broken_loop);
10200 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
10201 remove arguments of the phi nodes in fin_bb. We need to create
10202 appropriate phi nodes in iter_part_bb instead. */
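/* In other words (illustration of the surrounding code, no new names
   introduced): before the redirection done above, the back edge of the
   trip loop ran trip_update_bb -> fin_bb, so every value carried around
   that loop merged in a PHI node in fin_bb.  Redirecting the edge to
   iter_part_bb removed those PHI arguments, so an equivalent PHI has to
   be rebuilt in iter_part_bb, fed by the entry value on the edge from
   entry_bb and by the loop-carried value on the redirected back edge;
   that is what the loop below does for each (phi, vm) pair.  */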
10203 se = find_edge (iter_part_bb, fin_bb);
10204 re = single_succ_edge (trip_update_bb);
10205 vec<edge_var_map> *head = redirect_edge_var_map_vector (re);
10206 ene = single_succ_edge (entry_bb);
10208 psi = gsi_start_phis (fin_bb);
10209 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
10210 gsi_next (&psi), ++i)
10212 gphi *nphi;
10213 source_location locus;
10215 phi = psi.phi ();
10216 t = gimple_phi_result (phi);
10217 gcc_assert (t == redirect_edge_var_map_result (vm));
10219 if (!single_pred_p (fin_bb))
10220 t = copy_ssa_name (t, phi);
10222 nphi = create_phi_node (t, iter_part_bb);
10224 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
10225 locus = gimple_phi_arg_location_from_edge (phi, se);
10227 /* A special case -- fd->loop.v is not yet computed in
10228 iter_part_bb, so we need to use vextra instead. */
10229 if (t == fd->loop.v)
10230 t = vextra;
10231 add_phi_arg (nphi, t, ene, locus);
10232 locus = redirect_edge_var_map_location (vm);
10233 tree back_arg = redirect_edge_var_map_def (vm);
10234 add_phi_arg (nphi, back_arg, re, locus);
10235 edge ce = find_edge (cont_bb, body_bb);
10236 if (ce == NULL)
10238 ce = BRANCH_EDGE (cont_bb);
10239 gcc_assert (single_succ (ce->dest) == body_bb);
10240 ce = single_succ_edge (ce->dest);
10242 gphi *inner_loop_phi = find_phi_with_arg_on_edge (back_arg, ce);
10243 gcc_assert (inner_loop_phi != NULL);
10244 add_phi_arg (inner_loop_phi, gimple_phi_result (nphi),
10245 find_edge (seq_start_bb, body_bb), locus);
10247 if (!single_pred_p (fin_bb))
10248 add_phi_arg (phi, gimple_phi_result (nphi), se, locus);
10250 gcc_assert (gsi_end_p (psi) && (head == NULL || i == head->length ()));
10251 redirect_edge_var_map_clear (re);
10252 if (single_pred_p (fin_bb))
10253 while (1)
10255 psi = gsi_start_phis (fin_bb);
10256 if (gsi_end_p (psi))
10257 break;
10258 remove_phi_node (&psi, false);
10261 /* Make phi node for trip. */
10262 phi = create_phi_node (trip_main, iter_part_bb);
10263 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
10264 UNKNOWN_LOCATION);
10265 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
10266 UNKNOWN_LOCATION);
10269 if (!broken_loop)
10270 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
10271 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
10272 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
10273 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
10274 recompute_dominator (CDI_DOMINATORS, fin_bb));
10275 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
10276 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
10277 set_immediate_dominator (CDI_DOMINATORS, body_bb,
10278 recompute_dominator (CDI_DOMINATORS, body_bb));
10280 if (!broken_loop)
10282 struct loop *loop = body_bb->loop_father;
10283 struct loop *trip_loop = alloc_loop ();
10284 trip_loop->header = iter_part_bb;
10285 trip_loop->latch = trip_update_bb;
10286 add_loop (trip_loop, iter_part_bb->loop_father);
10288 if (loop != entry_bb->loop_father)
10290 gcc_assert (loop->header == body_bb);
10291 gcc_assert (loop->latch == region->cont
10292 || single_pred (loop->latch) == region->cont);
10293 trip_loop->inner = loop;
10294 return;
10297 if (!gimple_omp_for_combined_p (fd->for_stmt))
10299 loop = alloc_loop ();
10300 loop->header = body_bb;
10301 if (collapse_bb == NULL)
10302 loop->latch = cont_bb;
10303 add_loop (loop, trip_loop);
10308 /* A subroutine of expand_omp_for. Generate code for _Cilk_for loop.
10309 Given parameters:
10310 for (V = N1; V cond N2; V += STEP) BODY;
10312 where COND is "<" or ">" or "!=", we generate pseudocode
10314 for (ind_var = low; ind_var < high; ind_var++)
10315 {
10316 V = n1 + (ind_var * STEP)
10318 <BODY>
10319 }
10321 In the above pseudocode, low and high are function parameters of the
10322 child function. In the function below, we insert a temporary
10323 variable and calls to two OMP functions that will not otherwise be
10324 found in the body of _Cilk_for (since OMP_FOR cannot be mixed
10325 with _Cilk_for). These calls are later replaced with low and high
10326 by the function that handles taskreg. */
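/* A small worked example (numbers invented for illustration): if the
   original loop is for (V = 0; V < 20; V += 2), the runtime may hand one
   invocation of the child function the slice __low = 5, __high = 10.
   The code below then iterates ind_var = 5 .. 9 and reconstructs
   V = 0 + ind_var * 2, i.e. V = 10, 12, 14, 16, 18 for that slice of
   the iteration space.  */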
10329 static void
10330 expand_cilk_for (struct omp_region *region, struct omp_for_data *fd)
10332 bool broken_loop = region->cont == NULL;
10333 basic_block entry_bb = region->entry;
10334 basic_block cont_bb = region->cont;
10336 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
10337 gcc_assert (broken_loop
10338 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
10339 basic_block l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
10340 basic_block l1_bb, l2_bb;
10342 if (!broken_loop)
10344 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
10345 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
10346 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
10347 l2_bb = BRANCH_EDGE (entry_bb)->dest;
10349 else
10351 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
10352 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
10353 l2_bb = single_succ (l1_bb);
10355 basic_block exit_bb = region->exit;
10356 basic_block l2_dom_bb = NULL;
10358 gimple_stmt_iterator gsi = gsi_last_bb (entry_bb);
10360 /* The statements below, up to the "tree high_val = ..." line, are pseudo
10361 statements used to pass information to expand_omp_taskreg.
10362 low_val and high_val will be replaced by the __low and __high
10363 parameters from the child function.
10365 The call_exprs part is a place-holder; it mainly serves to tell
10366 the top-level part distinctly that this is the point where
10367 low and high should be put (the reasoning is given in the header
10368 comment). */
10370 tree child_fndecl
10371 = gimple_omp_parallel_child_fn (
10372 as_a <gomp_parallel *> (last_stmt (region->outer->entry)));
10373 tree t, low_val = NULL_TREE, high_val = NULL_TREE;
10374 for (t = DECL_ARGUMENTS (child_fndecl); t; t = TREE_CHAIN (t))
10376 if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__high"))
10377 high_val = t;
10378 else if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__low"))
10379 low_val = t;
10381 gcc_assert (low_val && high_val);
10383 tree type = TREE_TYPE (low_val);
10384 tree ind_var = create_tmp_reg (type, "__cilk_ind_var");
10385 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
10387 /* Not needed in SSA form right now. */
10388 gcc_assert (!gimple_in_ssa_p (cfun));
10389 if (l2_dom_bb == NULL)
10390 l2_dom_bb = l1_bb;
10392 tree n1 = low_val;
10393 tree n2 = high_val;
10395 gimple *stmt = gimple_build_assign (ind_var, n1);
10397 /* Replace the GIMPLE_OMP_FOR statement. */
10398 gsi_replace (&gsi, stmt, true);
10400 if (!broken_loop)
10402 /* Code to control the increment goes in the CONT_BB. */
10403 gsi = gsi_last_bb (cont_bb);
10404 stmt = gsi_stmt (gsi);
10405 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
10406 stmt = gimple_build_assign (ind_var, PLUS_EXPR, ind_var,
10407 build_one_cst (type));
10409 /* Replace GIMPLE_OMP_CONTINUE. */
10410 gsi_replace (&gsi, stmt, true);
10413 /* Emit the condition in L1_BB. */
10414 gsi = gsi_after_labels (l1_bb);
10415 t = fold_build2 (MULT_EXPR, TREE_TYPE (fd->loop.step),
10416 fold_convert (TREE_TYPE (fd->loop.step), ind_var),
10417 fd->loop.step);
10418 if (POINTER_TYPE_P (TREE_TYPE (fd->loop.n1)))
10419 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (fd->loop.n1),
10420 fd->loop.n1, fold_convert (sizetype, t));
10421 else
10422 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loop.n1),
10423 fd->loop.n1, fold_convert (TREE_TYPE (fd->loop.n1), t));
10424 t = fold_convert (TREE_TYPE (fd->loop.v), t);
10425 expand_omp_build_assign (&gsi, fd->loop.v, t);
10427 /* The condition is always '<' since the runtime will fill in the low
10428 and high values. */
10429 stmt = gimple_build_cond (LT_EXPR, ind_var, n2, NULL_TREE, NULL_TREE);
10430 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
10432 /* Remove GIMPLE_OMP_RETURN. */
10433 gsi = gsi_last_bb (exit_bb);
10434 gsi_remove (&gsi, true);
10436 /* Connect the new blocks. */
10437 remove_edge (FALLTHRU_EDGE (entry_bb));
10439 edge e, ne;
10440 if (!broken_loop)
10442 remove_edge (BRANCH_EDGE (entry_bb));
10443 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
10445 e = BRANCH_EDGE (l1_bb);
10446 ne = FALLTHRU_EDGE (l1_bb);
10447 e->flags = EDGE_TRUE_VALUE;
10449 else
10451 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
10453 ne = single_succ_edge (l1_bb);
10454 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
10457 ne->flags = EDGE_FALSE_VALUE;
10458 e->probability = REG_BR_PROB_BASE * 7 / 8;
10459 ne->probability = REG_BR_PROB_BASE / 8;
10461 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
10462 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
10463 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
10465 if (!broken_loop)
10467 struct loop *loop = alloc_loop ();
10468 loop->header = l1_bb;
10469 loop->latch = cont_bb;
10470 add_loop (loop, l1_bb->loop_father);
10471 loop->safelen = INT_MAX;
10474 /* Pick the correct library function based on the precision of the
10475 induction variable type. */
10476 tree lib_fun = NULL_TREE;
10477 if (TYPE_PRECISION (type) == 32)
10478 lib_fun = cilk_for_32_fndecl;
10479 else if (TYPE_PRECISION (type) == 64)
10480 lib_fun = cilk_for_64_fndecl;
10481 else
10482 gcc_unreachable ();
10484 gcc_assert (fd->sched_kind == OMP_CLAUSE_SCHEDULE_CILKFOR);
10486 /* WS_ARGS contains the library function flavor to call:
10487 __libcilkrts_cilk_for_64 or __libcilkrts_cilk_for_32, and the
10488 user-defined grain value. If the user does not define one, then zero
10489 is passed in by the parser. */
10490 vec_alloc (region->ws_args, 2);
10491 region->ws_args->quick_push (lib_fun);
10492 region->ws_args->quick_push (fd->chunk_size);
10495 /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
10496 loop. Given parameters:
10498 for (V = N1; V cond N2; V += STEP) BODY;
10500 where COND is "<" or ">", we generate pseudocode
10502 V = N1;
10503 goto L1;
10504 L0:
10505 BODY;
10506 V += STEP;
10507 L1:
10508 if (V cond N2) goto L0; else goto L2;
10509 L2:
10511 For collapsed loops, given parameters:
10512 collapse(3)
10513 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
10514 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
10515 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
10516 BODY;
10518 we generate pseudocode
10520 if (cond3 is <)
10521 adj = STEP3 - 1;
10522 else
10523 adj = STEP3 + 1;
10524 count3 = (adj + N32 - N31) / STEP3;
10525 if (cond2 is <)
10526 adj = STEP2 - 1;
10527 else
10528 adj = STEP2 + 1;
10529 count2 = (adj + N22 - N21) / STEP2;
10530 if (cond1 is <)
10531 adj = STEP1 - 1;
10532 else
10533 adj = STEP1 + 1;
10534 count1 = (adj + N12 - N11) / STEP1;
10535 count = count1 * count2 * count3;
10536 V = 0;
10537 V1 = N11;
10538 V2 = N21;
10539 V3 = N31;
10540 goto L1;
10541 L0:
10542 BODY;
10543 V += 1;
10544 V3 += STEP3;
10545 V2 += (V3 cond3 N32) ? 0 : STEP2;
10546 V3 = (V3 cond3 N32) ? V3 : N31;
10547 V1 += (V2 cond2 N22) ? 0 : STEP1;
10548 V2 = (V2 cond2 N22) ? V2 : N21;
10549 L1:
10550 if (V < count) goto L0; else goto L2;
10551 L2:
10553 */
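/* A worked example (counts invented for illustration): with count3 = 3,
   count2 = 2 and count1 = 2, count = 12 and the single linear IV V runs
   from 0 to 11.  On every third increment V3 fails cond3, so V2 advances
   by STEP2 and V3 is reset to N31; on every sixth increment V2 in turn
   fails cond2, so V1 advances and V2 is reset to N21.  The collapsed
   nest is thus driven by a single comparison on V, which keeps the body
   free of inner-loop branches and hence easier to vectorize.  */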
10555 static void
10556 expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
10558 tree type, t;
10559 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
10560 gimple_stmt_iterator gsi;
10561 gimple *stmt;
10562 gcond *cond_stmt;
10563 bool broken_loop = region->cont == NULL;
10564 edge e, ne;
10565 tree *counts = NULL;
10566 int i;
10567 tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
10568 OMP_CLAUSE_SAFELEN);
10569 tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
10570 OMP_CLAUSE__SIMDUID_);
10571 tree n1, n2;
10573 type = TREE_TYPE (fd->loop.v);
10574 entry_bb = region->entry;
10575 cont_bb = region->cont;
10576 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
10577 gcc_assert (broken_loop
10578 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
10579 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
10580 if (!broken_loop)
10582 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
10583 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
10584 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
10585 l2_bb = BRANCH_EDGE (entry_bb)->dest;
10587 else
10589 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
10590 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
10591 l2_bb = single_succ (l1_bb);
10593 exit_bb = region->exit;
10594 l2_dom_bb = NULL;
10596 gsi = gsi_last_bb (entry_bb);
10598 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
10599 /* Not needed in SSA form right now. */
10600 gcc_assert (!gimple_in_ssa_p (cfun));
10601 if (fd->collapse > 1)
10603 int first_zero_iter = -1, dummy = -1;
10604 basic_block zero_iter_bb = l2_bb, dummy_bb = NULL;
10606 counts = XALLOCAVEC (tree, fd->collapse);
10607 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
10608 zero_iter_bb, first_zero_iter,
10609 dummy_bb, dummy, l2_dom_bb);
10611 if (l2_dom_bb == NULL)
10612 l2_dom_bb = l1_bb;
10614 n1 = fd->loop.n1;
10615 n2 = fd->loop.n2;
10616 if (gimple_omp_for_combined_into_p (fd->for_stmt))
10618 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
10619 OMP_CLAUSE__LOOPTEMP_);
10620 gcc_assert (innerc);
10621 n1 = OMP_CLAUSE_DECL (innerc);
10622 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
10623 OMP_CLAUSE__LOOPTEMP_);
10624 gcc_assert (innerc);
10625 n2 = OMP_CLAUSE_DECL (innerc);
10626 expand_omp_build_assign (&gsi, fd->loop.v,
10627 fold_convert (type, n1));
10628 if (fd->collapse > 1)
10630 gsi_prev (&gsi);
10631 expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
10632 gsi_next (&gsi);
10635 else
10637 expand_omp_build_assign (&gsi, fd->loop.v,
10638 fold_convert (type, fd->loop.n1));
10639 if (fd->collapse > 1)
10640 for (i = 0; i < fd->collapse; i++)
10642 tree itype = TREE_TYPE (fd->loops[i].v);
10643 if (POINTER_TYPE_P (itype))
10644 itype = signed_type_for (itype);
10645 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
10646 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
10650 /* Remove the GIMPLE_OMP_FOR statement. */
10651 gsi_remove (&gsi, true);
10653 if (!broken_loop)
10655 /* Code to control the increment goes in the CONT_BB. */
10656 gsi = gsi_last_bb (cont_bb);
10657 stmt = gsi_stmt (gsi);
10658 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
10660 if (POINTER_TYPE_P (type))
10661 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
10662 else
10663 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
10664 expand_omp_build_assign (&gsi, fd->loop.v, t);
10666 if (fd->collapse > 1)
10668 i = fd->collapse - 1;
10669 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
10671 t = fold_convert (sizetype, fd->loops[i].step);
10672 t = fold_build_pointer_plus (fd->loops[i].v, t);
10674 else
10676 t = fold_convert (TREE_TYPE (fd->loops[i].v),
10677 fd->loops[i].step);
10678 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
10679 fd->loops[i].v, t);
10681 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
10683 for (i = fd->collapse - 1; i > 0; i--)
10685 tree itype = TREE_TYPE (fd->loops[i].v);
10686 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
10687 if (POINTER_TYPE_P (itype2))
10688 itype2 = signed_type_for (itype2);
10689 t = build3 (COND_EXPR, itype2,
10690 build2 (fd->loops[i].cond_code, boolean_type_node,
10691 fd->loops[i].v,
10692 fold_convert (itype, fd->loops[i].n2)),
10693 build_int_cst (itype2, 0),
10694 fold_convert (itype2, fd->loops[i - 1].step));
10695 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
10696 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
10697 else
10698 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
10699 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
10701 t = build3 (COND_EXPR, itype,
10702 build2 (fd->loops[i].cond_code, boolean_type_node,
10703 fd->loops[i].v,
10704 fold_convert (itype, fd->loops[i].n2)),
10705 fd->loops[i].v,
10706 fold_convert (itype, fd->loops[i].n1));
10707 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
10711 /* Remove GIMPLE_OMP_CONTINUE. */
10712 gsi_remove (&gsi, true);
10715 /* Emit the condition in L1_BB. */
10716 gsi = gsi_start_bb (l1_bb);
10718 t = fold_convert (type, n2);
10719 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
10720 false, GSI_CONTINUE_LINKING);
10721 tree v = fd->loop.v;
10722 if (DECL_P (v) && TREE_ADDRESSABLE (v))
10723 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
10724 false, GSI_CONTINUE_LINKING);
10725 t = build2 (fd->loop.cond_code, boolean_type_node, v, t);
10726 cond_stmt = gimple_build_cond_empty (t);
10727 gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
10728 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p,
10729 NULL, NULL)
10730 || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p,
10731 NULL, NULL))
10733 gsi = gsi_for_stmt (cond_stmt);
10734 gimple_regimplify_operands (cond_stmt, &gsi);
10737 /* Remove GIMPLE_OMP_RETURN. */
10738 gsi = gsi_last_bb (exit_bb);
10739 gsi_remove (&gsi, true);
10741 /* Connect the new blocks. */
10742 remove_edge (FALLTHRU_EDGE (entry_bb));
10744 if (!broken_loop)
10746 remove_edge (BRANCH_EDGE (entry_bb));
10747 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
10749 e = BRANCH_EDGE (l1_bb);
10750 ne = FALLTHRU_EDGE (l1_bb);
10751 e->flags = EDGE_TRUE_VALUE;
10753 else
10755 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
10757 ne = single_succ_edge (l1_bb);
10758 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
10761 ne->flags = EDGE_FALSE_VALUE;
10762 e->probability = REG_BR_PROB_BASE * 7 / 8;
10763 ne->probability = REG_BR_PROB_BASE / 8;
10765 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
10766 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
10767 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
10769 if (!broken_loop)
10771 struct loop *loop = alloc_loop ();
10772 loop->header = l1_bb;
10773 loop->latch = cont_bb;
10774 add_loop (loop, l1_bb->loop_father);
10775 if (safelen == NULL_TREE)
10776 loop->safelen = INT_MAX;
10777 else
10779 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
10780 if (TREE_CODE (safelen) != INTEGER_CST)
10781 loop->safelen = 0;
10782 else if (!tree_fits_uhwi_p (safelen)
10783 || tree_to_uhwi (safelen) > INT_MAX)
10784 loop->safelen = INT_MAX;
10785 else
10786 loop->safelen = tree_to_uhwi (safelen);
10787 if (loop->safelen == 1)
10788 loop->safelen = 0;
10790 if (simduid)
10792 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
10793 cfun->has_simduid_loops = true;
10795 /* Unless -fno-tree-loop-vectorize was given, hint that we want to
10796 vectorize the loop. */
10797 if ((flag_tree_loop_vectorize
10798 || (!global_options_set.x_flag_tree_loop_vectorize
10799 && !global_options_set.x_flag_tree_vectorize))
10800 && flag_tree_loop_optimize
10801 && loop->safelen > 1)
10803 loop->force_vectorize = true;
10804 cfun->has_force_vectorize_loops = true;
10807 else if (simduid)
10808 cfun->has_simduid_loops = true;
10811 /* The taskloop construct is represented after gimplification as
10812 two GIMPLE_OMP_FOR constructs with a GIMPLE_OMP_TASK sandwiched
10813 in between them. This routine expands the outer GIMPLE_OMP_FOR,
10814 which should just compute all the loop temporaries needed
10815 by the GIMPLE_OMP_TASK. */
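/* Roughly (illustrative sketch, not a literal dump): for

     #pragma omp taskloop
     for (i = 0; i < n; i++) BODY;

   the outer GIMPLE_OMP_FOR expanded here only materializes the loop
   temporaries (start, end and, for collapsed loops, the total iteration
   count) that are handed to GOMP_taskloop{,_ull}; the library then
   creates the tasks, each of which receives one subrange and runs the
   inner GIMPLE_OMP_FOR over it.  */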
10817 static void
10818 expand_omp_taskloop_for_outer (struct omp_region *region,
10819 struct omp_for_data *fd,
10820 gimple *inner_stmt)
10822 tree type, bias = NULL_TREE;
10823 basic_block entry_bb, cont_bb, exit_bb;
10824 gimple_stmt_iterator gsi;
10825 gassign *assign_stmt;
10826 tree *counts = NULL;
10827 int i;
10829 gcc_assert (inner_stmt);
10830 gcc_assert (region->cont);
10831 gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_TASK
10832 && gimple_omp_task_taskloop_p (inner_stmt));
10833 type = TREE_TYPE (fd->loop.v);
10835 /* See if we need to bias by LLONG_MIN. */
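/* A worked example of the bias (values invented for illustration): for a
   signed long long loop from -3 to 5, the bounds reinterpreted as
   unsigned would compare as 0xfffffffffffffffd > 5, inverting their
   order.  Adding LLONG_MIN (0x8000000000000000) as the bias maps them to
   0x7ffffffffffffffd and 0x8000000000000005, so the unsigned comparisons
   used by the _ull runtime entry points still see them in the original
   order.  */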
10836 if (fd->iter_type == long_long_unsigned_type_node
10837 && TREE_CODE (type) == INTEGER_TYPE
10838 && !TYPE_UNSIGNED (type))
10840 tree n1, n2;
10842 if (fd->loop.cond_code == LT_EXPR)
10844 n1 = fd->loop.n1;
10845 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
10847 else
10849 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
10850 n2 = fd->loop.n1;
10852 if (TREE_CODE (n1) != INTEGER_CST
10853 || TREE_CODE (n2) != INTEGER_CST
10854 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
10855 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
10858 entry_bb = region->entry;
10859 cont_bb = region->cont;
10860 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
10861 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
10862 exit_bb = region->exit;
10864 gsi = gsi_last_bb (entry_bb);
10865 gimple *for_stmt = gsi_stmt (gsi);
10866 gcc_assert (gimple_code (for_stmt) == GIMPLE_OMP_FOR);
10867 if (fd->collapse > 1)
10869 int first_zero_iter = -1, dummy = -1;
10870 basic_block zero_iter_bb = NULL, dummy_bb = NULL, l2_dom_bb = NULL;
10872 counts = XALLOCAVEC (tree, fd->collapse);
10873 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
10874 zero_iter_bb, first_zero_iter,
10875 dummy_bb, dummy, l2_dom_bb);
10877 if (zero_iter_bb)
10879 /* Some counts[i] vars might be uninitialized if
10880 some loop has zero iterations. But the body shouldn't
10881 be executed in that case, so just avoid uninit warnings. */
10882 for (i = first_zero_iter; i < fd->collapse; i++)
10883 if (SSA_VAR_P (counts[i]))
10884 TREE_NO_WARNING (counts[i]) = 1;
10885 gsi_prev (&gsi);
10886 edge e = split_block (entry_bb, gsi_stmt (gsi));
10887 entry_bb = e->dest;
10888 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
10889 gsi = gsi_last_bb (entry_bb);
10890 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
10891 get_immediate_dominator (CDI_DOMINATORS,
10892 zero_iter_bb));
10896 tree t0, t1;
10897 t1 = fd->loop.n2;
10898 t0 = fd->loop.n1;
10899 if (POINTER_TYPE_P (TREE_TYPE (t0))
10900 && TYPE_PRECISION (TREE_TYPE (t0))
10901 != TYPE_PRECISION (fd->iter_type))
10903 /* Avoid casting pointers to an integer of a different size. */
10904 tree itype = signed_type_for (type);
10905 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
10906 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
10908 else
10910 t1 = fold_convert (fd->iter_type, t1);
10911 t0 = fold_convert (fd->iter_type, t0);
10913 if (bias)
10915 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
10916 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
10919 tree innerc = find_omp_clause (gimple_omp_task_clauses (inner_stmt),
10920 OMP_CLAUSE__LOOPTEMP_);
10921 gcc_assert (innerc);
10922 tree startvar = OMP_CLAUSE_DECL (innerc);
10923 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_);
10924 gcc_assert (innerc);
10925 tree endvar = OMP_CLAUSE_DECL (innerc);
10926 if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
10928 gcc_assert (innerc);
10929 for (i = 1; i < fd->collapse; i++)
10931 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
10932 OMP_CLAUSE__LOOPTEMP_);
10933 gcc_assert (innerc);
10935 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
10936 OMP_CLAUSE__LOOPTEMP_);
10937 if (innerc)
10939 /* If needed (inner taskloop has lastprivate clause), propagate
10940 down the total number of iterations. */
10941 tree t = force_gimple_operand_gsi (&gsi, fd->loop.n2, false,
10942 NULL_TREE, false,
10943 GSI_CONTINUE_LINKING);
10944 assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t);
10945 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
10949 t0 = force_gimple_operand_gsi (&gsi, t0, false, NULL_TREE, false,
10950 GSI_CONTINUE_LINKING);
10951 assign_stmt = gimple_build_assign (startvar, t0);
10952 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
10954 t1 = force_gimple_operand_gsi (&gsi, t1, false, NULL_TREE, false,
10955 GSI_CONTINUE_LINKING);
10956 assign_stmt = gimple_build_assign (endvar, t1);
10957 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
10958 if (fd->collapse > 1)
10959 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
10961 /* Remove the GIMPLE_OMP_FOR statement. */
10962 gsi = gsi_for_stmt (for_stmt);
10963 gsi_remove (&gsi, true);
10965 gsi = gsi_last_bb (cont_bb);
10966 gsi_remove (&gsi, true);
10968 gsi = gsi_last_bb (exit_bb);
10969 gsi_remove (&gsi, true);
10971 FALLTHRU_EDGE (entry_bb)->probability = REG_BR_PROB_BASE;
10972 remove_edge (BRANCH_EDGE (entry_bb));
10973 FALLTHRU_EDGE (cont_bb)->probability = REG_BR_PROB_BASE;
10974 remove_edge (BRANCH_EDGE (cont_bb));
10975 set_immediate_dominator (CDI_DOMINATORS, exit_bb, cont_bb);
10976 set_immediate_dominator (CDI_DOMINATORS, region->entry,
10977 recompute_dominator (CDI_DOMINATORS, region->entry));
10980 /* The taskloop construct is represented after gimplification as
10981 two GIMPLE_OMP_FOR constructs with a GIMPLE_OMP_TASK sandwiched
10982 in between them. This routine expands the inner GIMPLE_OMP_FOR.
10983 The GOMP_taskloop{,_ull} function arranges for each task to be given
10984 just a single range of iterations. */
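/* Concretely (illustrative names, not from the sources): each task
   receives its bounds through the first two _looptemp_ clauses, so the
   code expanded here behaves like

     for (V = task_n1; V cond task_n2; V += STEP) BODY;

   where task_n1 and task_n2 are the per-task bounds filled in by
   GOMP_taskloop{,_ull} rather than the original N1 and N2.  */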
10986 static void
10987 expand_omp_taskloop_for_inner (struct omp_region *region,
10988 struct omp_for_data *fd,
10989 gimple *inner_stmt)
10991 tree e, t, type, itype, vmain, vback, bias = NULL_TREE;
10992 basic_block entry_bb, exit_bb, body_bb, cont_bb, collapse_bb = NULL;
10993 basic_block fin_bb;
10994 gimple_stmt_iterator gsi;
10995 edge ep;
10996 bool broken_loop = region->cont == NULL;
10997 tree *counts = NULL;
10998 tree n1, n2, step;
11000 itype = type = TREE_TYPE (fd->loop.v);
11001 if (POINTER_TYPE_P (type))
11002 itype = signed_type_for (type);
11004 /* See if we need to bias by LLONG_MIN. */
11005 if (fd->iter_type == long_long_unsigned_type_node
11006 && TREE_CODE (type) == INTEGER_TYPE
11007 && !TYPE_UNSIGNED (type))
11009 tree n1, n2;
11011 if (fd->loop.cond_code == LT_EXPR)
11013 n1 = fd->loop.n1;
11014 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
11016 else
11018 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
11019 n2 = fd->loop.n1;
11021 if (TREE_CODE (n1) != INTEGER_CST
11022 || TREE_CODE (n2) != INTEGER_CST
11023 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
11024 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
11027 entry_bb = region->entry;
11028 cont_bb = region->cont;
11029 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
11030 fin_bb = BRANCH_EDGE (entry_bb)->dest;
11031 gcc_assert (broken_loop
11032 || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
11033 body_bb = FALLTHRU_EDGE (entry_bb)->dest;
11034 if (!broken_loop)
11036 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
11037 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
11039 exit_bb = region->exit;
11041 /* Iteration space partitioning goes in ENTRY_BB. */
11042 gsi = gsi_last_bb (entry_bb);
11043 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
11045 if (fd->collapse > 1)
11047 int first_zero_iter = -1, dummy = -1;
11048 basic_block l2_dom_bb = NULL, dummy_bb = NULL;
11050 counts = XALLOCAVEC (tree, fd->collapse);
11051 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
11052 fin_bb, first_zero_iter,
11053 dummy_bb, dummy, l2_dom_bb);
11054 t = NULL_TREE;
11056 else
11057 t = integer_one_node;
11059 step = fd->loop.step;
11060 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
11061 OMP_CLAUSE__LOOPTEMP_);
11062 gcc_assert (innerc);
11063 n1 = OMP_CLAUSE_DECL (innerc);
11064 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_);
11065 gcc_assert (innerc);
11066 n2 = OMP_CLAUSE_DECL (innerc);
11067 if (bias)
11069 n1 = fold_build2 (PLUS_EXPR, fd->iter_type, n1, bias);
11070 n2 = fold_build2 (PLUS_EXPR, fd->iter_type, n2, bias);
11072 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
11073 true, NULL_TREE, true, GSI_SAME_STMT);
11074 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
11075 true, NULL_TREE, true, GSI_SAME_STMT);
11076 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
11077 true, NULL_TREE, true, GSI_SAME_STMT);
11079 tree startvar = fd->loop.v;
11080 tree endvar = NULL_TREE;
11082 if (gimple_omp_for_combined_p (fd->for_stmt))
11084 tree clauses = gimple_omp_for_clauses (inner_stmt);
11085 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
11086 gcc_assert (innerc);
11087 startvar = OMP_CLAUSE_DECL (innerc);
11088 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
11089 OMP_CLAUSE__LOOPTEMP_);
11090 gcc_assert (innerc);
11091 endvar = OMP_CLAUSE_DECL (innerc);
11093 t = fold_convert (TREE_TYPE (startvar), n1);
11094 t = force_gimple_operand_gsi (&gsi, t,
11095 DECL_P (startvar)
11096 && TREE_ADDRESSABLE (startvar),
11097 NULL_TREE, false, GSI_CONTINUE_LINKING);
11098 gimple *assign_stmt = gimple_build_assign (startvar, t);
11099 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
11101 t = fold_convert (TREE_TYPE (startvar), n2);
11102 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
11103 false, GSI_CONTINUE_LINKING);
11104 if (endvar)
11106 assign_stmt = gimple_build_assign (endvar, e);
11107 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
11108 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
11109 assign_stmt = gimple_build_assign (fd->loop.v, e);
11110 else
11111 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
11112 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
11114 if (fd->collapse > 1)
11115 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
11117 if (!broken_loop)
11119 /* The code controlling the sequential loop replaces the
11120 GIMPLE_OMP_CONTINUE. */
11121 gsi = gsi_last_bb (cont_bb);
11122 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
11123 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
11124 vmain = gimple_omp_continue_control_use (cont_stmt);
11125 vback = gimple_omp_continue_control_def (cont_stmt);
11127 if (!gimple_omp_for_combined_p (fd->for_stmt))
11129 if (POINTER_TYPE_P (type))
11130 t = fold_build_pointer_plus (vmain, step);
11131 else
11132 t = fold_build2 (PLUS_EXPR, type, vmain, step);
11133 t = force_gimple_operand_gsi (&gsi, t,
11134 DECL_P (vback)
11135 && TREE_ADDRESSABLE (vback),
11136 NULL_TREE, true, GSI_SAME_STMT);
11137 assign_stmt = gimple_build_assign (vback, t);
11138 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
11140 t = build2 (fd->loop.cond_code, boolean_type_node,
11141 DECL_P (vback) && TREE_ADDRESSABLE (vback)
11142 ? t : vback, e);
11143 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
11146 /* Remove the GIMPLE_OMP_CONTINUE statement. */
11147 gsi_remove (&gsi, true);
11149 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
11150 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
11153 /* Remove the GIMPLE_OMP_FOR statement. */
11154 gsi = gsi_for_stmt (fd->for_stmt);
11155 gsi_remove (&gsi, true);
11157 /* Remove the GIMPLE_OMP_RETURN statement. */
11158 gsi = gsi_last_bb (exit_bb);
11159 gsi_remove (&gsi, true);
11161 FALLTHRU_EDGE (entry_bb)->probability = REG_BR_PROB_BASE;
11162 if (!broken_loop)
11163 remove_edge (BRANCH_EDGE (entry_bb));
11164 else
11166 remove_edge_and_dominated_blocks (BRANCH_EDGE (entry_bb));
11167 region->outer->cont = NULL;
11170 /* Connect all the blocks. */
11171 if (!broken_loop)
11173 ep = find_edge (cont_bb, body_bb);
11174 if (gimple_omp_for_combined_p (fd->for_stmt))
11176 remove_edge (ep);
11177 ep = NULL;
11179 else if (fd->collapse > 1)
11181 remove_edge (ep);
11182 ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
11184 else
11185 ep->flags = EDGE_TRUE_VALUE;
11186 find_edge (cont_bb, fin_bb)->flags
11187 = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
11190 set_immediate_dominator (CDI_DOMINATORS, body_bb,
11191 recompute_dominator (CDI_DOMINATORS, body_bb));
11192 if (!broken_loop)
11193 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
11194 recompute_dominator (CDI_DOMINATORS, fin_bb));
11196 if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
11198 struct loop *loop = alloc_loop ();
11199 loop->header = body_bb;
11200 if (collapse_bb == NULL)
11201 loop->latch = cont_bb;
11202 add_loop (loop, body_bb->loop_father);
11206 /* A subroutine of expand_omp_for. Generate code for an OpenACC
11207 partitioned loop. The lowering here is abstracted, in that the
11208 loop parameters are passed through internal functions, which are
11209 further lowered by oacc_device_lower, once we get to the target
11210 compiler. The loop is of the form:
11212 for (V = B; V LTGT E; V += S) {BODY}
11214 where LTGT is < or >. We may have a specified chunking size, CHUNKING
11215 (constant 0 for no chunking) and we will have a GWV partitioning
11216 mask, specifying dimensions over which the loop is to be
11217 partitioned (see note below). We generate code that looks like:
11219 <entry_bb> [incoming FALL->body, BRANCH->exit]
11220 typedef signedintify (typeof (V)) T; // underlying signed integral type
11221 T range = E - B;
11222 T chunk_no = 0;
11223 T DIR = LTGT == '<' ? +1 : -1;
11224 T chunk_max = GOACC_LOOP_CHUNK (dir, range, S, CHUNK_SIZE, GWV);
11225 T step = GOACC_LOOP_STEP (dir, range, S, CHUNK_SIZE, GWV);
11227 <head_bb> [created by splitting end of entry_bb]
11228 T offset = GOACC_LOOP_OFFSET (dir, range, S, CHUNK_SIZE, GWV, chunk_no);
11229 T bound = GOACC_LOOP_BOUND (dir, range, S, CHUNK_SIZE, GWV, offset);
11230 if (!(offset LTGT bound)) goto bottom_bb;
11232 <body_bb> [incoming]
11233 V = B + offset;
11234 {BODY}
11236 <cont_bb> [incoming, may == body_bb FALL->exit_bb, BRANCH->body_bb]
11237 offset += step;
11238 if (offset LTGT bound) goto body_bb; [*]
11240 <bottom_bb> [created by splitting start of exit_bb] insert BRANCH->head_bb
11241 chunk_no++;
11242 if (chunk_no < chunk_max) goto head_bb;
11244 <exit_bb> [incoming]
11245 V = B + ((range -/+ 1) / S +/- 1) * S [*]
11247 [*] Needed if V live at end of loop
11249 Note: CHUNKING & GWV mask are specified explicitly here. This is a
11250 transition, and will be specified by a more general mechanism shortly.
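/* Illustrative example (not taken from the sources): for

     #pragma acc parallel loop gang
     for (i = 0; i < n; i++) ...

   the GWV mask would carry GOMP_DIM_MASK (GOMP_DIM_GANG), and the
   IFN_GOACC_LOOP_* calls emitted below compute each gang's offset and
   bound within [0, n); oacc_device_lower later replaces those calls with
   target-specific arithmetic.  */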
11253 static void
11254 expand_oacc_for (struct omp_region *region, struct omp_for_data *fd)
11256 tree v = fd->loop.v;
11257 enum tree_code cond_code = fd->loop.cond_code;
11258 enum tree_code plus_code = PLUS_EXPR;
11260 tree chunk_size = integer_minus_one_node;
11261 tree gwv = integer_zero_node;
11262 tree iter_type = TREE_TYPE (v);
11263 tree diff_type = iter_type;
11264 tree plus_type = iter_type;
11265 struct oacc_collapse *counts = NULL;
11267 gcc_checking_assert (gimple_omp_for_kind (fd->for_stmt)
11268 == GF_OMP_FOR_KIND_OACC_LOOP);
11269 gcc_assert (!gimple_omp_for_combined_into_p (fd->for_stmt));
11270 gcc_assert (cond_code == LT_EXPR || cond_code == GT_EXPR);
11272 if (POINTER_TYPE_P (iter_type))
11274 plus_code = POINTER_PLUS_EXPR;
11275 plus_type = sizetype;
11277 if (POINTER_TYPE_P (diff_type) || TYPE_UNSIGNED (diff_type))
11278 diff_type = signed_type_for (diff_type);
11280 basic_block entry_bb = region->entry; /* BB ending in OMP_FOR */
11281 basic_block exit_bb = region->exit; /* BB ending in OMP_RETURN */
11282 basic_block cont_bb = region->cont; /* BB ending in OMP_CONTINUE */
11283 basic_block bottom_bb = NULL;
11285 /* entry_bb has two successors; the branch edge is to the exit
11286 block, fallthrough edge to body. */
11287 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2
11288 && BRANCH_EDGE (entry_bb)->dest == exit_bb);
11290 /* If cont_bb non-NULL, it has 2 successors. The branch successor is
11291 body_bb, or a block whose only successor is body_bb. Its
11292 fallthrough successor is the final block (same as the branch
11293 successor of the entry_bb). */
11294 if (cont_bb)
11296 basic_block body_bb = FALLTHRU_EDGE (entry_bb)->dest;
11297 basic_block bed = BRANCH_EDGE (cont_bb)->dest;
11299 gcc_assert (FALLTHRU_EDGE (cont_bb)->dest == exit_bb);
11300 gcc_assert (bed == body_bb || single_succ_edge (bed)->dest == body_bb);
11302 else
11303 gcc_assert (!gimple_in_ssa_p (cfun));
11305 /* The exit block only has entry_bb and cont_bb as predecessors. */
11306 gcc_assert (EDGE_COUNT (exit_bb->preds) == 1 + (cont_bb != NULL));
11308 tree chunk_no;
11309 tree chunk_max = NULL_TREE;
11310 tree bound, offset;
11311 tree step = create_tmp_var (diff_type, ".step");
11312 bool up = cond_code == LT_EXPR;
11313 tree dir = build_int_cst (diff_type, up ? +1 : -1);
11314 bool chunking = !gimple_in_ssa_p (cfun);
11315 bool negating;
11317 /* SSA instances. */
11318 tree offset_incr = NULL_TREE;
11319 tree offset_init = NULL_TREE;
11321 gimple_stmt_iterator gsi;
11322 gassign *ass;
11323 gcall *call;
11324 gimple *stmt;
11325 tree expr;
11326 location_t loc;
11327 edge split, be, fte;
11329 /* Split the end of entry_bb to create head_bb. */
11330 split = split_block (entry_bb, last_stmt (entry_bb));
11331 basic_block head_bb = split->dest;
11332 entry_bb = split->src;
11334 /* Chunk setup goes at end of entry_bb, replacing the omp_for. */
11335 gsi = gsi_last_bb (entry_bb);
11336 gomp_for *for_stmt = as_a <gomp_for *> (gsi_stmt (gsi));
11337 loc = gimple_location (for_stmt);
11339 if (gimple_in_ssa_p (cfun))
11341 offset_init = gimple_omp_for_index (for_stmt, 0);
11342 gcc_assert (integer_zerop (fd->loop.n1));
11343 /* The SSA parallelizer does gang parallelism. */
11344 gwv = build_int_cst (integer_type_node, GOMP_DIM_MASK (GOMP_DIM_GANG));
11347 if (fd->collapse > 1)
11349 counts = XALLOCAVEC (struct oacc_collapse, fd->collapse);
11350 tree total = expand_oacc_collapse_init (fd, &gsi, counts,
11351 TREE_TYPE (fd->loop.n2));
11353 if (SSA_VAR_P (fd->loop.n2))
11355 total = force_gimple_operand_gsi (&gsi, total, false, NULL_TREE,
11356 true, GSI_SAME_STMT);
11357 ass = gimple_build_assign (fd->loop.n2, total);
11358 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
11363 tree b = fd->loop.n1;
11364 tree e = fd->loop.n2;
11365 tree s = fd->loop.step;
11367 b = force_gimple_operand_gsi (&gsi, b, true, NULL_TREE, true, GSI_SAME_STMT);
11368 e = force_gimple_operand_gsi (&gsi, e, true, NULL_TREE, true, GSI_SAME_STMT);
11370 /* Convert the step, avoiding possible unsigned->signed overflow. */
11371 negating = !up && TYPE_UNSIGNED (TREE_TYPE (s));
11372 if (negating)
11373 s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s);
11374 s = fold_convert (diff_type, s);
11375 if (negating)
11376 s = fold_build1 (NEGATE_EXPR, diff_type, s);
11377 s = force_gimple_operand_gsi (&gsi, s, true, NULL_TREE, true, GSI_SAME_STMT);
11379 if (!chunking)
11380 chunk_size = integer_zero_node;
11381 expr = fold_convert (diff_type, chunk_size);
11382 chunk_size = force_gimple_operand_gsi (&gsi, expr, true,
11383 NULL_TREE, true, GSI_SAME_STMT);
11384 /* Determine the range, avoiding possible unsigned->signed overflow. */
11385 negating = !up && TYPE_UNSIGNED (iter_type);
11386 expr = fold_build2 (MINUS_EXPR, plus_type,
11387 fold_convert (plus_type, negating ? b : e),
11388 fold_convert (plus_type, negating ? e : b));
11389 expr = fold_convert (diff_type, expr);
11390 if (negating)
11391 expr = fold_build1 (NEGATE_EXPR, diff_type, expr);
11392 tree range = force_gimple_operand_gsi (&gsi, expr, true,
11393 NULL_TREE, true, GSI_SAME_STMT);
11395 chunk_no = build_int_cst (diff_type, 0);
11396 if (chunking)
11398 gcc_assert (!gimple_in_ssa_p (cfun));
11400 expr = chunk_no;
11401 chunk_max = create_tmp_var (diff_type, ".chunk_max");
11402 chunk_no = create_tmp_var (diff_type, ".chunk_no");
11404 ass = gimple_build_assign (chunk_no, expr);
11405 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
11407 call = gimple_build_call_internal (IFN_GOACC_LOOP, 6,
11408 build_int_cst (integer_type_node,
11409 IFN_GOACC_LOOP_CHUNKS),
11410 dir, range, s, chunk_size, gwv);
11411 gimple_call_set_lhs (call, chunk_max);
11412 gimple_set_location (call, loc);
11413 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
11415 else
11416 chunk_size = chunk_no;
11418 call = gimple_build_call_internal (IFN_GOACC_LOOP, 6,
11419 build_int_cst (integer_type_node,
11420 IFN_GOACC_LOOP_STEP),
11421 dir, range, s, chunk_size, gwv);
11422 gimple_call_set_lhs (call, step);
11423 gimple_set_location (call, loc);
11424 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
11426 /* Remove the GIMPLE_OMP_FOR. */
11427 gsi_remove (&gsi, true);
11429 /* Fixup edges from head_bb */
11430 be = BRANCH_EDGE (head_bb);
11431 fte = FALLTHRU_EDGE (head_bb);
11432 be->flags |= EDGE_FALSE_VALUE;
11433 fte->flags ^= EDGE_FALLTHRU | EDGE_TRUE_VALUE;
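  /* Note: the XOR clears EDGE_FALLTHRU (previously set) and sets
     EDGE_TRUE_VALUE (previously clear), so the former fallthrough edge
     becomes the "condition true" edge into the loop body.  */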
11435 basic_block body_bb = fte->dest;
11437 if (gimple_in_ssa_p (cfun))
11439 gsi = gsi_last_bb (cont_bb);
11440 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
11442 offset = gimple_omp_continue_control_use (cont_stmt);
11443 offset_incr = gimple_omp_continue_control_def (cont_stmt);
11445 else
11447 offset = create_tmp_var (diff_type, ".offset");
11448 offset_init = offset_incr = offset;
11450 bound = create_tmp_var (TREE_TYPE (offset), ".bound");
11452 /* Loop offset & bound go into head_bb. */
11453 gsi = gsi_start_bb (head_bb);
11455 call = gimple_build_call_internal (IFN_GOACC_LOOP, 7,
11456 build_int_cst (integer_type_node,
11457 IFN_GOACC_LOOP_OFFSET),
11458 dir, range, s,
11459 chunk_size, gwv, chunk_no);
11460 gimple_call_set_lhs (call, offset_init);
11461 gimple_set_location (call, loc);
11462 gsi_insert_after (&gsi, call, GSI_CONTINUE_LINKING);
11464 call = gimple_build_call_internal (IFN_GOACC_LOOP, 7,
11465 build_int_cst (integer_type_node,
11466 IFN_GOACC_LOOP_BOUND),
11467 dir, range, s,
11468 chunk_size, gwv, offset_init);
11469 gimple_call_set_lhs (call, bound);
11470 gimple_set_location (call, loc);
11471 gsi_insert_after (&gsi, call, GSI_CONTINUE_LINKING);
11473 expr = build2 (cond_code, boolean_type_node, offset_init, bound);
11474 gsi_insert_after (&gsi, gimple_build_cond_empty (expr),
11475 GSI_CONTINUE_LINKING);
11477 /* V assignment goes into body_bb. */
11478 if (!gimple_in_ssa_p (cfun))
11480 gsi = gsi_start_bb (body_bb);
11482 expr = build2 (plus_code, iter_type, b,
11483 fold_convert (plus_type, offset));
11484 expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
11485 true, GSI_SAME_STMT);
11486 ass = gimple_build_assign (v, expr);
11487 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
11488 if (fd->collapse > 1)
11489 expand_oacc_collapse_vars (fd, &gsi, counts, v);
11492 /* Loop increment goes into cont_bb. If this is not a loop, we
11493 will have spawned threads as if it was, and each one will
11494 execute one iteration. The specification is not explicit about
11495 whether such constructs are ill-formed or not, and they can
11496 occur, especially when noreturn routines are involved. */
11497 if (cont_bb)
11499 gsi = gsi_last_bb (cont_bb);
11500 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
11501 loc = gimple_location (cont_stmt);
11503 /* Increment offset. */
11504 if (gimple_in_ssa_p (cfun))
11505 expr = build2 (plus_code, iter_type, offset,
11506 fold_convert (plus_type, step));
11507 else
11508 expr = build2 (PLUS_EXPR, diff_type, offset, step);
11509 expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
11510 true, GSI_SAME_STMT);
11511 ass = gimple_build_assign (offset_incr, expr);
11512 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
11513 expr = build2 (cond_code, boolean_type_node, offset_incr, bound);
11514 gsi_insert_before (&gsi, gimple_build_cond_empty (expr), GSI_SAME_STMT);
11516 /* Remove the GIMPLE_OMP_CONTINUE. */
11517 gsi_remove (&gsi, true);
11519 /* Fixup edges from cont_bb */
11520 be = BRANCH_EDGE (cont_bb);
11521 fte = FALLTHRU_EDGE (cont_bb);
11522 be->flags |= EDGE_TRUE_VALUE;
11523 fte->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE;
11525 if (chunking)
11527 /* Split the beginning of exit_bb to make bottom_bb. We
11528 need to insert a nop at the start, because splitting is
11529 after a stmt, not before. */
11530 gsi = gsi_start_bb (exit_bb);
11531 stmt = gimple_build_nop ();
11532 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
11533 split = split_block (exit_bb, stmt);
11534 bottom_bb = split->src;
11535 exit_bb = split->dest;
11536 gsi = gsi_last_bb (bottom_bb);
11538 /* Chunk increment and test goes into bottom_bb. */
11539 expr = build2 (PLUS_EXPR, diff_type, chunk_no,
11540 build_int_cst (diff_type, 1));
11541 ass = gimple_build_assign (chunk_no, expr);
11542 gsi_insert_after (&gsi, ass, GSI_CONTINUE_LINKING);
11544 /* Chunk test at end of bottom_bb. */
11545 expr = build2 (LT_EXPR, boolean_type_node, chunk_no, chunk_max);
11546 gsi_insert_after (&gsi, gimple_build_cond_empty (expr),
11547 GSI_CONTINUE_LINKING);
11549 /* Fixup edges from bottom_bb. */
11550 split->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE;
11551 make_edge (bottom_bb, head_bb, EDGE_TRUE_VALUE);
11555 gsi = gsi_last_bb (exit_bb);
11556 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
11557 loc = gimple_location (gsi_stmt (gsi));
11559 if (!gimple_in_ssa_p (cfun))
11561 /* Insert the final value of V, in case it is live. This is the
11562 value for the only thread that survives past the join. */
11563 expr = fold_build2 (MINUS_EXPR, diff_type, range, dir);
11564 expr = fold_build2 (PLUS_EXPR, diff_type, expr, s);
11565 expr = fold_build2 (TRUNC_DIV_EXPR, diff_type, expr, s);
11566 expr = fold_build2 (MULT_EXPR, diff_type, expr, s);
11567 expr = build2 (plus_code, iter_type, b, fold_convert (plus_type, expr));
11568 expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
11569 true, GSI_SAME_STMT);
11570 ass = gimple_build_assign (v, expr);
11571 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
11574 /* Remove the OMP_RETURN. */
11575 gsi_remove (&gsi, true);
11577 if (cont_bb)
11579 /* We now have one or two nested loops. Update the loop
11580 structures. */
11581 struct loop *parent = entry_bb->loop_father;
11582 struct loop *body = body_bb->loop_father;
11584 if (chunking)
11586 struct loop *chunk_loop = alloc_loop ();
11587 chunk_loop->header = head_bb;
11588 chunk_loop->latch = bottom_bb;
11589 add_loop (chunk_loop, parent);
11590 parent = chunk_loop;
11592 else if (parent != body)
11594 gcc_assert (body->header == body_bb);
11595 gcc_assert (body->latch == cont_bb
11596 || single_pred (body->latch) == cont_bb);
11597 parent = NULL;
11600 if (parent)
11602 struct loop *body_loop = alloc_loop ();
11603 body_loop->header = body_bb;
11604 body_loop->latch = cont_bb;
11605 add_loop (body_loop, parent);
11610 /* Expand the OMP loop defined by REGION. */
11612 static void
11613 expand_omp_for (struct omp_region *region, gimple *inner_stmt)
11615 struct omp_for_data fd;
11616 struct omp_for_data_loop *loops;
11618 loops
11619 = (struct omp_for_data_loop *)
11620 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
11621 * sizeof (struct omp_for_data_loop));
11622 extract_omp_for_data (as_a <gomp_for *> (last_stmt (region->entry)),
11623 &fd, loops);
11624 region->sched_kind = fd.sched_kind;
11625 region->sched_modifiers = fd.sched_modifiers;
11627 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
11628 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
11629 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
11630 if (region->cont)
11632 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
11633 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
11634 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
11636 else
11637 /* If there isn't a continue, then this is a degenerate case where
11638 the introduction of abnormal edges during lowering will prevent
11639 original loops from being detected. Fix that up. */
11640 loops_state_set (LOOPS_NEED_FIXUP);
11642 if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_SIMD)
11643 expand_omp_simd (region, &fd);
11644 else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
11645 expand_cilk_for (region, &fd);
11646 else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
11648 gcc_assert (!inner_stmt);
11649 expand_oacc_for (region, &fd);
11651 else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_TASKLOOP)
11653 if (gimple_omp_for_combined_into_p (fd.for_stmt))
11654 expand_omp_taskloop_for_inner (region, &fd, inner_stmt);
11655 else
11656 expand_omp_taskloop_for_outer (region, &fd, inner_stmt);
11658 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
11659 && !fd.have_ordered)
11661 if (fd.chunk_size == NULL)
11662 expand_omp_for_static_nochunk (region, &fd, inner_stmt);
11663 else
11664 expand_omp_for_static_chunk (region, &fd, inner_stmt);
11666 else
11668 int fn_index, start_ix, next_ix;
11670 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
11671 == GF_OMP_FOR_KIND_FOR);
11672 if (fd.chunk_size == NULL
11673 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
11674 fd.chunk_size = integer_zero_node;
11675 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
11676 switch (fd.sched_kind)
11678 case OMP_CLAUSE_SCHEDULE_RUNTIME:
11679 fn_index = 3;
11680 break;
11681 case OMP_CLAUSE_SCHEDULE_DYNAMIC:
11682 case OMP_CLAUSE_SCHEDULE_GUIDED:
11683 if ((fd.sched_modifiers & OMP_CLAUSE_SCHEDULE_NONMONOTONIC)
11684 && !fd.ordered
11685 && !fd.have_ordered)
11687 fn_index = 3 + fd.sched_kind;
11688 break;
11690 /* FALLTHRU */
11691 default:
11692 fn_index = fd.sched_kind;
11693 break;
11695 if (!fd.ordered)
11696 fn_index += fd.have_ordered * 6;
11697 if (fd.ordered)
11698 start_ix = ((int)BUILT_IN_GOMP_LOOP_DOACROSS_STATIC_START) + fn_index;
11699 else
11700 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
11701 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
11702 if (fd.iter_type == long_long_unsigned_type_node)
11704 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
11705 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
11706 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
11707 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
11709 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
11710 (enum built_in_function) next_ix, inner_stmt);
11713 if (gimple_in_ssa_p (cfun))
11714 update_ssa (TODO_update_ssa_only_virtuals);
11718 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
11720      v = GOMP_sections_start (n);
11721    L0:
11722      switch (v)
11723        {
11724        case 0:
11725          goto L2;
11726        case 1:
11727          section 1;
11728          goto L1;
11729        case 2:
11730          ...
11731        case n:
11732          ...
11733        default:
11734          abort ();
11735        }
11736    L1:
11737      v = GOMP_sections_next ();
11738      goto L0;
11739    L2:
11740      reduction;
11742 If this is a combined parallel sections, replace the call to
11743 GOMP_sections_start with a call to GOMP_sections_next. */
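/* A rough sketch of the runtime contract assumed above: GOMP_sections_start
   (n) registers N sections and returns the index (1..N) of the first
   section this thread should run, or 0 if none remain, while
   GOMP_sections_next returns the next pending section index.  */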
11745 static void
11746 expand_omp_sections (struct omp_region *region)
11748 tree t, u, vin = NULL, vmain, vnext, l2;
11749 unsigned len;
11750 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
11751 gimple_stmt_iterator si, switch_si;
11752 gomp_sections *sections_stmt;
11753 gimple *stmt;
11754 gomp_continue *cont;
11755 edge_iterator ei;
11756 edge e;
11757 struct omp_region *inner;
11758 unsigned i, casei;
11759 bool exit_reachable = region->cont != NULL;
11761 gcc_assert (region->exit != NULL);
11762 entry_bb = region->entry;
11763 l0_bb = single_succ (entry_bb);
11764 l1_bb = region->cont;
11765 l2_bb = region->exit;
11766 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
11767 l2 = gimple_block_label (l2_bb);
11768 else
11770 /* This can happen if there are reductions. */
11771 len = EDGE_COUNT (l0_bb->succs);
11772 gcc_assert (len > 0);
11773 e = EDGE_SUCC (l0_bb, len - 1);
11774 si = gsi_last_bb (e->dest);
11775 l2 = NULL_TREE;
11776 if (gsi_end_p (si)
11777 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
11778 l2 = gimple_block_label (e->dest);
11779 else
11780 FOR_EACH_EDGE (e, ei, l0_bb->succs)
11782 si = gsi_last_bb (e->dest);
11783 if (gsi_end_p (si)
11784 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
11786 l2 = gimple_block_label (e->dest);
11787 break;
11791 if (exit_reachable)
11792 default_bb = create_empty_bb (l1_bb->prev_bb);
11793 else
11794 default_bb = create_empty_bb (l0_bb);
11796 /* We will build a switch() with enough cases for all the
11797 GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
11798 and a default case to abort if something goes wrong. */
11799 len = EDGE_COUNT (l0_bb->succs);
11801 /* Use vec::quick_push on label_vec throughout, since we know the size
11802 in advance. */
11803 auto_vec<tree> label_vec (len);
11805 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
11806 GIMPLE_OMP_SECTIONS statement. */
11807 si = gsi_last_bb (entry_bb);
11808 sections_stmt = as_a <gomp_sections *> (gsi_stmt (si));
11809 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
11810 vin = gimple_omp_sections_control (sections_stmt);
11811 if (!is_combined_parallel (region))
11813 /* If we are not inside a combined parallel+sections region,
11814 call GOMP_sections_start. */
11815 t = build_int_cst (unsigned_type_node, len - 1);
11816 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
11817 stmt = gimple_build_call (u, 1, t);
11819 else
11821 /* Otherwise, call GOMP_sections_next. */
11822 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
11823 stmt = gimple_build_call (u, 0);
11825 gimple_call_set_lhs (stmt, vin);
11826 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
11827 gsi_remove (&si, true);
11829 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
11830 L0_BB. */
11831 switch_si = gsi_last_bb (l0_bb);
11832 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
11833 if (exit_reachable)
11835 cont = as_a <gomp_continue *> (last_stmt (l1_bb));
11836 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
11837 vmain = gimple_omp_continue_control_use (cont);
11838 vnext = gimple_omp_continue_control_def (cont);
11840 else
11842 vmain = vin;
11843 vnext = NULL_TREE;
11846 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
11847 label_vec.quick_push (t);
11848 i = 1;
11850 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
11851 for (inner = region->inner, casei = 1;
11852 inner;
11853 inner = inner->next, i++, casei++)
11855 basic_block s_entry_bb, s_exit_bb;
11857 /* Skip optional reduction region. */
11858 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
11860 --i;
11861 --casei;
11862 continue;
11865 s_entry_bb = inner->entry;
11866 s_exit_bb = inner->exit;
11868 t = gimple_block_label (s_entry_bb);
11869 u = build_int_cst (unsigned_type_node, casei);
11870 u = build_case_label (u, NULL, t);
11871 label_vec.quick_push (u);
11873 si = gsi_last_bb (s_entry_bb);
11874 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
11875 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
11876 gsi_remove (&si, true);
11877 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
11879 if (s_exit_bb == NULL)
11880 continue;
11882 si = gsi_last_bb (s_exit_bb);
11883 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
11884 gsi_remove (&si, true);
11886 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
11889 /* Error handling code goes in DEFAULT_BB. */
11890 t = gimple_block_label (default_bb);
11891 u = build_case_label (NULL, NULL, t);
11892 make_edge (l0_bb, default_bb, 0);
11893 add_bb_to_loop (default_bb, current_loops->tree_root);
11895 stmt = gimple_build_switch (vmain, u, label_vec);
11896 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
11897 gsi_remove (&switch_si, true);
11899 si = gsi_start_bb (default_bb);
11900 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
11901 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
11903 if (exit_reachable)
11905 tree bfn_decl;
11907 /* Code to get the next section goes in L1_BB. */
11908 si = gsi_last_bb (l1_bb);
11909 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
11911 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
11912 stmt = gimple_build_call (bfn_decl, 0);
11913 gimple_call_set_lhs (stmt, vnext);
11914 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
11915 gsi_remove (&si, true);
11917 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
11920 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
11921 si = gsi_last_bb (l2_bb);
11922 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
11923 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
11924 else if (gimple_omp_return_lhs (gsi_stmt (si)))
11925 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
11926 else
11927 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
11928 stmt = gimple_build_call (t, 0);
11929 if (gimple_omp_return_lhs (gsi_stmt (si)))
11930 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
11931 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
11932 gsi_remove (&si, true);
11934 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
11938 /* Expand code for an OpenMP single directive. We've already expanded
11939 much of the code, here we simply place the GOMP_barrier call. */
11941 static void
11942 expand_omp_single (struct omp_region *region)
11944 basic_block entry_bb, exit_bb;
11945 gimple_stmt_iterator si;
11947 entry_bb = region->entry;
11948 exit_bb = region->exit;
11950 si = gsi_last_bb (entry_bb);
11951 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
11952 gsi_remove (&si, true);
11953 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
11955 si = gsi_last_bb (exit_bb);
11956 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
11958 tree t = gimple_omp_return_lhs (gsi_stmt (si));
11959 gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
11961 gsi_remove (&si, true);
11962 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
11966 /* Generic expansion for OpenMP synchronization directives: master,
11967 ordered and critical. All we need to do here is remove the entry
11968 and exit markers for REGION. */
11970 static void
11971 expand_omp_synch (struct omp_region *region)
11973 basic_block entry_bb, exit_bb;
11974 gimple_stmt_iterator si;
11976 entry_bb = region->entry;
11977 exit_bb = region->exit;
11979 si = gsi_last_bb (entry_bb);
11980 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
11981 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
11982 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
11983 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
11984 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
11985 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
11986 gsi_remove (&si, true);
11987 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
11989 if (exit_bb)
11991 si = gsi_last_bb (exit_bb);
11992 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
11993 gsi_remove (&si, true);
11994 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
11998 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
11999 operation as a normal volatile load. */
12001 static bool
12002 expand_omp_atomic_load (basic_block load_bb, tree addr,
12003 tree loaded_val, int index)
12005 enum built_in_function tmpbase;
12006 gimple_stmt_iterator gsi;
12007 basic_block store_bb;
12008 location_t loc;
12009 gimple *stmt;
12010 tree decl, call, type, itype;
12012 gsi = gsi_last_bb (load_bb);
12013 stmt = gsi_stmt (gsi);
12014 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
12015 loc = gimple_location (stmt);
12017 /* ??? If the target does not implement atomic_load_optab[mode], and mode
12018 is smaller than word size, then expand_atomic_load assumes that the load
12019 is atomic. We could avoid the builtin entirely in this case. */
12021 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
12022 decl = builtin_decl_explicit (tmpbase);
12023 if (decl == NULL_TREE)
12024 return false;
12026 type = TREE_TYPE (loaded_val);
12027 itype = TREE_TYPE (TREE_TYPE (decl));
12029 call = build_call_expr_loc (loc, decl, 2, addr,
12030 build_int_cst (NULL,
12031 gimple_omp_atomic_seq_cst_p (stmt)
12032 ? MEMMODEL_SEQ_CST
12033 : MEMMODEL_RELAXED));
12034 if (!useless_type_conversion_p (type, itype))
12035 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
12036 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
12038 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
12039 gsi_remove (&gsi, true);
12041 store_bb = single_succ (load_bb);
12042 gsi = gsi_last_bb (store_bb);
12043 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
12044 gsi_remove (&gsi, true);
12046 if (gimple_in_ssa_p (cfun))
12047 update_ssa (TODO_update_ssa_no_phi);
12049 return true;
12052 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
12053 operation as a normal volatile store. */
12055 static bool
12056 expand_omp_atomic_store (basic_block load_bb, tree addr,
12057 tree loaded_val, tree stored_val, int index)
12059 enum built_in_function tmpbase;
12060 gimple_stmt_iterator gsi;
12061 basic_block store_bb = single_succ (load_bb);
12062 location_t loc;
12063 gimple *stmt;
12064 tree decl, call, type, itype;
12065 machine_mode imode;
12066 bool exchange;
12068 gsi = gsi_last_bb (load_bb);
12069 stmt = gsi_stmt (gsi);
12070 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
12072 /* If the load value is needed, then this isn't a store but an exchange. */
12073 exchange = gimple_omp_atomic_need_value_p (stmt);
12075 gsi = gsi_last_bb (store_bb);
12076 stmt = gsi_stmt (gsi);
12077 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
12078 loc = gimple_location (stmt);
12080 /* ??? If the target does not implement atomic_store_optab[mode], and mode
12081 is smaller than word size, then expand_atomic_store assumes that the store
12082 is atomic. We could avoid the builtin entirely in this case. */
12084 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
12085 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
12086 decl = builtin_decl_explicit (tmpbase);
12087 if (decl == NULL_TREE)
12088 return false;
12090 type = TREE_TYPE (stored_val);
12092 /* Dig out the type of the function's second argument. */
12093 itype = TREE_TYPE (decl);
12094 itype = TYPE_ARG_TYPES (itype);
12095 itype = TREE_CHAIN (itype);
12096 itype = TREE_VALUE (itype);
12097 imode = TYPE_MODE (itype);
12099 if (exchange && !can_atomic_exchange_p (imode, true))
12100 return false;
12102 if (!useless_type_conversion_p (itype, type))
12103 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
12104 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
12105 build_int_cst (NULL,
12106 gimple_omp_atomic_seq_cst_p (stmt)
12107 ? MEMMODEL_SEQ_CST
12108 : MEMMODEL_RELAXED));
12109 if (exchange)
12111 if (!useless_type_conversion_p (type, itype))
12112 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
12113 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
12116 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
12117 gsi_remove (&gsi, true);
12119 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
12120 gsi = gsi_last_bb (load_bb);
12121 gsi_remove (&gsi, true);
12123 if (gimple_in_ssa_p (cfun))
12124 update_ssa (TODO_update_ssa_no_phi);
12126 return true;
12129 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
12130 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
12131 size of the data type, and thus usable to find the index of the builtin
12132 decl. Returns false if the expression is not of the proper form. */
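/* Illustrative example (not taken from the sources): for

     #pragma omp atomic
     x = x + 1;

   load_bb ends with GIMPLE_OMP_ATOMIC_LOAD (tmp, &x), store_bb starts with
   "val = tmp + 1" followed by GIMPLE_OMP_ATOMIC_STORE (val), and the whole
   region collapses into a single __atomic_fetch_add call below.  */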
12134 static bool
12135 expand_omp_atomic_fetch_op (basic_block load_bb,
12136 tree addr, tree loaded_val,
12137 tree stored_val, int index)
12139 enum built_in_function oldbase, newbase, tmpbase;
12140 tree decl, itype, call;
12141 tree lhs, rhs;
12142 basic_block store_bb = single_succ (load_bb);
12143 gimple_stmt_iterator gsi;
12144 gimple *stmt;
12145 location_t loc;
12146 enum tree_code code;
12147 bool need_old, need_new;
12148 machine_mode imode;
12149 bool seq_cst;
12151 /* We expect to find the following sequences:
12153 load_bb:
12154 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
12156 store_bb:
12157 val = tmp OP something; (or: something OP tmp)
12158 GIMPLE_OMP_ATOMIC_STORE (val)
12160 ???FIXME: Allow a more flexible sequence.
12161 Perhaps use data flow to pick the statements.
12165 gsi = gsi_after_labels (store_bb);
12166 stmt = gsi_stmt (gsi);
12167 loc = gimple_location (stmt);
12168 if (!is_gimple_assign (stmt))
12169 return false;
12170 gsi_next (&gsi);
12171 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
12172 return false;
12173 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
12174 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
12175 seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb));
12176 gcc_checking_assert (!need_old || !need_new);
12178 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
12179 return false;
12181 /* Check for one of the supported fetch-op operations. */
12182 code = gimple_assign_rhs_code (stmt);
12183 switch (code)
12185 case PLUS_EXPR:
12186 case POINTER_PLUS_EXPR:
12187 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
12188 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
12189 break;
12190 case MINUS_EXPR:
12191 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
12192 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
12193 break;
12194 case BIT_AND_EXPR:
12195 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
12196 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
12197 break;
12198 case BIT_IOR_EXPR:
12199 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
12200 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
12201 break;
12202 case BIT_XOR_EXPR:
12203 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
12204 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
12205 break;
12206 default:
12207 return false;
12210 /* Make sure the expression is of the proper form. */
12211 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
12212 rhs = gimple_assign_rhs2 (stmt);
12213 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
12214 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
12215 rhs = gimple_assign_rhs1 (stmt);
12216 else
12217 return false;
12219 tmpbase = ((enum built_in_function)
12220 ((need_new ? newbase : oldbase) + index + 1));
12221 decl = builtin_decl_explicit (tmpbase);
12222 if (decl == NULL_TREE)
12223 return false;
12224 itype = TREE_TYPE (TREE_TYPE (decl));
12225 imode = TYPE_MODE (itype);
12227 /* We could test all of the various optabs involved, but the fact of the
12228 matter is that (with the exception of i486 vs i586 and xadd) all targets
12229 that support any atomic operation optab also implement compare-and-swap.
12230 Let optabs.c take care of expanding any compare-and-swap loop. */
12231 if (!can_compare_and_swap_p (imode, true))
12232 return false;
12234 gsi = gsi_last_bb (load_bb);
12235 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
12237 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
12238 It only requires that the operation happen atomically. Thus we can
12239 use the RELAXED memory model. */
12240 call = build_call_expr_loc (loc, decl, 3, addr,
12241 fold_convert_loc (loc, itype, rhs),
12242 build_int_cst (NULL,
12243 seq_cst ? MEMMODEL_SEQ_CST
12244 : MEMMODEL_RELAXED));
12246 if (need_old || need_new)
12248 lhs = need_old ? loaded_val : stored_val;
12249 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
12250 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
12252 else
12253 call = fold_convert_loc (loc, void_type_node, call);
12254 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
12255 gsi_remove (&gsi, true);
12257 gsi = gsi_last_bb (store_bb);
12258 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
12259 gsi_remove (&gsi, true);
12260 gsi = gsi_last_bb (store_bb);
12261 stmt = gsi_stmt (gsi);
12262 gsi_remove (&gsi, true);
12264 if (gimple_in_ssa_p (cfun))
12266 release_defs (stmt);
12267 update_ssa (TODO_update_ssa_no_phi);
12270 return true;
12273 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
12275 oldval = *addr;
12276 repeat:
12277 newval = rhs; // with oldval replacing *addr in rhs
12278 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
12279 if (oldval != newval)
12280 goto repeat;
12282 INDEX is log2 of the size of the data type, and thus usable to find the
12283 index of the builtin decl. */
12285 static bool
12286 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
12287 tree addr, tree loaded_val, tree stored_val,
12288 int index)
12290 tree loadedi, storedi, initial, new_storedi, old_vali;
12291 tree type, itype, cmpxchg, iaddr;
12292 gimple_stmt_iterator si;
12293 basic_block loop_header = single_succ (load_bb);
12294 gimple *phi, *stmt;
12295 edge e;
12296 enum built_in_function fncode;
12298 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
12299 order to use the RELAXED memory model effectively. */
12300 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
12301 + index + 1);
12302 cmpxchg = builtin_decl_explicit (fncode);
12303 if (cmpxchg == NULL_TREE)
12304 return false;
12305 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
12306 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
12308 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
12309 return false;
12311 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
12312 si = gsi_last_bb (load_bb);
12313 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
12315 /* For floating-point values, we'll need to view-convert them to integers
12316 so that we can perform the atomic compare and swap. Simplify the
12317 following code by always setting up the "i"ntegral variables. */
12318 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
12320 tree iaddr_val;
12322 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
12323 true));
12324 iaddr_val
12325 = force_gimple_operand_gsi (&si,
12326 fold_convert (TREE_TYPE (iaddr), addr),
12327 false, NULL_TREE, true, GSI_SAME_STMT);
12328 stmt = gimple_build_assign (iaddr, iaddr_val);
12329 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
12330 loadedi = create_tmp_var (itype);
12331 if (gimple_in_ssa_p (cfun))
12332 loadedi = make_ssa_name (loadedi);
12334 else
12336 iaddr = addr;
12337 loadedi = loaded_val;
12340 fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
12341 tree loaddecl = builtin_decl_explicit (fncode);
12342 if (loaddecl)
12343 initial
12344 = fold_convert (TREE_TYPE (TREE_TYPE (iaddr)),
12345 build_call_expr (loaddecl, 2, iaddr,
12346 build_int_cst (NULL_TREE,
12347 MEMMODEL_RELAXED)));
12348 else
12349 initial = build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)), iaddr,
12350 build_int_cst (TREE_TYPE (iaddr), 0));
12352 initial
12353 = force_gimple_operand_gsi (&si, initial, true, NULL_TREE, true,
12354 GSI_SAME_STMT);
12356 /* Move the value to the LOADEDI temporary. */
12357 if (gimple_in_ssa_p (cfun))
12359 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
12360 phi = create_phi_node (loadedi, loop_header);
12361 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
12362 initial);
12364 else
12365 gsi_insert_before (&si,
12366 gimple_build_assign (loadedi, initial),
12367 GSI_SAME_STMT);
12368 if (loadedi != loaded_val)
12370 gimple_stmt_iterator gsi2;
12371 tree x;
12373 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
12374 gsi2 = gsi_start_bb (loop_header);
12375 if (gimple_in_ssa_p (cfun))
12377 gassign *stmt;
12378 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
12379 true, GSI_SAME_STMT);
12380 stmt = gimple_build_assign (loaded_val, x);
12381 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
12383 else
12385 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
12386 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
12387 true, GSI_SAME_STMT);
12390 gsi_remove (&si, true);
12392 si = gsi_last_bb (store_bb);
12393 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
12395 if (iaddr == addr)
12396 storedi = stored_val;
12397 else
12398 storedi =
12399 force_gimple_operand_gsi (&si,
12400 build1 (VIEW_CONVERT_EXPR, itype,
12401 stored_val), true, NULL_TREE, true,
12402 GSI_SAME_STMT);
12404 /* Build the compare&swap statement. */
12405 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
12406 new_storedi = force_gimple_operand_gsi (&si,
12407 fold_convert (TREE_TYPE (loadedi),
12408 new_storedi),
12409 true, NULL_TREE,
12410 true, GSI_SAME_STMT);
12412 if (gimple_in_ssa_p (cfun))
12413 old_vali = loadedi;
12414 else
12416 old_vali = create_tmp_var (TREE_TYPE (loadedi));
12417 stmt = gimple_build_assign (old_vali, loadedi);
12418 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
12420 stmt = gimple_build_assign (loadedi, new_storedi);
12421 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
12424 /* Note that we always perform the comparison as an integer, even for
12425 floating point. This allows the atomic operation to properly
12426 succeed even with NaNs and -0.0. */
12427 stmt = gimple_build_cond_empty
12428 (build2 (NE_EXPR, boolean_type_node,
12429 new_storedi, old_vali));
12430 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
12432 /* Update cfg. */
12433 e = single_succ_edge (store_bb);
12434 e->flags &= ~EDGE_FALLTHRU;
12435 e->flags |= EDGE_FALSE_VALUE;
12437 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
12439 /* Copy the new value to loadedi (we already did that before the condition
12440 if we are not in SSA). */
12441 if (gimple_in_ssa_p (cfun))
12443 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
12444 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
12447 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
12448 gsi_remove (&si, true);
12450 struct loop *loop = alloc_loop ();
12451 loop->header = loop_header;
12452 loop->latch = store_bb;
12453 add_loop (loop, loop_header->loop_father);
12455 if (gimple_in_ssa_p (cfun))
12456 update_ssa (TODO_update_ssa_no_phi);
12458 return true;
12461 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
12463 GOMP_atomic_start ();
12464 *addr = rhs;
12465 GOMP_atomic_end ();
12467 The result is not globally atomic, but works so long as all parallel
12468 references are within #pragma omp atomic directives. According to
12469 responses received from omp@openmp.org, this appears to be within spec,
12470 which makes sense, since that's how several other compilers handle
12471 this situation as well.
12472 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
12473 expanding. STORED_VAL is the operand of the matching
12474 GIMPLE_OMP_ATOMIC_STORE.
12476 We replace
12477 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
12478 loaded_val = *addr;
12480 and replace
12481 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
12482 *addr = stored_val;
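/* A rough note: GOMP_atomic_start and GOMP_atomic_end serialize through a
   single global lock in libgomp, which is why this is only valid when all
   parallel references go through #pragma omp atomic, and why it is the
   slowest strategy tried by expand_omp_atomic.  */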
12485 static bool
12486 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
12487 tree addr, tree loaded_val, tree stored_val)
12489 gimple_stmt_iterator si;
12490 gassign *stmt;
12491 tree t;
12493 si = gsi_last_bb (load_bb);
12494 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
12496 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
12497 t = build_call_expr (t, 0);
12498 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
12500 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
12501 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
12502 gsi_remove (&si, true);
12504 si = gsi_last_bb (store_bb);
12505 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
12507 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
12508 stored_val);
12509 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
12511 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
12512 t = build_call_expr (t, 0);
12513 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
12514 gsi_remove (&si, true);
12516 if (gimple_in_ssa_p (cfun))
12517 update_ssa (TODO_update_ssa_no_phi);
12518 return true;
12521 /* Expand a GIMPLE_OMP_ATOMIC statement. We first try to expand it
12522 using expand_omp_atomic_fetch_op. If that fails, we try to
12523 call expand_omp_atomic_pipeline, and if that fails too, the
12524 ultimate fallback is wrapping the operation in a mutex
12525 (expand_omp_atomic_mutex). REGION is the atomic region built
12526 by build_omp_regions_1(). */
12528 static void
12529 expand_omp_atomic (struct omp_region *region)
12531 basic_block load_bb = region->entry, store_bb = region->exit;
12532 gomp_atomic_load *load = as_a <gomp_atomic_load *> (last_stmt (load_bb));
12533 gomp_atomic_store *store = as_a <gomp_atomic_store *> (last_stmt (store_bb));
12534 tree loaded_val = gimple_omp_atomic_load_lhs (load);
12535 tree addr = gimple_omp_atomic_load_rhs (load);
12536 tree stored_val = gimple_omp_atomic_store_val (store);
12537 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
12538 HOST_WIDE_INT index;
12540 /* Make sure the type is one of the supported sizes. */
12541 index = tree_to_uhwi (TYPE_SIZE_UNIT (type));
12542 index = exact_log2 (index);
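  /* INDEX is now log2 of the operand size in bytes; values 0..4 correspond
     to the 1, 2, 4, 8 and 16 byte variants of the __atomic/__sync builtins,
     which the helpers above select as BASE + INDEX + 1.  */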
12543 if (index >= 0 && index <= 4)
12545 unsigned int align = TYPE_ALIGN_UNIT (type);
12547 /* __sync builtins require strict data alignment. */
12548 if (exact_log2 (align) >= index)
12550 /* Atomic load. */
12551 if (loaded_val == stored_val
12552 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
12553 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
12554 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
12555 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
12556 return;
12558 /* Atomic store. */
12559 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
12560 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
12561 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
12562 && store_bb == single_succ (load_bb)
12563 && first_stmt (store_bb) == store
12564 && expand_omp_atomic_store (load_bb, addr, loaded_val,
12565 stored_val, index))
12566 return;
12568 /* When possible, use specialized atomic update functions. */
12569 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
12570 && store_bb == single_succ (load_bb)
12571 && expand_omp_atomic_fetch_op (load_bb, addr,
12572 loaded_val, stored_val, index))
12573 return;
12575 /* If we don't have specialized __sync builtins, try and implement
12576 as a compare and swap loop. */
12577 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
12578 loaded_val, stored_val, index))
12579 return;
12583 /* The ultimate fallback is wrapping the operation in a mutex. */
12584 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
12588 /* Encode an oacc launch argument. This matches the GOMP_LAUNCH_PACK
12589 macro in gomp-constants.h. We do not check for overflow. */
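/* Rough layout, assuming the GOMP_LAUNCH_* macros from gomp-constants.h:
   the launch CODE sits above GOMP_LAUNCH_CODE_SHIFT, the optional DEVICE
   above GOMP_LAUNCH_DEVICE_SHIFT (as done below), and OP occupies the low
   operand bits of the packed word.  */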
12591 static tree
12592 oacc_launch_pack (unsigned code, tree device, unsigned op)
12594 tree res;
12596 res = build_int_cst (unsigned_type_node, GOMP_LAUNCH_PACK (code, 0, op));
12597 if (device)
12599 device = fold_build2 (LSHIFT_EXPR, unsigned_type_node,
12600 device, build_int_cst (unsigned_type_node,
12601 GOMP_LAUNCH_DEVICE_SHIFT));
12602 res = fold_build2 (BIT_IOR_EXPR, unsigned_type_node, res, device);
12604 return res;
12607 /* Look for compute grid dimension clauses and convert to an attribute
12608 attached to FN. This permits the target-side code to (a) massage
12609 the dimensions, (b) emit that data and (c) optimize. Non-constant
12610 dimensions are pushed onto ARGS.
12612 The attribute value is a TREE_LIST. A set of dimensions is
12613 represented as a list of INTEGER_CST. Those that are runtime
12614 exprs are represented as an INTEGER_CST of zero.
12616 TODO. Normally the attribute will just contain a single such list. If,
12617 however, it contains a list of lists, this will represent the use of
12618 device_type. Each member of the outer list is an assoc list of
12619 dimensions, keyed by the device type. The first entry will be the
12620 default. Well, that's the plan. */
12622 #define OACC_FN_ATTRIB "oacc function"
12624 /* Replace any existing oacc fn attribute with updated dimensions. */
12626 void
12627 replace_oacc_fn_attrib (tree fn, tree dims)
12629 tree ident = get_identifier (OACC_FN_ATTRIB);
12630 tree attribs = DECL_ATTRIBUTES (fn);
12632 /* If we happen to be present as the first attrib, drop it. */
12633 if (attribs && TREE_PURPOSE (attribs) == ident)
12634 attribs = TREE_CHAIN (attribs);
12635 DECL_ATTRIBUTES (fn) = tree_cons (ident, dims, attribs);
12638 /* Scan CLAUSES for launch dimensions and attach them to the oacc
12639 function attribute. Push any that are non-constant onto the ARGS
12640 list, along with an appropriate GOMP_LAUNCH_DIM tag. IS_KERNEL is
12641 true if these are for a kernels region offload function. */
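/* Illustrative example (not taken from the sources): for
     num_gangs (32) num_workers (w) vector_length (128)
   the attribute becomes the TREE_LIST {32, 0, 128} in GOMP_DIM order; the
   dynamic worker dimension is recorded there as zero, and ARGS receives a
   GOMP_LAUNCH_DIM word with the worker bit set, followed by W itself.  */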
12643 void
12644 set_oacc_fn_attrib (tree fn, tree clauses, bool is_kernel, vec<tree> *args)
12646 /* Must match GOMP_DIM ordering. */
12647 static const omp_clause_code ids[]
12648 = { OMP_CLAUSE_NUM_GANGS, OMP_CLAUSE_NUM_WORKERS,
12649 OMP_CLAUSE_VECTOR_LENGTH };
12650 unsigned ix;
12651 tree dims[GOMP_DIM_MAX];
12652 tree attr = NULL_TREE;
12653 unsigned non_const = 0;
12655 for (ix = GOMP_DIM_MAX; ix--;)
12657 tree clause = find_omp_clause (clauses, ids[ix]);
12658 tree dim = NULL_TREE;
12660 if (clause)
12661 dim = OMP_CLAUSE_EXPR (clause, ids[ix]);
12662 dims[ix] = dim;
12663 if (dim && TREE_CODE (dim) != INTEGER_CST)
12665 dim = integer_zero_node;
12666 non_const |= GOMP_DIM_MASK (ix);
12668 attr = tree_cons (NULL_TREE, dim, attr);
12669 /* Note kernelness with TREE_PUBLIC. */
12670 if (is_kernel)
12671 TREE_PUBLIC (attr) = 1;
12674 replace_oacc_fn_attrib (fn, attr);
12676 if (non_const)
12678 /* Push a dynamic argument set. */
12679 args->safe_push (oacc_launch_pack (GOMP_LAUNCH_DIM,
12680 NULL_TREE, non_const));
12681 for (unsigned ix = 0; ix != GOMP_DIM_MAX; ix++)
12682 if (non_const & GOMP_DIM_MASK (ix))
12683 args->safe_push (dims[ix]);
12687 /* Process the routine's dimension clauses to generate an attribute
12688 value. Issue diagnostics as appropriate. We default to SEQ
12689 (OpenACC 2.5 clarifies this). All dimensions have a size of zero
12690 (dynamic). TREE_PURPOSE is set to indicate whether that dimension
12691 can have a loop partitioned on it. Non-zero indicates
12692 yes, zero indicates no. By construction, once a non-zero has been
12693 reached, further inner dimensions must also be non-zero. We set
12694 TREE_VALUE to zero for the dimensions that may be partitioned and
12695 1 for the other ones -- if a loop is (erroneously) spawned at
12696 an outer level, we don't want to try and partition it. */
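/* Illustrative example: for "#pragma acc routine worker", LEVEL is
   GOMP_DIM_WORKER, so the gang dimension gets TREE_PURPOSE 0 and
   TREE_VALUE 1 (no loop inside the routine may be partitioned over gangs),
   while the worker and vector dimensions get TREE_PURPOSE 1 and
   TREE_VALUE 0.  */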
12698 tree
12699 build_oacc_routine_dims (tree clauses)
12701 /* Must match GOMP_DIM ordering. */
12702 static const omp_clause_code ids[] =
12703 {OMP_CLAUSE_GANG, OMP_CLAUSE_WORKER, OMP_CLAUSE_VECTOR, OMP_CLAUSE_SEQ};
12704 int ix;
12705 int level = -1;
12707 for (; clauses; clauses = OMP_CLAUSE_CHAIN (clauses))
12708 for (ix = GOMP_DIM_MAX + 1; ix--;)
12709 if (OMP_CLAUSE_CODE (clauses) == ids[ix])
12711 if (level >= 0)
12712 error_at (OMP_CLAUSE_LOCATION (clauses),
12713 "multiple loop axes specified for routine");
12714 level = ix;
12715 break;
12718 /* Default to SEQ. */
12719 if (level < 0)
12720 level = GOMP_DIM_MAX;
12722 tree dims = NULL_TREE;
12724 for (ix = GOMP_DIM_MAX; ix--;)
12725 dims = tree_cons (build_int_cst (boolean_type_node, ix >= level),
12726 build_int_cst (integer_type_node, ix < level), dims);
12728 return dims;
12731 /* Retrieve the oacc function attrib and return it. Non-oacc
12732 functions will return NULL. */
12734 tree
12735 get_oacc_fn_attrib (tree fn)
12737 return lookup_attribute (OACC_FN_ATTRIB, DECL_ATTRIBUTES (fn));
12740 /* Return true if this oacc fn attrib is for a kernels offload
12741 region. We use the TREE_PUBLIC flag of each dimension -- only
12742 need to check the first one. */
12744 bool
12745 oacc_fn_attrib_kernels_p (tree attr)
12747 return TREE_PUBLIC (TREE_VALUE (attr));
12750 /* Return level at which oacc routine may spawn a partitioned loop, or
12751 -1 if it is not a routine (i.e. is an offload fn). */
12753 static int
12754 oacc_fn_attrib_level (tree attr)
12756 tree pos = TREE_VALUE (attr);
12758 if (!TREE_PURPOSE (pos))
12759 return -1;
12761 int ix = 0;
12762 for (ix = 0; ix != GOMP_DIM_MAX;
12763 ix++, pos = TREE_CHAIN (pos))
12764 if (!integer_zerop (TREE_PURPOSE (pos)))
12765 break;
12767 return ix;
12770 /* Extract an oacc execution dimension from FN. FN must be an
12771 offloaded function or routine that has already had its execution
12772 dimensions lowered to the target-specific values. */
12774 int
12775 get_oacc_fn_dim_size (tree fn, int axis)
12777 tree attrs = get_oacc_fn_attrib (fn);
12779 gcc_assert (axis < GOMP_DIM_MAX);
12781 tree dims = TREE_VALUE (attrs);
12782 while (axis--)
12783 dims = TREE_CHAIN (dims);
12785 int size = TREE_INT_CST_LOW (TREE_VALUE (dims));
12787 return size;
12790 /* Extract the dimension axis from an IFN_GOACC_DIM_POS or
12791 IFN_GOACC_DIM_SIZE call. */
12793 int
12794 get_oacc_ifn_dim_arg (const gimple *stmt)
12796 gcc_checking_assert (gimple_call_internal_fn (stmt) == IFN_GOACC_DIM_SIZE
12797 || gimple_call_internal_fn (stmt) == IFN_GOACC_DIM_POS);
12798 tree arg = gimple_call_arg (stmt, 0);
12799 HOST_WIDE_INT axis = TREE_INT_CST_LOW (arg);
12801 gcc_checking_assert (axis >= 0 && axis < GOMP_DIM_MAX);
12802 return (int) axis;
12805 /* Mark the loops inside the kernels region starting at REGION_ENTRY and ending
12806 at REGION_EXIT. */
12808 static void
12809 mark_loops_in_oacc_kernels_region (basic_block region_entry,
12810 basic_block region_exit)
12812 struct loop *outer = region_entry->loop_father;
12813 gcc_assert (region_exit == NULL || outer == region_exit->loop_father);
12815 /* Don't parallelize the kernels region if it contains more than one outer
12816 loop. */
12817 unsigned int nr_outer_loops = 0;
12818 struct loop *single_outer = NULL;
12819 for (struct loop *loop = outer->inner; loop != NULL; loop = loop->next)
12821 gcc_assert (loop_outer (loop) == outer);
12823 if (!dominated_by_p (CDI_DOMINATORS, loop->header, region_entry))
12824 continue;
12826 if (region_exit != NULL
12827 && dominated_by_p (CDI_DOMINATORS, loop->header, region_exit))
12828 continue;
12830 nr_outer_loops++;
12831 single_outer = loop;
12833 if (nr_outer_loops != 1)
12834 return;
12836 for (struct loop *loop = single_outer->inner; loop != NULL; loop = loop->inner)
12837 if (loop->next)
12838 return;
12840 /* Mark the loops in the region. */
12841 for (struct loop *loop = single_outer; loop != NULL; loop = loop->inner)
12842 loop->in_oacc_kernels_region = true;
12845 /* Types used to pass grid and work-group sizes to kernel invocation. */
12847 struct GTY(()) grid_launch_attributes_trees
12849 tree kernel_dim_array_type;
12850 tree kernel_lattrs_dimnum_decl;
12851 tree kernel_lattrs_grid_decl;
12852 tree kernel_lattrs_group_decl;
12853 tree kernel_launch_attributes_type;
12856 static GTY(()) struct grid_launch_attributes_trees *grid_attr_trees;
12858 /* Create types used to pass kernel launch attributes to target. */
12860 static void
12861 grid_create_kernel_launch_attr_types (void)
12863 if (grid_attr_trees)
12864 return;
12865 grid_attr_trees = ggc_alloc <grid_launch_attributes_trees> ();
12867 tree dim_arr_index_type
12868 = build_index_type (build_int_cst (integer_type_node, 2));
12869 grid_attr_trees->kernel_dim_array_type
12870 = build_array_type (uint32_type_node, dim_arr_index_type);
12872 grid_attr_trees->kernel_launch_attributes_type = make_node (RECORD_TYPE);
12873 grid_attr_trees->kernel_lattrs_dimnum_decl
12874 = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("ndim"),
12875 uint32_type_node);
12876 DECL_CHAIN (grid_attr_trees->kernel_lattrs_dimnum_decl) = NULL_TREE;
12878 grid_attr_trees->kernel_lattrs_grid_decl
12879 = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("grid_size"),
12880 grid_attr_trees->kernel_dim_array_type);
12881 DECL_CHAIN (grid_attr_trees->kernel_lattrs_grid_decl)
12882 = grid_attr_trees->kernel_lattrs_dimnum_decl;
12883 grid_attr_trees->kernel_lattrs_group_decl
12884 = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("group_size"),
12885 grid_attr_trees->kernel_dim_array_type);
12886 DECL_CHAIN (grid_attr_trees->kernel_lattrs_group_decl)
12887 = grid_attr_trees->kernel_lattrs_grid_decl;
12888 finish_builtin_struct (grid_attr_trees->kernel_launch_attributes_type,
12889 "__gomp_kernel_launch_attributes",
12890 grid_attr_trees->kernel_lattrs_group_decl, NULL_TREE);
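/* The record built above corresponds roughly to the following C
   declaration; the two arrays hold one element per grid dimension:

     struct __gomp_kernel_launch_attributes
     {
       uint32_t ndim;
       uint32_t grid_size[3];
       uint32_t group_size[3];
     };  */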
12893 /* Insert before the current statement in GSI a store of VALUE to INDEX of
12894 array (of type kernel_dim_array_type) FLD_DECL of RANGE_VAR. VALUE must be
12895 of type uint32_type_node. */
12897 static void
12898 grid_insert_store_range_dim (gimple_stmt_iterator *gsi, tree range_var,
12899 tree fld_decl, int index, tree value)
12901 tree ref = build4 (ARRAY_REF, uint32_type_node,
12902 build3 (COMPONENT_REF,
12903 grid_attr_trees->kernel_dim_array_type,
12904 range_var, fld_decl, NULL_TREE),
12905 build_int_cst (integer_type_node, index),
12906 NULL_TREE, NULL_TREE);
12907 gsi_insert_before (gsi, gimple_build_assign (ref, value), GSI_SAME_STMT);
12910 /* Return a tree representation of a pointer to a structure with grid and
12911 work-group size information. Statements filling that information will be
12912 inserted before GSI, TGT_STMT is the target statement which has the
12913 necessary information in it. */
12915 static tree
12916 grid_get_kernel_launch_attributes (gimple_stmt_iterator *gsi,
12917 gomp_target *tgt_stmt)
12919 grid_create_kernel_launch_attr_types ();
12920 tree u32_one = build_one_cst (uint32_type_node);
12921 tree lattrs = create_tmp_var (grid_attr_trees->kernel_launch_attributes_type,
12922 "__kernel_launch_attrs");
12924 unsigned max_dim = 0;
12925 for (tree clause = gimple_omp_target_clauses (tgt_stmt);
12926 clause;
12927 clause = OMP_CLAUSE_CHAIN (clause))
12929 if (OMP_CLAUSE_CODE (clause) != OMP_CLAUSE__GRIDDIM_)
12930 continue;
12932 unsigned dim = OMP_CLAUSE__GRIDDIM__DIMENSION (clause);
12933 max_dim = MAX (dim, max_dim);
12935 grid_insert_store_range_dim (gsi, lattrs,
12936 grid_attr_trees->kernel_lattrs_grid_decl,
12937 dim, OMP_CLAUSE__GRIDDIM__SIZE (clause));
12938 grid_insert_store_range_dim (gsi, lattrs,
12939 grid_attr_trees->kernel_lattrs_group_decl,
12940 dim, OMP_CLAUSE__GRIDDIM__GROUP (clause));
12943 tree dimref = build3 (COMPONENT_REF, uint32_type_node, lattrs,
12944 grid_attr_trees->kernel_lattrs_dimnum_decl, NULL_TREE);
12945 /* At this moment we cannot gridify a loop with a collapse clause. */
12946 /* TODO: Adjust when we support bigger collapse. */
12947 gcc_assert (max_dim == 0);
12948 gsi_insert_before (gsi, gimple_build_assign (dimref, u32_one), GSI_SAME_STMT);
12949 TREE_ADDRESSABLE (lattrs) = 1;
12950 return build_fold_addr_expr (lattrs);
12953 /* Build target argument identifier from the DEVICE identifier, value
12954 identifier ID and whether the element also has a SUBSEQUENT_PARAM. */
12956 static tree
12957 get_target_argument_identifier_1 (int device, bool subseqent_param, int id)
12959 tree t = build_int_cst (integer_type_node, device);
12960 if (subseqent_param)
12961 t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t,
12962 build_int_cst (integer_type_node,
12963 GOMP_TARGET_ARG_SUBSEQUENT_PARAM));
12964 t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t,
12965 build_int_cst (integer_type_node, id));
12966 return t;
12969 /* Like above but return it in a type that can be directly stored as an element
12970 of the argument array. */
12972 static tree
12973 get_target_argument_identifier (int device, bool subseqent_param, int id)
12975 tree t = get_target_argument_identifier_1 (device, subseqent_param, id);
12976 return fold_convert (ptr_type_node, t);
12979 /* Return a target argument consisting of DEVICE identifier, value identifier
12980 ID, and the actual VALUE. */
12982 static tree
12983 get_target_argument_value (gimple_stmt_iterator *gsi, int device, int id,
12984 tree value)
12986 tree t = fold_build2 (LSHIFT_EXPR, integer_type_node,
12987 fold_convert (integer_type_node, value),
12988 build_int_cst (unsigned_type_node,
12989 GOMP_TARGET_ARG_VALUE_SHIFT));
12990 t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t,
12991 get_target_argument_identifier_1 (device, false, id));
12992 t = fold_convert (ptr_type_node, t);
12993 return force_gimple_operand_gsi (gsi, t, true, NULL, true, GSI_SAME_STMT);
12996 /* If VALUE is an integer constant greater than -2^15 and smaller than 2^15,
12997 push one argument to ARGS with both the DEVICE, ID and VALUE embedded in it,
12998 otherwise push an identifier (with DEVICE and ID) and the VALUE in two
12999 arguments. */
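/* For instance, a compile-time num_teams value of 4 for all devices is
   small enough and becomes the single encoded argument
   (4 << GOMP_TARGET_ARG_VALUE_SHIFT) | GOMP_TARGET_ARG_NUM_TEAMS
   | GOMP_TARGET_ARG_DEVICE_ALL, whereas a value only known at run time
   is pushed as an identifier carrying GOMP_TARGET_ARG_SUBSEQUENT_PARAM
   followed by the value itself.  */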
13001 static void
13002 push_target_argument_according_to_value (gimple_stmt_iterator *gsi, int device,
13003 int id, tree value, vec <tree> *args)
13005 if (tree_fits_shwi_p (value)
13006 && tree_to_shwi (value) > -(1 << 15)
13007 && tree_to_shwi (value) < (1 << 15))
13008 args->quick_push (get_target_argument_value (gsi, device, id, value));
13009 else
13011 args->quick_push (get_target_argument_identifier (device, true, id));
13012 value = fold_convert (ptr_type_node, value);
13013 value = force_gimple_operand_gsi (gsi, value, true, NULL, true,
13014 GSI_SAME_STMT);
13015 args->quick_push (value);
13019 /* Create an array of arguments that is then passed to GOMP_target. */
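/* The resulting .omp_target_args array is laid out roughly as: the
   encoded num_teams argument, the encoded thread_limit argument, for
   HSA gridified kernels an identifier carrying
   GOMP_TARGET_ARG_HSA_KERNEL_ATTRIBUTES followed by a pointer to the
   launch attributes structure, and a terminating NULL pointer.  */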
13021 static tree
13022 get_target_arguments (gimple_stmt_iterator *gsi, gomp_target *tgt_stmt)
13024 auto_vec <tree, 6> args;
13025 tree clauses = gimple_omp_target_clauses (tgt_stmt);
13026 tree t, c = find_omp_clause (clauses, OMP_CLAUSE_NUM_TEAMS);
13027 if (c)
13028 t = OMP_CLAUSE_NUM_TEAMS_EXPR (c);
13029 else
13030 t = integer_minus_one_node;
13031 push_target_argument_according_to_value (gsi, GOMP_TARGET_ARG_DEVICE_ALL,
13032 GOMP_TARGET_ARG_NUM_TEAMS, t, &args);
13034 c = find_omp_clause (clauses, OMP_CLAUSE_THREAD_LIMIT);
13035 if (c)
13036 t = OMP_CLAUSE_THREAD_LIMIT_EXPR (c);
13037 else
13038 t = integer_minus_one_node;
13039 push_target_argument_according_to_value (gsi, GOMP_TARGET_ARG_DEVICE_ALL,
13040 GOMP_TARGET_ARG_THREAD_LIMIT, t,
13041 &args);
13043 /* Add HSA-specific grid sizes, if available. */
13044 if (find_omp_clause (gimple_omp_target_clauses (tgt_stmt),
13045 OMP_CLAUSE__GRIDDIM_))
13047 t = get_target_argument_identifier (GOMP_DEVICE_HSA, true,
13048 GOMP_TARGET_ARG_HSA_KERNEL_ATTRIBUTES);
13049 args.quick_push (t);
13050 args.quick_push (grid_get_kernel_launch_attributes (gsi, tgt_stmt));
13053 /* Produce more, perhaps device specific, arguments here. */
13055 tree argarray = create_tmp_var (build_array_type_nelts (ptr_type_node,
13056 args.length () + 1),
13057 ".omp_target_args");
13058 for (unsigned i = 0; i < args.length (); i++)
13060 tree ref = build4 (ARRAY_REF, ptr_type_node, argarray,
13061 build_int_cst (integer_type_node, i),
13062 NULL_TREE, NULL_TREE);
13063 gsi_insert_before (gsi, gimple_build_assign (ref, args[i]),
13064 GSI_SAME_STMT);
13066 tree ref = build4 (ARRAY_REF, ptr_type_node, argarray,
13067 build_int_cst (integer_type_node, args.length ()),
13068 NULL_TREE, NULL_TREE);
13069 gsi_insert_before (gsi, gimple_build_assign (ref, null_pointer_node),
13070 GSI_SAME_STMT);
13071 TREE_ADDRESSABLE (argarray) = 1;
13072 return build_fold_addr_expr (argarray);
13075 /* Expand the GIMPLE_OMP_TARGET starting at REGION. */
13077 static void
13078 expand_omp_target (struct omp_region *region)
13080 basic_block entry_bb, exit_bb, new_bb;
13081 struct function *child_cfun;
13082 tree child_fn, block, t;
13083 gimple_stmt_iterator gsi;
13084 gomp_target *entry_stmt;
13085 gimple *stmt;
13086 edge e;
13087 bool offloaded, data_region;
13089 entry_stmt = as_a <gomp_target *> (last_stmt (region->entry));
13090 new_bb = region->entry;
13092 offloaded = is_gimple_omp_offloaded (entry_stmt);
13093 switch (gimple_omp_target_kind (entry_stmt))
13095 case GF_OMP_TARGET_KIND_REGION:
13096 case GF_OMP_TARGET_KIND_UPDATE:
13097 case GF_OMP_TARGET_KIND_ENTER_DATA:
13098 case GF_OMP_TARGET_KIND_EXIT_DATA:
13099 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
13100 case GF_OMP_TARGET_KIND_OACC_KERNELS:
13101 case GF_OMP_TARGET_KIND_OACC_UPDATE:
13102 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
13103 case GF_OMP_TARGET_KIND_OACC_DECLARE:
13104 data_region = false;
13105 break;
13106 case GF_OMP_TARGET_KIND_DATA:
13107 case GF_OMP_TARGET_KIND_OACC_DATA:
13108 case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
13109 data_region = true;
13110 break;
13111 default:
13112 gcc_unreachable ();
13115 child_fn = NULL_TREE;
13116 child_cfun = NULL;
13117 if (offloaded)
13119 child_fn = gimple_omp_target_child_fn (entry_stmt);
13120 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
13123 /* Supported by expand_omp_taskreg, but not here. */
13124 if (child_cfun != NULL)
13125 gcc_checking_assert (!child_cfun->cfg);
13126 gcc_checking_assert (!gimple_in_ssa_p (cfun));
13128 entry_bb = region->entry;
13129 exit_bb = region->exit;
13131 if (gimple_omp_target_kind (entry_stmt) == GF_OMP_TARGET_KIND_OACC_KERNELS)
13132 mark_loops_in_oacc_kernels_region (region->entry, region->exit);
13134 if (offloaded)
13136 unsigned srcidx, dstidx, num;
13138 /* If the offloading region needs data sent from the parent
13139 function, then the very first statement (except possible
13140 tree profile counter updates) of the offloading body
13141 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
13142 &.OMP_DATA_O is passed as an argument to the child function,
13143 we need to replace it with the argument as seen by the child
13144 function.
13146 In most cases, this will end up being the identity assignment
13147 .OMP_DATA_I = .OMP_DATA_I. However, if the offloading body had
13148 a function call that has been inlined, the original PARM_DECL
13149 .OMP_DATA_I may have been converted into a different local
13150 variable. In which case, we need to keep the assignment. */
13151 tree data_arg = gimple_omp_target_data_arg (entry_stmt);
13152 if (data_arg)
13154 basic_block entry_succ_bb = single_succ (entry_bb);
13155 gimple_stmt_iterator gsi;
13156 tree arg;
13157 gimple *tgtcopy_stmt = NULL;
13158 tree sender = TREE_VEC_ELT (data_arg, 0);
13160 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
13162 gcc_assert (!gsi_end_p (gsi));
13163 stmt = gsi_stmt (gsi);
13164 if (gimple_code (stmt) != GIMPLE_ASSIGN)
13165 continue;
13167 if (gimple_num_ops (stmt) == 2)
13169 tree arg = gimple_assign_rhs1 (stmt);
13171 /* We're ignoring the subcode because we're
13172 effectively doing a STRIP_NOPS. */
13174 if (TREE_CODE (arg) == ADDR_EXPR
13175 && TREE_OPERAND (arg, 0) == sender)
13177 tgtcopy_stmt = stmt;
13178 break;
13183 gcc_assert (tgtcopy_stmt != NULL);
13184 arg = DECL_ARGUMENTS (child_fn);
13186 gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
13187 gsi_remove (&gsi, true);
13190 /* Declare local variables needed in CHILD_CFUN. */
13191 block = DECL_INITIAL (child_fn);
13192 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
13193 /* The gimplifier could record temporaries in the offloading block
13194 rather than in the containing function's local_decls chain,
13195 which would mean cgraph missed finalizing them. Do it now. */
13196 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
13197 if (VAR_P (t) && TREE_STATIC (t) && !DECL_EXTERNAL (t))
13198 varpool_node::finalize_decl (t);
13199 DECL_SAVED_TREE (child_fn) = NULL;
13200 /* We'll create a CFG for child_fn, so no gimple body is needed. */
13201 gimple_set_body (child_fn, NULL);
13202 TREE_USED (block) = 1;
13204 /* Reset DECL_CONTEXT on function arguments. */
13205 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
13206 DECL_CONTEXT (t) = child_fn;
13208 /* Split ENTRY_BB at the GIMPLE_OMP_TARGET statement,
13209 so that it can be moved to the child function. */
13210 gsi = gsi_last_bb (entry_bb);
13211 stmt = gsi_stmt (gsi);
13212 gcc_assert (stmt
13213 && gimple_code (stmt) == gimple_code (entry_stmt));
13214 e = split_block (entry_bb, stmt);
13215 gsi_remove (&gsi, true);
13216 entry_bb = e->dest;
13217 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
13219 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
13220 if (exit_bb)
13222 gsi = gsi_last_bb (exit_bb);
13223 gcc_assert (!gsi_end_p (gsi)
13224 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
13225 stmt = gimple_build_return (NULL);
13226 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
13227 gsi_remove (&gsi, true);
13230 /* Move the offloading region into CHILD_CFUN. */
13232 block = gimple_block (entry_stmt);
13234 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
13235 if (exit_bb)
13236 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
13237 /* When the OMP expansion process cannot guarantee an up-to-date
13238 loop tree, arrange for the child function to fix up its loops. */
13239 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
13240 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
13242 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
13243 num = vec_safe_length (child_cfun->local_decls);
13244 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
13246 t = (*child_cfun->local_decls)[srcidx];
13247 if (DECL_CONTEXT (t) == cfun->decl)
13248 continue;
13249 if (srcidx != dstidx)
13250 (*child_cfun->local_decls)[dstidx] = t;
13251 dstidx++;
13253 if (dstidx != num)
13254 vec_safe_truncate (child_cfun->local_decls, dstidx);
13256 /* Inform the callgraph about the new function. */
13257 child_cfun->curr_properties = cfun->curr_properties;
13258 child_cfun->has_simduid_loops |= cfun->has_simduid_loops;
13259 child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops;
13260 cgraph_node *node = cgraph_node::get_create (child_fn);
13261 node->parallelized_function = 1;
13262 cgraph_node::add_new_function (child_fn, true);
13264 /* Add the new function to the offload table. */
13265 if (ENABLE_OFFLOADING)
13266 vec_safe_push (offload_funcs, child_fn);
13268 bool need_asm = DECL_ASSEMBLER_NAME_SET_P (current_function_decl)
13269 && !DECL_ASSEMBLER_NAME_SET_P (child_fn);
13271 /* Fix the callgraph edges for child_cfun. Those for cfun will be
13272 fixed in a following pass. */
13273 push_cfun (child_cfun);
13274 if (need_asm)
13275 assign_assembler_name_if_neeeded (child_fn);
13276 cgraph_edge::rebuild_edges ();
13278 /* Some EH regions might become dead, see PR34608. If
13279 pass_cleanup_cfg isn't the first pass to happen with the
13280 new child, these dead EH edges might cause problems.
13281 Clean them up now. */
13282 if (flag_exceptions)
13284 basic_block bb;
13285 bool changed = false;
13287 FOR_EACH_BB_FN (bb, cfun)
13288 changed |= gimple_purge_dead_eh_edges (bb);
13289 if (changed)
13290 cleanup_tree_cfg ();
13292 if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
13293 verify_loop_structure ();
13294 pop_cfun ();
13296 if (dump_file && !gimple_in_ssa_p (cfun))
13298 omp_any_child_fn_dumped = true;
13299 dump_function_header (dump_file, child_fn, dump_flags);
13300 dump_function_to_file (child_fn, dump_file, dump_flags);
13304 /* Emit a library call to launch the offloading region, or do data
13305 transfers. */
13306 tree t1, t2, t3, t4, device, cond, depend, c, clauses;
13307 enum built_in_function start_ix;
13308 location_t clause_loc;
13309 unsigned int flags_i = 0;
13310 bool oacc_kernels_p = false;
13312 switch (gimple_omp_target_kind (entry_stmt))
13314 case GF_OMP_TARGET_KIND_REGION:
13315 start_ix = BUILT_IN_GOMP_TARGET;
13316 break;
13317 case GF_OMP_TARGET_KIND_DATA:
13318 start_ix = BUILT_IN_GOMP_TARGET_DATA;
13319 break;
13320 case GF_OMP_TARGET_KIND_UPDATE:
13321 start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
13322 break;
13323 case GF_OMP_TARGET_KIND_ENTER_DATA:
13324 start_ix = BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA;
13325 break;
13326 case GF_OMP_TARGET_KIND_EXIT_DATA:
13327 start_ix = BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA;
13328 flags_i |= GOMP_TARGET_FLAG_EXIT_DATA;
13329 break;
13330 case GF_OMP_TARGET_KIND_OACC_KERNELS:
13331 oacc_kernels_p = true;
13332 /* FALLTHROUGH */
13333 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
13334 start_ix = BUILT_IN_GOACC_PARALLEL;
13335 break;
13336 case GF_OMP_TARGET_KIND_OACC_DATA:
13337 case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
13338 start_ix = BUILT_IN_GOACC_DATA_START;
13339 break;
13340 case GF_OMP_TARGET_KIND_OACC_UPDATE:
13341 start_ix = BUILT_IN_GOACC_UPDATE;
13342 break;
13343 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
13344 start_ix = BUILT_IN_GOACC_ENTER_EXIT_DATA;
13345 break;
13346 case GF_OMP_TARGET_KIND_OACC_DECLARE:
13347 start_ix = BUILT_IN_GOACC_DECLARE;
13348 break;
13349 default:
13350 gcc_unreachable ();
13353 clauses = gimple_omp_target_clauses (entry_stmt);
13355 /* By default, the value of DEVICE is GOMP_DEVICE_ICV (let runtime
13356 library choose) and there is no conditional. */
13357 cond = NULL_TREE;
13358 device = build_int_cst (integer_type_node, GOMP_DEVICE_ICV);
13360 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
13361 if (c)
13362 cond = OMP_CLAUSE_IF_EXPR (c);
13364 c = find_omp_clause (clauses, OMP_CLAUSE_DEVICE);
13365 if (c)
13367 /* Even if we pass it to all library function calls, it is currently only
13368 defined/used for the OpenMP target ones. */
13369 gcc_checking_assert (start_ix == BUILT_IN_GOMP_TARGET
13370 || start_ix == BUILT_IN_GOMP_TARGET_DATA
13371 || start_ix == BUILT_IN_GOMP_TARGET_UPDATE
13372 || start_ix == BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA);
13374 device = OMP_CLAUSE_DEVICE_ID (c);
13375 clause_loc = OMP_CLAUSE_LOCATION (c);
13377 else
13378 clause_loc = gimple_location (entry_stmt);
13380 c = find_omp_clause (clauses, OMP_CLAUSE_NOWAIT);
13381 if (c)
13382 flags_i |= GOMP_TARGET_FLAG_NOWAIT;
13384 /* Ensure 'device' is of the correct type. */
13385 device = fold_convert_loc (clause_loc, integer_type_node, device);
13387 /* If we found the clause 'if (cond)', build
13388 (cond ? device : GOMP_DEVICE_HOST_FALLBACK). */
13389 if (cond)
13391 cond = gimple_boolify (cond);
13393 basic_block cond_bb, then_bb, else_bb;
13394 edge e;
13395 tree tmp_var;
13397 tmp_var = create_tmp_var (TREE_TYPE (device));
13398 if (offloaded)
13399 e = split_block_after_labels (new_bb);
13400 else
13402 gsi = gsi_last_bb (new_bb);
13403 gsi_prev (&gsi);
13404 e = split_block (new_bb, gsi_stmt (gsi));
13406 cond_bb = e->src;
13407 new_bb = e->dest;
13408 remove_edge (e);
13410 then_bb = create_empty_bb (cond_bb);
13411 else_bb = create_empty_bb (then_bb);
13412 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
13413 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
13415 stmt = gimple_build_cond_empty (cond);
13416 gsi = gsi_last_bb (cond_bb);
13417 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
13419 gsi = gsi_start_bb (then_bb);
13420 stmt = gimple_build_assign (tmp_var, device);
13421 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
13423 gsi = gsi_start_bb (else_bb);
13424 stmt = gimple_build_assign (tmp_var,
13425 build_int_cst (integer_type_node,
13426 GOMP_DEVICE_HOST_FALLBACK));
13427 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
13429 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
13430 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
13431 add_bb_to_loop (then_bb, cond_bb->loop_father);
13432 add_bb_to_loop (else_bb, cond_bb->loop_father);
13433 make_edge (then_bb, new_bb, EDGE_FALLTHRU);
13434 make_edge (else_bb, new_bb, EDGE_FALLTHRU);
13436 device = tmp_var;
13437 gsi = gsi_last_bb (new_bb);
13439 else
13441 gsi = gsi_last_bb (new_bb);
13442 device = force_gimple_operand_gsi (&gsi, device, true, NULL_TREE,
13443 true, GSI_SAME_STMT);
13446 t = gimple_omp_target_data_arg (entry_stmt);
13447 if (t == NULL)
13449 t1 = size_zero_node;
13450 t2 = build_zero_cst (ptr_type_node);
13451 t3 = t2;
13452 t4 = t2;
13454 else
13456 t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
13457 t1 = size_binop (PLUS_EXPR, t1, size_int (1));
13458 t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
13459 t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
13460 t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
13463 gimple *g;
13464 bool tagging = false;
13465 /* The maximum number of arguments used by any start_ix, not counting varargs. */
13466 auto_vec<tree, 11> args;
13467 args.quick_push (device);
13468 if (offloaded)
13469 args.quick_push (build_fold_addr_expr (child_fn));
13470 args.quick_push (t1);
13471 args.quick_push (t2);
13472 args.quick_push (t3);
13473 args.quick_push (t4);
13474 switch (start_ix)
13476 case BUILT_IN_GOACC_DATA_START:
13477 case BUILT_IN_GOACC_DECLARE:
13478 case BUILT_IN_GOMP_TARGET_DATA:
13479 break;
13480 case BUILT_IN_GOMP_TARGET:
13481 case BUILT_IN_GOMP_TARGET_UPDATE:
13482 case BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA:
13483 args.quick_push (build_int_cst (unsigned_type_node, flags_i));
13484 c = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
13485 if (c)
13486 depend = OMP_CLAUSE_DECL (c);
13487 else
13488 depend = build_int_cst (ptr_type_node, 0);
13489 args.quick_push (depend);
13490 if (start_ix == BUILT_IN_GOMP_TARGET)
13491 args.quick_push (get_target_arguments (&gsi, entry_stmt));
13492 break;
13493 case BUILT_IN_GOACC_PARALLEL:
13495 set_oacc_fn_attrib (child_fn, clauses, oacc_kernels_p, &args);
13496 tagging = true;
13498 /* FALLTHRU */
13499 case BUILT_IN_GOACC_ENTER_EXIT_DATA:
13500 case BUILT_IN_GOACC_UPDATE:
13502 tree t_async = NULL_TREE;
13504 /* If present, use the value specified by the respective
13505 clause, making sure that is of the correct type. */
13506 c = find_omp_clause (clauses, OMP_CLAUSE_ASYNC);
13507 if (c)
13508 t_async = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
13509 integer_type_node,
13510 OMP_CLAUSE_ASYNC_EXPR (c));
13511 else if (!tagging)
13512 /* Default values for t_async. */
13513 t_async = fold_convert_loc (gimple_location (entry_stmt),
13514 integer_type_node,
13515 build_int_cst (integer_type_node,
13516 GOMP_ASYNC_SYNC));
13517 if (tagging && t_async)
13519 unsigned HOST_WIDE_INT i_async = GOMP_LAUNCH_OP_MAX;
13521 if (TREE_CODE (t_async) == INTEGER_CST)
13523 /* See if we can pack the async arg into the tag's
13524 operand. */
13525 i_async = TREE_INT_CST_LOW (t_async);
13526 if (i_async < GOMP_LAUNCH_OP_MAX)
13527 t_async = NULL_TREE;
13528 else
13529 i_async = GOMP_LAUNCH_OP_MAX;
13531 args.safe_push (oacc_launch_pack (GOMP_LAUNCH_ASYNC, NULL_TREE,
13532 i_async));
13534 if (t_async)
13535 args.safe_push (t_async);
13537 /* Save the argument index, and ... */
13538 unsigned t_wait_idx = args.length ();
13539 unsigned num_waits = 0;
13540 c = find_omp_clause (clauses, OMP_CLAUSE_WAIT);
13541 if (!tagging || c)
13542 /* ... push a placeholder. */
13543 args.safe_push (integer_zero_node);
13545 for (; c; c = OMP_CLAUSE_CHAIN (c))
13546 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_WAIT)
13548 args.safe_push (fold_convert_loc (OMP_CLAUSE_LOCATION (c),
13549 integer_type_node,
13550 OMP_CLAUSE_WAIT_EXPR (c)));
13551 num_waits++;
13554 if (!tagging || num_waits)
13556 tree len;
13558 /* Now that we know the number, update the placeholder. */
13559 if (tagging)
13560 len = oacc_launch_pack (GOMP_LAUNCH_WAIT, NULL_TREE, num_waits);
13561 else
13562 len = build_int_cst (integer_type_node, num_waits);
13563 len = fold_convert_loc (gimple_location (entry_stmt),
13564 unsigned_type_node, len);
13565 args[t_wait_idx] = len;
13568 break;
13569 default:
13570 gcc_unreachable ();
13572 if (tagging)
13573 /* Push terminal marker - zero. */
13574 args.safe_push (oacc_launch_pack (0, NULL_TREE, 0));
13576 g = gimple_build_call_vec (builtin_decl_explicit (start_ix), args);
13577 gimple_set_location (g, gimple_location (entry_stmt));
13578 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
13579 if (!offloaded)
13581 g = gsi_stmt (gsi);
13582 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
13583 gsi_remove (&gsi, true);
13585 if (data_region && region->exit)
13587 gsi = gsi_last_bb (region->exit);
13588 g = gsi_stmt (gsi);
13589 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
13590 gsi_remove (&gsi, true);
13594 /* Expand the KFOR loop as a GPGPU kernel, i.e. as a body only, with the
13595 iteration variable derived from the thread number. */
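/* In effect the kernel body executes a single logical iteration with the
   iteration variable initialized roughly as

     V = N1 + omp_get_thread_num () * STEP

   rather than looping over the whole iteration space.  */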
13597 static void
13598 grid_expand_omp_for_loop (struct omp_region *kfor)
13600 tree t, threadid;
13601 tree type, itype;
13602 gimple_stmt_iterator gsi;
13603 tree n1, step;
13604 struct omp_for_data fd;
13606 gomp_for *for_stmt = as_a <gomp_for *> (last_stmt (kfor->entry));
13607 gcc_checking_assert (gimple_omp_for_kind (for_stmt)
13608 == GF_OMP_FOR_KIND_GRID_LOOP);
13609 basic_block body_bb = FALLTHRU_EDGE (kfor->entry)->dest;
13611 gcc_assert (gimple_omp_for_collapse (for_stmt) == 1);
13612 gcc_assert (kfor->cont);
13613 extract_omp_for_data (for_stmt, &fd, NULL);
13615 itype = type = TREE_TYPE (fd.loop.v);
13616 if (POINTER_TYPE_P (type))
13617 itype = signed_type_for (type);
13619 gsi = gsi_start_bb (body_bb);
13621 n1 = fd.loop.n1;
13622 step = fd.loop.step;
13623 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
13624 true, NULL_TREE, true, GSI_SAME_STMT);
13625 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
13626 true, NULL_TREE, true, GSI_SAME_STMT);
13627 threadid = build_call_expr (builtin_decl_explicit
13628 (BUILT_IN_OMP_GET_THREAD_NUM), 0);
13629 threadid = fold_convert (itype, threadid);
13630 threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE,
13631 true, GSI_SAME_STMT);
13633 tree startvar = fd.loop.v;
13634 t = fold_build2 (MULT_EXPR, itype, threadid, step);
13635 if (POINTER_TYPE_P (type))
13636 t = fold_build_pointer_plus (n1, t);
13637 else
13638 t = fold_build2 (PLUS_EXPR, type, t, n1);
13639 t = fold_convert (type, t);
13640 t = force_gimple_operand_gsi (&gsi, t,
13641 DECL_P (startvar)
13642 && TREE_ADDRESSABLE (startvar),
13643 NULL_TREE, true, GSI_SAME_STMT);
13644 gassign *assign_stmt = gimple_build_assign (startvar, t);
13645 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
13647 /* Remove the omp for statement. */
13648 gsi = gsi_last_bb (kfor->entry);
13649 gsi_remove (&gsi, true);
13651 /* Remove the GIMPLE_OMP_CONTINUE statement. */
13652 gsi = gsi_last_bb (kfor->cont);
13653 gcc_assert (!gsi_end_p (gsi)
13654 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_CONTINUE);
13655 gsi_remove (&gsi, true);
13657 /* Replace the GIMPLE_OMP_RETURN with a real return. */
13658 gsi = gsi_last_bb (kfor->exit);
13659 gcc_assert (!gsi_end_p (gsi)
13660 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
13661 gsi_remove (&gsi, true);
13663 /* Fix up the much simpler CFG. */
13664 remove_edge (find_edge (kfor->cont, body_bb));
13666 if (kfor->cont != body_bb)
13667 set_immediate_dominator (CDI_DOMINATORS, kfor->cont, body_bb);
13668 set_immediate_dominator (CDI_DOMINATORS, kfor->exit, kfor->cont);
13671 /* Structure passed to grid_remap_kernel_arg_accesses so that it can remap
13672 argument_decls. */
13674 struct grid_arg_decl_map
13676 tree old_arg;
13677 tree new_arg;
13680 /* Invoked through walk_gimple_op, will remap all PARM_DECLs to the ones
13681 pertaining to the kernel function. */
13683 static tree
13684 grid_remap_kernel_arg_accesses (tree *tp, int *walk_subtrees, void *data)
13686 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
13687 struct grid_arg_decl_map *adm = (struct grid_arg_decl_map *) wi->info;
13688 tree t = *tp;
13690 if (t == adm->old_arg)
13691 *tp = adm->new_arg;
13692 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
13693 return NULL_TREE;
13696 static void expand_omp (struct omp_region *region);
13698 /* If the TARGET region contains a kernel body for a loop, remove that region
13699 from the TARGET and expand it in GPGPU kernel fashion. */
13701 static void
13702 grid_expand_target_grid_body (struct omp_region *target)
13704 if (!hsa_gen_requested_p ())
13705 return;
13707 gomp_target *tgt_stmt = as_a <gomp_target *> (last_stmt (target->entry));
13708 struct omp_region **pp;
13710 for (pp = &target->inner; *pp; pp = &(*pp)->next)
13711 if ((*pp)->type == GIMPLE_OMP_GRID_BODY)
13712 break;
13714 struct omp_region *gpukernel = *pp;
13716 tree orig_child_fndecl = gimple_omp_target_child_fn (tgt_stmt);
13717 if (!gpukernel)
13719 /* HSA cannot handle OACC stuff. */
13720 if (gimple_omp_target_kind (tgt_stmt) != GF_OMP_TARGET_KIND_REGION)
13721 return;
13722 gcc_checking_assert (orig_child_fndecl);
13723 gcc_assert (!find_omp_clause (gimple_omp_target_clauses (tgt_stmt),
13724 OMP_CLAUSE__GRIDDIM_));
13725 cgraph_node *n = cgraph_node::get (orig_child_fndecl);
13727 hsa_register_kernel (n);
13728 return;
13731 gcc_assert (find_omp_clause (gimple_omp_target_clauses (tgt_stmt),
13732 OMP_CLAUSE__GRIDDIM_));
13733 tree inside_block = gimple_block (first_stmt (single_succ (gpukernel->entry)));
13734 *pp = gpukernel->next;
13735 for (pp = &gpukernel->inner; *pp; pp = &(*pp)->next)
13736 if ((*pp)->type == GIMPLE_OMP_FOR)
13737 break;
13739 struct omp_region *kfor = *pp;
13740 gcc_assert (kfor);
13741 gcc_assert (gimple_omp_for_kind (last_stmt ((kfor)->entry))
13742 == GF_OMP_FOR_KIND_GRID_LOOP);
13743 *pp = kfor->next;
13744 if (kfor->inner)
13745 expand_omp (kfor->inner);
13746 if (gpukernel->inner)
13747 expand_omp (gpukernel->inner);
13749 tree kern_fndecl = copy_node (orig_child_fndecl);
13750 DECL_NAME (kern_fndecl) = clone_function_name (kern_fndecl, "kernel");
13751 SET_DECL_ASSEMBLER_NAME (kern_fndecl, DECL_NAME (kern_fndecl));
13752 tree tgtblock = gimple_block (tgt_stmt);
13753 tree fniniblock = make_node (BLOCK);
13754 BLOCK_ABSTRACT_ORIGIN (fniniblock) = tgtblock;
13755 BLOCK_SOURCE_LOCATION (fniniblock) = BLOCK_SOURCE_LOCATION (tgtblock);
13756 BLOCK_SOURCE_END_LOCATION (fniniblock) = BLOCK_SOURCE_END_LOCATION (tgtblock);
13757 BLOCK_SUPERCONTEXT (fniniblock) = kern_fndecl;
13758 DECL_INITIAL (kern_fndecl) = fniniblock;
13759 push_struct_function (kern_fndecl);
13760 cfun->function_end_locus = gimple_location (tgt_stmt);
13761 init_tree_ssa (cfun);
13762 pop_cfun ();
13764 tree old_parm_decl = DECL_ARGUMENTS (kern_fndecl);
13765 gcc_assert (!DECL_CHAIN (old_parm_decl));
13766 tree new_parm_decl = copy_node (DECL_ARGUMENTS (kern_fndecl));
13767 DECL_CONTEXT (new_parm_decl) = kern_fndecl;
13768 DECL_ARGUMENTS (kern_fndecl) = new_parm_decl;
13769 gcc_assert (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (kern_fndecl))));
13770 DECL_RESULT (kern_fndecl) = copy_node (DECL_RESULT (kern_fndecl));
13771 DECL_CONTEXT (DECL_RESULT (kern_fndecl)) = kern_fndecl;
13772 struct function *kern_cfun = DECL_STRUCT_FUNCTION (kern_fndecl);
13773 kern_cfun->curr_properties = cfun->curr_properties;
13775 remove_edge (BRANCH_EDGE (kfor->entry));
13776 grid_expand_omp_for_loop (kfor);
13778 /* Remove the omp for statement. */
13779 gimple_stmt_iterator gsi = gsi_last_bb (gpukernel->entry);
13780 gsi_remove (&gsi, true);
13781 /* Replace the GIMPLE_OMP_RETURN at the end of the kernel region with a real
13782 return. */
13783 gsi = gsi_last_bb (gpukernel->exit);
13784 gcc_assert (!gsi_end_p (gsi)
13785 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
13786 gimple *ret_stmt = gimple_build_return (NULL);
13787 gsi_insert_after (&gsi, ret_stmt, GSI_SAME_STMT);
13788 gsi_remove (&gsi, true);
13790 /* Statements in the first BB in the target construct have been produced by
13791 target lowering and must be copied inside the GPUKERNEL, with the two
13792 exceptions of the first OMP statement and the OMP_DATA assignment
13793 statement. */
13794 gsi = gsi_start_bb (single_succ (gpukernel->entry));
13795 tree data_arg = gimple_omp_target_data_arg (tgt_stmt);
13796 tree sender = data_arg ? TREE_VEC_ELT (data_arg, 0) : NULL;
13797 for (gimple_stmt_iterator tsi = gsi_start_bb (single_succ (target->entry));
13798 !gsi_end_p (tsi); gsi_next (&tsi))
13800 gimple *stmt = gsi_stmt (tsi);
13801 if (is_gimple_omp (stmt))
13802 break;
13803 if (sender
13804 && is_gimple_assign (stmt)
13805 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
13806 && TREE_OPERAND (gimple_assign_rhs1 (stmt), 0) == sender)
13807 continue;
13808 gimple *copy = gimple_copy (stmt);
13809 gsi_insert_before (&gsi, copy, GSI_SAME_STMT);
13810 gimple_set_block (copy, fniniblock);
13813 move_sese_region_to_fn (kern_cfun, single_succ (gpukernel->entry),
13814 gpukernel->exit, inside_block);
13816 cgraph_node *kcn = cgraph_node::get_create (kern_fndecl);
13817 kcn->mark_force_output ();
13818 cgraph_node *orig_child = cgraph_node::get (orig_child_fndecl);
13820 hsa_register_kernel (kcn, orig_child);
13822 cgraph_node::add_new_function (kern_fndecl, true);
13823 push_cfun (kern_cfun);
13824 cgraph_edge::rebuild_edges ();
13826 /* Re-map any mention of the PARM_DECL of the original function to the
13827 PARM_DECL of the new one.
13829 TODO: It would be great if lowering produced references into the GPU
13830 kernel decl straight away and we did not have to do this. */
13831 struct grid_arg_decl_map adm;
13832 adm.old_arg = old_parm_decl;
13833 adm.new_arg = new_parm_decl;
13834 basic_block bb;
13835 FOR_EACH_BB_FN (bb, kern_cfun)
13837 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
13839 gimple *stmt = gsi_stmt (gsi);
13840 struct walk_stmt_info wi;
13841 memset (&wi, 0, sizeof (wi));
13842 wi.info = &adm;
13843 walk_gimple_op (stmt, grid_remap_kernel_arg_accesses, &wi);
13846 pop_cfun ();
13848 return;
13851 /* Expand the parallel region tree rooted at REGION. Expansion
13852 proceeds in depth-first order. Innermost regions are expanded
13853 first. This way, parallel regions that require a new function to
13854 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
13855 internal dependencies in their body. */
13857 static void
13858 expand_omp (struct omp_region *region)
13860 omp_any_child_fn_dumped = false;
13861 while (region)
13863 location_t saved_location;
13864 gimple *inner_stmt = NULL;
13866 /* First, determine whether this is a combined parallel+workshare
13867 region. */
13868 if (region->type == GIMPLE_OMP_PARALLEL)
13869 determine_parallel_type (region);
13870 else if (region->type == GIMPLE_OMP_TARGET)
13871 grid_expand_target_grid_body (region);
13873 if (region->type == GIMPLE_OMP_FOR
13874 && gimple_omp_for_combined_p (last_stmt (region->entry)))
13875 inner_stmt = last_stmt (region->inner->entry);
13877 if (region->inner)
13878 expand_omp (region->inner);
13880 saved_location = input_location;
13881 if (gimple_has_location (last_stmt (region->entry)))
13882 input_location = gimple_location (last_stmt (region->entry));
13884 switch (region->type)
13886 case GIMPLE_OMP_PARALLEL:
13887 case GIMPLE_OMP_TASK:
13888 expand_omp_taskreg (region);
13889 break;
13891 case GIMPLE_OMP_FOR:
13892 expand_omp_for (region, inner_stmt);
13893 break;
13895 case GIMPLE_OMP_SECTIONS:
13896 expand_omp_sections (region);
13897 break;
13899 case GIMPLE_OMP_SECTION:
13900 /* Individual omp sections are handled together with their
13901 parent GIMPLE_OMP_SECTIONS region. */
13902 break;
13904 case GIMPLE_OMP_SINGLE:
13905 expand_omp_single (region);
13906 break;
13908 case GIMPLE_OMP_ORDERED:
13910 gomp_ordered *ord_stmt
13911 = as_a <gomp_ordered *> (last_stmt (region->entry));
13912 if (find_omp_clause (gimple_omp_ordered_clauses (ord_stmt),
13913 OMP_CLAUSE_DEPEND))
13915 /* We'll expand these when expanding the corresponding
13916 worksharing region with ordered(n) clause. */
13917 gcc_assert (region->outer
13918 && region->outer->type == GIMPLE_OMP_FOR);
13919 region->ord_stmt = ord_stmt;
13920 break;
13923 /* FALLTHRU */
13924 case GIMPLE_OMP_MASTER:
13925 case GIMPLE_OMP_TASKGROUP:
13926 case GIMPLE_OMP_CRITICAL:
13927 case GIMPLE_OMP_TEAMS:
13928 expand_omp_synch (region);
13929 break;
13931 case GIMPLE_OMP_ATOMIC_LOAD:
13932 expand_omp_atomic (region);
13933 break;
13935 case GIMPLE_OMP_TARGET:
13936 expand_omp_target (region);
13937 break;
13939 default:
13940 gcc_unreachable ();
13943 input_location = saved_location;
13944 region = region->next;
13946 if (omp_any_child_fn_dumped)
13948 if (dump_file)
13949 dump_function_header (dump_file, current_function_decl, dump_flags);
13950 omp_any_child_fn_dumped = false;
13955 /* Helper for build_omp_regions. Scan the dominator tree starting at
13956 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
13957 true, the function ends once a single tree is built (otherwise, the whole
13958 forest of OMP constructs may be built). */
13960 static void
13961 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
13962 bool single_tree)
13964 gimple_stmt_iterator gsi;
13965 gimple *stmt;
13966 basic_block son;
13968 gsi = gsi_last_bb (bb);
13969 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
13971 struct omp_region *region;
13972 enum gimple_code code;
13974 stmt = gsi_stmt (gsi);
13975 code = gimple_code (stmt);
13976 if (code == GIMPLE_OMP_RETURN)
13978 /* STMT is the return point out of region PARENT. Mark it
13979 as the exit point and make PARENT the immediately
13980 enclosing region. */
13981 gcc_assert (parent);
13982 region = parent;
13983 region->exit = bb;
13984 parent = parent->outer;
13986 else if (code == GIMPLE_OMP_ATOMIC_STORE)
13988 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
13989 GIMPLE_OMP_RETURN, but matches with
13990 GIMPLE_OMP_ATOMIC_LOAD. */
13991 gcc_assert (parent);
13992 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
13993 region = parent;
13994 region->exit = bb;
13995 parent = parent->outer;
13997 else if (code == GIMPLE_OMP_CONTINUE)
13999 gcc_assert (parent);
14000 parent->cont = bb;
14002 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
14004 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
14005 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
14007 else
14009 region = new_omp_region (bb, code, parent);
14010 /* Otherwise... */
14011 if (code == GIMPLE_OMP_TARGET)
14013 switch (gimple_omp_target_kind (stmt))
14015 case GF_OMP_TARGET_KIND_REGION:
14016 case GF_OMP_TARGET_KIND_DATA:
14017 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
14018 case GF_OMP_TARGET_KIND_OACC_KERNELS:
14019 case GF_OMP_TARGET_KIND_OACC_DATA:
14020 case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
14021 break;
14022 case GF_OMP_TARGET_KIND_UPDATE:
14023 case GF_OMP_TARGET_KIND_ENTER_DATA:
14024 case GF_OMP_TARGET_KIND_EXIT_DATA:
14025 case GF_OMP_TARGET_KIND_OACC_UPDATE:
14026 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
14027 case GF_OMP_TARGET_KIND_OACC_DECLARE:
14028 /* ..., other than for those stand-alone directives... */
14029 region = NULL;
14030 break;
14031 default:
14032 gcc_unreachable ();
14035 else if (code == GIMPLE_OMP_ORDERED
14036 && find_omp_clause (gimple_omp_ordered_clauses
14037 (as_a <gomp_ordered *> (stmt)),
14038 OMP_CLAUSE_DEPEND))
14039 /* #pragma omp ordered depend is also just a stand-alone
14040 directive. */
14041 region = NULL;
14042 /* ..., this directive becomes the parent for a new region. */
14043 if (region)
14044 parent = region;
14048 if (single_tree && !parent)
14049 return;
14051 for (son = first_dom_son (CDI_DOMINATORS, bb);
14052 son;
14053 son = next_dom_son (CDI_DOMINATORS, son))
14054 build_omp_regions_1 (son, parent, single_tree);
14057 /* Builds the tree of OMP regions rooted at ROOT, storing it to
14058 root_omp_region. */
14060 static void
14061 build_omp_regions_root (basic_block root)
14063 gcc_assert (root_omp_region == NULL);
14064 build_omp_regions_1 (root, NULL, true);
14065 gcc_assert (root_omp_region != NULL);
14068 /* Expands omp construct (and its subconstructs) starting in HEAD. */
14070 void
14071 omp_expand_local (basic_block head)
14073 build_omp_regions_root (head);
14074 if (dump_file && (dump_flags & TDF_DETAILS))
14076 fprintf (dump_file, "\nOMP region tree\n\n");
14077 dump_omp_region (dump_file, root_omp_region, 0);
14078 fprintf (dump_file, "\n");
14081 remove_exit_barriers (root_omp_region);
14082 expand_omp (root_omp_region);
14084 free_omp_regions ();
14087 /* Scan the CFG and build a tree of OMP regions rooted at
14088 root_omp_region. */
14090 static void
14091 build_omp_regions (void)
14093 gcc_assert (root_omp_region == NULL);
14094 calculate_dominance_info (CDI_DOMINATORS);
14095 build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
14098 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
14100 static unsigned int
14101 execute_expand_omp (void)
14103 build_omp_regions ();
14105 if (!root_omp_region)
14106 return 0;
14108 if (dump_file)
14110 fprintf (dump_file, "\nOMP region tree\n\n");
14111 dump_omp_region (dump_file, root_omp_region, 0);
14112 fprintf (dump_file, "\n");
14115 remove_exit_barriers (root_omp_region);
14117 expand_omp (root_omp_region);
14119 if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
14120 verify_loop_structure ();
14121 cleanup_tree_cfg ();
14123 free_omp_regions ();
14125 return 0;
14128 /* OMP expansion -- the default pass, run before creation of SSA form. */
14130 namespace {
14132 const pass_data pass_data_expand_omp =
14134 GIMPLE_PASS, /* type */
14135 "ompexp", /* name */
14136 OPTGROUP_NONE, /* optinfo_flags */
14137 TV_NONE, /* tv_id */
14138 PROP_gimple_any, /* properties_required */
14139 PROP_gimple_eomp, /* properties_provided */
14140 0, /* properties_destroyed */
14141 0, /* todo_flags_start */
14142 0, /* todo_flags_finish */
14145 class pass_expand_omp : public gimple_opt_pass
14147 public:
14148 pass_expand_omp (gcc::context *ctxt)
14149 : gimple_opt_pass (pass_data_expand_omp, ctxt)
14152 /* opt_pass methods: */
14153 virtual unsigned int execute (function *)
14155 bool gate = ((flag_cilkplus != 0 || flag_openacc != 0 || flag_openmp != 0
14156 || flag_openmp_simd != 0)
14157 && !seen_error ());
14159 /* This pass always runs, to provide PROP_gimple_eomp.
14160 But often, there is nothing to do. */
14161 if (!gate)
14162 return 0;
14164 return execute_expand_omp ();
14167 }; // class pass_expand_omp
14169 } // anon namespace
14171 gimple_opt_pass *
14172 make_pass_expand_omp (gcc::context *ctxt)
14174 return new pass_expand_omp (ctxt);
14177 namespace {
14179 const pass_data pass_data_expand_omp_ssa =
14181 GIMPLE_PASS, /* type */
14182 "ompexpssa", /* name */
14183 OPTGROUP_NONE, /* optinfo_flags */
14184 TV_NONE, /* tv_id */
14185 PROP_cfg | PROP_ssa, /* properties_required */
14186 PROP_gimple_eomp, /* properties_provided */
14187 0, /* properties_destroyed */
14188 0, /* todo_flags_start */
14189 TODO_cleanup_cfg | TODO_rebuild_alias, /* todo_flags_finish */
14192 class pass_expand_omp_ssa : public gimple_opt_pass
14194 public:
14195 pass_expand_omp_ssa (gcc::context *ctxt)
14196 : gimple_opt_pass (pass_data_expand_omp_ssa, ctxt)
14199 /* opt_pass methods: */
14200 virtual bool gate (function *fun)
14202 return !(fun->curr_properties & PROP_gimple_eomp);
14204 virtual unsigned int execute (function *) { return execute_expand_omp (); }
14205 opt_pass * clone () { return new pass_expand_omp_ssa (m_ctxt); }
14207 }; // class pass_expand_omp_ssa
14209 } // anon namespace
14211 gimple_opt_pass *
14212 make_pass_expand_omp_ssa (gcc::context *ctxt)
14214 return new pass_expand_omp_ssa (ctxt);
14217 /* Routines to lower OMP directives into OMP-GIMPLE. */
14219 /* If ctx is a worksharing context inside of a cancellable parallel
14220 region and it isn't nowait, add a LHS to its GIMPLE_OMP_RETURN
14221 and a conditional branch to the parallel's cancel_label to handle
14222 cancellation in the implicit barrier. */
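/* The emitted sequence after the GIMPLE_OMP_RETURN is roughly

     if (lhs != 0) goto <parallel's cancel_label>; else goto <fallthru>;
     <fallthru>:

   where lhs holds the cancellation status produced by the implicit
   barrier.  */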
14224 static void
14225 maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
14227 gimple *omp_return = gimple_seq_last_stmt (*body);
14228 gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
14229 if (gimple_omp_return_nowait_p (omp_return))
14230 return;
14231 if (ctx->outer
14232 && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_PARALLEL
14233 && ctx->outer->cancellable)
14235 tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
14236 tree c_bool_type = TREE_TYPE (TREE_TYPE (fndecl));
14237 tree lhs = create_tmp_var (c_bool_type);
14238 gimple_omp_return_set_lhs (omp_return, lhs);
14239 tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
14240 gimple *g = gimple_build_cond (NE_EXPR, lhs,
14241 fold_convert (c_bool_type,
14242 boolean_false_node),
14243 ctx->outer->cancel_label, fallthru_label);
14244 gimple_seq_add_stmt (body, g);
14245 gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
14249 /* Lower the OpenMP sections directive in the current statement in GSI_P.
14250 CTX is the enclosing OMP context for the current statement. */
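/* The replacement bind built below has roughly this shape:

     <clause initialization sequence>
     GIMPLE_OMP_SECTIONS <clauses>
     GIMPLE_OMP_SECTIONS_SWITCH
     <bind containing the lowered section bodies>
     GIMPLE_OMP_CONTINUE <.section, .section>
     <reduction sequence>
     <clause cleanup sequence>
     GIMPLE_OMP_RETURN [nowait]  */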
14252 static void
14253 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
14255 tree block, control;
14256 gimple_stmt_iterator tgsi;
14257 gomp_sections *stmt;
14258 gimple *t;
14259 gbind *new_stmt, *bind;
14260 gimple_seq ilist, dlist, olist, new_body;
14262 stmt = as_a <gomp_sections *> (gsi_stmt (*gsi_p));
14264 push_gimplify_context ();
14266 dlist = NULL;
14267 ilist = NULL;
14268 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
14269 &ilist, &dlist, ctx, NULL);
14271 new_body = gimple_omp_body (stmt);
14272 gimple_omp_set_body (stmt, NULL);
14273 tgsi = gsi_start (new_body);
14274 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
14276 omp_context *sctx;
14277 gimple *sec_start;
14279 sec_start = gsi_stmt (tgsi);
14280 sctx = maybe_lookup_ctx (sec_start);
14281 gcc_assert (sctx);
14283 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
14284 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
14285 GSI_CONTINUE_LINKING);
14286 gimple_omp_set_body (sec_start, NULL);
14288 if (gsi_one_before_end_p (tgsi))
14290 gimple_seq l = NULL;
14291 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
14292 &l, ctx);
14293 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
14294 gimple_omp_section_set_last (sec_start);
14297 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
14298 GSI_CONTINUE_LINKING);
14301 block = make_node (BLOCK);
14302 bind = gimple_build_bind (NULL, new_body, block);
14304 olist = NULL;
14305 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
14307 block = make_node (BLOCK);
14308 new_stmt = gimple_build_bind (NULL, NULL, block);
14309 gsi_replace (gsi_p, new_stmt, true);
14311 pop_gimplify_context (new_stmt);
14312 gimple_bind_append_vars (new_stmt, ctx->block_vars);
14313 BLOCK_VARS (block) = gimple_bind_vars (bind);
14314 if (BLOCK_VARS (block))
14315 TREE_USED (block) = 1;
14317 new_body = NULL;
14318 gimple_seq_add_seq (&new_body, ilist);
14319 gimple_seq_add_stmt (&new_body, stmt);
14320 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
14321 gimple_seq_add_stmt (&new_body, bind);
14323 control = create_tmp_var (unsigned_type_node, ".section");
14324 t = gimple_build_omp_continue (control, control);
14325 gimple_omp_sections_set_control (stmt, control);
14326 gimple_seq_add_stmt (&new_body, t);
14328 gimple_seq_add_seq (&new_body, olist);
14329 if (ctx->cancellable)
14330 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
14331 gimple_seq_add_seq (&new_body, dlist);
14333 new_body = maybe_catch_exception (new_body);
14335 t = gimple_build_omp_return
14336 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
14337 OMP_CLAUSE_NOWAIT));
14338 gimple_seq_add_stmt (&new_body, t);
14339 maybe_add_implicit_barrier_cancel (ctx, &new_body);
14341 gimple_bind_set_body (new_stmt, new_body);
14345 /* A subroutine of lower_omp_single. Expand the simple form of
14346 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
14348 if (GOMP_single_start ())
14349 BODY;
14350 [ GOMP_barrier (); ] -> unless 'nowait' is present.
14352 FIXME. It may be better to delay expanding the logic of this until
14353 pass_expand_omp. The expanded logic may make the job more difficult
14354 for a synchronization analysis pass. */
14356 static void
14357 lower_omp_single_simple (gomp_single *single_stmt, gimple_seq *pre_p)
14359 location_t loc = gimple_location (single_stmt);
14360 tree tlabel = create_artificial_label (loc);
14361 tree flabel = create_artificial_label (loc);
14362 gimple *call, *cond;
14363 tree lhs, decl;
14365 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
14366 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)));
14367 call = gimple_build_call (decl, 0);
14368 gimple_call_set_lhs (call, lhs);
14369 gimple_seq_add_stmt (pre_p, call);
14371 cond = gimple_build_cond (EQ_EXPR, lhs,
14372 fold_convert_loc (loc, TREE_TYPE (lhs),
14373 boolean_true_node),
14374 tlabel, flabel);
14375 gimple_seq_add_stmt (pre_p, cond);
14376 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
14377 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
14378 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
14382 /* A subroutine of lower_omp_single. Expand the simple form of
14383 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
14385 #pragma omp single copyprivate (a, b, c)
14387 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
14390 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
14392 BODY;
14393 copyout.a = a;
14394 copyout.b = b;
14395 copyout.c = c;
14396 GOMP_single_copy_end (&copyout);
14398 else
14400 a = copyout_p->a;
14401 b = copyout_p->b;
14402 c = copyout_p->c;
14404 GOMP_barrier ();
14407 FIXME. It may be better to delay expanding the logic of this until
14408 pass_expand_omp. The expanded logic may make the job more difficult
14409 for a synchronization analysis pass. */
14411 static void
14412 lower_omp_single_copy (gomp_single *single_stmt, gimple_seq *pre_p,
14413 omp_context *ctx)
14415 tree ptr_type, t, l0, l1, l2, bfn_decl;
14416 gimple_seq copyin_seq;
14417 location_t loc = gimple_location (single_stmt);
14419 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
14421 ptr_type = build_pointer_type (ctx->record_type);
14422 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
14424 l0 = create_artificial_label (loc);
14425 l1 = create_artificial_label (loc);
14426 l2 = create_artificial_label (loc);
14428 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
14429 t = build_call_expr_loc (loc, bfn_decl, 0);
14430 t = fold_convert_loc (loc, ptr_type, t);
14431 gimplify_assign (ctx->receiver_decl, t, pre_p);
14433 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
14434 build_int_cst (ptr_type, 0));
14435 t = build3 (COND_EXPR, void_type_node, t,
14436 build_and_jump (&l0), build_and_jump (&l1));
14437 gimplify_and_add (t, pre_p);
14439 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
14441 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
14443 copyin_seq = NULL;
14444 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
14445 &copyin_seq, ctx);
14447 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
14448 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
14449 t = build_call_expr_loc (loc, bfn_decl, 1, t);
14450 gimplify_and_add (t, pre_p);
14452 t = build_and_jump (&l2);
14453 gimplify_and_add (t, pre_p);
14455 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
14457 gimple_seq_add_seq (pre_p, copyin_seq);
14459 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
14463 /* Expand code for an OpenMP single directive. */
14465 static void
14466 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
14468 tree block;
14469 gimple *t;
14470 gomp_single *single_stmt = as_a <gomp_single *> (gsi_stmt (*gsi_p));
14471 gbind *bind;
14472 gimple_seq bind_body, bind_body_tail = NULL, dlist;
14474 push_gimplify_context ();
14476 block = make_node (BLOCK);
14477 bind = gimple_build_bind (NULL, NULL, block);
14478 gsi_replace (gsi_p, bind, true);
14479 bind_body = NULL;
14480 dlist = NULL;
14481 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
14482 &bind_body, &dlist, ctx, NULL);
14483 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
14485 gimple_seq_add_stmt (&bind_body, single_stmt);
14487 if (ctx->record_type)
14488 lower_omp_single_copy (single_stmt, &bind_body, ctx);
14489 else
14490 lower_omp_single_simple (single_stmt, &bind_body);
14492 gimple_omp_set_body (single_stmt, NULL);
14494 gimple_seq_add_seq (&bind_body, dlist);
14496 bind_body = maybe_catch_exception (bind_body);
14498 t = gimple_build_omp_return
14499 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
14500 OMP_CLAUSE_NOWAIT));
14501 gimple_seq_add_stmt (&bind_body_tail, t);
14502 maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
14503 if (ctx->record_type)
14505 gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
14506 tree clobber = build_constructor (ctx->record_type, NULL);
14507 TREE_THIS_VOLATILE (clobber) = 1;
14508 gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
14509 clobber), GSI_SAME_STMT);
14511 gimple_seq_add_seq (&bind_body, bind_body_tail);
14512 gimple_bind_set_body (bind, bind_body);
14514 pop_gimplify_context (bind);
14516 gimple_bind_append_vars (bind, ctx->block_vars);
14517 BLOCK_VARS (block) = ctx->block_vars;
14518 if (BLOCK_VARS (block))
14519 TREE_USED (block) = 1;
14523 /* Expand code for an OpenMP master directive. */
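/* The lowered form is roughly

     if (omp_get_thread_num () != 0) goto <lab>;
     <body>
     <lab>:
     GIMPLE_OMP_RETURN

   so that only the master thread executes the body.  */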
14525 static void
14526 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
14528 tree block, lab = NULL, x, bfn_decl;
14529 gimple *stmt = gsi_stmt (*gsi_p);
14530 gbind *bind;
14531 location_t loc = gimple_location (stmt);
14532 gimple_seq tseq;
14534 push_gimplify_context ();
14536 block = make_node (BLOCK);
14537 bind = gimple_build_bind (NULL, NULL, block);
14538 gsi_replace (gsi_p, bind, true);
14539 gimple_bind_add_stmt (bind, stmt);
14541 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
14542 x = build_call_expr_loc (loc, bfn_decl, 0);
14543 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
14544 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
14545 tseq = NULL;
14546 gimplify_and_add (x, &tseq);
14547 gimple_bind_add_seq (bind, tseq);
14549 lower_omp (gimple_omp_body_ptr (stmt), ctx);
14550 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
14551 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
14552 gimple_omp_set_body (stmt, NULL);
14554 gimple_bind_add_stmt (bind, gimple_build_label (lab));
14556 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
14558 pop_gimplify_context (bind);
14560 gimple_bind_append_vars (bind, ctx->block_vars);
14561 BLOCK_VARS (block) = ctx->block_vars;
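/* A minimal sketch, assuming plain C names: roughly the shape lower_omp_master
   above produces for "#pragma omp master".  omp_get_thread_num is the library
   function behind BUILT_IN_OMP_GET_THREAD_NUM; "body" stands for the lowered
   region body.  */

extern int omp_get_thread_num (void);

static void
master_lowered_shape (void (*body) (void))
{
  /* Only the thread with id 0 executes the body; every other thread
     jumps straight to the label emitted after it.  */
  if (omp_get_thread_num () == 0)
    body ();
}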
14565 /* Expand code for an OpenMP taskgroup directive. */
14567 static void
14568 lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
14570 gimple *stmt = gsi_stmt (*gsi_p);
14571 gcall *x;
14572 gbind *bind;
14573 tree block = make_node (BLOCK);
14575 bind = gimple_build_bind (NULL, NULL, block);
14576 gsi_replace (gsi_p, bind, true);
14577 gimple_bind_add_stmt (bind, stmt);
14579 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
14580 0);
14581 gimple_bind_add_stmt (bind, x);
14583 lower_omp (gimple_omp_body_ptr (stmt), ctx);
14584 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
14585 gimple_omp_set_body (stmt, NULL);
14587 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
14589 gimple_bind_append_vars (bind, ctx->block_vars);
14590 BLOCK_VARS (block) = ctx->block_vars;
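/* A minimal sketch, assuming plain C names: lower_omp_taskgroup above only
   opens the taskgroup; the matching end call is not emitted by this
   function.  GOMP_taskgroup_start and GOMP_taskgroup_end are the libgomp
   entry points; "body" stands for the lowered body.  */

extern void GOMP_taskgroup_start (void);
extern void GOMP_taskgroup_end (void);

static void
taskgroup_lowered_shape (void (*body) (void))
{
  GOMP_taskgroup_start ();	/* added by lower_omp_taskgroup */
  body ();
  GOMP_taskgroup_end ();	/* paired end, produced elsewhere */
}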
14594 /* Fold the OMP_ORDERED_CLAUSES for the OMP_ORDERED in STMT if possible. */
14596 static void
14597 lower_omp_ordered_clauses (gimple_stmt_iterator *gsi_p, gomp_ordered *ord_stmt,
14598 omp_context *ctx)
14600 struct omp_for_data fd;
14601 if (!ctx->outer || gimple_code (ctx->outer->stmt) != GIMPLE_OMP_FOR)
14602 return;
14604 unsigned int len = gimple_omp_for_collapse (ctx->outer->stmt);
14605 struct omp_for_data_loop *loops = XALLOCAVEC (struct omp_for_data_loop, len);
14606 extract_omp_for_data (as_a <gomp_for *> (ctx->outer->stmt), &fd, loops);
14607 if (!fd.ordered)
14608 return;
14610 tree *list_p = gimple_omp_ordered_clauses_ptr (ord_stmt);
14611 tree c = gimple_omp_ordered_clauses (ord_stmt);
14612 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
14613 && OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
14615 /* Merge depend clauses from multiple adjacent
14616 #pragma omp ordered depend(sink:...) constructs
14617 into one #pragma omp ordered depend(sink:...), so that
14618 we can optimize them together. */
14619 gimple_stmt_iterator gsi = *gsi_p;
14620 gsi_next (&gsi);
14621 while (!gsi_end_p (gsi))
14623 gimple *stmt = gsi_stmt (gsi);
14624 if (is_gimple_debug (stmt)
14625 || gimple_code (stmt) == GIMPLE_NOP)
14627 gsi_next (&gsi);
14628 continue;
14630 if (gimple_code (stmt) != GIMPLE_OMP_ORDERED)
14631 break;
14632 gomp_ordered *ord_stmt2 = as_a <gomp_ordered *> (stmt);
14633 c = gimple_omp_ordered_clauses (ord_stmt2);
14634 if (c == NULL_TREE
14635 || OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
14636 || OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_SINK)
14637 break;
14638 while (*list_p)
14639 list_p = &OMP_CLAUSE_CHAIN (*list_p);
14640 *list_p = c;
14641 gsi_remove (&gsi, true);
14645 /* Canonicalize sink dependence clauses into one folded clause if
14646 possible.
14648 The basic algorithm is to create a sink vector whose first
14649 element is the GCD of all the first elements, and whose remaining
14650 elements are the minimum of the subsequent columns.
14652 We ignore dependence vectors whose first element is zero because
14653 such dependencies are known to be executed by the same thread.
14655 We take into account the direction of the loop, so a minimum
14656 becomes a maximum if the loop is iterating forwards. We also
14657 ignore sink clauses where the loop direction is unknown, or where
14658 the offsets are clearly invalid because they are not a multiple
14659 of the loop increment.
14661 For example:
14663 #pragma omp for ordered(2)
14664 for (i=0; i < N; ++i)
14665 for (j=0; j < M; ++j)
14667 #pragma omp ordered \
14668 depend(sink:i-8,j-2) \
14669 depend(sink:i,j-1) \ // Completely ignored because i+0.
14670 depend(sink:i-4,j-3) \
14671 depend(sink:i-6,j-4)
14672 #pragma omp ordered depend(source)
14675 Folded clause is:
14677 depend(sink:-gcd(8,4,6),-min(2,3,4))
14678 -or-
14679 depend(sink:-2,-2)
14682 /* FIXME: Computing GCDs where the first element is zero is
14683 non-trivial in the presence of collapsed loops. Do this later. */
14684 if (fd.collapse > 1)
14685 return;
14687 wide_int *folded_deps = XALLOCAVEC (wide_int, 2 * len - 1);
14688 memset (folded_deps, 0, sizeof (*folded_deps) * (2 * len - 1));
14689 tree folded_dep = NULL_TREE;
14690 /* TRUE if the first dimension's offset is negative. */
14691 bool neg_offset_p = false;
14693 list_p = gimple_omp_ordered_clauses_ptr (ord_stmt);
14694 unsigned int i;
14695 while ((c = *list_p) != NULL)
14697 bool remove = false;
14699 gcc_assert (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND);
14700 if (OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_SINK)
14701 goto next_ordered_clause;
14703 tree vec;
14704 for (vec = OMP_CLAUSE_DECL (c), i = 0;
14705 vec && TREE_CODE (vec) == TREE_LIST;
14706 vec = TREE_CHAIN (vec), ++i)
14708 gcc_assert (i < len);
14710 /* extract_omp_for_data has canonicalized the condition. */
14711 gcc_assert (fd.loops[i].cond_code == LT_EXPR
14712 || fd.loops[i].cond_code == GT_EXPR);
14713 bool forward = fd.loops[i].cond_code == LT_EXPR;
14714 bool maybe_lexically_later = true;
14716 /* While the committee makes up its mind, bail if we have any
14717 non-constant steps. */
14718 if (TREE_CODE (fd.loops[i].step) != INTEGER_CST)
14719 goto lower_omp_ordered_ret;
14721 tree itype = TREE_TYPE (TREE_VALUE (vec));
14722 if (POINTER_TYPE_P (itype))
14723 itype = sizetype;
14724 wide_int offset = wide_int::from (TREE_PURPOSE (vec),
14725 TYPE_PRECISION (itype),
14726 TYPE_SIGN (itype));
14728 /* Ignore invalid offsets that are not multiples of the step. */
14729 if (!wi::multiple_of_p
14730 (wi::abs (offset), wi::abs ((wide_int) fd.loops[i].step),
14731 UNSIGNED))
14733 warning_at (OMP_CLAUSE_LOCATION (c), 0,
14734 "ignoring sink clause with offset that is not "
14735 "a multiple of the loop step");
14736 remove = true;
14737 goto next_ordered_clause;
14740 /* Calculate the first dimension. The first dimension of
14741 the folded dependency vector is the GCD of the first
14742 elements, while ignoring any first elements whose offset
14743 is 0. */
14744 if (i == 0)
14746 /* Ignore dependence vectors whose first dimension is 0. */
14747 if (offset == 0)
14749 remove = true;
14750 goto next_ordered_clause;
14752 else
14754 if (!TYPE_UNSIGNED (itype) && (forward ^ wi::neg_p (offset)))
14756 error_at (OMP_CLAUSE_LOCATION (c),
14757 "first offset must be in opposite direction "
14758 "of loop iterations");
14759 goto lower_omp_ordered_ret;
14761 if (forward)
14762 offset = -offset;
14763 neg_offset_p = forward;
14764 /* Initialize the first time around. */
14765 if (folded_dep == NULL_TREE)
14767 folded_dep = c;
14768 folded_deps[0] = offset;
14770 else
14771 folded_deps[0] = wi::gcd (folded_deps[0],
14772 offset, UNSIGNED);
14775 /* Calculate minimum for the remaining dimensions. */
14776 else
14778 folded_deps[len + i - 1] = offset;
14779 if (folded_dep == c)
14780 folded_deps[i] = offset;
14781 else if (maybe_lexically_later
14782 && !wi::eq_p (folded_deps[i], offset))
14784 if (forward ^ wi::gts_p (folded_deps[i], offset))
14786 unsigned int j;
14787 folded_dep = c;
14788 for (j = 1; j <= i; j++)
14789 folded_deps[j] = folded_deps[len + j - 1];
14791 else
14792 maybe_lexically_later = false;
14796 gcc_assert (i == len);
14798 remove = true;
14800 next_ordered_clause:
14801 if (remove)
14802 *list_p = OMP_CLAUSE_CHAIN (c);
14803 else
14804 list_p = &OMP_CLAUSE_CHAIN (c);
14807 if (folded_dep)
14809 if (neg_offset_p)
14810 folded_deps[0] = -folded_deps[0];
14812 tree itype = TREE_TYPE (TREE_VALUE (OMP_CLAUSE_DECL (folded_dep)));
14813 if (POINTER_TYPE_P (itype))
14814 itype = sizetype;
14816 TREE_PURPOSE (OMP_CLAUSE_DECL (folded_dep))
14817 = wide_int_to_tree (itype, folded_deps[0]);
14818 OMP_CLAUSE_CHAIN (folded_dep) = gimple_omp_ordered_clauses (ord_stmt);
14819 *gimple_omp_ordered_clauses_ptr (ord_stmt) = folded_dep;
14822 lower_omp_ordered_ret:
14824 /* Ordered without clauses is #pragma omp ordered threads, while we want
14825 a nop instead if we remove all clauses. */
14826 if (gimple_omp_ordered_clauses (ord_stmt) == NULL_TREE)
14827 gsi_replace (gsi_p, gimple_build_nop (), true);
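/* A minimal sketch, assuming plain C: folding the depend(sink:...) example
   from the comment in lower_omp_ordered_clauses by hand.  The first element
   of the folded vector is the GCD of the non-zero first offsets, the
   remaining elements are the minimum of the corresponding columns, giving
   depend(sink:-2,-2).  */

static long
sink_gcd (long a, long b)
{
  while (b != 0)
    {
      long t = a % b;
      a = b;
      b = t;
    }
  return a;
}

static void
folded_sink_example (long *i_off, long *j_off)
{
  /* Vectors (i-8,j-2), (i-4,j-3), (i-6,j-4); (i,j-1) is dropped because
     its first offset is zero.  */
  long g = sink_gcd (sink_gcd (8, 4), 6);	/* gcd (8, 4, 6) == 2 */
  long m = 2 < 3 ? 2 : 3;
  m = m < 4 ? m : 4;				/* min (2, 3, 4) == 2 */
  *i_off = -g;					/* -2 */
  *j_off = -m;					/* -2 */
}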
14831 /* Expand code for an OpenMP ordered directive. */
14833 static void
14834 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
14836 tree block;
14837 gimple *stmt = gsi_stmt (*gsi_p);
14838 gomp_ordered *ord_stmt = as_a <gomp_ordered *> (stmt);
14839 gcall *x;
14840 gbind *bind;
14841 bool simd = find_omp_clause (gimple_omp_ordered_clauses (ord_stmt),
14842 OMP_CLAUSE_SIMD);
14843 bool threads = find_omp_clause (gimple_omp_ordered_clauses (ord_stmt),
14844 OMP_CLAUSE_THREADS);
14846 if (find_omp_clause (gimple_omp_ordered_clauses (ord_stmt),
14847 OMP_CLAUSE_DEPEND))
14849 /* FIXME: This needs to be moved to the expansion, to verify various
14850 conditions that are only testable on a CFG with dominators computed,
14851 and also because all the depend clauses to be merged might still need
14852 to be available for the runtime checks. */
14853 if (0)
14854 lower_omp_ordered_clauses (gsi_p, ord_stmt, ctx);
14855 return;
14858 push_gimplify_context ();
14860 block = make_node (BLOCK);
14861 bind = gimple_build_bind (NULL, NULL, block);
14862 gsi_replace (gsi_p, bind, true);
14863 gimple_bind_add_stmt (bind, stmt);
14865 if (simd)
14867 x = gimple_build_call_internal (IFN_GOMP_SIMD_ORDERED_START, 1,
14868 build_int_cst (NULL_TREE, threads));
14869 cfun->has_simduid_loops = true;
14871 else
14872 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
14873 0);
14874 gimple_bind_add_stmt (bind, x);
14876 lower_omp (gimple_omp_body_ptr (stmt), ctx);
14877 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
14878 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
14879 gimple_omp_set_body (stmt, NULL);
14881 if (simd)
14882 x = gimple_build_call_internal (IFN_GOMP_SIMD_ORDERED_END, 1,
14883 build_int_cst (NULL_TREE, threads));
14884 else
14885 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END),
14886 0);
14887 gimple_bind_add_stmt (bind, x);
14889 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
14891 pop_gimplify_context (bind);
14893 gimple_bind_append_vars (bind, ctx->block_vars);
14894 BLOCK_VARS (block) = gimple_bind_vars (bind);
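/* A minimal sketch, assuming plain C names: the non-simd path of
   lower_omp_ordered brackets the region body with the libgomp entry points
   behind BUILT_IN_GOMP_ORDERED_START/END; "body" stands for the lowered
   body.  */

extern void GOMP_ordered_start (void);
extern void GOMP_ordered_end (void);

static void
ordered_lowered_shape (void (*body) (void))
{
  GOMP_ordered_start ();
  body ();
  GOMP_ordered_end ();
}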
14898 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
14899 substitution of a couple of function calls. But in the NAMED case,
14900 it requires that languages coordinate a symbol name. It is therefore
14901 best put here in common code. */
14903 static GTY(()) hash_map<tree, tree> *critical_name_mutexes;
14905 static void
14906 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
14908 tree block;
14909 tree name, lock, unlock;
14910 gomp_critical *stmt = as_a <gomp_critical *> (gsi_stmt (*gsi_p));
14911 gbind *bind;
14912 location_t loc = gimple_location (stmt);
14913 gimple_seq tbody;
14915 name = gimple_omp_critical_name (stmt);
14916 if (name)
14918 tree decl;
14920 if (!critical_name_mutexes)
14921 critical_name_mutexes = hash_map<tree, tree>::create_ggc (10);
14923 tree *n = critical_name_mutexes->get (name);
14924 if (n == NULL)
14926 char *new_str;
14928 decl = create_tmp_var_raw (ptr_type_node);
14930 new_str = ACONCAT ((".gomp_critical_user_",
14931 IDENTIFIER_POINTER (name), NULL));
14932 DECL_NAME (decl) = get_identifier (new_str);
14933 TREE_PUBLIC (decl) = 1;
14934 TREE_STATIC (decl) = 1;
14935 DECL_COMMON (decl) = 1;
14936 DECL_ARTIFICIAL (decl) = 1;
14937 DECL_IGNORED_P (decl) = 1;
14939 varpool_node::finalize_decl (decl);
14941 critical_name_mutexes->put (name, decl);
14943 else
14944 decl = *n;
14946 /* If '#pragma omp critical' is inside an offloaded region or
14947 inside a function marked as offloadable, the symbol must be
14948 marked as offloadable too. */
14949 omp_context *octx;
14950 if (cgraph_node::get (current_function_decl)->offloadable)
14951 varpool_node::get_create (decl)->offloadable = 1;
14952 else
14953 for (octx = ctx->outer; octx; octx = octx->outer)
14954 if (is_gimple_omp_offloaded (octx->stmt))
14956 varpool_node::get_create (decl)->offloadable = 1;
14957 break;
14960 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
14961 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
14963 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
14964 unlock = build_call_expr_loc (loc, unlock, 1,
14965 build_fold_addr_expr_loc (loc, decl));
14967 else
14969 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
14970 lock = build_call_expr_loc (loc, lock, 0);
14972 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
14973 unlock = build_call_expr_loc (loc, unlock, 0);
14976 push_gimplify_context ();
14978 block = make_node (BLOCK);
14979 bind = gimple_build_bind (NULL, NULL, block);
14980 gsi_replace (gsi_p, bind, true);
14981 gimple_bind_add_stmt (bind, stmt);
14983 tbody = gimple_bind_body (bind);
14984 gimplify_and_add (lock, &tbody);
14985 gimple_bind_set_body (bind, tbody);
14987 lower_omp (gimple_omp_body_ptr (stmt), ctx);
14988 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
14989 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
14990 gimple_omp_set_body (stmt, NULL);
14992 tbody = gimple_bind_body (bind);
14993 gimplify_and_add (unlock, &tbody);
14994 gimple_bind_set_body (bind, tbody);
14996 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
14998 pop_gimplify_context (bind);
14999 gimple_bind_append_vars (bind, ctx->block_vars);
15000 BLOCK_VARS (block) = gimple_bind_vars (bind);
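/* A minimal sketch, assuming plain C names: for "#pragma omp critical (foo)"
   the code above creates a common, public pointer named
   ".gomp_critical_user_foo" (spelled with a legal identifier here) and
   brackets the body with the named libgomp lock entry points.  */

extern void GOMP_critical_name_start (void **);
extern void GOMP_critical_name_end (void **);

static void *gomp_critical_user_foo;	/* stands in for .gomp_critical_user_foo */

static void
critical_lowered_shape (void (*body) (void))
{
  GOMP_critical_name_start (&gomp_critical_user_foo);
  body ();
  GOMP_critical_name_end (&gomp_critical_user_foo);
}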
15004 /* A subroutine of lower_omp_for. Generate code to emit the predicate
15005 for a lastprivate clause. Given a loop control predicate of (V
15006 cond N2), we gate the clause on (!(V cond N2)). The lowered form
15007 is appended to *DLIST, iterator initialization is appended to
15008 *BODY_P. */
15010 static void
15011 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
15012 gimple_seq *dlist, struct omp_context *ctx)
15014 tree clauses, cond, vinit;
15015 enum tree_code cond_code;
15016 gimple_seq stmts;
15018 cond_code = fd->loop.cond_code;
15019 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
15021 /* When possible, use a strict equality expression. This can let VRP
15022 type optimizations deduce the value and remove a copy. */
15023 if (tree_fits_shwi_p (fd->loop.step))
15025 HOST_WIDE_INT step = tree_to_shwi (fd->loop.step);
15026 if (step == 1 || step == -1)
15027 cond_code = EQ_EXPR;
15030 tree n2 = fd->loop.n2;
15031 if (fd->collapse > 1
15032 && TREE_CODE (n2) != INTEGER_CST
15033 && gimple_omp_for_combined_into_p (fd->for_stmt))
15035 struct omp_context *taskreg_ctx = NULL;
15036 if (gimple_code (ctx->outer->stmt) == GIMPLE_OMP_FOR)
15038 gomp_for *gfor = as_a <gomp_for *> (ctx->outer->stmt);
15039 if (gimple_omp_for_kind (gfor) == GF_OMP_FOR_KIND_FOR
15040 || gimple_omp_for_kind (gfor) == GF_OMP_FOR_KIND_DISTRIBUTE)
15042 if (gimple_omp_for_combined_into_p (gfor))
15044 gcc_assert (ctx->outer->outer
15045 && is_parallel_ctx (ctx->outer->outer));
15046 taskreg_ctx = ctx->outer->outer;
15048 else
15050 struct omp_for_data outer_fd;
15051 extract_omp_for_data (gfor, &outer_fd, NULL);
15052 n2 = fold_convert (TREE_TYPE (n2), outer_fd.loop.n2);
15055 else if (gimple_omp_for_kind (gfor) == GF_OMP_FOR_KIND_TASKLOOP)
15056 taskreg_ctx = ctx->outer->outer;
15058 else if (is_taskreg_ctx (ctx->outer))
15059 taskreg_ctx = ctx->outer;
15060 if (taskreg_ctx)
15062 int i;
15063 tree innerc
15064 = find_omp_clause (gimple_omp_taskreg_clauses (taskreg_ctx->stmt),
15065 OMP_CLAUSE__LOOPTEMP_);
15066 gcc_assert (innerc);
15067 for (i = 0; i < fd->collapse; i++)
15069 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
15070 OMP_CLAUSE__LOOPTEMP_);
15071 gcc_assert (innerc);
15073 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
15074 OMP_CLAUSE__LOOPTEMP_);
15075 if (innerc)
15076 n2 = fold_convert (TREE_TYPE (n2),
15077 lookup_decl (OMP_CLAUSE_DECL (innerc),
15078 taskreg_ctx));
15081 cond = build2 (cond_code, boolean_type_node, fd->loop.v, n2);
15083 clauses = gimple_omp_for_clauses (fd->for_stmt);
15084 stmts = NULL;
15085 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
15086 if (!gimple_seq_empty_p (stmts))
15088 gimple_seq_add_seq (&stmts, *dlist);
15089 *dlist = stmts;
15091 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
15092 vinit = fd->loop.n1;
15093 if (cond_code == EQ_EXPR
15094 && tree_fits_shwi_p (fd->loop.n2)
15095 && ! integer_zerop (fd->loop.n2))
15096 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
15097 else
15098 vinit = unshare_expr (vinit);
15100 /* Initialize the iterator variable, so that threads that don't execute
15101 any iterations don't execute the lastprivate clauses by accident. */
15102 gimplify_assign (fd->loop.v, vinit, body_p);
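/* A minimal sketch, assuming plain C names: for a canonical loop
   "for (i = 0; i < n; i++)" the step is 1, so cond_code above becomes
   EQ_EXPR and the lastprivate copy-out appended to *DLIST is gated roughly
   as below; i, n, x and priv_x are illustrative.  */

static void
lastprivate_gate_shape (int *x, int priv_x, int i, int n)
{
  /* Only the thread whose iterator stopped exactly at N performed the
     last iteration, so only it writes the value back.  */
  if (i == n)
    *x = priv_x;
}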
15107 /* Lower code for an OMP loop directive. */
15109 static void
15110 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
15112 tree *rhs_p, block;
15113 struct omp_for_data fd, *fdp = NULL;
15114 gomp_for *stmt = as_a <gomp_for *> (gsi_stmt (*gsi_p));
15115 gbind *new_stmt;
15116 gimple_seq omp_for_body, body, dlist;
15117 gimple_seq oacc_head = NULL, oacc_tail = NULL;
15118 size_t i;
15120 push_gimplify_context ();
15122 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
15124 block = make_node (BLOCK);
15125 new_stmt = gimple_build_bind (NULL, NULL, block);
15126 /* Replace at gsi right away, so that 'stmt' is no longer a member
15127 of a sequence, as we're going to add it to a different
15128 one below. */
15129 gsi_replace (gsi_p, new_stmt, true);
15131 /* Move declaration of temporaries in the loop body before we make
15132 it go away. */
15133 omp_for_body = gimple_omp_body (stmt);
15134 if (!gimple_seq_empty_p (omp_for_body)
15135 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
15137 gbind *inner_bind
15138 = as_a <gbind *> (gimple_seq_first_stmt (omp_for_body));
15139 tree vars = gimple_bind_vars (inner_bind);
15140 gimple_bind_append_vars (new_stmt, vars);
15141 /* bind_vars/BLOCK_VARS are being moved to new_stmt/block, don't
15142 keep them on the inner_bind and its block. */
15143 gimple_bind_set_vars (inner_bind, NULL_TREE);
15144 if (gimple_bind_block (inner_bind))
15145 BLOCK_VARS (gimple_bind_block (inner_bind)) = NULL_TREE;
15148 if (gimple_omp_for_combined_into_p (stmt))
15150 extract_omp_for_data (stmt, &fd, NULL);
15151 fdp = &fd;
15153 /* We need two temporaries with fd.loop.v type (istart/iend)
15154 and then (fd.collapse - 1) temporaries with the same
15155 type for count2 ... countN-1 vars if not constant. */
15156 size_t count = 2;
15157 tree type = fd.iter_type;
15158 if (fd.collapse > 1
15159 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
15160 count += fd.collapse - 1;
15161 bool taskreg_for
15162 = (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR
15163 || gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_TASKLOOP);
15164 tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
15165 tree clauses = *pc;
15166 if (taskreg_for)
15167 outerc
15168 = find_omp_clause (gimple_omp_taskreg_clauses (ctx->outer->stmt),
15169 OMP_CLAUSE__LOOPTEMP_);
15170 for (i = 0; i < count; i++)
15172 tree temp;
15173 if (taskreg_for)
15175 gcc_assert (outerc);
15176 temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
15177 outerc = find_omp_clause (OMP_CLAUSE_CHAIN (outerc),
15178 OMP_CLAUSE__LOOPTEMP_);
15180 else
15182 temp = create_tmp_var (type);
15183 insert_decl_map (&ctx->outer->cb, temp, temp);
15185 *pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
15186 OMP_CLAUSE_DECL (*pc) = temp;
15187 pc = &OMP_CLAUSE_CHAIN (*pc);
15189 *pc = clauses;
15192 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
15193 dlist = NULL;
15194 body = NULL;
15195 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
15196 fdp);
15197 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
15199 lower_omp (gimple_omp_body_ptr (stmt), ctx);
15201 /* Lower the header expressions. At this point, we can assume that
15202 the header is of the form:
15204 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
15206 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
15207 using the .omp_data_s mapping, if needed. */
15208 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
15210 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
15211 if (!is_gimple_min_invariant (*rhs_p))
15212 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
15214 rhs_p = gimple_omp_for_final_ptr (stmt, i);
15215 if (!is_gimple_min_invariant (*rhs_p))
15216 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
15218 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
15219 if (!is_gimple_min_invariant (*rhs_p))
15220 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
15223 /* Once lowered, extract the bounds and clauses. */
15224 extract_omp_for_data (stmt, &fd, NULL);
15226 if (is_gimple_omp_oacc (ctx->stmt)
15227 && !ctx_in_oacc_kernels_region (ctx))
15228 lower_oacc_head_tail (gimple_location (stmt),
15229 gimple_omp_for_clauses (stmt),
15230 &oacc_head, &oacc_tail, ctx);
15232 /* Add OpenACC partitioning and reduction markers just before the loop. */
15233 if (oacc_head)
15234 gimple_seq_add_seq (&body, oacc_head);
15236 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
15238 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
15239 for (tree c = gimple_omp_for_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
15240 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
15241 && !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
15243 OMP_CLAUSE_DECL (c) = lookup_decl (OMP_CLAUSE_DECL (c), ctx);
15244 if (DECL_P (OMP_CLAUSE_LINEAR_STEP (c)))
15245 OMP_CLAUSE_LINEAR_STEP (c)
15246 = maybe_lookup_decl_in_outer_ctx (OMP_CLAUSE_LINEAR_STEP (c),
15247 ctx);
15250 if (!gimple_omp_for_grid_phony (stmt))
15251 gimple_seq_add_stmt (&body, stmt);
15252 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
15254 if (!gimple_omp_for_grid_phony (stmt))
15255 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
15256 fd.loop.v));
15258 /* After the loop, add exit clauses. */
15259 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
15261 if (ctx->cancellable)
15262 gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));
15264 gimple_seq_add_seq (&body, dlist);
15266 body = maybe_catch_exception (body);
15268 if (!gimple_omp_for_grid_phony (stmt))
15270 /* Region exit marker goes at the end of the loop body. */
15271 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
15272 maybe_add_implicit_barrier_cancel (ctx, &body);
15275 /* Add OpenACC joining and reduction markers just after the loop. */
15276 if (oacc_tail)
15277 gimple_seq_add_seq (&body, oacc_tail);
15279 pop_gimplify_context (new_stmt);
15281 gimple_bind_append_vars (new_stmt, ctx->block_vars);
15282 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
15283 if (BLOCK_VARS (block))
15284 TREE_USED (block) = 1;
15286 gimple_bind_set_body (new_stmt, body);
15287 gimple_omp_set_body (stmt, NULL);
15288 gimple_omp_for_set_pre_body (stmt, NULL);
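/* A minimal sketch, assuming plain C names: non-invariant loop bounds are
   evaluated once into formal temporaries before the directive, so a header
   like "for (i = lb (); i < ub (); i += 1)" effectively takes the shape
   below; lb and ub are illustrative.  */

extern int lb (void);
extern int ub (void);

static void
for_header_shape (void)
{
  int d1 = lb ();	/* get_formal_tmp_var on the initial value */
  int d2 = ub ();	/* get_formal_tmp_var on the final value */
  for (int i = d1; i < d2; i += 1)
    ;			/* loop body, still wrapped by the OMP_FOR */
}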
15291 /* Callback for walk_stmts. Check if the current statement only contains
15292 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
15294 static tree
15295 check_combined_parallel (gimple_stmt_iterator *gsi_p,
15296 bool *handled_ops_p,
15297 struct walk_stmt_info *wi)
15299 int *info = (int *) wi->info;
15300 gimple *stmt = gsi_stmt (*gsi_p);
15302 *handled_ops_p = true;
15303 switch (gimple_code (stmt))
15305 WALK_SUBSTMTS;
15307 case GIMPLE_OMP_FOR:
15308 case GIMPLE_OMP_SECTIONS:
15309 *info = *info == 0 ? 1 : -1;
15310 break;
15311 default:
15312 *info = -1;
15313 break;
15315 return NULL;
15318 struct omp_taskcopy_context
15320 /* This field must be at the beginning, as we do "inheritance": Some
15321 callback functions for tree-inline.c (e.g., omp_copy_decl)
15322 receive a copy_body_data pointer that is up-casted to an
15323 omp_context pointer. */
15324 copy_body_data cb;
15325 omp_context *ctx;
15328 static tree
15329 task_copyfn_copy_decl (tree var, copy_body_data *cb)
15331 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
15333 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
15334 return create_tmp_var (TREE_TYPE (var));
15336 return var;
15339 static tree
15340 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
15342 tree name, new_fields = NULL, type, f;
15344 type = lang_hooks.types.make_type (RECORD_TYPE);
15345 name = DECL_NAME (TYPE_NAME (orig_type));
15346 name = build_decl (gimple_location (tcctx->ctx->stmt),
15347 TYPE_DECL, name, type);
15348 TYPE_NAME (type) = name;
15350 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
15352 tree new_f = copy_node (f);
15353 DECL_CONTEXT (new_f) = type;
15354 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
15355 TREE_CHAIN (new_f) = new_fields;
15356 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
15357 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
15358 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
15359 &tcctx->cb, NULL);
15360 new_fields = new_f;
15361 tcctx->cb.decl_map->put (f, new_f);
15363 TYPE_FIELDS (type) = nreverse (new_fields);
15364 layout_type (type);
15365 return type;
15368 /* Create task copyfn. */
15370 static void
15371 create_task_copyfn (gomp_task *task_stmt, omp_context *ctx)
15373 struct function *child_cfun;
15374 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
15375 tree record_type, srecord_type, bind, list;
15376 bool record_needs_remap = false, srecord_needs_remap = false;
15377 splay_tree_node n;
15378 struct omp_taskcopy_context tcctx;
15379 location_t loc = gimple_location (task_stmt);
15381 child_fn = gimple_omp_task_copy_fn (task_stmt);
15382 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
15383 gcc_assert (child_cfun->cfg == NULL);
15384 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
15386 /* Reset DECL_CONTEXT on function arguments. */
15387 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
15388 DECL_CONTEXT (t) = child_fn;
15390 /* Populate the function. */
15391 push_gimplify_context ();
15392 push_cfun (child_cfun);
15394 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
15395 TREE_SIDE_EFFECTS (bind) = 1;
15396 list = NULL;
15397 DECL_SAVED_TREE (child_fn) = bind;
15398 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
15400 /* Remap src and dst argument types if needed. */
15401 record_type = ctx->record_type;
15402 srecord_type = ctx->srecord_type;
15403 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
15404 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
15406 record_needs_remap = true;
15407 break;
15409 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
15410 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
15412 srecord_needs_remap = true;
15413 break;
15416 if (record_needs_remap || srecord_needs_remap)
15418 memset (&tcctx, '\0', sizeof (tcctx));
15419 tcctx.cb.src_fn = ctx->cb.src_fn;
15420 tcctx.cb.dst_fn = child_fn;
15421 tcctx.cb.src_node = cgraph_node::get (tcctx.cb.src_fn);
15422 gcc_checking_assert (tcctx.cb.src_node);
15423 tcctx.cb.dst_node = tcctx.cb.src_node;
15424 tcctx.cb.src_cfun = ctx->cb.src_cfun;
15425 tcctx.cb.copy_decl = task_copyfn_copy_decl;
15426 tcctx.cb.eh_lp_nr = 0;
15427 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
15428 tcctx.cb.decl_map = new hash_map<tree, tree>;
15429 tcctx.ctx = ctx;
15431 if (record_needs_remap)
15432 record_type = task_copyfn_remap_type (&tcctx, record_type);
15433 if (srecord_needs_remap)
15434 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
15436 else
15437 tcctx.cb.decl_map = NULL;
15439 arg = DECL_ARGUMENTS (child_fn);
15440 TREE_TYPE (arg) = build_pointer_type (record_type);
15441 sarg = DECL_CHAIN (arg);
15442 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
15444 /* First pass: initialize temporaries used in record_type and srecord_type
15445 sizes and field offsets. */
15446 if (tcctx.cb.decl_map)
15447 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
15448 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
15450 tree *p;
15452 decl = OMP_CLAUSE_DECL (c);
15453 p = tcctx.cb.decl_map->get (decl);
15454 if (p == NULL)
15455 continue;
15456 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
15457 sf = (tree) n->value;
15458 sf = *tcctx.cb.decl_map->get (sf);
15459 src = build_simple_mem_ref_loc (loc, sarg);
15460 src = omp_build_component_ref (src, sf);
15461 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
15462 append_to_statement_list (t, &list);
15465 /* Second pass: copy shared var pointers and copy construct non-VLA
15466 firstprivate vars. */
15467 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
15468 switch (OMP_CLAUSE_CODE (c))
15470 splay_tree_key key;
15471 case OMP_CLAUSE_SHARED:
15472 decl = OMP_CLAUSE_DECL (c);
15473 key = (splay_tree_key) decl;
15474 if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
15475 key = (splay_tree_key) &DECL_UID (decl);
15476 n = splay_tree_lookup (ctx->field_map, key);
15477 if (n == NULL)
15478 break;
15479 f = (tree) n->value;
15480 if (tcctx.cb.decl_map)
15481 f = *tcctx.cb.decl_map->get (f);
15482 n = splay_tree_lookup (ctx->sfield_map, key);
15483 sf = (tree) n->value;
15484 if (tcctx.cb.decl_map)
15485 sf = *tcctx.cb.decl_map->get (sf);
15486 src = build_simple_mem_ref_loc (loc, sarg);
15487 src = omp_build_component_ref (src, sf);
15488 dst = build_simple_mem_ref_loc (loc, arg);
15489 dst = omp_build_component_ref (dst, f);
15490 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
15491 append_to_statement_list (t, &list);
15492 break;
15493 case OMP_CLAUSE_FIRSTPRIVATE:
15494 decl = OMP_CLAUSE_DECL (c);
15495 if (is_variable_sized (decl))
15496 break;
15497 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
15498 if (n == NULL)
15499 break;
15500 f = (tree) n->value;
15501 if (tcctx.cb.decl_map)
15502 f = *tcctx.cb.decl_map->get (f);
15503 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
15504 if (n != NULL)
15506 sf = (tree) n->value;
15507 if (tcctx.cb.decl_map)
15508 sf = *tcctx.cb.decl_map->get (sf);
15509 src = build_simple_mem_ref_loc (loc, sarg);
15510 src = omp_build_component_ref (src, sf);
15511 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
15512 src = build_simple_mem_ref_loc (loc, src);
15514 else
15515 src = decl;
15516 dst = build_simple_mem_ref_loc (loc, arg);
15517 dst = omp_build_component_ref (dst, f);
15518 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
15519 append_to_statement_list (t, &list);
15520 break;
15521 case OMP_CLAUSE_PRIVATE:
15522 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
15523 break;
15524 decl = OMP_CLAUSE_DECL (c);
15525 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
15526 f = (tree) n->value;
15527 if (tcctx.cb.decl_map)
15528 f = *tcctx.cb.decl_map->get (f);
15529 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
15530 if (n != NULL)
15532 sf = (tree) n->value;
15533 if (tcctx.cb.decl_map)
15534 sf = *tcctx.cb.decl_map->get (sf);
15535 src = build_simple_mem_ref_loc (loc, sarg);
15536 src = omp_build_component_ref (src, sf);
15537 if (use_pointer_for_field (decl, NULL))
15538 src = build_simple_mem_ref_loc (loc, src);
15540 else
15541 src = decl;
15542 dst = build_simple_mem_ref_loc (loc, arg);
15543 dst = omp_build_component_ref (dst, f);
15544 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
15545 append_to_statement_list (t, &list);
15546 break;
15547 default:
15548 break;
15551 /* Last pass: handle VLA firstprivates. */
15552 if (tcctx.cb.decl_map)
15553 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
15554 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
15556 tree ind, ptr, df;
15558 decl = OMP_CLAUSE_DECL (c);
15559 if (!is_variable_sized (decl))
15560 continue;
15561 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
15562 if (n == NULL)
15563 continue;
15564 f = (tree) n->value;
15565 f = *tcctx.cb.decl_map->get (f);
15566 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
15567 ind = DECL_VALUE_EXPR (decl);
15568 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
15569 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
15570 n = splay_tree_lookup (ctx->sfield_map,
15571 (splay_tree_key) TREE_OPERAND (ind, 0));
15572 sf = (tree) n->value;
15573 sf = *tcctx.cb.decl_map->get (sf);
15574 src = build_simple_mem_ref_loc (loc, sarg);
15575 src = omp_build_component_ref (src, sf);
15576 src = build_simple_mem_ref_loc (loc, src);
15577 dst = build_simple_mem_ref_loc (loc, arg);
15578 dst = omp_build_component_ref (dst, f);
15579 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
15580 append_to_statement_list (t, &list);
15581 n = splay_tree_lookup (ctx->field_map,
15582 (splay_tree_key) TREE_OPERAND (ind, 0));
15583 df = (tree) n->value;
15584 df = *tcctx.cb.decl_map->get (df);
15585 ptr = build_simple_mem_ref_loc (loc, arg);
15586 ptr = omp_build_component_ref (ptr, df);
15587 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
15588 build_fold_addr_expr_loc (loc, dst));
15589 append_to_statement_list (t, &list);
15592 t = build1 (RETURN_EXPR, void_type_node, NULL);
15593 append_to_statement_list (t, &list);
15595 if (tcctx.cb.decl_map)
15596 delete tcctx.cb.decl_map;
15597 pop_gimplify_context (NULL);
15598 BIND_EXPR_BODY (bind) = list;
15599 pop_cfun ();
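/* A minimal sketch, assuming plain C names: the generated copy function
   receives the task's own record (dst) and the sender record (src) and
   copies the firstprivate data across; the struct and field names are
   illustrative, and for C the "copy constructor" is a plain assignment.  */

struct task_record { int fp; int *shared_p; };
struct sender_record { int fp; int *shared_p; };

static void
task_copyfn_shape (struct task_record *dst, struct sender_record *src)
{
  dst->shared_p = src->shared_p;	/* second pass: shared var pointers */
  dst->fp = src->fp;			/* second pass: non-VLA firstprivate */
}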
15602 static void
15603 lower_depend_clauses (tree *pclauses, gimple_seq *iseq, gimple_seq *oseq)
15605 tree c, clauses;
15606 gimple *g;
15607 size_t n_in = 0, n_out = 0, idx = 2, i;
15609 clauses = find_omp_clause (*pclauses, OMP_CLAUSE_DEPEND);
15610 gcc_assert (clauses);
15611 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
15612 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
15613 switch (OMP_CLAUSE_DEPEND_KIND (c))
15615 case OMP_CLAUSE_DEPEND_IN:
15616 n_in++;
15617 break;
15618 case OMP_CLAUSE_DEPEND_OUT:
15619 case OMP_CLAUSE_DEPEND_INOUT:
15620 n_out++;
15621 break;
15622 case OMP_CLAUSE_DEPEND_SOURCE:
15623 case OMP_CLAUSE_DEPEND_SINK:
15624 /* FALLTHRU */
15625 default:
15626 gcc_unreachable ();
15628 tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2);
15629 tree array = create_tmp_var (type);
15630 TREE_ADDRESSABLE (array) = 1;
15631 tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
15632 NULL_TREE);
15633 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out));
15634 gimple_seq_add_stmt (iseq, g);
15635 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
15636 NULL_TREE);
15637 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out));
15638 gimple_seq_add_stmt (iseq, g);
15639 for (i = 0; i < 2; i++)
15641 if ((i ? n_in : n_out) == 0)
15642 continue;
15643 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
15644 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
15645 && ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i))
15647 tree t = OMP_CLAUSE_DECL (c);
15648 t = fold_convert (ptr_type_node, t);
15649 gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
15650 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
15651 NULL_TREE, NULL_TREE);
15652 g = gimple_build_assign (r, t);
15653 gimple_seq_add_stmt (iseq, g);
15656 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
15657 OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
15658 OMP_CLAUSE_CHAIN (c) = *pclauses;
15659 *pclauses = c;
15660 tree clobber = build_constructor (type, NULL);
15661 TREE_THIS_VOLATILE (clobber) = 1;
15662 g = gimple_build_assign (array, clobber);
15663 gimple_seq_add_stmt (oseq, g);
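/* A minimal sketch, assuming plain C names: layout of the address array
   built above for "#pragma omp task depend(out: a) depend(in: b, c)".
   The array and variable names are illustrative.  */

#include <stddef.h>

static void
depend_array_shape (int *a, int *b, int *c)
{
  void *arr[5];				/* n_in + n_out + 2 elements */
  arr[0] = (void *) (size_t) 3;		/* n_in + n_out */
  arr[1] = (void *) (size_t) 1;		/* n_out: out/inout entries come first */
  arr[2] = a;				/* out/inout addresses */
  arr[3] = b;				/* then the in addresses */
  arr[4] = c;
  (void) arr;				/* &arr[0] is handed to the runtime */
}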
15666 /* Lower the OpenMP parallel or task directive in the current statement
15667 in GSI_P. CTX holds context information for the directive. */
15669 static void
15670 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
15672 tree clauses;
15673 tree child_fn, t;
15674 gimple *stmt = gsi_stmt (*gsi_p);
15675 gbind *par_bind, *bind, *dep_bind = NULL;
15676 gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
15677 location_t loc = gimple_location (stmt);
15679 clauses = gimple_omp_taskreg_clauses (stmt);
15680 par_bind
15681 = as_a <gbind *> (gimple_seq_first_stmt (gimple_omp_body (stmt)));
15682 par_body = gimple_bind_body (par_bind);
15683 child_fn = ctx->cb.dst_fn;
15684 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
15685 && !gimple_omp_parallel_combined_p (stmt))
15687 struct walk_stmt_info wi;
15688 int ws_num = 0;
15690 memset (&wi, 0, sizeof (wi));
15691 wi.info = &ws_num;
15692 wi.val_only = true;
15693 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
15694 if (ws_num == 1)
15695 gimple_omp_parallel_set_combined_p (stmt, true);
15697 gimple_seq dep_ilist = NULL;
15698 gimple_seq dep_olist = NULL;
15699 if (gimple_code (stmt) == GIMPLE_OMP_TASK
15700 && find_omp_clause (clauses, OMP_CLAUSE_DEPEND))
15702 push_gimplify_context ();
15703 dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
15704 lower_depend_clauses (gimple_omp_task_clauses_ptr (stmt),
15705 &dep_ilist, &dep_olist);
15708 if (ctx->srecord_type)
15709 create_task_copyfn (as_a <gomp_task *> (stmt), ctx);
15711 push_gimplify_context ();
15713 par_olist = NULL;
15714 par_ilist = NULL;
15715 par_rlist = NULL;
15716 bool phony_construct = gimple_code (stmt) == GIMPLE_OMP_PARALLEL
15717 && gimple_omp_parallel_grid_phony (as_a <gomp_parallel *> (stmt));
15718 if (phony_construct && ctx->record_type)
15720 gcc_checking_assert (!ctx->receiver_decl);
15721 ctx->receiver_decl = create_tmp_var
15722 (build_reference_type (ctx->record_type), ".omp_rec");
15724 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
15725 lower_omp (&par_body, ctx);
15726 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
15727 lower_reduction_clauses (clauses, &par_rlist, ctx);
15729 /* Declare all the variables created by mapping and the variables
15730 declared in the scope of the parallel body. */
15731 record_vars_into (ctx->block_vars, child_fn);
15732 record_vars_into (gimple_bind_vars (par_bind), child_fn);
15734 if (ctx->record_type)
15736 ctx->sender_decl
15737 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
15738 : ctx->record_type, ".omp_data_o");
15739 DECL_NAMELESS (ctx->sender_decl) = 1;
15740 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
15741 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
15744 olist = NULL;
15745 ilist = NULL;
15746 lower_send_clauses (clauses, &ilist, &olist, ctx);
15747 lower_send_shared_vars (&ilist, &olist, ctx);
15749 if (ctx->record_type)
15751 tree clobber = build_constructor (TREE_TYPE (ctx->sender_decl), NULL);
15752 TREE_THIS_VOLATILE (clobber) = 1;
15753 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
15754 clobber));
15757 /* Once all the expansions are done, sequence all the different
15758 fragments inside gimple_omp_body. */
15760 new_body = NULL;
15762 if (ctx->record_type)
15764 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
15765 /* fixup_child_record_type might have changed receiver_decl's type. */
15766 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
15767 gimple_seq_add_stmt (&new_body,
15768 gimple_build_assign (ctx->receiver_decl, t));
15771 gimple_seq_add_seq (&new_body, par_ilist);
15772 gimple_seq_add_seq (&new_body, par_body);
15773 gimple_seq_add_seq (&new_body, par_rlist);
15774 if (ctx->cancellable)
15775 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
15776 gimple_seq_add_seq (&new_body, par_olist);
15777 new_body = maybe_catch_exception (new_body);
15778 if (gimple_code (stmt) == GIMPLE_OMP_TASK)
15779 gimple_seq_add_stmt (&new_body,
15780 gimple_build_omp_continue (integer_zero_node,
15781 integer_zero_node));
15782 if (!phony_construct)
15784 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
15785 gimple_omp_set_body (stmt, new_body);
15788 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
15789 gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
15790 gimple_bind_add_seq (bind, ilist);
15791 if (!phony_construct)
15792 gimple_bind_add_stmt (bind, stmt);
15793 else
15794 gimple_bind_add_seq (bind, new_body);
15795 gimple_bind_add_seq (bind, olist);
15797 pop_gimplify_context (NULL);
15799 if (dep_bind)
15801 gimple_bind_add_seq (dep_bind, dep_ilist);
15802 gimple_bind_add_stmt (dep_bind, bind);
15803 gimple_bind_add_seq (dep_bind, dep_olist);
15804 pop_gimplify_context (dep_bind);
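/* A minimal sketch, assuming plain C names: the sender record built above in
   the parent and the receiver assignment emitted at the top of the
   to-be-outlined body, for one shared variable passed by reference (whether
   a field holds the value or its address depends on use_pointer_for_field).
   Names are illustrative.  */

struct omp_data_s { int *x; };

static void
taskreg_handoff_shape (int *x)
{
  struct omp_data_s omp_data_o;			/* ctx->sender_decl, ".omp_data_o" */
  omp_data_o.x = x;				/* lower_send_clauses / shared vars */
  struct omp_data_s *omp_data_i = &omp_data_o;	/* receiver_decl assignment */
  (void) omp_data_i;
}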
15808 /* Lower the GIMPLE_OMP_TARGET in the current statement
15809 in GSI_P. CTX holds context information for the directive. */
15811 static void
15812 lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
15814 tree clauses;
15815 tree child_fn, t, c;
15816 gomp_target *stmt = as_a <gomp_target *> (gsi_stmt (*gsi_p));
15817 gbind *tgt_bind, *bind, *dep_bind = NULL;
15818 gimple_seq tgt_body, olist, ilist, fplist, new_body;
15819 location_t loc = gimple_location (stmt);
15820 bool offloaded, data_region;
15821 unsigned int map_cnt = 0;
15823 offloaded = is_gimple_omp_offloaded (stmt);
15824 switch (gimple_omp_target_kind (stmt))
15826 case GF_OMP_TARGET_KIND_REGION:
15827 case GF_OMP_TARGET_KIND_UPDATE:
15828 case GF_OMP_TARGET_KIND_ENTER_DATA:
15829 case GF_OMP_TARGET_KIND_EXIT_DATA:
15830 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
15831 case GF_OMP_TARGET_KIND_OACC_KERNELS:
15832 case GF_OMP_TARGET_KIND_OACC_UPDATE:
15833 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
15834 case GF_OMP_TARGET_KIND_OACC_DECLARE:
15835 data_region = false;
15836 break;
15837 case GF_OMP_TARGET_KIND_DATA:
15838 case GF_OMP_TARGET_KIND_OACC_DATA:
15839 case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
15840 data_region = true;
15841 break;
15842 default:
15843 gcc_unreachable ();
15846 clauses = gimple_omp_target_clauses (stmt);
15848 gimple_seq dep_ilist = NULL;
15849 gimple_seq dep_olist = NULL;
15850 if (find_omp_clause (clauses, OMP_CLAUSE_DEPEND))
15852 push_gimplify_context ();
15853 dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
15854 lower_depend_clauses (gimple_omp_target_clauses_ptr (stmt),
15855 &dep_ilist, &dep_olist);
15858 tgt_bind = NULL;
15859 tgt_body = NULL;
15860 if (offloaded)
15862 tgt_bind = gimple_seq_first_stmt_as_a_bind (gimple_omp_body (stmt));
15863 tgt_body = gimple_bind_body (tgt_bind);
15865 else if (data_region)
15866 tgt_body = gimple_omp_body (stmt);
15867 child_fn = ctx->cb.dst_fn;
15869 push_gimplify_context ();
15870 fplist = NULL;
15872 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
15873 switch (OMP_CLAUSE_CODE (c))
15875 tree var, x;
15877 default:
15878 break;
15879 case OMP_CLAUSE_MAP:
15880 #if CHECKING_P
15881 /* First check what we're prepared to handle in the following. */
15882 switch (OMP_CLAUSE_MAP_KIND (c))
15884 case GOMP_MAP_ALLOC:
15885 case GOMP_MAP_TO:
15886 case GOMP_MAP_FROM:
15887 case GOMP_MAP_TOFROM:
15888 case GOMP_MAP_POINTER:
15889 case GOMP_MAP_TO_PSET:
15890 case GOMP_MAP_DELETE:
15891 case GOMP_MAP_RELEASE:
15892 case GOMP_MAP_ALWAYS_TO:
15893 case GOMP_MAP_ALWAYS_FROM:
15894 case GOMP_MAP_ALWAYS_TOFROM:
15895 case GOMP_MAP_FIRSTPRIVATE_POINTER:
15896 case GOMP_MAP_FIRSTPRIVATE_REFERENCE:
15897 case GOMP_MAP_STRUCT:
15898 case GOMP_MAP_ALWAYS_POINTER:
15899 break;
15900 case GOMP_MAP_FORCE_ALLOC:
15901 case GOMP_MAP_FORCE_TO:
15902 case GOMP_MAP_FORCE_FROM:
15903 case GOMP_MAP_FORCE_TOFROM:
15904 case GOMP_MAP_FORCE_PRESENT:
15905 case GOMP_MAP_FORCE_DEVICEPTR:
15906 case GOMP_MAP_DEVICE_RESIDENT:
15907 case GOMP_MAP_LINK:
15908 gcc_assert (is_gimple_omp_oacc (stmt));
15909 break;
15910 default:
15911 gcc_unreachable ();
15913 #endif
15914 /* FALLTHRU */
15915 case OMP_CLAUSE_TO:
15916 case OMP_CLAUSE_FROM:
15917 oacc_firstprivate:
15918 var = OMP_CLAUSE_DECL (c);
15919 if (!DECL_P (var))
15921 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
15922 || (!OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
15923 && (OMP_CLAUSE_MAP_KIND (c)
15924 != GOMP_MAP_FIRSTPRIVATE_POINTER)))
15925 map_cnt++;
15926 continue;
15929 if (DECL_SIZE (var)
15930 && TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
15932 tree var2 = DECL_VALUE_EXPR (var);
15933 gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
15934 var2 = TREE_OPERAND (var2, 0);
15935 gcc_assert (DECL_P (var2));
15936 var = var2;
15939 if (offloaded
15940 && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
15941 && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
15942 || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE))
15944 if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
15946 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx))
15947 && varpool_node::get_create (var)->offloadable)
15948 continue;
15950 tree type = build_pointer_type (TREE_TYPE (var));
15951 tree new_var = lookup_decl (var, ctx);
15952 x = create_tmp_var_raw (type, get_name (new_var));
15953 gimple_add_tmp_var (x);
15954 x = build_simple_mem_ref (x);
15955 SET_DECL_VALUE_EXPR (new_var, x);
15956 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
15958 continue;
15961 if (!maybe_lookup_field (var, ctx))
15962 continue;
15964 /* Don't remap oacc parallel reduction variables, because the
15965 intermediate result must be local to each gang. */
15966 if (offloaded && !(OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
15967 && OMP_CLAUSE_MAP_IN_REDUCTION (c)))
15969 x = build_receiver_ref (var, true, ctx);
15970 tree new_var = lookup_decl (var, ctx);
15972 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
15973 && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
15974 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
15975 && TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
15976 x = build_simple_mem_ref (x);
15977 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
15979 gcc_assert (is_gimple_omp_oacc (ctx->stmt));
15980 if (is_reference (new_var))
15982 /* Create a local object to hold the instance
15983 value. */
15984 tree type = TREE_TYPE (TREE_TYPE (new_var));
15985 const char *id = IDENTIFIER_POINTER (DECL_NAME (new_var));
15986 tree inst = create_tmp_var (type, id);
15987 gimplify_assign (inst, fold_indirect_ref (x), &fplist);
15988 x = build_fold_addr_expr (inst);
15990 gimplify_assign (new_var, x, &fplist);
15992 else if (DECL_P (new_var))
15994 SET_DECL_VALUE_EXPR (new_var, x);
15995 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
15997 else
15998 gcc_unreachable ();
16000 map_cnt++;
16001 break;
16003 case OMP_CLAUSE_FIRSTPRIVATE:
16004 if (is_oacc_parallel (ctx))
16005 goto oacc_firstprivate;
16006 map_cnt++;
16007 var = OMP_CLAUSE_DECL (c);
16008 if (!is_reference (var)
16009 && !is_gimple_reg_type (TREE_TYPE (var)))
16011 tree new_var = lookup_decl (var, ctx);
16012 if (is_variable_sized (var))
16014 tree pvar = DECL_VALUE_EXPR (var);
16015 gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
16016 pvar = TREE_OPERAND (pvar, 0);
16017 gcc_assert (DECL_P (pvar));
16018 tree new_pvar = lookup_decl (pvar, ctx);
16019 x = build_fold_indirect_ref (new_pvar);
16020 TREE_THIS_NOTRAP (x) = 1;
16022 else
16023 x = build_receiver_ref (var, true, ctx);
16024 SET_DECL_VALUE_EXPR (new_var, x);
16025 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
16027 break;
16029 case OMP_CLAUSE_PRIVATE:
16030 if (is_gimple_omp_oacc (ctx->stmt))
16031 break;
16032 var = OMP_CLAUSE_DECL (c);
16033 if (is_variable_sized (var))
16035 tree new_var = lookup_decl (var, ctx);
16036 tree pvar = DECL_VALUE_EXPR (var);
16037 gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
16038 pvar = TREE_OPERAND (pvar, 0);
16039 gcc_assert (DECL_P (pvar));
16040 tree new_pvar = lookup_decl (pvar, ctx);
16041 x = build_fold_indirect_ref (new_pvar);
16042 TREE_THIS_NOTRAP (x) = 1;
16043 SET_DECL_VALUE_EXPR (new_var, x);
16044 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
16046 break;
16048 case OMP_CLAUSE_USE_DEVICE_PTR:
16049 case OMP_CLAUSE_IS_DEVICE_PTR:
16050 var = OMP_CLAUSE_DECL (c);
16051 map_cnt++;
16052 if (is_variable_sized (var))
16054 tree new_var = lookup_decl (var, ctx);
16055 tree pvar = DECL_VALUE_EXPR (var);
16056 gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
16057 pvar = TREE_OPERAND (pvar, 0);
16058 gcc_assert (DECL_P (pvar));
16059 tree new_pvar = lookup_decl (pvar, ctx);
16060 x = build_fold_indirect_ref (new_pvar);
16061 TREE_THIS_NOTRAP (x) = 1;
16062 SET_DECL_VALUE_EXPR (new_var, x);
16063 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
16065 else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
16067 tree new_var = lookup_decl (var, ctx);
16068 tree type = build_pointer_type (TREE_TYPE (var));
16069 x = create_tmp_var_raw (type, get_name (new_var));
16070 gimple_add_tmp_var (x);
16071 x = build_simple_mem_ref (x);
16072 SET_DECL_VALUE_EXPR (new_var, x);
16073 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
16075 else
16077 tree new_var = lookup_decl (var, ctx);
16078 x = create_tmp_var_raw (TREE_TYPE (new_var), get_name (new_var));
16079 gimple_add_tmp_var (x);
16080 SET_DECL_VALUE_EXPR (new_var, x);
16081 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
16083 break;
16086 if (offloaded)
16088 target_nesting_level++;
16089 lower_omp (&tgt_body, ctx);
16090 target_nesting_level--;
16092 else if (data_region)
16093 lower_omp (&tgt_body, ctx);
16095 if (offloaded)
16097 /* Declare all the variables created by mapping and the variables
16098 declared in the scope of the target body. */
16099 record_vars_into (ctx->block_vars, child_fn);
16100 record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
16103 olist = NULL;
16104 ilist = NULL;
16105 if (ctx->record_type)
16107 ctx->sender_decl
16108 = create_tmp_var (ctx->record_type, ".omp_data_arr");
16109 DECL_NAMELESS (ctx->sender_decl) = 1;
16110 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
16111 t = make_tree_vec (3);
16112 TREE_VEC_ELT (t, 0) = ctx->sender_decl;
16113 TREE_VEC_ELT (t, 1)
16114 = create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
16115 ".omp_data_sizes");
16116 DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
16117 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
16118 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
16119 tree tkind_type = short_unsigned_type_node;
16120 int talign_shift = 8;
16121 TREE_VEC_ELT (t, 2)
16122 = create_tmp_var (build_array_type_nelts (tkind_type, map_cnt),
16123 ".omp_data_kinds");
16124 DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
16125 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
16126 TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
16127 gimple_omp_target_set_data_arg (stmt, t);
16129 vec<constructor_elt, va_gc> *vsize;
16130 vec<constructor_elt, va_gc> *vkind;
16131 vec_alloc (vsize, map_cnt);
16132 vec_alloc (vkind, map_cnt);
16133 unsigned int map_idx = 0;
16135 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
16136 switch (OMP_CLAUSE_CODE (c))
16138 tree ovar, nc, s, purpose, var, x, type;
16139 unsigned int talign;
16141 default:
16142 break;
16144 case OMP_CLAUSE_MAP:
16145 case OMP_CLAUSE_TO:
16146 case OMP_CLAUSE_FROM:
16147 oacc_firstprivate_map:
16148 nc = c;
16149 ovar = OMP_CLAUSE_DECL (c);
16150 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
16151 && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
16152 || (OMP_CLAUSE_MAP_KIND (c)
16153 == GOMP_MAP_FIRSTPRIVATE_REFERENCE)))
16154 break;
16155 if (!DECL_P (ovar))
16157 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
16158 && OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
16160 gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
16161 == get_base_address (ovar));
16162 nc = OMP_CLAUSE_CHAIN (c);
16163 ovar = OMP_CLAUSE_DECL (nc);
16165 else
16167 tree x = build_sender_ref (ovar, ctx);
16168 tree v
16169 = build_fold_addr_expr_with_type (ovar, ptr_type_node);
16170 gimplify_assign (x, v, &ilist);
16171 nc = NULL_TREE;
16174 else
16176 if (DECL_SIZE (ovar)
16177 && TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
16179 tree ovar2 = DECL_VALUE_EXPR (ovar);
16180 gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
16181 ovar2 = TREE_OPERAND (ovar2, 0);
16182 gcc_assert (DECL_P (ovar2));
16183 ovar = ovar2;
16185 if (!maybe_lookup_field (ovar, ctx))
16186 continue;
16189 talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
16190 if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
16191 talign = DECL_ALIGN_UNIT (ovar);
16192 if (nc)
16194 var = lookup_decl_in_outer_ctx (ovar, ctx);
16195 x = build_sender_ref (ovar, ctx);
16197 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
16198 && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
16199 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
16200 && TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
16202 gcc_assert (offloaded);
16203 tree avar
16204 = create_tmp_var (TREE_TYPE (TREE_TYPE (x)));
16205 mark_addressable (avar);
16206 gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
16207 talign = DECL_ALIGN_UNIT (avar);
16208 avar = build_fold_addr_expr (avar);
16209 gimplify_assign (x, avar, &ilist);
16211 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
16213 gcc_assert (is_gimple_omp_oacc (ctx->stmt));
16214 if (!is_reference (var))
16216 if (is_gimple_reg (var)
16217 && OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (c))
16218 TREE_NO_WARNING (var) = 1;
16219 var = build_fold_addr_expr (var);
16221 else
16222 talign = TYPE_ALIGN_UNIT (TREE_TYPE (TREE_TYPE (ovar)));
16223 gimplify_assign (x, var, &ilist);
16225 else if (is_gimple_reg (var))
16227 gcc_assert (offloaded);
16228 tree avar = create_tmp_var (TREE_TYPE (var));
16229 mark_addressable (avar);
16230 enum gomp_map_kind map_kind = OMP_CLAUSE_MAP_KIND (c);
16231 if (GOMP_MAP_COPY_TO_P (map_kind)
16232 || map_kind == GOMP_MAP_POINTER
16233 || map_kind == GOMP_MAP_TO_PSET
16234 || map_kind == GOMP_MAP_FORCE_DEVICEPTR)
16236 /* If we need to initialize a temporary
16237 with VAR because it is not addressable, and
16238 the variable hasn't been initialized yet, then
16239 we'll get a warning for the store to avar.
16240 Don't warn in that case; the mapping might
16241 be implicit. */
16242 TREE_NO_WARNING (var) = 1;
16243 gimplify_assign (avar, var, &ilist);
16245 avar = build_fold_addr_expr (avar);
16246 gimplify_assign (x, avar, &ilist);
16247 if ((GOMP_MAP_COPY_FROM_P (map_kind)
16248 || map_kind == GOMP_MAP_FORCE_DEVICEPTR)
16249 && !TYPE_READONLY (TREE_TYPE (var)))
16251 x = unshare_expr (x);
16252 x = build_simple_mem_ref (x);
16253 gimplify_assign (var, x, &olist);
16256 else
16258 var = build_fold_addr_expr (var);
16259 gimplify_assign (x, var, &ilist);
16262 s = NULL_TREE;
16263 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
16265 gcc_checking_assert (is_gimple_omp_oacc (ctx->stmt));
16266 s = TREE_TYPE (ovar);
16267 if (TREE_CODE (s) == REFERENCE_TYPE)
16268 s = TREE_TYPE (s);
16269 s = TYPE_SIZE_UNIT (s);
16271 else
16272 s = OMP_CLAUSE_SIZE (c);
16273 if (s == NULL_TREE)
16274 s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
16275 s = fold_convert (size_type_node, s);
16276 purpose = size_int (map_idx++);
16277 CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
16278 if (TREE_CODE (s) != INTEGER_CST)
16279 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
16281 unsigned HOST_WIDE_INT tkind, tkind_zero;
16282 switch (OMP_CLAUSE_CODE (c))
16284 case OMP_CLAUSE_MAP:
16285 tkind = OMP_CLAUSE_MAP_KIND (c);
16286 tkind_zero = tkind;
16287 if (OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (c))
16288 switch (tkind)
16290 case GOMP_MAP_ALLOC:
16291 case GOMP_MAP_TO:
16292 case GOMP_MAP_FROM:
16293 case GOMP_MAP_TOFROM:
16294 case GOMP_MAP_ALWAYS_TO:
16295 case GOMP_MAP_ALWAYS_FROM:
16296 case GOMP_MAP_ALWAYS_TOFROM:
16297 case GOMP_MAP_RELEASE:
16298 case GOMP_MAP_FORCE_TO:
16299 case GOMP_MAP_FORCE_FROM:
16300 case GOMP_MAP_FORCE_TOFROM:
16301 case GOMP_MAP_FORCE_PRESENT:
16302 tkind_zero = GOMP_MAP_ZERO_LEN_ARRAY_SECTION;
16303 break;
16304 case GOMP_MAP_DELETE:
16305 tkind_zero = GOMP_MAP_DELETE_ZERO_LEN_ARRAY_SECTION;
16306 default:
16307 break;
16309 if (tkind_zero != tkind)
16311 if (integer_zerop (s))
16312 tkind = tkind_zero;
16313 else if (integer_nonzerop (s))
16314 tkind_zero = tkind;
16316 break;
16317 case OMP_CLAUSE_FIRSTPRIVATE:
16318 gcc_checking_assert (is_gimple_omp_oacc (ctx->stmt));
16319 tkind = GOMP_MAP_TO;
16320 tkind_zero = tkind;
16321 break;
16322 case OMP_CLAUSE_TO:
16323 tkind = GOMP_MAP_TO;
16324 tkind_zero = tkind;
16325 break;
16326 case OMP_CLAUSE_FROM:
16327 tkind = GOMP_MAP_FROM;
16328 tkind_zero = tkind;
16329 break;
16330 default:
16331 gcc_unreachable ();
16333 gcc_checking_assert (tkind
16334 < (HOST_WIDE_INT_C (1U) << talign_shift));
16335 gcc_checking_assert (tkind_zero
16336 < (HOST_WIDE_INT_C (1U) << talign_shift));
16337 talign = ceil_log2 (talign);
16338 tkind |= talign << talign_shift;
16339 tkind_zero |= talign << talign_shift;
16340 gcc_checking_assert (tkind
16341 <= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
16342 gcc_checking_assert (tkind_zero
16343 <= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
16344 if (tkind == tkind_zero)
16345 x = build_int_cstu (tkind_type, tkind);
16346 else
16348 TREE_STATIC (TREE_VEC_ELT (t, 2)) = 0;
16349 x = build3 (COND_EXPR, tkind_type,
16350 fold_build2 (EQ_EXPR, boolean_type_node,
16351 unshare_expr (s), size_zero_node),
16352 build_int_cstu (tkind_type, tkind_zero),
16353 build_int_cstu (tkind_type, tkind));
16355 CONSTRUCTOR_APPEND_ELT (vkind, purpose, x);
16356 if (nc && nc != c)
16357 c = nc;
16358 break;
16360 case OMP_CLAUSE_FIRSTPRIVATE:
16361 if (is_oacc_parallel (ctx))
16362 goto oacc_firstprivate_map;
16363 ovar = OMP_CLAUSE_DECL (c);
16364 if (is_reference (ovar))
16365 talign = TYPE_ALIGN_UNIT (TREE_TYPE (TREE_TYPE (ovar)));
16366 else
16367 talign = DECL_ALIGN_UNIT (ovar);
16368 var = lookup_decl_in_outer_ctx (ovar, ctx);
16369 x = build_sender_ref (ovar, ctx);
16370 tkind = GOMP_MAP_FIRSTPRIVATE;
16371 type = TREE_TYPE (ovar);
16372 if (is_reference (ovar))
16373 type = TREE_TYPE (type);
16374 if ((INTEGRAL_TYPE_P (type)
16375 && TYPE_PRECISION (type) <= POINTER_SIZE)
16376 || TREE_CODE (type) == POINTER_TYPE)
16378 tkind = GOMP_MAP_FIRSTPRIVATE_INT;
16379 tree t = var;
16380 if (is_reference (var))
16381 t = build_simple_mem_ref (var);
16382 else if (OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (c))
16383 TREE_NO_WARNING (var) = 1;
16384 if (TREE_CODE (type) != POINTER_TYPE)
16385 t = fold_convert (pointer_sized_int_node, t);
16386 t = fold_convert (TREE_TYPE (x), t);
16387 gimplify_assign (x, t, &ilist);
16389 else if (is_reference (var))
16390 gimplify_assign (x, var, &ilist);
16391 else if (is_gimple_reg (var))
16393 tree avar = create_tmp_var (TREE_TYPE (var));
16394 mark_addressable (avar);
16395 if (OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (c))
16396 TREE_NO_WARNING (var) = 1;
16397 gimplify_assign (avar, var, &ilist);
16398 avar = build_fold_addr_expr (avar);
16399 gimplify_assign (x, avar, &ilist);
16401 else
16403 var = build_fold_addr_expr (var);
16404 gimplify_assign (x, var, &ilist);
16406 if (tkind == GOMP_MAP_FIRSTPRIVATE_INT)
16407 s = size_int (0);
16408 else if (is_reference (ovar))
16409 s = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ovar)));
16410 else
16411 s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
16412 s = fold_convert (size_type_node, s);
16413 purpose = size_int (map_idx++);
16414 CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
16415 if (TREE_CODE (s) != INTEGER_CST)
16416 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
16418 gcc_checking_assert (tkind
16419 < (HOST_WIDE_INT_C (1U) << talign_shift));
16420 talign = ceil_log2 (talign);
16421 tkind |= talign << talign_shift;
16422 gcc_checking_assert (tkind
16423 <= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
16424 CONSTRUCTOR_APPEND_ELT (vkind, purpose,
16425 build_int_cstu (tkind_type, tkind));
16426 break;
16428 case OMP_CLAUSE_USE_DEVICE_PTR:
16429 case OMP_CLAUSE_IS_DEVICE_PTR:
16430 ovar = OMP_CLAUSE_DECL (c);
16431 var = lookup_decl_in_outer_ctx (ovar, ctx);
16432 x = build_sender_ref (ovar, ctx);
16433 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_PTR)
16434 tkind = GOMP_MAP_USE_DEVICE_PTR;
16435 else
16436 tkind = GOMP_MAP_FIRSTPRIVATE_INT;
16437 type = TREE_TYPE (ovar);
16438 if (TREE_CODE (type) == ARRAY_TYPE)
16439 var = build_fold_addr_expr (var);
16440 else
16442 if (is_reference (ovar))
16444 type = TREE_TYPE (type);
16445 if (TREE_CODE (type) != ARRAY_TYPE)
16446 var = build_simple_mem_ref (var);
16447 var = fold_convert (TREE_TYPE (x), var);
16450 gimplify_assign (x, var, &ilist);
16451 s = size_int (0);
16452 purpose = size_int (map_idx++);
16453 CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
16454 gcc_checking_assert (tkind
16455 < (HOST_WIDE_INT_C (1U) << talign_shift));
16456 gcc_checking_assert (tkind
16457 <= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
16458 CONSTRUCTOR_APPEND_ELT (vkind, purpose,
16459 build_int_cstu (tkind_type, tkind));
16460 break;
16463 gcc_assert (map_idx == map_cnt);
16465 DECL_INITIAL (TREE_VEC_ELT (t, 1))
16466 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
16467 DECL_INITIAL (TREE_VEC_ELT (t, 2))
16468 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
16469 for (int i = 1; i <= 2; i++)
16470 if (!TREE_STATIC (TREE_VEC_ELT (t, i)))
16472 gimple_seq initlist = NULL;
16473 force_gimple_operand (build1 (DECL_EXPR, void_type_node,
16474 TREE_VEC_ELT (t, i)),
16475 &initlist, true, NULL_TREE);
16476 gimple_seq_add_seq (&ilist, initlist);
16478 tree clobber = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, i)),
16479 NULL);
16480 TREE_THIS_VOLATILE (clobber) = 1;
16481 gimple_seq_add_stmt (&olist,
16482 gimple_build_assign (TREE_VEC_ELT (t, i),
16483 clobber));
16486 tree clobber = build_constructor (ctx->record_type, NULL);
16487 TREE_THIS_VOLATILE (clobber) = 1;
16488 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
16489 clobber));
16492 /* Once all the expansions are done, sequence all the different
16493 fragments inside gimple_omp_body. */
16495 new_body = NULL;
16497 if (offloaded
16498 && ctx->record_type)
16500 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
16501 /* fixup_child_record_type might have changed receiver_decl's type. */
16502 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
16503 gimple_seq_add_stmt (&new_body,
16504 gimple_build_assign (ctx->receiver_decl, t));
16506 gimple_seq_add_seq (&new_body, fplist);
16508 if (offloaded || data_region)
16510 tree prev = NULL_TREE;
16511 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
16512 switch (OMP_CLAUSE_CODE (c))
16514 tree var, x;
16515 default:
16516 break;
16517 case OMP_CLAUSE_FIRSTPRIVATE:
16518 if (is_gimple_omp_oacc (ctx->stmt))
16519 break;
16520 var = OMP_CLAUSE_DECL (c);
16521 if (is_reference (var)
16522 || is_gimple_reg_type (TREE_TYPE (var)))
16524 tree new_var = lookup_decl (var, ctx);
16525 tree type;
16526 type = TREE_TYPE (var);
16527 if (is_reference (var))
16528 type = TREE_TYPE (type);
16529 if ((INTEGRAL_TYPE_P (type)
16530 && TYPE_PRECISION (type) <= POINTER_SIZE)
16531 || TREE_CODE (type) == POINTER_TYPE)
16533 x = build_receiver_ref (var, false, ctx);
16534 if (TREE_CODE (type) != POINTER_TYPE)
16535 x = fold_convert (pointer_sized_int_node, x);
16536 x = fold_convert (type, x);
16537 gimplify_expr (&x, &new_body, NULL, is_gimple_val,
16538 fb_rvalue);
16539 if (is_reference (var))
16541 tree v = create_tmp_var_raw (type, get_name (var));
16542 gimple_add_tmp_var (v);
16543 TREE_ADDRESSABLE (v) = 1;
16544 gimple_seq_add_stmt (&new_body,
16545 gimple_build_assign (v, x));
16546 x = build_fold_addr_expr (v);
16548 gimple_seq_add_stmt (&new_body,
16549 gimple_build_assign (new_var, x));
16551 else
16553 x = build_receiver_ref (var, !is_reference (var), ctx);
16554 gimplify_expr (&x, &new_body, NULL, is_gimple_val,
16555 fb_rvalue);
16556 gimple_seq_add_stmt (&new_body,
16557 gimple_build_assign (new_var, x));
16560 else if (is_variable_sized (var))
16562 tree pvar = DECL_VALUE_EXPR (var);
16563 gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
16564 pvar = TREE_OPERAND (pvar, 0);
16565 gcc_assert (DECL_P (pvar));
16566 tree new_var = lookup_decl (pvar, ctx);
16567 x = build_receiver_ref (var, false, ctx);
16568 gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
16569 gimple_seq_add_stmt (&new_body,
16570 gimple_build_assign (new_var, x));
16572 break;
16573 case OMP_CLAUSE_PRIVATE:
16574 if (is_gimple_omp_oacc (ctx->stmt))
16575 break;
16576 var = OMP_CLAUSE_DECL (c);
16577 if (is_reference (var))
16579 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
16580 tree new_var = lookup_decl (var, ctx);
16581 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
16582 if (TREE_CONSTANT (x))
16584 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
16585 get_name (var));
16586 gimple_add_tmp_var (x);
16587 TREE_ADDRESSABLE (x) = 1;
16588 x = build_fold_addr_expr_loc (clause_loc, x);
16590 else
16591 break;
16593 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
16594 gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
16595 gimple_seq_add_stmt (&new_body,
16596 gimple_build_assign (new_var, x));
16598 break;
16599 case OMP_CLAUSE_USE_DEVICE_PTR:
16600 case OMP_CLAUSE_IS_DEVICE_PTR:
16601 var = OMP_CLAUSE_DECL (c);
16602 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_PTR)
16603 x = build_sender_ref (var, ctx);
16604 else
16605 x = build_receiver_ref (var, false, ctx);
16606 if (is_variable_sized (var))
16608 tree pvar = DECL_VALUE_EXPR (var);
16609 gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
16610 pvar = TREE_OPERAND (pvar, 0);
16611 gcc_assert (DECL_P (pvar));
16612 tree new_var = lookup_decl (pvar, ctx);
16613 gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
16614 gimple_seq_add_stmt (&new_body,
16615 gimple_build_assign (new_var, x));
16617 else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
16619 tree new_var = lookup_decl (var, ctx);
16620 new_var = DECL_VALUE_EXPR (new_var);
16621 gcc_assert (TREE_CODE (new_var) == MEM_REF);
16622 new_var = TREE_OPERAND (new_var, 0);
16623 gcc_assert (DECL_P (new_var));
16624 gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
16625 gimple_seq_add_stmt (&new_body,
16626 gimple_build_assign (new_var, x));
16628 else
16630 tree type = TREE_TYPE (var);
16631 tree new_var = lookup_decl (var, ctx);
16632 if (is_reference (var))
16634 type = TREE_TYPE (type);
16635 if (TREE_CODE (type) != ARRAY_TYPE)
16637 tree v = create_tmp_var_raw (type, get_name (var));
16638 gimple_add_tmp_var (v);
16639 TREE_ADDRESSABLE (v) = 1;
16640 x = fold_convert (type, x);
16641 gimplify_expr (&x, &new_body, NULL, is_gimple_val,
16642 fb_rvalue);
16643 gimple_seq_add_stmt (&new_body,
16644 gimple_build_assign (v, x));
16645 x = build_fold_addr_expr (v);
16648 new_var = DECL_VALUE_EXPR (new_var);
16649 x = fold_convert (TREE_TYPE (new_var), x);
16650 gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
16651 gimple_seq_add_stmt (&new_body,
16652 gimple_build_assign (new_var, x));
16654 break;
16656 /* Handle GOMP_MAP_FIRSTPRIVATE_{POINTER,REFERENCE} in a second pass,
16657 so that firstprivate vars holding OMP_CLAUSE_SIZE, if needed,
16658 are already handled. Similarly OMP_CLAUSE_PRIVATE for VLAs
16659 or references to VLAs. */
16660 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
16661 switch (OMP_CLAUSE_CODE (c))
16663 tree var;
16664 default:
16665 break;
16666 case OMP_CLAUSE_MAP:
16667 if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
16668 || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)
16670 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
16671 HOST_WIDE_INT offset = 0;
16672 gcc_assert (prev);
16673 var = OMP_CLAUSE_DECL (c);
16674 if (DECL_P (var)
16675 && TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
16676 && is_global_var (maybe_lookup_decl_in_outer_ctx (var,
16677 ctx))
16678 && varpool_node::get_create (var)->offloadable)
16679 break;
16680 if (TREE_CODE (var) == INDIRECT_REF
16681 && TREE_CODE (TREE_OPERAND (var, 0)) == COMPONENT_REF)
16682 var = TREE_OPERAND (var, 0);
16683 if (TREE_CODE (var) == COMPONENT_REF)
16685 var = get_addr_base_and_unit_offset (var, &offset);
16686 gcc_assert (var != NULL_TREE && DECL_P (var));
16688 else if (DECL_SIZE (var)
16689 && TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
16691 tree var2 = DECL_VALUE_EXPR (var);
16692 gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
16693 var2 = TREE_OPERAND (var2, 0);
16694 gcc_assert (DECL_P (var2));
16695 var = var2;
16697 tree new_var = lookup_decl (var, ctx), x;
16698 tree type = TREE_TYPE (new_var);
16699 bool is_ref;
16700 if (TREE_CODE (OMP_CLAUSE_DECL (c)) == INDIRECT_REF
16701 && (TREE_CODE (TREE_OPERAND (OMP_CLAUSE_DECL (c), 0))
16702 == COMPONENT_REF))
16704 type = TREE_TYPE (TREE_OPERAND (OMP_CLAUSE_DECL (c), 0));
16705 is_ref = true;
16706 new_var = build2 (MEM_REF, type,
16707 build_fold_addr_expr (new_var),
16708 build_int_cst (build_pointer_type (type),
16709 offset));
16711 else if (TREE_CODE (OMP_CLAUSE_DECL (c)) == COMPONENT_REF)
16713 type = TREE_TYPE (OMP_CLAUSE_DECL (c));
16714 is_ref = TREE_CODE (type) == REFERENCE_TYPE;
16715 new_var = build2 (MEM_REF, type,
16716 build_fold_addr_expr (new_var),
16717 build_int_cst (build_pointer_type (type),
16718 offset));
16720 else
16721 is_ref = is_reference (var);
16722 if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)
16723 is_ref = false;
16724 bool ref_to_array = false;
16725 if (is_ref)
16727 type = TREE_TYPE (type);
16728 if (TREE_CODE (type) == ARRAY_TYPE)
16730 type = build_pointer_type (type);
16731 ref_to_array = true;
16734 else if (TREE_CODE (type) == ARRAY_TYPE)
16736 tree decl2 = DECL_VALUE_EXPR (new_var);
16737 gcc_assert (TREE_CODE (decl2) == MEM_REF);
16738 decl2 = TREE_OPERAND (decl2, 0);
16739 gcc_assert (DECL_P (decl2));
16740 new_var = decl2;
16741 type = TREE_TYPE (new_var);
16743 x = build_receiver_ref (OMP_CLAUSE_DECL (prev), false, ctx);
16744 x = fold_convert_loc (clause_loc, type, x);
16745 if (!integer_zerop (OMP_CLAUSE_SIZE (c)))
16747 tree bias = OMP_CLAUSE_SIZE (c);
16748 if (DECL_P (bias))
16749 bias = lookup_decl (bias, ctx);
16750 bias = fold_convert_loc (clause_loc, sizetype, bias);
16751 bias = fold_build1_loc (clause_loc, NEGATE_EXPR, sizetype,
16752 bias);
16753 x = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
16754 TREE_TYPE (x), x, bias);
16756 if (ref_to_array)
16757 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
16758 gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
16759 if (is_ref && !ref_to_array)
16761 tree t = create_tmp_var_raw (type, get_name (var));
16762 gimple_add_tmp_var (t);
16763 TREE_ADDRESSABLE (t) = 1;
16764 gimple_seq_add_stmt (&new_body,
16765 gimple_build_assign (t, x));
16766 x = build_fold_addr_expr_loc (clause_loc, t);
16768 gimple_seq_add_stmt (&new_body,
16769 gimple_build_assign (new_var, x));
16770 prev = NULL_TREE;
16772 else if (OMP_CLAUSE_CHAIN (c)
16773 && OMP_CLAUSE_CODE (OMP_CLAUSE_CHAIN (c))
16774 == OMP_CLAUSE_MAP
16775 && (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (c))
16776 == GOMP_MAP_FIRSTPRIVATE_POINTER
16777 || (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (c))
16778 == GOMP_MAP_FIRSTPRIVATE_REFERENCE)))
16779 prev = c;
16780 break;
16781 case OMP_CLAUSE_PRIVATE:
16782 var = OMP_CLAUSE_DECL (c);
16783 if (is_variable_sized (var))
16785 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
16786 tree new_var = lookup_decl (var, ctx);
16787 tree pvar = DECL_VALUE_EXPR (var);
16788 gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
16789 pvar = TREE_OPERAND (pvar, 0);
16790 gcc_assert (DECL_P (pvar));
16791 tree new_pvar = lookup_decl (pvar, ctx);
16792 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
16793 tree al = size_int (DECL_ALIGN (var));
16794 tree x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
16795 x = build_call_expr_loc (clause_loc, atmp, 2, x, al);
16796 x = fold_convert_loc (clause_loc, TREE_TYPE (new_pvar), x);
16797 gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
16798 gimple_seq_add_stmt (&new_body,
16799 gimple_build_assign (new_pvar, x));
16801 else if (is_reference (var) && !is_gimple_omp_oacc (ctx->stmt))
16803 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
16804 tree new_var = lookup_decl (var, ctx);
16805 tree x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
16806 if (TREE_CONSTANT (x))
16807 break;
16808 else
16810 tree atmp
16811 = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
16812 tree rtype = TREE_TYPE (TREE_TYPE (new_var));
16813 tree al = size_int (TYPE_ALIGN (rtype));
16814 x = build_call_expr_loc (clause_loc, atmp, 2, x, al);
16817 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
16818 gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
16819 gimple_seq_add_stmt (&new_body,
16820 gimple_build_assign (new_var, x));
16822 break;
16825 gimple_seq fork_seq = NULL;
16826 gimple_seq join_seq = NULL;
16828 if (is_oacc_parallel (ctx))
16830 /* If there are reductions on the offloaded region itself, treat
16831 them as a dummy GANG loop. */
16832 tree level = build_int_cst (integer_type_node, GOMP_DIM_GANG);
16834 lower_oacc_reductions (gimple_location (ctx->stmt), clauses, level,
16835 false, NULL, NULL, &fork_seq, &join_seq, ctx);
16838 gimple_seq_add_seq (&new_body, fork_seq);
16839 gimple_seq_add_seq (&new_body, tgt_body);
16840 gimple_seq_add_seq (&new_body, join_seq);
16842 if (offloaded)
16843 new_body = maybe_catch_exception (new_body);
16845 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
16846 gimple_omp_set_body (stmt, new_body);
16849 bind = gimple_build_bind (NULL, NULL,
16850 tgt_bind ? gimple_bind_block (tgt_bind)
16851 : NULL_TREE);
16852 gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
16853 gimple_bind_add_seq (bind, ilist);
16854 gimple_bind_add_stmt (bind, stmt);
16855 gimple_bind_add_seq (bind, olist);
16857 pop_gimplify_context (NULL);
16859 if (dep_bind)
16861 gimple_bind_add_seq (dep_bind, dep_ilist);
16862 gimple_bind_add_stmt (dep_bind, bind);
16863 gimple_bind_add_seq (dep_bind, dep_olist);
16864 pop_gimplify_context (dep_bind);
16868 /* Expand code for an OpenMP teams directive. */
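/* As an illustrative sketch (not the exact GIMPLE emitted here), a source
   construct such as

     #pragma omp teams num_teams (4) thread_limit (64)
       body;

   becomes a GIMPLE_BIND whose body first evaluates the two clause
   expressions, then emits the teams statement together with a call to
   GOMP_teams (4, 64), and finally the lowered body followed by the
   reduction and destructor sequences.  */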
16870 static void
16871 lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
16873 gomp_teams *teams_stmt = as_a <gomp_teams *> (gsi_stmt (*gsi_p));
16874 push_gimplify_context ();
16876 tree block = make_node (BLOCK);
16877 gbind *bind = gimple_build_bind (NULL, NULL, block);
16878 gsi_replace (gsi_p, bind, true);
16879 gimple_seq bind_body = NULL;
16880 gimple_seq dlist = NULL;
16881 gimple_seq olist = NULL;
16883 tree num_teams = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
16884 OMP_CLAUSE_NUM_TEAMS);
16885 if (num_teams == NULL_TREE)
16886 num_teams = build_int_cst (unsigned_type_node, 0);
16887 else
16889 num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
16890 num_teams = fold_convert (unsigned_type_node, num_teams);
16891 gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
16893 tree thread_limit = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
16894 OMP_CLAUSE_THREAD_LIMIT);
16895 if (thread_limit == NULL_TREE)
16896 thread_limit = build_int_cst (unsigned_type_node, 0);
16897 else
16899 thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
16900 thread_limit = fold_convert (unsigned_type_node, thread_limit);
16901 gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
16902 fb_rvalue);
16905 lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
16906 &bind_body, &dlist, ctx, NULL);
16907 lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
16908 lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
16909 if (!gimple_omp_teams_grid_phony (teams_stmt))
16911 gimple_seq_add_stmt (&bind_body, teams_stmt);
16912 location_t loc = gimple_location (teams_stmt);
16913 tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
16914 gimple *call = gimple_build_call (decl, 2, num_teams, thread_limit);
16915 gimple_set_location (call, loc);
16916 gimple_seq_add_stmt (&bind_body, call);
16919 gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
16920 gimple_omp_set_body (teams_stmt, NULL);
16921 gimple_seq_add_seq (&bind_body, olist);
16922 gimple_seq_add_seq (&bind_body, dlist);
16923 if (!gimple_omp_teams_grid_phony (teams_stmt))
16924 gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
16925 gimple_bind_set_body (bind, bind_body);
16927 pop_gimplify_context (bind);
16929 gimple_bind_append_vars (bind, ctx->block_vars);
16930 BLOCK_VARS (block) = ctx->block_vars;
16931 if (BLOCK_VARS (block))
16932 TREE_USED (block) = 1;
16935 /* Expand code within an artificial GIMPLE_OMP_GRID_BODY OMP construct. */
16937 static void
16938 lower_omp_grid_body (gimple_stmt_iterator *gsi_p, omp_context *ctx)
16940 gimple *stmt = gsi_stmt (*gsi_p);
16941 lower_omp (gimple_omp_body_ptr (stmt), ctx);
16942 gimple_seq_add_stmt (gimple_omp_body_ptr (stmt),
16943 gimple_build_omp_return (false));
16947 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
16948 regimplified. If DATA is non-NULL, lower_omp_1 is outside
16949 of OMP context, but with task_shared_vars set. */
16951 static tree
16952 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
16953 void *data)
16955 tree t = *tp;
16957 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
16958 if (VAR_P (t) && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
16959 return t;
16961 if (task_shared_vars
16962 && DECL_P (t)
16963 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
16964 return t;
16966 /* If a global variable has been privatized, TREE_CONSTANT on
16967 ADDR_EXPR might be wrong. */
16968 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
16969 recompute_tree_invariant_for_addr_expr (t);
16971 *walk_subtrees = !IS_TYPE_OR_DECL_P (t);
16972 return NULL_TREE;
16975 /* Data to be communicated between lower_omp_regimplify_operands and
16976 lower_omp_regimplify_operands_p. */
16978 struct lower_omp_regimplify_operands_data
16980 omp_context *ctx;
16981 vec<tree> *decls;
16984 /* Helper function for lower_omp_regimplify_operands. Find
16985 omp_member_access_dummy_var vars and temporarily adjust their
16986 DECL_VALUE_EXPRs if needed. */
16988 static tree
16989 lower_omp_regimplify_operands_p (tree *tp, int *walk_subtrees,
16990 void *data)
16992 tree t = omp_member_access_dummy_var (*tp);
16993 if (t)
16995 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
16996 lower_omp_regimplify_operands_data *ldata
16997 = (lower_omp_regimplify_operands_data *) wi->info;
16998 tree o = maybe_lookup_decl (t, ldata->ctx);
16999 if (o != t)
17001 ldata->decls->safe_push (DECL_VALUE_EXPR (*tp));
17002 ldata->decls->safe_push (*tp);
17003 tree v = unshare_and_remap (DECL_VALUE_EXPR (*tp), t, o);
17004 SET_DECL_VALUE_EXPR (*tp, v);
17007 *walk_subtrees = !IS_TYPE_OR_DECL_P (*tp);
17008 return NULL_TREE;
17011 /* Wrapper around gimple_regimplify_operands that adjusts DECL_VALUE_EXPRs
17012 of omp_member_access_dummy_var vars during regimplification. */
17014 static void
17015 lower_omp_regimplify_operands (omp_context *ctx, gimple *stmt,
17016 gimple_stmt_iterator *gsi_p)
17018 auto_vec<tree, 10> decls;
17019 if (ctx)
17021 struct walk_stmt_info wi;
17022 memset (&wi, '\0', sizeof (wi));
17023 struct lower_omp_regimplify_operands_data data;
17024 data.ctx = ctx;
17025 data.decls = &decls;
17026 wi.info = &data;
17027 walk_gimple_op (stmt, lower_omp_regimplify_operands_p, &wi);
17029 gimple_regimplify_operands (stmt, gsi_p);
17030 while (!decls.is_empty ())
17032 tree t = decls.pop ();
17033 tree v = decls.pop ();
17034 SET_DECL_VALUE_EXPR (t, v);
17038 static void
17039 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
17041 gimple *stmt = gsi_stmt (*gsi_p);
17042 struct walk_stmt_info wi;
17043 gcall *call_stmt;
17045 if (gimple_has_location (stmt))
17046 input_location = gimple_location (stmt);
17048 if (task_shared_vars)
17049 memset (&wi, '\0', sizeof (wi));
17051 /* If we have issued syntax errors, avoid doing any heavy lifting.
17052 Just replace the OMP directives with a NOP to avoid
17053 confusing RTL expansion. */
17054 if (seen_error () && is_gimple_omp (stmt))
17056 gsi_replace (gsi_p, gimple_build_nop (), true);
17057 return;
17060 switch (gimple_code (stmt))
17062 case GIMPLE_COND:
17064 gcond *cond_stmt = as_a <gcond *> (stmt);
17065 if ((ctx || task_shared_vars)
17066 && (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
17067 lower_omp_regimplify_p,
17068 ctx ? NULL : &wi, NULL)
17069 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
17070 lower_omp_regimplify_p,
17071 ctx ? NULL : &wi, NULL)))
17072 lower_omp_regimplify_operands (ctx, cond_stmt, gsi_p);
17074 break;
17075 case GIMPLE_CATCH:
17076 lower_omp (gimple_catch_handler_ptr (as_a <gcatch *> (stmt)), ctx);
17077 break;
17078 case GIMPLE_EH_FILTER:
17079 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
17080 break;
17081 case GIMPLE_TRY:
17082 lower_omp (gimple_try_eval_ptr (stmt), ctx);
17083 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
17084 break;
17085 case GIMPLE_TRANSACTION:
17086 lower_omp (gimple_transaction_body_ptr (
17087 as_a <gtransaction *> (stmt)),
17088 ctx);
17089 break;
17090 case GIMPLE_BIND:
17091 lower_omp (gimple_bind_body_ptr (as_a <gbind *> (stmt)), ctx);
17092 break;
17093 case GIMPLE_OMP_PARALLEL:
17094 case GIMPLE_OMP_TASK:
17095 ctx = maybe_lookup_ctx (stmt);
17096 gcc_assert (ctx);
17097 if (ctx->cancellable)
17098 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
17099 lower_omp_taskreg (gsi_p, ctx);
17100 break;
17101 case GIMPLE_OMP_FOR:
17102 ctx = maybe_lookup_ctx (stmt);
17103 gcc_assert (ctx);
17104 if (ctx->cancellable)
17105 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
17106 lower_omp_for (gsi_p, ctx);
17107 break;
17108 case GIMPLE_OMP_SECTIONS:
17109 ctx = maybe_lookup_ctx (stmt);
17110 gcc_assert (ctx);
17111 if (ctx->cancellable)
17112 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
17113 lower_omp_sections (gsi_p, ctx);
17114 break;
17115 case GIMPLE_OMP_SINGLE:
17116 ctx = maybe_lookup_ctx (stmt);
17117 gcc_assert (ctx);
17118 lower_omp_single (gsi_p, ctx);
17119 break;
17120 case GIMPLE_OMP_MASTER:
17121 ctx = maybe_lookup_ctx (stmt);
17122 gcc_assert (ctx);
17123 lower_omp_master (gsi_p, ctx);
17124 break;
17125 case GIMPLE_OMP_TASKGROUP:
17126 ctx = maybe_lookup_ctx (stmt);
17127 gcc_assert (ctx);
17128 lower_omp_taskgroup (gsi_p, ctx);
17129 break;
17130 case GIMPLE_OMP_ORDERED:
17131 ctx = maybe_lookup_ctx (stmt);
17132 gcc_assert (ctx);
17133 lower_omp_ordered (gsi_p, ctx);
17134 break;
17135 case GIMPLE_OMP_CRITICAL:
17136 ctx = maybe_lookup_ctx (stmt);
17137 gcc_assert (ctx);
17138 lower_omp_critical (gsi_p, ctx);
17139 break;
17140 case GIMPLE_OMP_ATOMIC_LOAD:
17141 if ((ctx || task_shared_vars)
17142 && walk_tree (gimple_omp_atomic_load_rhs_ptr (
17143 as_a <gomp_atomic_load *> (stmt)),
17144 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
17145 lower_omp_regimplify_operands (ctx, stmt, gsi_p);
17146 break;
17147 case GIMPLE_OMP_TARGET:
17148 ctx = maybe_lookup_ctx (stmt);
17149 gcc_assert (ctx);
17150 lower_omp_target (gsi_p, ctx);
17151 break;
17152 case GIMPLE_OMP_TEAMS:
17153 ctx = maybe_lookup_ctx (stmt);
17154 gcc_assert (ctx);
17155 lower_omp_teams (gsi_p, ctx);
17156 break;
17157 case GIMPLE_OMP_GRID_BODY:
17158 ctx = maybe_lookup_ctx (stmt);
17159 gcc_assert (ctx);
17160 lower_omp_grid_body (gsi_p, ctx);
17161 break;
17162 case GIMPLE_CALL:
17163 tree fndecl;
17164 call_stmt = as_a <gcall *> (stmt);
17165 fndecl = gimple_call_fndecl (call_stmt);
17166 if (fndecl
17167 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
17168 switch (DECL_FUNCTION_CODE (fndecl))
17170 case BUILT_IN_GOMP_BARRIER:
17171 if (ctx == NULL)
17172 break;
17173 /* FALLTHRU */
17174 case BUILT_IN_GOMP_CANCEL:
17175 case BUILT_IN_GOMP_CANCELLATION_POINT:
17176 omp_context *cctx;
17177 cctx = ctx;
17178 if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
17179 cctx = cctx->outer;
17180 gcc_assert (gimple_call_lhs (call_stmt) == NULL_TREE);
17181 if (!cctx->cancellable)
17183 if (DECL_FUNCTION_CODE (fndecl)
17184 == BUILT_IN_GOMP_CANCELLATION_POINT)
17186 stmt = gimple_build_nop ();
17187 gsi_replace (gsi_p, stmt, false);
17189 break;
17191 if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
17193 fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
17194 gimple_call_set_fndecl (call_stmt, fndecl);
17195 gimple_call_set_fntype (call_stmt, TREE_TYPE (fndecl));
17197 tree lhs;
17198 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (fndecl)));
17199 gimple_call_set_lhs (call_stmt, lhs);
17200 tree fallthru_label;
17201 fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
17202 gimple *g;
17203 g = gimple_build_label (fallthru_label);
17204 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
17205 g = gimple_build_cond (NE_EXPR, lhs,
17206 fold_convert (TREE_TYPE (lhs),
17207 boolean_false_node),
17208 cctx->cancel_label, fallthru_label);
17209 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
17210 break;
17211 default:
17212 break;
17214 /* FALLTHRU */
17215 default:
17216 if ((ctx || task_shared_vars)
17217 && walk_gimple_op (stmt, lower_omp_regimplify_p,
17218 ctx ? NULL : &wi))
17220 /* Just remove clobbers; this should happen only if we have
17221 "privatized" local addressable variables in SIMD regions.
17222 The clobber isn't needed in that case, and gimplifying the
17223 address of the ARRAY_REF into a pointer and creating a
17224 MEM_REF based clobber would create worse code than we get
17225 with the clobber dropped. */
17226 if (gimple_clobber_p (stmt))
17228 gsi_replace (gsi_p, gimple_build_nop (), true);
17229 break;
17231 lower_omp_regimplify_operands (ctx, stmt, gsi_p);
17233 break;
17237 static void
17238 lower_omp (gimple_seq *body, omp_context *ctx)
17240 location_t saved_location = input_location;
17241 gimple_stmt_iterator gsi;
17242 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
17243 lower_omp_1 (&gsi, ctx);
17244 /* During gimplification, we haven't folded statements inside offloading
17245 or taskreg regions (gimplify.c:maybe_fold_stmt); do that now. */
17246 if (target_nesting_level || taskreg_nesting_level)
17247 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
17248 fold_stmt (&gsi);
17249 input_location = saved_location;
17252 /* Return true if STMT is an assignment of a register-type value to a local
17253 VAR_DECL. */
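/* For example, an assignment like "D.1234 = i + 1;" to a local
   register-type temporary qualifies, while a store through a pointer or an
   assignment to a global variable does not (the names here are purely
   illustrative).  */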
17255 static bool
17256 grid_reg_assignment_to_local_var_p (gimple *stmt)
17258 gassign *assign = dyn_cast <gassign *> (stmt);
17259 if (!assign)
17260 return false;
17261 tree lhs = gimple_assign_lhs (assign);
17262 if (!VAR_P (lhs)
17263 || !is_gimple_reg_type (TREE_TYPE (lhs))
17264 || is_global_var (lhs))
17265 return false;
17266 return true;
17269 /* Return true if all statements in SEQ are assignments to local register-type
17270 variables. */
17272 static bool
17273 grid_seq_only_contains_local_assignments (gimple_seq seq)
17275 if (!seq)
17276 return true;
17278 gimple_stmt_iterator gsi;
17279 for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
17280 if (!grid_reg_assignment_to_local_var_p (gsi_stmt (gsi)))
17281 return false;
17282 return true;
17285 /* Scan statements in SEQ and call itself recursively on any bind. If during
17286 the whole search only assignments to register-type local variables and a
17287 single OMP statement are encountered, return true; otherwise return false.
17288 RET is where we store any OMP statement encountered. TARGET_LOC and NAME
17289 are used for dumping a note about a failure. */
17291 static bool
17292 grid_find_single_omp_among_assignments_1 (gimple_seq seq, location_t target_loc,
17293 const char *name, gimple **ret)
17295 gimple_stmt_iterator gsi;
17296 for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
17298 gimple *stmt = gsi_stmt (gsi);
17300 if (grid_reg_assignment_to_local_var_p (stmt))
17301 continue;
17302 if (gbind *bind = dyn_cast <gbind *> (stmt))
17304 if (!grid_find_single_omp_among_assignments_1 (gimple_bind_body (bind),
17305 target_loc, name, ret))
17306 return false;
17308 else if (is_gimple_omp (stmt))
17310 if (*ret)
17312 if (dump_enabled_p ())
17313 dump_printf_loc (MSG_NOTE, target_loc,
17314 "Will not turn target construct into a simple "
17315 "GPGPU kernel because %s construct contains "
17316 "multiple OpenMP constructs\n", name);
17317 return false;
17319 *ret = stmt;
17321 else
17323 if (dump_enabled_p ())
17324 dump_printf_loc (MSG_NOTE, target_loc,
17325 "Will not turn target construct into a simple "
17326 "GPGPU kernel because %s construct contains "
17327 "a complex statement\n", name);
17328 return false;
17331 return true;
17334 /* Scan statements in SEQ and make sure that it and any binds in it contain
17335 only assignments to local register-type variables and one OMP construct. If
17336 so, return that construct; otherwise return NULL. If dumping is enabled and
17337 the function fails, use TARGET_LOC and NAME to dump a note with the reason for
17338 failure. */
17340 static gimple *
17341 grid_find_single_omp_among_assignments (gimple_seq seq, location_t target_loc,
17342 const char *name)
17344 if (!seq)
17346 if (dump_enabled_p ())
17347 dump_printf_loc (MSG_NOTE, target_loc,
17348 "Will not turn target construct into a simple "
17349 "GPGPU kernel because %s construct has empty "
17350 "body\n",
17351 name);
17352 return NULL;
17355 gimple *ret = NULL;
17356 if (grid_find_single_omp_among_assignments_1 (seq, target_loc, name, &ret))
17358 if (!ret && dump_enabled_p ())
17359 dump_printf_loc (MSG_NOTE, target_loc,
17360 "Will not turn target construct into a simple "
17361 "GPGPU kernel because %s construct does not contain"
17362 "any other OpenMP construct\n", name);
17363 return ret;
17365 else
17366 return NULL;
17369 /* Walker function looking for statements that there is no point in gridifying
17370 (and for noreturn function calls, which we cannot handle). Return non-NULL
17371 if such a statement is found. */
17373 static tree
17374 grid_find_ungridifiable_statement (gimple_stmt_iterator *gsi,
17375 bool *handled_ops_p,
17376 struct walk_stmt_info *wi)
17378 *handled_ops_p = false;
17379 gimple *stmt = gsi_stmt (*gsi);
17380 switch (gimple_code (stmt))
17382 case GIMPLE_CALL:
17383 if (gimple_call_noreturn_p (as_a <gcall *> (stmt)))
17385 *handled_ops_p = true;
17386 wi->info = stmt;
17387 return error_mark_node;
17389 break;
17391 /* We may reduce the following list if we find a way to implement the
17392 clauses, but for now there is no point in trying further. */
17393 case GIMPLE_OMP_CRITICAL:
17394 case GIMPLE_OMP_TASKGROUP:
17395 case GIMPLE_OMP_TASK:
17396 case GIMPLE_OMP_SECTION:
17397 case GIMPLE_OMP_SECTIONS:
17398 case GIMPLE_OMP_SECTIONS_SWITCH:
17399 case GIMPLE_OMP_TARGET:
17400 case GIMPLE_OMP_ORDERED:
17401 *handled_ops_p = true;
17402 wi->info = stmt;
17403 return error_mark_node;
17405 case GIMPLE_OMP_FOR:
17406 if ((gimple_omp_for_kind (stmt) & GF_OMP_FOR_SIMD)
17407 && gimple_omp_for_combined_into_p (stmt))
17409 *handled_ops_p = true;
17410 wi->info = stmt;
17411 return error_mark_node;
17413 break;
17415 default:
17416 break;
17418 return NULL;
17422 /* If TARGET follows a pattern that can be turned into a gridified GPGPU
17423 kernel, return true; otherwise return false. In the case of success, also
17424 fill in GROUP_SIZE_P with the requested group size or NULL if there is
17425 none. */
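/* As a rough illustration, the source shape this is meant to accept is a
   loop nest like

     #pragma omp target
     #pragma omp teams thread_limit (64)
     #pragma omp distribute parallel for
     for (i = 0; i < n; i++)
       ...

   i.e. a sole teams construct inside the target, a sole combined
   distribute inside the teams, a sole parallel inside the distribute and a
   sole simple for loop inside the parallel, with only local register
   assignments in between and none of the clauses rejected below.  */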
17427 static bool
17428 grid_target_follows_gridifiable_pattern (gomp_target *target, tree *group_size_p)
17430 if (gimple_omp_target_kind (target) != GF_OMP_TARGET_KIND_REGION)
17431 return false;
17433 location_t tloc = gimple_location (target);
17434 gimple *stmt
17435 = grid_find_single_omp_among_assignments (gimple_omp_body (target),
17436 tloc, "target");
17437 if (!stmt)
17438 return false;
17439 gomp_teams *teams = dyn_cast <gomp_teams *> (stmt);
17440 tree group_size = NULL;
17441 if (!teams)
17443 dump_printf_loc (MSG_NOTE, tloc,
17444 "Will not turn target construct into a simple "
17445 "GPGPU kernel because it does not have a sole teams "
17446 "construct in it.\n");
17447 return false;
17450 tree clauses = gimple_omp_teams_clauses (teams);
17451 while (clauses)
17453 switch (OMP_CLAUSE_CODE (clauses))
17455 case OMP_CLAUSE_NUM_TEAMS:
17456 if (dump_enabled_p ())
17457 dump_printf_loc (MSG_NOTE, tloc,
17458 "Will not turn target construct into a "
17459 "gridified GPGPU kernel because we cannot "
17460 "handle num_teams clause of teams "
17461 "construct\n ");
17462 return false;
17464 case OMP_CLAUSE_REDUCTION:
17465 if (dump_enabled_p ())
17466 dump_printf_loc (MSG_NOTE, tloc,
17467 "Will not turn target construct into a "
17468 "gridified GPGPU kernel because a reduction "
17469 "clause is present\n ");
17470 return false;
17472 case OMP_CLAUSE_LASTPRIVATE:
17473 if (dump_enabled_p ())
17474 dump_printf_loc (MSG_NOTE, tloc,
17475 "Will not turn target construct into a "
17476 "gridified GPGPU kernel because a lastprivate "
17477 "clause is present\n ");
17478 return false;
17480 case OMP_CLAUSE_THREAD_LIMIT:
17481 group_size = OMP_CLAUSE_OPERAND (clauses, 0);
17482 break;
17484 default:
17485 break;
17487 clauses = OMP_CLAUSE_CHAIN (clauses);
17490 stmt = grid_find_single_omp_among_assignments (gimple_omp_body (teams), tloc,
17491 "teams");
17492 if (!stmt)
17493 return false;
17494 gomp_for *dist = dyn_cast <gomp_for *> (stmt);
17495 if (!dist)
17497 dump_printf_loc (MSG_NOTE, tloc,
17498 "Will not turn target construct into a simple "
17499 "GPGPU kernel because the teams construct does not have "
17500 "a sole distribute construct in it.\n");
17501 return false;
17504 gcc_assert (gimple_omp_for_kind (dist) == GF_OMP_FOR_KIND_DISTRIBUTE);
17505 if (!gimple_omp_for_combined_p (dist))
17507 if (dump_enabled_p ())
17508 dump_printf_loc (MSG_NOTE, tloc,
17509 "Will not turn target construct into a gridified GPGPU "
17510 "kernel because we cannot handle a standalone "
17511 "distribute construct\n ");
17512 return false;
17514 if (dist->collapse > 1)
17516 if (dump_enabled_p ())
17517 dump_printf_loc (MSG_NOTE, tloc,
17518 "Will not turn target construct into a gridified GPGPU "
17519 "kernel because the distribute construct contains "
17520 "collapse clause\n");
17521 return false;
17523 struct omp_for_data fd;
17524 extract_omp_for_data (dist, &fd, NULL);
17525 if (fd.chunk_size)
17527 if (group_size && !operand_equal_p (group_size, fd.chunk_size, 0))
17529 if (dump_enabled_p ())
17530 dump_printf_loc (MSG_NOTE, tloc,
17531 "Will not turn target construct into a "
17532 "gridified GPGPU kernel because the teams "
17533 "thread limit is different from distribute "
17534 "schedule chunk\n");
17535 return false;
17537 group_size = fd.chunk_size;
17539 stmt = grid_find_single_omp_among_assignments (gimple_omp_body (dist), tloc,
17540 "distribute");
17541 gomp_parallel *par;
17542 if (!stmt || !(par = dyn_cast <gomp_parallel *> (stmt)))
17543 return false;
17545 clauses = gimple_omp_parallel_clauses (par);
17546 while (clauses)
17548 switch (OMP_CLAUSE_CODE (clauses))
17550 case OMP_CLAUSE_NUM_THREADS:
17551 if (dump_enabled_p ())
17552 dump_printf_loc (MSG_NOTE, tloc,
17553 "Will not turn target construct into a gridified"
17554 "GPGPU kernel because there is a num_threads "
17555 "clause of the parallel construct\n");
17556 return false;
17558 case OMP_CLAUSE_REDUCTION:
17559 if (dump_enabled_p ())
17560 dump_printf_loc (MSG_NOTE, tloc,
17561 "Will not turn target construct into a "
17562 "gridified GPGPU kernel because a reduction "
17563 "clause is present\n ");
17564 return false;
17566 case OMP_CLAUSE_LASTPRIVATE:
17567 if (dump_enabled_p ())
17568 dump_printf_loc (MSG_NOTE, tloc,
17569 "Will not turn target construct into a "
17570 "gridified GPGPU kernel because a lastprivate "
17571 "clause is present\n ");
17572 return false;
17574 default:
17575 break;
17577 clauses = OMP_CLAUSE_CHAIN (clauses);
17580 stmt = grid_find_single_omp_among_assignments (gimple_omp_body (par), tloc,
17581 "parallel");
17582 gomp_for *gfor;
17583 if (!stmt || !(gfor = dyn_cast <gomp_for *> (stmt)))
17584 return false;
17586 if (gimple_omp_for_kind (gfor) != GF_OMP_FOR_KIND_FOR)
17588 if (dump_enabled_p ())
17589 dump_printf_loc (MSG_NOTE, tloc,
17590 "Will not turn target construct into a gridified GPGPU "
17591 "kernel because the inner loop is not a simple for "
17592 "loop\n");
17593 return false;
17595 if (gfor->collapse > 1)
17597 if (dump_enabled_p ())
17598 dump_printf_loc (MSG_NOTE, tloc,
17599 "Will not turn target construct into a gridified GPGPU "
17600 "kernel because the inner loop contains collapse "
17601 "clause\n");
17602 return false;
17605 if (!grid_seq_only_contains_local_assignments (gimple_omp_for_pre_body (gfor)))
17607 if (dump_enabled_p ())
17608 dump_printf_loc (MSG_NOTE, tloc,
17609 "Will not turn target construct into a gridified GPGPU "
17610 "kernel because the inner loop pre_body contains"
17611 "a complex instruction\n");
17612 return false;
17615 clauses = gimple_omp_for_clauses (gfor);
17616 while (clauses)
17618 switch (OMP_CLAUSE_CODE (clauses))
17620 case OMP_CLAUSE_SCHEDULE:
17621 if (OMP_CLAUSE_SCHEDULE_KIND (clauses) != OMP_CLAUSE_SCHEDULE_AUTO)
17623 if (dump_enabled_p ())
17624 dump_printf_loc (MSG_NOTE, tloc,
17625 "Will not turn target construct into a "
17626 "gridified GPGPU kernel because the inner "
17627 "loop has a non-automatic scheduling clause\n");
17628 return false;
17630 break;
17632 case OMP_CLAUSE_REDUCTION:
17633 if (dump_enabled_p ())
17634 dump_printf_loc (MSG_NOTE, tloc,
17635 "Will not turn target construct into a "
17636 "gridified GPGPU kernel because a reduction "
17637 "clause is present\n ");
17638 return false;
17640 case OMP_CLAUSE_LASTPRIVATE:
17641 if (dump_enabled_p ())
17642 dump_printf_loc (MSG_NOTE, tloc,
17643 "Will not turn target construct into a "
17644 "gridified GPGPU kernel because a lastprivate "
17645 "clause is present\n ");
17646 return false;
17648 default:
17649 break;
17651 clauses = OMP_CLAUSE_CHAIN (clauses);
17654 struct walk_stmt_info wi;
17655 memset (&wi, 0, sizeof (wi));
17656 if (walk_gimple_seq (gimple_omp_body (gfor),
17657 grid_find_ungridifiable_statement,
17658 NULL, &wi))
17660 gimple *bad = (gimple *) wi.info;
17661 if (dump_enabled_p ())
17663 if (is_gimple_call (bad))
17664 dump_printf_loc (MSG_NOTE, tloc,
17665 "Will not turn target construct into a gridified "
17666 " GPGPU kernel because the inner loop contains "
17667 "call to a noreturn function\n");
17668 else if (gimple_code (bad) == GIMPLE_OMP_FOR)
17669 dump_printf_loc (MSG_NOTE, tloc,
17670 "Will not turn target construct into a gridified "
17671 " GPGPU kernel because the inner loop contains "
17672 "a simd construct\n");
17673 else
17674 dump_printf_loc (MSG_NOTE, tloc,
17675 "Will not turn target construct into a gridified "
17676 "GPGPU kernel because the inner loop contains "
17677 "statement %s which cannot be transformed\n",
17678 gimple_code_name[(int) gimple_code (bad)]);
17680 return false;
17683 *group_size_p = group_size;
17684 return true;
17687 /* Operand walker, used to remap pre-body declarations according to a hash map
17688 provided in DATA. */
17690 static tree
17691 grid_remap_prebody_decls (tree *tp, int *walk_subtrees, void *data)
17693 tree t = *tp;
17695 if (DECL_P (t) || TYPE_P (t))
17696 *walk_subtrees = 0;
17697 else
17698 *walk_subtrees = 1;
17700 if (VAR_P (t))
17702 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
17703 hash_map<tree, tree> *declmap = (hash_map<tree, tree> *) wi->info;
17704 tree *repl = declmap->get (t);
17705 if (repl)
17706 *tp = *repl;
17708 return NULL_TREE;
17711 /* Copy leading register-type assignments to local variables in SRC to just
17712 before DST, creating temporaries, adjusting the mapping of operands in WI
17713 and remapping operands as necessary. Add any new temporaries to TGT_BIND.
17714 Return the first statement that does not conform to
17715 grid_reg_assignment_to_local_var_p, or NULL if there is none. */
17717 static gimple *
17718 grid_copy_leading_local_assignments (gimple_seq src, gimple_stmt_iterator *dst,
17719 gbind *tgt_bind, struct walk_stmt_info *wi)
17721 hash_map<tree, tree> *declmap = (hash_map<tree, tree> *) wi->info;
17722 gimple_stmt_iterator gsi;
17723 for (gsi = gsi_start (src); !gsi_end_p (gsi); gsi_next (&gsi))
17725 gimple *stmt = gsi_stmt (gsi);
17726 if (gbind *bind = dyn_cast <gbind *> (stmt))
17728 gimple *r = grid_copy_leading_local_assignments
17729 (gimple_bind_body (bind), dst, tgt_bind, wi);
17730 if (r)
17731 return r;
17732 else
17733 continue;
17735 if (!grid_reg_assignment_to_local_var_p (stmt))
17736 return stmt;
17737 tree lhs = gimple_assign_lhs (as_a <gassign *> (stmt));
17738 tree repl = copy_var_decl (lhs, create_tmp_var_name (NULL),
17739 TREE_TYPE (lhs));
17740 DECL_CONTEXT (repl) = current_function_decl;
17741 gimple_bind_append_vars (tgt_bind, repl);
17743 declmap->put (lhs, repl);
17744 gassign *copy = as_a <gassign *> (gimple_copy (stmt));
17745 walk_gimple_op (copy, grid_remap_prebody_decls, wi);
17746 gsi_insert_before (dst, copy, GSI_SAME_STMT);
17748 return NULL;
17751 /* Given the freshly copied top level kernel SEQ, identify the individual OMP
17752 components, mark them as part of the kernel, return the inner loop, and copy
17753 the assignments leading to them to just before DST, remapping them using WI
17754 and adding new temporaries to TGT_BIND. */
17756 static gomp_for *
17757 grid_process_kernel_body_copy (gimple_seq seq, gimple_stmt_iterator *dst,
17758 gbind *tgt_bind, struct walk_stmt_info *wi)
17760 gimple *stmt = grid_copy_leading_local_assignments (seq, dst, tgt_bind, wi);
17761 gomp_teams *teams = dyn_cast <gomp_teams *> (stmt);
17762 gcc_assert (teams);
17763 gimple_omp_teams_set_grid_phony (teams, true);
17764 stmt = grid_copy_leading_local_assignments (gimple_omp_body (teams), dst,
17765 tgt_bind, wi);
17766 gcc_checking_assert (stmt);
17767 gomp_for *dist = dyn_cast <gomp_for *> (stmt);
17768 gcc_assert (dist);
17769 gimple_seq prebody = gimple_omp_for_pre_body (dist);
17770 if (prebody)
17771 grid_copy_leading_local_assignments (prebody, dst, tgt_bind, wi);
17772 gimple_omp_for_set_grid_phony (dist, true);
17773 stmt = grid_copy_leading_local_assignments (gimple_omp_body (dist), dst,
17774 tgt_bind, wi);
17775 gcc_checking_assert (stmt);
17777 gomp_parallel *parallel = as_a <gomp_parallel *> (stmt);
17778 gimple_omp_parallel_set_grid_phony (parallel, true);
17779 stmt = grid_copy_leading_local_assignments (gimple_omp_body (parallel), dst,
17780 tgt_bind, wi);
17781 gomp_for *inner_loop = as_a <gomp_for *> (stmt);
17782 gimple_omp_for_set_kind (inner_loop, GF_OMP_FOR_KIND_GRID_LOOP);
17783 prebody = gimple_omp_for_pre_body (inner_loop);
17784 if (prebody)
17785 grid_copy_leading_local_assignments (prebody, dst, tgt_bind, wi);
17787 return inner_loop;
17790 /* If TARGET points to a GOMP_TARGET which follows a gridifiable pattern,
17791 create a GPU kernel for it. GSI must point to the same statement; TGT_BIND
17792 is the bind into which temporaries inserted before TARGET should be
17793 added. */
17795 static void
17796 grid_attempt_target_gridification (gomp_target *target,
17797 gimple_stmt_iterator *gsi,
17798 gbind *tgt_bind)
17800 tree group_size;
17801 if (!target || !grid_target_follows_gridifiable_pattern (target, &group_size))
17802 return;
17804 location_t loc = gimple_location (target);
17805 if (dump_enabled_p ())
17806 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, loc,
17807 "Target construct will be turned into a gridified GPGPU "
17808 "kernel\n");
17810 /* Copy target body to a GPUKERNEL construct: */
17811 gimple_seq kernel_seq = copy_gimple_seq_and_replace_locals
17812 (gimple_omp_body (target));
17814 hash_map<tree, tree> *declmap = new hash_map<tree, tree>;
17815 struct walk_stmt_info wi;
17816 memset (&wi, 0, sizeof (struct walk_stmt_info));
17817 wi.info = declmap;
17819 /* Copy the assignments in between OMP statements to before the target, and
17820 mark the OMP statements within the copy appropriately. */
17821 gomp_for *inner_loop = grid_process_kernel_body_copy (kernel_seq, gsi,
17822 tgt_bind, &wi);
17824 gbind *old_bind = as_a <gbind *> (gimple_seq_first (gimple_omp_body (target)));
17825 gbind *new_bind = as_a <gbind *> (gimple_seq_first (kernel_seq));
17826 tree new_block = gimple_bind_block (new_bind);
17827 tree enc_block = BLOCK_SUPERCONTEXT (gimple_bind_block (old_bind));
17828 BLOCK_CHAIN (new_block) = BLOCK_SUBBLOCKS (enc_block);
17829 BLOCK_SUBBLOCKS (enc_block) = new_block;
17830 BLOCK_SUPERCONTEXT (new_block) = enc_block;
17831 gimple *gpukernel = gimple_build_omp_grid_body (kernel_seq);
17832 gimple_seq_add_stmt
17833 (gimple_bind_body_ptr (as_a <gbind *> (gimple_omp_body (target))),
17834 gpukernel);
17836 walk_tree (&group_size, grid_remap_prebody_decls, &wi, NULL);
17837 push_gimplify_context ();
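/* For each dimension of the collapsed loop nest compute the grid size,
   i.e. the iteration count, roughly (N2 - N1 + STEP - 1) / STEP for an
   ascending loop.  As an illustrative example, an inner loop written as
   "for (i = 0; i < n; i += 4)" yields (n + 3) / 4, which is converted to a
   32-bit unsigned value and recorded, together with the requested group
   size, in an artificial OMP_CLAUSE__GRIDDIM_ clause on the target.  */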
17838 size_t collapse = gimple_omp_for_collapse (inner_loop);
17839 for (size_t i = 0; i < collapse; i++)
17841 tree itype, type = TREE_TYPE (gimple_omp_for_index (inner_loop, i));
17842 if (POINTER_TYPE_P (type))
17843 itype = signed_type_for (type);
17844 else
17845 itype = type;
17847 enum tree_code cond_code = gimple_omp_for_cond (inner_loop, i);
17848 tree n1 = unshare_expr (gimple_omp_for_initial (inner_loop, i));
17849 walk_tree (&n1, grid_remap_prebody_decls, &wi, NULL);
17850 tree n2 = unshare_expr (gimple_omp_for_final (inner_loop, i));
17851 walk_tree (&n2, grid_remap_prebody_decls, &wi, NULL);
17852 adjust_for_condition (loc, &cond_code, &n2);
17853 tree step;
17854 step = get_omp_for_step_from_incr (loc,
17855 gimple_omp_for_incr (inner_loop, i));
17856 gimple_seq tmpseq = NULL;
17857 n1 = fold_convert (itype, n1);
17858 n2 = fold_convert (itype, n2);
17859 tree t = build_int_cst (itype, (cond_code == LT_EXPR ? -1 : 1));
17860 t = fold_build2 (PLUS_EXPR, itype, step, t);
17861 t = fold_build2 (PLUS_EXPR, itype, t, n2);
17862 t = fold_build2 (MINUS_EXPR, itype, t, n1);
17863 if (TYPE_UNSIGNED (itype) && cond_code == GT_EXPR)
17864 t = fold_build2 (TRUNC_DIV_EXPR, itype,
17865 fold_build1 (NEGATE_EXPR, itype, t),
17866 fold_build1 (NEGATE_EXPR, itype, step));
17867 else
17868 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
17869 tree gs = fold_convert (uint32_type_node, t);
17870 gimplify_expr (&gs, &tmpseq, NULL, is_gimple_val, fb_rvalue);
17871 if (!gimple_seq_empty_p (tmpseq))
17872 gsi_insert_seq_before (gsi, tmpseq, GSI_SAME_STMT);
17874 tree ws;
17875 if (i == 0 && group_size)
17877 ws = fold_convert (uint32_type_node, group_size);
17878 tmpseq = NULL;
17879 gimplify_expr (&ws, &tmpseq, NULL, is_gimple_val, fb_rvalue);
17880 if (!gimple_seq_empty_p (tmpseq))
17881 gsi_insert_seq_before (gsi, tmpseq, GSI_SAME_STMT);
17883 else
17884 ws = build_zero_cst (uint32_type_node);
17886 tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__GRIDDIM_);
17887 OMP_CLAUSE__GRIDDIM__DIMENSION (c) = i;
17888 OMP_CLAUSE__GRIDDIM__SIZE (c) = gs;
17889 OMP_CLAUSE__GRIDDIM__GROUP (c) = ws;
17890 OMP_CLAUSE_CHAIN (c) = gimple_omp_target_clauses (target);
17891 gimple_omp_target_set_clauses (target, c);
17893 pop_gimplify_context (tgt_bind);
17894 delete declmap;
17895 return;
17898 /* Walker function doing all the work for grid_gridify_all_targets. */
17900 static tree
17901 grid_gridify_all_targets_stmt (gimple_stmt_iterator *gsi,
17902 bool *handled_ops_p,
17903 struct walk_stmt_info *incoming)
17905 *handled_ops_p = false;
17907 gimple *stmt = gsi_stmt (*gsi);
17908 gomp_target *target = dyn_cast <gomp_target *> (stmt);
17909 if (target)
17911 gbind *tgt_bind = (gbind *) incoming->info;
17912 gcc_checking_assert (tgt_bind);
17913 grid_attempt_target_gridification (target, gsi, tgt_bind);
17914 return NULL_TREE;
17916 gbind *bind = dyn_cast <gbind *> (stmt);
17917 if (bind)
17919 *handled_ops_p = true;
17920 struct walk_stmt_info wi;
17921 memset (&wi, 0, sizeof (wi));
17922 wi.info = bind;
17923 walk_gimple_seq_mod (gimple_bind_body_ptr (bind),
17924 grid_gridify_all_targets_stmt, NULL, &wi);
17926 return NULL_TREE;
17929 /* Attempt to gridify all target constructs in BODY_P. All such targets will
17930 have their bodies duplicated, with the new copy being put into a
17931 gimple_omp_grid_body statement. All kernel-related constructs within the
17932 grid_body will be marked with phony flags or kernel kinds. Moreover, some
17933 re-structuring is often needed, such as copying pre-bodies before the target
17934 construct so that kernel grid sizes can be computed. */
17936 static void
17937 grid_gridify_all_targets (gimple_seq *body_p)
17939 struct walk_stmt_info wi;
17940 memset (&wi, 0, sizeof (wi));
17941 walk_gimple_seq_mod (body_p, grid_gridify_all_targets_stmt, NULL, &wi);
17945 /* Main entry point. */
17947 static unsigned int
17948 execute_lower_omp (void)
17950 gimple_seq body;
17951 int i;
17952 omp_context *ctx;
17954 /* This pass always runs, to provide PROP_gimple_lomp.
17955 But often, there is nothing to do. */
17956 if (flag_cilkplus == 0 && flag_openacc == 0 && flag_openmp == 0
17957 && flag_openmp_simd == 0)
17958 return 0;
17960 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
17961 delete_omp_context);
17963 body = gimple_body (current_function_decl);
17965 if (hsa_gen_requested_p ())
17966 grid_gridify_all_targets (&body);
17968 scan_omp (&body, NULL);
17969 gcc_assert (taskreg_nesting_level == 0);
17970 FOR_EACH_VEC_ELT (taskreg_contexts, i, ctx)
17971 finish_taskreg_scan (ctx);
17972 taskreg_contexts.release ();
17974 if (all_contexts->root)
17976 if (task_shared_vars)
17977 push_gimplify_context ();
17978 lower_omp (&body, NULL);
17979 if (task_shared_vars)
17980 pop_gimplify_context (NULL);
17983 if (all_contexts)
17985 splay_tree_delete (all_contexts);
17986 all_contexts = NULL;
17988 BITMAP_FREE (task_shared_vars);
17989 return 0;
17992 namespace {
17994 const pass_data pass_data_lower_omp =
17996 GIMPLE_PASS, /* type */
17997 "omplower", /* name */
17998 OPTGROUP_NONE, /* optinfo_flags */
17999 TV_NONE, /* tv_id */
18000 PROP_gimple_any, /* properties_required */
18001 PROP_gimple_lomp, /* properties_provided */
18002 0, /* properties_destroyed */
18003 0, /* todo_flags_start */
18004 0, /* todo_flags_finish */
18007 class pass_lower_omp : public gimple_opt_pass
18009 public:
18010 pass_lower_omp (gcc::context *ctxt)
18011 : gimple_opt_pass (pass_data_lower_omp, ctxt)
18014 /* opt_pass methods: */
18015 virtual unsigned int execute (function *) { return execute_lower_omp (); }
18017 }; // class pass_lower_omp
18019 } // anon namespace
18021 gimple_opt_pass *
18022 make_pass_lower_omp (gcc::context *ctxt)
18024 return new pass_lower_omp (ctxt);
18027 /* The following is a utility to diagnose structured block violations.
18028 It is not part of the "omplower" pass, as that's invoked too late. It
18029 should be invoked by the respective front ends after gimplification. */
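/* As an illustrative example, assuming -fopenmp and a source fragment like

     #pragma omp parallel
     {
       if (cond)
         goto out;
     }
     out:;

   the first pass below records the context of each label and the second
   pass then reports "invalid branch to/from OpenMP structured block" for
   the goto.  */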
18031 static splay_tree all_labels;
18033 /* Check for mismatched contexts and generate an error if needed. Return
18034 true if an error is detected. */
18036 static bool
18037 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
18038 gimple *branch_ctx, gimple *label_ctx)
18040 gcc_checking_assert (!branch_ctx || is_gimple_omp (branch_ctx));
18041 gcc_checking_assert (!label_ctx || is_gimple_omp (label_ctx));
18043 if (label_ctx == branch_ctx)
18044 return false;
18046 const char* kind = NULL;
18048 if (flag_cilkplus)
18050 if ((branch_ctx
18051 && gimple_code (branch_ctx) == GIMPLE_OMP_FOR
18052 && gimple_omp_for_kind (branch_ctx) == GF_OMP_FOR_KIND_CILKSIMD)
18053 || (label_ctx
18054 && gimple_code (label_ctx) == GIMPLE_OMP_FOR
18055 && gimple_omp_for_kind (label_ctx) == GF_OMP_FOR_KIND_CILKSIMD))
18056 kind = "Cilk Plus";
18058 if (flag_openacc)
18060 if ((branch_ctx && is_gimple_omp_oacc (branch_ctx))
18061 || (label_ctx && is_gimple_omp_oacc (label_ctx)))
18063 gcc_checking_assert (kind == NULL);
18064 kind = "OpenACC";
18067 if (kind == NULL)
18069 gcc_checking_assert (flag_openmp);
18070 kind = "OpenMP";
18074 Previously we kept track of the label's entire context in diagnose_sb_[12]
18075 so we could traverse it and issue a correct "exit" or "enter" error
18076 message upon a structured block violation.
18078 We built the context by building a list with tree_cons'ing, but there is
18079 no easy counterpart in gimple tuples. It seems like far too much work
18080 for issuing exit/enter error messages. If someone really misses the
18081 distinct error message... patches welcome.
18084 #if 0
18085 /* Try to avoid confusing the user by producing an error message
18086 with correct "exit" or "enter" verbiage. We prefer "exit"
18087 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
18088 if (branch_ctx == NULL)
18089 exit_p = false;
18090 else
18092 while (label_ctx)
18094 if (TREE_VALUE (label_ctx) == branch_ctx)
18096 exit_p = false;
18097 break;
18099 label_ctx = TREE_CHAIN (label_ctx);
18103 if (exit_p)
18104 error ("invalid exit from %s structured block", kind);
18105 else
18106 error ("invalid entry to %s structured block", kind);
18107 #endif
18109 /* If it's obvious we have an invalid entry, be specific about the error. */
18110 if (branch_ctx == NULL)
18111 error ("invalid entry to %s structured block", kind);
18112 else
18114 /* Otherwise, be vague and lazy, but efficient. */
18115 error ("invalid branch to/from %s structured block", kind);
18118 gsi_replace (gsi_p, gimple_build_nop (), false);
18119 return true;
18122 /* Pass 1: Create a minimal tree of structured blocks, and record
18123 where each label is found. */
18125 static tree
18126 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
18127 struct walk_stmt_info *wi)
18129 gimple *context = (gimple *) wi->info;
18130 gimple *inner_context;
18131 gimple *stmt = gsi_stmt (*gsi_p);
18133 *handled_ops_p = true;
18135 switch (gimple_code (stmt))
18137 WALK_SUBSTMTS;
18139 case GIMPLE_OMP_PARALLEL:
18140 case GIMPLE_OMP_TASK:
18141 case GIMPLE_OMP_SECTIONS:
18142 case GIMPLE_OMP_SINGLE:
18143 case GIMPLE_OMP_SECTION:
18144 case GIMPLE_OMP_MASTER:
18145 case GIMPLE_OMP_ORDERED:
18146 case GIMPLE_OMP_CRITICAL:
18147 case GIMPLE_OMP_TARGET:
18148 case GIMPLE_OMP_TEAMS:
18149 case GIMPLE_OMP_TASKGROUP:
18150 /* The minimal context here is just the current OMP construct. */
18151 inner_context = stmt;
18152 wi->info = inner_context;
18153 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
18154 wi->info = context;
18155 break;
18157 case GIMPLE_OMP_FOR:
18158 inner_context = stmt;
18159 wi->info = inner_context;
18160 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
18161 walk them. */
18162 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
18163 diagnose_sb_1, NULL, wi);
18164 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
18165 wi->info = context;
18166 break;
18168 case GIMPLE_LABEL:
18169 splay_tree_insert (all_labels,
18170 (splay_tree_key) gimple_label_label (
18171 as_a <glabel *> (stmt)),
18172 (splay_tree_value) context);
18173 break;
18175 default:
18176 break;
18179 return NULL_TREE;
18182 /* Pass 2: Check each branch and see if its context differs from that of
18183 the destination label's context. */
18185 static tree
18186 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
18187 struct walk_stmt_info *wi)
18189 gimple *context = (gimple *) wi->info;
18190 splay_tree_node n;
18191 gimple *stmt = gsi_stmt (*gsi_p);
18193 *handled_ops_p = true;
18195 switch (gimple_code (stmt))
18197 WALK_SUBSTMTS;
18199 case GIMPLE_OMP_PARALLEL:
18200 case GIMPLE_OMP_TASK:
18201 case GIMPLE_OMP_SECTIONS:
18202 case GIMPLE_OMP_SINGLE:
18203 case GIMPLE_OMP_SECTION:
18204 case GIMPLE_OMP_MASTER:
18205 case GIMPLE_OMP_ORDERED:
18206 case GIMPLE_OMP_CRITICAL:
18207 case GIMPLE_OMP_TARGET:
18208 case GIMPLE_OMP_TEAMS:
18209 case GIMPLE_OMP_TASKGROUP:
18210 wi->info = stmt;
18211 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
18212 wi->info = context;
18213 break;
18215 case GIMPLE_OMP_FOR:
18216 wi->info = stmt;
18217 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
18218 walk them. */
18219 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
18220 diagnose_sb_2, NULL, wi);
18221 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
18222 wi->info = context;
18223 break;
18225 case GIMPLE_COND:
18227 gcond *cond_stmt = as_a <gcond *> (stmt);
18228 tree lab = gimple_cond_true_label (cond_stmt);
18229 if (lab)
18231 n = splay_tree_lookup (all_labels,
18232 (splay_tree_key) lab);
18233 diagnose_sb_0 (gsi_p, context,
18234 n ? (gimple *) n->value : NULL);
18236 lab = gimple_cond_false_label (cond_stmt);
18237 if (lab)
18239 n = splay_tree_lookup (all_labels,
18240 (splay_tree_key) lab);
18241 diagnose_sb_0 (gsi_p, context,
18242 n ? (gimple *) n->value : NULL);
18245 break;
18247 case GIMPLE_GOTO:
18249 tree lab = gimple_goto_dest (stmt);
18250 if (TREE_CODE (lab) != LABEL_DECL)
18251 break;
18253 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
18254 diagnose_sb_0 (gsi_p, context, n ? (gimple *) n->value : NULL);
18256 break;
18258 case GIMPLE_SWITCH:
18260 gswitch *switch_stmt = as_a <gswitch *> (stmt);
18261 unsigned int i;
18262 for (i = 0; i < gimple_switch_num_labels (switch_stmt); ++i)
18264 tree lab = CASE_LABEL (gimple_switch_label (switch_stmt, i));
18265 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
18266 if (n && diagnose_sb_0 (gsi_p, context, (gimple *) n->value))
18267 break;
18270 break;
18272 case GIMPLE_RETURN:
18273 diagnose_sb_0 (gsi_p, context, NULL);
18274 break;
18276 default:
18277 break;
18280 return NULL_TREE;
18283 /* Called from tree-cfg.c::make_edges to create cfg edges for all relevant
18284 GIMPLE_* codes. */
18285 bool
18286 make_gimple_omp_edges (basic_block bb, struct omp_region **region,
18287 int *region_idx)
18289 gimple *last = last_stmt (bb);
18290 enum gimple_code code = gimple_code (last);
18291 struct omp_region *cur_region = *region;
18292 bool fallthru = false;
18294 switch (code)
18296 case GIMPLE_OMP_PARALLEL:
18297 case GIMPLE_OMP_TASK:
18298 case GIMPLE_OMP_FOR:
18299 case GIMPLE_OMP_SINGLE:
18300 case GIMPLE_OMP_TEAMS:
18301 case GIMPLE_OMP_MASTER:
18302 case GIMPLE_OMP_TASKGROUP:
18303 case GIMPLE_OMP_CRITICAL:
18304 case GIMPLE_OMP_SECTION:
18305 case GIMPLE_OMP_GRID_BODY:
18306 cur_region = new_omp_region (bb, code, cur_region);
18307 fallthru = true;
18308 break;
18310 case GIMPLE_OMP_ORDERED:
18311 cur_region = new_omp_region (bb, code, cur_region);
18312 fallthru = true;
18313 if (find_omp_clause (gimple_omp_ordered_clauses
18314 (as_a <gomp_ordered *> (last)),
18315 OMP_CLAUSE_DEPEND))
18316 cur_region = cur_region->outer;
18317 break;
18319 case GIMPLE_OMP_TARGET:
18320 cur_region = new_omp_region (bb, code, cur_region);
18321 fallthru = true;
18322 switch (gimple_omp_target_kind (last))
18324 case GF_OMP_TARGET_KIND_REGION:
18325 case GF_OMP_TARGET_KIND_DATA:
18326 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
18327 case GF_OMP_TARGET_KIND_OACC_KERNELS:
18328 case GF_OMP_TARGET_KIND_OACC_DATA:
18329 case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
18330 break;
18331 case GF_OMP_TARGET_KIND_UPDATE:
18332 case GF_OMP_TARGET_KIND_ENTER_DATA:
18333 case GF_OMP_TARGET_KIND_EXIT_DATA:
18334 case GF_OMP_TARGET_KIND_OACC_UPDATE:
18335 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
18336 case GF_OMP_TARGET_KIND_OACC_DECLARE:
18337 cur_region = cur_region->outer;
18338 break;
18339 default:
18340 gcc_unreachable ();
18342 break;
18344 case GIMPLE_OMP_SECTIONS:
18345 cur_region = new_omp_region (bb, code, cur_region);
18346 fallthru = true;
18347 break;
18349 case GIMPLE_OMP_SECTIONS_SWITCH:
18350 fallthru = false;
18351 break;
18353 case GIMPLE_OMP_ATOMIC_LOAD:
18354 case GIMPLE_OMP_ATOMIC_STORE:
18355 fallthru = true;
18356 break;
18358 case GIMPLE_OMP_RETURN:
18359 /* In the case of a GIMPLE_OMP_SECTION, the edge will go
18360 somewhere other than the next block. This will be
18361 created later. */
18362 cur_region->exit = bb;
18363 if (cur_region->type == GIMPLE_OMP_TASK)
18364 /* Add an edge corresponding to not scheduling the task
18365 immediately. */
18366 make_edge (cur_region->entry, bb, EDGE_ABNORMAL);
18367 fallthru = cur_region->type != GIMPLE_OMP_SECTION;
18368 cur_region = cur_region->outer;
18369 break;
18371 case GIMPLE_OMP_CONTINUE:
18372 cur_region->cont = bb;
18373 switch (cur_region->type)
18375 case GIMPLE_OMP_FOR:
18376 /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
18377 succs edges as abnormal to prevent splitting
18378 them. */
18379 single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
18380 /* Make the loopback edge. */
18381 make_edge (bb, single_succ (cur_region->entry),
18382 EDGE_ABNORMAL);
18384 /* Create an edge from GIMPLE_OMP_FOR to exit, which
18385 corresponds to the case that the body of the loop
18386 is not executed at all. */
18387 make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
18388 make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
18389 fallthru = false;
18390 break;
18392 case GIMPLE_OMP_SECTIONS:
18393 /* Wire up the edges into and out of the nested sections. */
18395 basic_block switch_bb = single_succ (cur_region->entry);
18397 struct omp_region *i;
18398 for (i = cur_region->inner; i ; i = i->next)
18400 gcc_assert (i->type == GIMPLE_OMP_SECTION);
18401 make_edge (switch_bb, i->entry, 0);
18402 make_edge (i->exit, bb, EDGE_FALLTHRU);
18405 /* Make the loopback edge to the block with
18406 GIMPLE_OMP_SECTIONS_SWITCH. */
18407 make_edge (bb, switch_bb, 0);
18409 /* Make the edge from the switch to exit. */
18410 make_edge (switch_bb, bb->next_bb, 0);
18411 fallthru = false;
18413 break;
18415 case GIMPLE_OMP_TASK:
18416 fallthru = true;
18417 break;
18419 default:
18420 gcc_unreachable ();
18422 break;
18424 default:
18425 gcc_unreachable ();
18428 if (*region != cur_region)
18430 *region = cur_region;
18431 if (cur_region)
18432 *region_idx = cur_region->entry->index;
18433 else
18434 *region_idx = 0;
18437 return fallthru;
18440 static unsigned int
18441 diagnose_omp_structured_block_errors (void)
18443 struct walk_stmt_info wi;
18444 gimple_seq body = gimple_body (current_function_decl);
18446 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
18448 memset (&wi, 0, sizeof (wi));
18449 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
18451 memset (&wi, 0, sizeof (wi));
18452 wi.want_locations = true;
18453 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
18455 gimple_set_body (current_function_decl, body);
18457 splay_tree_delete (all_labels);
18458 all_labels = NULL;
18460 return 0;
18463 namespace {
18465 const pass_data pass_data_diagnose_omp_blocks =
18467 GIMPLE_PASS, /* type */
18468 "*diagnose_omp_blocks", /* name */
18469 OPTGROUP_NONE, /* optinfo_flags */
18470 TV_NONE, /* tv_id */
18471 PROP_gimple_any, /* properties_required */
18472 0, /* properties_provided */
18473 0, /* properties_destroyed */
18474 0, /* todo_flags_start */
18475 0, /* todo_flags_finish */
18478 class pass_diagnose_omp_blocks : public gimple_opt_pass
18480 public:
18481 pass_diagnose_omp_blocks (gcc::context *ctxt)
18482 : gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
18485 /* opt_pass methods: */
18486 virtual bool gate (function *)
18488 return flag_cilkplus || flag_openacc || flag_openmp;
18490 virtual unsigned int execute (function *)
18492 return diagnose_omp_structured_block_errors ();
18495 }; // class pass_diagnose_omp_blocks
18497 } // anon namespace
18499 gimple_opt_pass *
18500 make_pass_diagnose_omp_blocks (gcc::context *ctxt)
18502 return new pass_diagnose_omp_blocks (ctxt);
18505 /* Helper function for omp_finish_file routine. Takes decls from V_DECLS and
18506 adds their addresses and sizes to constructor-vector V_CTOR. */
18507 static void
18508 add_decls_addresses_to_decl_constructor (vec<tree, va_gc> *v_decls,
18509 vec<constructor_elt, va_gc> *v_ctor)
18511 unsigned len = vec_safe_length (v_decls);
18512 for (unsigned i = 0; i < len; i++)
18514 tree it = (*v_decls)[i];
18515 bool is_var = VAR_P (it);
18516 bool is_link_var
18517 = is_var
18518 #ifdef ACCEL_COMPILER
18519 && DECL_HAS_VALUE_EXPR_P (it)
18520 #endif
18521 && lookup_attribute ("omp declare target link", DECL_ATTRIBUTES (it));
18523 tree size = NULL_TREE;
18524 if (is_var)
18525 size = fold_convert (const_ptr_type_node, DECL_SIZE_UNIT (it));
18527 tree addr;
18528 if (!is_link_var)
18529 addr = build_fold_addr_expr (it);
18530 else
18532 #ifdef ACCEL_COMPILER
18533 /* For "omp declare target link" vars add address of the pointer to
18534 the target table, instead of address of the var. */
18535 tree value_expr = DECL_VALUE_EXPR (it);
18536 tree link_ptr_decl = TREE_OPERAND (value_expr, 0);
18537 varpool_node::finalize_decl (link_ptr_decl);
18538 addr = build_fold_addr_expr (link_ptr_decl);
18539 #else
18540 addr = build_fold_addr_expr (it);
18541 #endif
18543 /* Most significant bit of the size marks "omp declare target link"
18544 vars in host and target tables. */
18545 unsigned HOST_WIDE_INT isize = tree_to_uhwi (size);
18546 isize |= 1ULL << (int_size_in_bytes (const_ptr_type_node)
18547 * BITS_PER_UNIT - 1);
18548 size = wide_int_to_tree (const_ptr_type_node, isize);
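/* Illustrative arithmetic, assuming 8-byte pointers: a 4-byte link
   variable gets isize = 4 | (1ULL << 63), so consumers of the table can
   test the top bit to recognize "declare target link" entries and mask
   it off to recover the real size.  */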
18551 CONSTRUCTOR_APPEND_ELT (v_ctor, NULL_TREE, addr);
18552 if (is_var)
18553 CONSTRUCTOR_APPEND_ELT (v_ctor, NULL_TREE, size);
18557 /* Create new symbols containing (address, size) pairs for global variables,
18558 marked with "omp declare target" attribute, as well as addresses of the
18559 functions that are outlined offloading regions. */
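/* A sketch of the result (decl names illustrative), for one offloaded
   function FN and one "omp declare target" variable V: the
   .offload_func_table array holds { (uintptr) &FN } and the
   .offload_var_table array holds { (uintptr) &V, sizeof (V) }, each element
   a pointer-sized integer, placed in the OFFLOAD_FUNC_TABLE_SECTION_NAME
   and OFFLOAD_VAR_TABLE_SECTION_NAME sections so the linker can simply
   concatenate the per-object tables.  */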
18560 void
18561 omp_finish_file (void)
18563 unsigned num_funcs = vec_safe_length (offload_funcs);
18564 unsigned num_vars = vec_safe_length (offload_vars);
18566 if (num_funcs == 0 && num_vars == 0)
18567 return;
18569 if (targetm_common.have_named_sections)
18571 vec<constructor_elt, va_gc> *v_f, *v_v;
18572 vec_alloc (v_f, num_funcs);
18573 vec_alloc (v_v, num_vars * 2);
18575 add_decls_addresses_to_decl_constructor (offload_funcs, v_f);
18576 add_decls_addresses_to_decl_constructor (offload_vars, v_v);
18578 tree vars_decl_type = build_array_type_nelts (pointer_sized_int_node,
18579 num_vars * 2);
18580 tree funcs_decl_type = build_array_type_nelts (pointer_sized_int_node,
18581 num_funcs);
18582 SET_TYPE_ALIGN (vars_decl_type, TYPE_ALIGN (pointer_sized_int_node));
18583 SET_TYPE_ALIGN (funcs_decl_type, TYPE_ALIGN (pointer_sized_int_node));
18584 tree ctor_v = build_constructor (vars_decl_type, v_v);
18585 tree ctor_f = build_constructor (funcs_decl_type, v_f);
18586 TREE_CONSTANT (ctor_v) = TREE_CONSTANT (ctor_f) = 1;
18587 TREE_STATIC (ctor_v) = TREE_STATIC (ctor_f) = 1;
18588 tree funcs_decl = build_decl (UNKNOWN_LOCATION, VAR_DECL,
18589 get_identifier (".offload_func_table"),
18590 funcs_decl_type);
18591 tree vars_decl = build_decl (UNKNOWN_LOCATION, VAR_DECL,
18592 get_identifier (".offload_var_table"),
18593 vars_decl_type);
18594 TREE_STATIC (funcs_decl) = TREE_STATIC (vars_decl) = 1;
18595 /* Do not align tables more than TYPE_ALIGN (pointer_sized_int_node),
18596 otherwise a joint table in a binary will contain padding between
18597 tables from multiple object files. */
18598 DECL_USER_ALIGN (funcs_decl) = DECL_USER_ALIGN (vars_decl) = 1;
18599 SET_DECL_ALIGN (funcs_decl, TYPE_ALIGN (funcs_decl_type));
18600 SET_DECL_ALIGN (vars_decl, TYPE_ALIGN (vars_decl_type));
18601 DECL_INITIAL (funcs_decl) = ctor_f;
18602 DECL_INITIAL (vars_decl) = ctor_v;
18603 set_decl_section_name (funcs_decl, OFFLOAD_FUNC_TABLE_SECTION_NAME);
18604 set_decl_section_name (vars_decl, OFFLOAD_VAR_TABLE_SECTION_NAME);
18606 varpool_node::finalize_decl (vars_decl);
18607 varpool_node::finalize_decl (funcs_decl);
18609 else
18611 for (unsigned i = 0; i < num_funcs; i++)
18613 tree it = (*offload_funcs)[i];
18614 targetm.record_offload_symbol (it);
18616 for (unsigned i = 0; i < num_vars; i++)
18618 tree it = (*offload_vars)[i];
18619 targetm.record_offload_symbol (it);
18624 /* Find the number of threads (POS = false), or thread number (POS =
18625 true) for an OpenACC region partitioned as MASK. Setup code
18626 required for the calculation is added to SEQ. */
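/* A sketch of the computation (names illustrative): for MASK covering gang
   and vector with POS=true, the emitted SEQ evaluates roughly

       res = GOACC_DIM_POS (gang) * GOACC_DIM_SIZE (vector)
             + GOACC_DIM_POS (vector);

   i.e. each outer position is scaled by the sizes of the inner dimensions.
   With POS=false the same walk simply multiplies the selected dimension
   sizes together.  */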
18628 static tree
18629 oacc_thread_numbers (bool pos, int mask, gimple_seq *seq)
18631 tree res = pos ? NULL_TREE : build_int_cst (unsigned_type_node, 1);
18632 unsigned ix;
18634 /* Start at gang level, and examine relevant dimension indices. */
18635 for (ix = GOMP_DIM_GANG; ix != GOMP_DIM_MAX; ix++)
18636 if (GOMP_DIM_MASK (ix) & mask)
18638 tree arg = build_int_cst (unsigned_type_node, ix);
18640 if (res)
18642 /* We had an outer index, so scale that by the size of
18643 this dimension. */
18644 tree n = create_tmp_var (integer_type_node);
18645 gimple *call
18646 = gimple_build_call_internal (IFN_GOACC_DIM_SIZE, 1, arg);
18648 gimple_call_set_lhs (call, n);
18649 gimple_seq_add_stmt (seq, call);
18650 res = fold_build2 (MULT_EXPR, integer_type_node, res, n);
18652 if (pos)
18654 /* Determine index in this dimension. */
18655 tree id = create_tmp_var (integer_type_node);
18656 gimple *call = gimple_build_call_internal
18657 (IFN_GOACC_DIM_POS, 1, arg);
18659 gimple_call_set_lhs (call, id);
18660 gimple_seq_add_stmt (seq, call);
18661 if (res)
18662 res = fold_build2 (PLUS_EXPR, integer_type_node, res, id);
18663 else
18664 res = id;
18668 if (res == NULL_TREE)
18669 res = integer_zero_node;
18671 return res;
18674 /* Transform IFN_GOACC_LOOP calls to actual code. See
18675 expand_oacc_for for where these are generated. At the vector
18676 level, we stride loops, such that each member of a warp will
18677 operate on adjacent iterations. At the worker and gang level,
18678 each gang/warp executes a set of contiguous iterations. Chunking
18679 can override this such that each iteration engine executes a
18680 contiguous chunk, and then moves on to stride to the next chunk. */
18682 static void
18683 oacc_xform_loop (gcall *call)
18685 gimple_stmt_iterator gsi = gsi_for_stmt (call);
18686 enum ifn_goacc_loop_kind code
18687 = (enum ifn_goacc_loop_kind) TREE_INT_CST_LOW (gimple_call_arg (call, 0));
18688 tree dir = gimple_call_arg (call, 1);
18689 tree range = gimple_call_arg (call, 2);
18690 tree step = gimple_call_arg (call, 3);
18691 tree chunk_size = NULL_TREE;
18692 unsigned mask = (unsigned) TREE_INT_CST_LOW (gimple_call_arg (call, 5));
18693 tree lhs = gimple_call_lhs (call);
18694 tree type = TREE_TYPE (lhs);
18695 tree diff_type = TREE_TYPE (range);
18696 tree r = NULL_TREE;
18697 gimple_seq seq = NULL;
18698 bool chunking = false, striding = true;
18699 unsigned outer_mask = mask & (~mask + 1); // Outermost partitioning
18700 unsigned inner_mask = mask & ~outer_mask; // Inner partitioning (if any)
18702 #ifdef ACCEL_COMPILER
18703 chunk_size = gimple_call_arg (call, 4);
18704 if (integer_minus_onep (chunk_size) /* Force static allocation. */
18705 || integer_zerop (chunk_size)) /* Default (also static). */
18707 /* If we're at the gang level, we want each to execute a
18708 contiguous run of iterations. Otherwise we want each element
18709 to stride. */
18710 striding = !(outer_mask & GOMP_DIM_MASK (GOMP_DIM_GANG));
18711 chunking = false;
18713 else
18715 /* Chunk of size 1 is striding. */
18716 striding = integer_onep (chunk_size);
18717 chunking = !striding;
18719 #endif
18721 /* striding=true, chunking=true
18722 -> invalid.
18723 striding=true, chunking=false
18724 -> chunks=1
18725 striding=false,chunking=true
18726 -> chunks=ceil (range/(chunksize*threads*step))
18727 striding=false,chunking=false
18728 -> chunk_size=ceil(range/(threads*step)),chunks=1 */
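/* Worked numbers (assumed, not from any particular target): on the device
   compiler, an increasing (dir=1) gang-partitioned loop with range=1000,
   step=1, 8 gangs and chunk size 0 takes the striding=false,
   chunking=false path, giving chunk_size = (1000 - 1 + 8) / 8 = 125 and
   CHUNKS = 1, so each gang runs one contiguous block of 125 iterations.  */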
18729 push_gimplify_context (true);
18731 switch (code)
18733 default: gcc_unreachable ();
18735 case IFN_GOACC_LOOP_CHUNKS:
18736 if (!chunking)
18737 r = build_int_cst (type, 1);
18738 else
18740 /* chunk_max
18741 = (range - dir) / (chunks * step * num_threads) + dir */
18742 tree per = oacc_thread_numbers (false, mask, &seq);
18743 per = fold_convert (type, per);
18744 chunk_size = fold_convert (type, chunk_size);
18745 per = fold_build2 (MULT_EXPR, type, per, chunk_size);
18746 per = fold_build2 (MULT_EXPR, type, per, step);
18747 r = build2 (MINUS_EXPR, type, range, dir);
18748 r = build2 (PLUS_EXPR, type, r, per);
18749 r = build2 (TRUNC_DIV_EXPR, type, r, per);
18751 break;
18753 case IFN_GOACC_LOOP_STEP:
18755 /* If striding, step by the entire compute volume, otherwise
18756 step by the inner volume. */
18757 unsigned volume = striding ? mask : inner_mask;
18759 r = oacc_thread_numbers (false, volume, &seq);
18760 r = build2 (MULT_EXPR, type, fold_convert (type, r), step);
18762 break;
18764 case IFN_GOACC_LOOP_OFFSET:
18765 if (striding)
18767 r = oacc_thread_numbers (true, mask, &seq);
18768 r = fold_convert (diff_type, r);
18770 else
18772 tree inner_size = oacc_thread_numbers (false, inner_mask, &seq);
18773 tree outer_size = oacc_thread_numbers (false, outer_mask, &seq);
18774 tree volume = fold_build2 (MULT_EXPR, TREE_TYPE (inner_size),
18775 inner_size, outer_size);
18777 volume = fold_convert (diff_type, volume);
18778 if (chunking)
18779 chunk_size = fold_convert (diff_type, chunk_size);
18780 else
18782 tree per = fold_build2 (MULT_EXPR, diff_type, volume, step);
18784 chunk_size = build2 (MINUS_EXPR, diff_type, range, dir);
18785 chunk_size = build2 (PLUS_EXPR, diff_type, chunk_size, per);
18786 chunk_size = build2 (TRUNC_DIV_EXPR, diff_type, chunk_size, per);
18789 tree span = build2 (MULT_EXPR, diff_type, chunk_size,
18790 fold_convert (diff_type, inner_size));
18791 r = oacc_thread_numbers (true, outer_mask, &seq);
18792 r = fold_convert (diff_type, r);
18793 r = build2 (MULT_EXPR, diff_type, r, span);
18795 tree inner = oacc_thread_numbers (true, inner_mask, &seq);
18796 inner = fold_convert (diff_type, inner);
18797 r = fold_build2 (PLUS_EXPR, diff_type, r, inner);
18799 if (chunking)
18801 tree chunk = fold_convert (diff_type, gimple_call_arg (call, 6));
18802 tree per
18803 = fold_build2 (MULT_EXPR, diff_type, volume, chunk_size);
18804 per = build2 (MULT_EXPR, diff_type, per, chunk);
18806 r = build2 (PLUS_EXPR, diff_type, r, per);
18809 r = fold_build2 (MULT_EXPR, diff_type, r, step);
18810 if (type != diff_type)
18811 r = fold_convert (type, r);
18812 break;
18814 case IFN_GOACC_LOOP_BOUND:
18815 if (striding)
18816 r = range;
18817 else
18819 tree inner_size = oacc_thread_numbers (false, inner_mask, &seq);
18820 tree outer_size = oacc_thread_numbers (false, outer_mask, &seq);
18821 tree volume = fold_build2 (MULT_EXPR, TREE_TYPE (inner_size),
18822 inner_size, outer_size);
18824 volume = fold_convert (diff_type, volume);
18825 if (chunking)
18826 chunk_size = fold_convert (diff_type, chunk_size);
18827 else
18829 tree per = fold_build2 (MULT_EXPR, diff_type, volume, step);
18831 chunk_size = build2 (MINUS_EXPR, diff_type, range, dir);
18832 chunk_size = build2 (PLUS_EXPR, diff_type, chunk_size, per);
18833 chunk_size = build2 (TRUNC_DIV_EXPR, diff_type, chunk_size, per);
18836 tree span = build2 (MULT_EXPR, diff_type, chunk_size,
18837 fold_convert (diff_type, inner_size));
18839 r = fold_build2 (MULT_EXPR, diff_type, span, step);
18841 tree offset = gimple_call_arg (call, 6);
18842 r = build2 (PLUS_EXPR, diff_type, r,
18843 fold_convert (diff_type, offset));
18844 r = build2 (integer_onep (dir) ? MIN_EXPR : MAX_EXPR,
18845 diff_type, r, range);
18847 if (diff_type != type)
18848 r = fold_convert (type, r);
18849 break;
18852 gimplify_assign (lhs, r, &seq);
18854 pop_gimplify_context (NULL);
18856 gsi_replace_with_seq (&gsi, seq, true);
18859 /* Default partitioned and minimum partitioned dimensions. */
18861 static int oacc_default_dims[GOMP_DIM_MAX];
18862 static int oacc_min_dims[GOMP_DIM_MAX];
18864 /* Parse the default dimension parameter. This is a set of
18865 :-separated optional compute dimensions. Each specified dimension
18866 is a positive integer. When device type support is added, it is
18867 planned to be a comma separated list of such compute dimensions,
18868 with all but the first prefixed by the colon-terminated device
18869 type. */
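/* For example, "-fopenacc-dim=32:4:128" requests 32 gangs, 4 workers and a
   vector length of 128, while "-fopenacc-dim=::64" overrides only the
   vector length and leaves gangs and workers at their defaults (-1,
   i.e. chosen later).  */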
18871 static void
18872 oacc_parse_default_dims (const char *dims)
18874 int ix;
18876 for (ix = GOMP_DIM_MAX; ix--;)
18878 oacc_default_dims[ix] = -1;
18879 oacc_min_dims[ix] = 1;
18882 #ifndef ACCEL_COMPILER
18883 /* Cannot be overridden on the host. */
18884 dims = NULL;
18885 #endif
18886 if (dims)
18888 const char *pos = dims;
18890 for (ix = 0; *pos && ix != GOMP_DIM_MAX; ix++)
18892 if (ix)
18894 if (*pos != ':')
18895 goto malformed;
18896 pos++;
18899 if (*pos != ':')
18901 long val;
18902 const char *eptr;
18904 errno = 0;
18905 val = strtol (pos, CONST_CAST (char **, &eptr), 10);
18906 if (errno || val <= 0 || (int) val != val)
18907 goto malformed;
18908 pos = eptr;
18909 oacc_default_dims[ix] = (int) val;
18912 if (*pos)
18914 malformed:
18915 error_at (UNKNOWN_LOCATION,
18916 "-fopenacc-dim operand is malformed at '%s'", pos);
18920 /* Allow the backend to validate the dimensions. */
18921 targetm.goacc.validate_dims (NULL_TREE, oacc_default_dims, -1);
18922 targetm.goacc.validate_dims (NULL_TREE, oacc_min_dims, -2);
18925 /* Validate and update the dimensions for offloaded FN. ATTRS is the
18926 raw attribute. DIMS is an array of dimensions, which is filled in.
18927 LEVEL is the partitioning level of a routine, or -1 for an offload
18928 region itself. USED is the mask of partitioned execution in the
18929 function. */
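/* For instance, if USED only has the vector bit set, any gang or worker
   dimension still unset after the targetm.goacc.validate_dims hook is
   defaulted below to the oacc_min_dims entry (1), while an unset vector
   dimension falls back to the global oacc_default_dims value.  */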
18931 static void
18932 oacc_validate_dims (tree fn, tree attrs, int *dims, int level, unsigned used)
18934 tree purpose[GOMP_DIM_MAX];
18935 unsigned ix;
18936 tree pos = TREE_VALUE (attrs);
18937 bool is_kernel = oacc_fn_attrib_kernels_p (attrs);
18939 /* Make sure the attribute creator attached the dimension
18940 information. */
18941 gcc_assert (pos);
18943 for (ix = 0; ix != GOMP_DIM_MAX; ix++)
18945 purpose[ix] = TREE_PURPOSE (pos);
18946 tree val = TREE_VALUE (pos);
18947 dims[ix] = val ? TREE_INT_CST_LOW (val) : -1;
18948 pos = TREE_CHAIN (pos);
18951 bool changed = targetm.goacc.validate_dims (fn, dims, level);
18953 /* Default anything left to 1 or a partitioned default. */
18954 for (ix = 0; ix != GOMP_DIM_MAX; ix++)
18955 if (dims[ix] < 0)
18957 /* The OpenACC spec says 'If the [num_gangs] clause is not
18958 specified, an implementation-defined default will be used;
18959 the default may depend on the code within the construct.'
18960 (2.5.6). Thus an implementation is free to choose
18961 non-unity default for a parallel region that doesn't have
18962 any gang-partitioned loops. However, it appears that there
18963 is a sufficient body of user code that expects non-gang
18964 partitioned regions to not execute in gang-redundant mode.
18965 So we (a) don't warn about the non-portability and (b) pick
18966 the minimum permissible dimension size when there is no
18967 partitioned execution. Otherwise we pick the global
18968 default for the dimension, which the user can control. The
18969 same wording and logic applies to num_workers and
18970 vector_length, however the worker- or vector- single
18971 execution doesn't have the same impact as gang-redundant
18972 execution. (If the minimum gang-level partitioning is not 1,
18973 the target is probably too confusing.) */
18974 dims[ix] = (used & GOMP_DIM_MASK (ix)
18975 ? oacc_default_dims[ix] : oacc_min_dims[ix]);
18976 changed = true;
18979 if (changed)
18981 /* Replace the attribute with new values. */
18982 pos = NULL_TREE;
18983 for (ix = GOMP_DIM_MAX; ix--;)
18985 pos = tree_cons (purpose[ix],
18986 build_int_cst (integer_type_node, dims[ix]),
18987 pos);
18988 if (is_kernel)
18989 TREE_PUBLIC (pos) = 1;
18991 replace_oacc_fn_attrib (fn, pos);
18995 /* Create an empty OpenACC loop structure at LOC. */
18997 static oacc_loop *
18998 new_oacc_loop_raw (oacc_loop *parent, location_t loc)
19000 oacc_loop *loop = XCNEW (oacc_loop);
19002 loop->parent = parent;
19003 loop->child = loop->sibling = NULL;
19005 if (parent)
19007 loop->sibling = parent->child;
19008 parent->child = loop;
19011 loop->loc = loc;
19012 loop->marker = NULL;
19013 memset (loop->heads, 0, sizeof (loop->heads));
19014 memset (loop->tails, 0, sizeof (loop->tails));
19015 loop->routine = NULL_TREE;
19017 loop->mask = loop->flags = loop->inner = 0;
19018 loop->ifns = 0;
19019 loop->chunk_size = 0;
19020 loop->head_end = NULL;
19022 return loop;
19025 /* Create an outermost, dummy OpenACC loop for offloaded function
19026 DECL. */
19028 static oacc_loop *
19029 new_oacc_loop_outer (tree decl)
19031 return new_oacc_loop_raw (NULL, DECL_SOURCE_LOCATION (decl));
19034 /* Start a new OpenACC loop structure beginning at head marker HEAD.
19035 Link into PARENT loop. Return the new loop. */
19037 static oacc_loop *
19038 new_oacc_loop (oacc_loop *parent, gcall *marker)
19040 oacc_loop *loop = new_oacc_loop_raw (parent, gimple_location (marker));
19042 loop->marker = marker;
19044 /* TODO: This is where device_type flattening would occur for the loop
19045 flags. */
19047 loop->flags = TREE_INT_CST_LOW (gimple_call_arg (marker, 3));
19049 tree chunk_size = integer_zero_node;
19050 if (loop->flags & OLF_GANG_STATIC)
19051 chunk_size = gimple_call_arg (marker, 4);
19052 loop->chunk_size = chunk_size;
19054 return loop;
19057 /* Create a dummy loop encompassing a call to an OpenACC routine.
19058 Extract the routine's partitioning requirements. */
19060 static void
19061 new_oacc_loop_routine (oacc_loop *parent, gcall *call, tree decl, tree attrs)
19063 oacc_loop *loop = new_oacc_loop_raw (parent, gimple_location (call));
19064 int level = oacc_fn_attrib_level (attrs);
19066 gcc_assert (level >= 0);
19068 loop->marker = call;
19069 loop->routine = decl;
19070 loop->mask = ((GOMP_DIM_MASK (GOMP_DIM_MAX) - 1)
19071 ^ (GOMP_DIM_MASK (level) - 1));
19074 /* Finish off the current OpenACC loop ending at tail marker TAIL.
19075 Return the parent loop. */
19077 static oacc_loop *
19078 finish_oacc_loop (oacc_loop *loop)
19080 /* If the loop has been collapsed, don't partition it. */
19081 if (!loop->ifns)
19082 loop->mask = loop->flags = 0;
19083 return loop->parent;
19086 /* Free all OpenACC loop structures within LOOP (inclusive). */
19088 static void
19089 free_oacc_loop (oacc_loop *loop)
19091 if (loop->sibling)
19092 free_oacc_loop (loop->sibling);
19093 if (loop->child)
19094 free_oacc_loop (loop->child);
19096 free (loop);
19099 /* Dump out the OpenACC loop head or tail beginning at FROM. */
19101 static void
19102 dump_oacc_loop_part (FILE *file, gcall *from, int depth,
19103 const char *title, int level)
19105 enum ifn_unique_kind kind
19106 = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (from, 0));
19108 fprintf (file, "%*s%s-%d:\n", depth * 2, "", title, level);
19109 for (gimple_stmt_iterator gsi = gsi_for_stmt (from);;)
19111 gimple *stmt = gsi_stmt (gsi);
19113 if (gimple_call_internal_p (stmt, IFN_UNIQUE))
19115 enum ifn_unique_kind k
19116 = ((enum ifn_unique_kind) TREE_INT_CST_LOW
19117 (gimple_call_arg (stmt, 0)));
19119 if (k == kind && stmt != from)
19120 break;
19122 print_gimple_stmt (file, stmt, depth * 2 + 2, 0);
19124 gsi_next (&gsi);
19125 while (gsi_end_p (gsi))
19126 gsi = gsi_start_bb (single_succ (gsi_bb (gsi)));
19130 /* Dump OpenACC loops LOOP, its siblings and its children. */
19132 static void
19133 dump_oacc_loop (FILE *file, oacc_loop *loop, int depth)
19135 int ix;
19137 fprintf (file, "%*sLoop %x(%x) %s:%u\n", depth * 2, "",
19138 loop->flags, loop->mask,
19139 LOCATION_FILE (loop->loc), LOCATION_LINE (loop->loc));
19141 if (loop->marker)
19142 print_gimple_stmt (file, loop->marker, depth * 2, 0);
19144 if (loop->routine)
19145 fprintf (file, "%*sRoutine %s:%u:%s\n",
19146 depth * 2, "", DECL_SOURCE_FILE (loop->routine),
19147 DECL_SOURCE_LINE (loop->routine),
19148 IDENTIFIER_POINTER (DECL_NAME (loop->routine)));
19150 for (ix = GOMP_DIM_GANG; ix != GOMP_DIM_MAX; ix++)
19151 if (loop->heads[ix])
19152 dump_oacc_loop_part (file, loop->heads[ix], depth, "Head", ix);
19153 for (ix = GOMP_DIM_MAX; ix--;)
19154 if (loop->tails[ix])
19155 dump_oacc_loop_part (file, loop->tails[ix], depth, "Tail", ix);
19157 if (loop->child)
19158 dump_oacc_loop (file, loop->child, depth + 1);
19159 if (loop->sibling)
19160 dump_oacc_loop (file, loop->sibling, depth);
19163 void debug_oacc_loop (oacc_loop *);
19165 /* Dump loops to stderr. */
19167 DEBUG_FUNCTION void
19168 debug_oacc_loop (oacc_loop *loop)
19170 dump_oacc_loop (stderr, loop, 0);
19173 /* DFS walk of basic blocks BB onwards, creating OpenACC loop
19174 structures as we go. By construction these loops are properly
19175 nested. */
19177 static void
19178 oacc_loop_discover_walk (oacc_loop *loop, basic_block bb)
19180 int marker = 0;
19181 int remaining = 0;
19183 if (bb->flags & BB_VISITED)
19184 return;
19186 follow:
19187 bb->flags |= BB_VISITED;
19189 /* Scan for loop markers. */
19190 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
19191 gsi_next (&gsi))
19193 gimple *stmt = gsi_stmt (gsi);
19195 if (!is_gimple_call (stmt))
19196 continue;
19198 gcall *call = as_a <gcall *> (stmt);
19200 /* If this is a routine, make a dummy loop for it. */
19201 if (tree decl = gimple_call_fndecl (call))
19202 if (tree attrs = get_oacc_fn_attrib (decl))
19204 gcc_assert (!marker);
19205 new_oacc_loop_routine (loop, call, decl, attrs);
19208 if (!gimple_call_internal_p (call))
19209 continue;
19211 switch (gimple_call_internal_fn (call))
19213 default:
19214 break;
19216 case IFN_GOACC_LOOP:
19217 /* Count the goacc loop abstraction fns, to determine if the
19218 loop was collapsed already. */
19219 loop->ifns++;
19220 break;
19222 case IFN_UNIQUE:
19223 enum ifn_unique_kind kind
19224 = (enum ifn_unique_kind) (TREE_INT_CST_LOW
19225 (gimple_call_arg (call, 0)));
19226 if (kind == IFN_UNIQUE_OACC_HEAD_MARK
19227 || kind == IFN_UNIQUE_OACC_TAIL_MARK)
19229 if (gimple_call_num_args (call) == 2)
19231 gcc_assert (marker && !remaining);
19232 marker = 0;
19233 if (kind == IFN_UNIQUE_OACC_TAIL_MARK)
19234 loop = finish_oacc_loop (loop);
19235 else
19236 loop->head_end = call;
19238 else
19240 int count = TREE_INT_CST_LOW (gimple_call_arg (call, 2));
19242 if (!marker)
19244 if (kind == IFN_UNIQUE_OACC_HEAD_MARK)
19245 loop = new_oacc_loop (loop, call);
19246 remaining = count;
19248 gcc_assert (count == remaining);
19249 if (remaining)
19251 remaining--;
19252 if (kind == IFN_UNIQUE_OACC_HEAD_MARK)
19253 loop->heads[marker] = call;
19254 else
19255 loop->tails[remaining] = call;
19257 marker++;
19262 if (remaining || marker)
19264 bb = single_succ (bb);
19265 gcc_assert (single_pred_p (bb) && !(bb->flags & BB_VISITED));
19266 goto follow;
19269 /* Walk successor blocks. */
19270 edge e;
19271 edge_iterator ei;
19273 FOR_EACH_EDGE (e, ei, bb->succs)
19274 oacc_loop_discover_walk (loop, e->dest);
19277 /* LOOP is the first sibling. Reverse the order in place and return
19278 the new first sibling. Recurse to child loops. */
19280 static oacc_loop *
19281 oacc_loop_sibling_nreverse (oacc_loop *loop)
19283 oacc_loop *last = NULL;
19286 if (loop->child)
19287 loop->child = oacc_loop_sibling_nreverse (loop->child);
19289 oacc_loop *next = loop->sibling;
19290 loop->sibling = last;
19291 last = loop;
19292 loop = next;
19294 while (loop);
19296 return last;
19299 /* Discover the OpenACC loops marked up by HEAD and TAIL markers for
19300 the current function. */
19302 static oacc_loop *
19303 oacc_loop_discovery ()
19305 /* Clear basic block flags, in particular BB_VISITED which we're going to use
19306 in the following. */
19307 clear_bb_flags ();
19309 oacc_loop *top = new_oacc_loop_outer (current_function_decl);
19310 oacc_loop_discover_walk (top, ENTRY_BLOCK_PTR_FOR_FN (cfun));
19312 /* The siblings were constructed in reverse order, reverse them so
19313 that diagnostics come out in an unsurprising order. */
19314 top = oacc_loop_sibling_nreverse (top);
19316 return top;
19319 /* Transform the abstract internal function markers starting at FROM
19320 to be for partitioning level LEVEL. Stop when we meet another HEAD
19321 or TAIL marker. */
19323 static void
19324 oacc_loop_xform_head_tail (gcall *from, int level)
19326 enum ifn_unique_kind kind
19327 = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (from, 0));
19328 tree replacement = build_int_cst (unsigned_type_node, level);
19330 for (gimple_stmt_iterator gsi = gsi_for_stmt (from);;)
19332 gimple *stmt = gsi_stmt (gsi);
19334 if (gimple_call_internal_p (stmt, IFN_UNIQUE))
19336 enum ifn_unique_kind k
19337 = ((enum ifn_unique_kind)
19338 TREE_INT_CST_LOW (gimple_call_arg (stmt, 0)));
19340 if (k == IFN_UNIQUE_OACC_FORK || k == IFN_UNIQUE_OACC_JOIN)
19341 *gimple_call_arg_ptr (stmt, 2) = replacement;
19342 else if (k == kind && stmt != from)
19343 break;
19345 else if (gimple_call_internal_p (stmt, IFN_GOACC_REDUCTION))
19346 *gimple_call_arg_ptr (stmt, 3) = replacement;
19348 gsi_next (&gsi);
19349 while (gsi_end_p (gsi))
19350 gsi = gsi_start_bb (single_succ (gsi_bb (gsi)));
19354 /* Transform the IFN_GOACC_LOOP internal functions by providing the
19355 determined partitioning mask and chunking argument. END_MARKER
19356 points at the end IFN_HEAD_TAIL call introducing the loop. IFNS
19357 is the number of IFN_GOACC_LOOP calls for the loop. MASK_ARG is
19358 the replacement partitioning mask and CHUNK_ARG is the replacement
19359 chunking arg. */
19361 static void
19362 oacc_loop_xform_loop (gcall *end_marker, unsigned ifns,
19363 tree mask_arg, tree chunk_arg)
19365 gimple_stmt_iterator gsi = gsi_for_stmt (end_marker);
19367 gcc_checking_assert (ifns);
19368 for (;;)
19370 for (; !gsi_end_p (gsi); gsi_next (&gsi))
19372 gimple *stmt = gsi_stmt (gsi);
19374 if (!is_gimple_call (stmt))
19375 continue;
19377 gcall *call = as_a <gcall *> (stmt);
19379 if (!gimple_call_internal_p (call))
19380 continue;
19382 if (gimple_call_internal_fn (call) != IFN_GOACC_LOOP)
19383 continue;
19385 *gimple_call_arg_ptr (call, 5) = mask_arg;
19386 *gimple_call_arg_ptr (call, 4) = chunk_arg;
19387 ifns--;
19388 if (!ifns)
19389 return;
19392 /* The LOOP_BOUND ifn could be in the single successor
19393 block. */
19394 basic_block bb = single_succ (gsi_bb (gsi));
19395 gsi = gsi_start_bb (bb);
19399 /* Process the discovered OpenACC loops, setting the correct
19400 partitioning level etc. */
19402 static void
19403 oacc_loop_process (oacc_loop *loop)
19405 if (loop->child)
19406 oacc_loop_process (loop->child);
19408 if (loop->mask && !loop->routine)
19410 int ix;
19411 unsigned mask = loop->mask;
19412 unsigned dim = GOMP_DIM_GANG;
19413 tree mask_arg = build_int_cst (unsigned_type_node, mask);
19414 tree chunk_arg = loop->chunk_size;
19416 oacc_loop_xform_loop (loop->head_end, loop->ifns, mask_arg, chunk_arg);
19418 for (ix = 0; ix != GOMP_DIM_MAX && mask; ix++)
19420 while (!(GOMP_DIM_MASK (dim) & mask))
19421 dim++;
19423 oacc_loop_xform_head_tail (loop->heads[ix], dim);
19424 oacc_loop_xform_head_tail (loop->tails[ix], dim);
19426 mask ^= GOMP_DIM_MASK (dim);
19430 if (loop->sibling)
19431 oacc_loop_process (loop->sibling);
19434 /* Walk the OpenACC loop hierarchy checking and assigning the
19435 programmer-specified partitionings. OUTER_MASK is the partitioning
19436 this loop is contained within. Return mask of partitioning
19437 encountered. If any auto loops are discovered, set GOMP_DIM_MAX
19438 bit. */
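/* Illustrative source (assuming -fopenacc) that trips the checks below:

       #pragma acc parallel loop gang
       for (...)
         {
       #pragma acc loop gang
           for (...) ...
         }

   The inner loop requests gang parallelism already present in OUTER_MASK,
   so it gets "uses same OpenACC parallelism as containing loop" and its
   gang bit is stripped from this_mask.  */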
19440 static unsigned
19441 oacc_loop_fixed_partitions (oacc_loop *loop, unsigned outer_mask)
19443 unsigned this_mask = loop->mask;
19444 unsigned mask_all = 0;
19445 bool noisy = true;
19447 #ifdef ACCEL_COMPILER
19448 /* When device_type is supported, we want the device compiler to be
19449 noisy, if the loop parameters are device_type-specific. */
19450 noisy = false;
19451 #endif
19453 if (!loop->routine)
19455 bool auto_par = (loop->flags & OLF_AUTO) != 0;
19456 bool seq_par = (loop->flags & OLF_SEQ) != 0;
19458 this_mask = ((loop->flags >> OLF_DIM_BASE)
19459 & (GOMP_DIM_MASK (GOMP_DIM_MAX) - 1));
19461 if ((this_mask != 0) + auto_par + seq_par > 1)
19463 if (noisy)
19464 error_at (loop->loc,
19465 seq_par
19466 ? "%<seq%> overrides other OpenACC loop specifiers"
19467 : "%<auto%> conflicts with other OpenACC loop specifiers");
19468 auto_par = false;
19469 loop->flags &= ~OLF_AUTO;
19470 if (seq_par)
19472 loop->flags &=
19473 ~((GOMP_DIM_MASK (GOMP_DIM_MAX) - 1) << OLF_DIM_BASE);
19474 this_mask = 0;
19477 if (auto_par && (loop->flags & OLF_INDEPENDENT))
19478 mask_all |= GOMP_DIM_MASK (GOMP_DIM_MAX);
19481 if (this_mask & outer_mask)
19483 const oacc_loop *outer;
19484 for (outer = loop->parent; outer; outer = outer->parent)
19485 if (outer->mask & this_mask)
19486 break;
19488 if (noisy)
19490 if (outer)
19492 error_at (loop->loc,
19493 "%s uses same OpenACC parallelism as containing loop",
19494 loop->routine ? "routine call" : "inner loop");
19495 inform (outer->loc, "containing loop here");
19497 else
19498 error_at (loop->loc,
19499 "%s uses OpenACC parallelism disallowed by containing routine",
19500 loop->routine ? "routine call" : "loop");
19502 if (loop->routine)
19503 inform (DECL_SOURCE_LOCATION (loop->routine),
19504 "routine %qD declared here", loop->routine);
19506 this_mask &= ~outer_mask;
19508 else
19510 unsigned outermost = least_bit_hwi (this_mask);
19512 if (outermost && outermost <= outer_mask)
19514 if (noisy)
19516 error_at (loop->loc,
19517 "incorrectly nested OpenACC loop parallelism");
19519 const oacc_loop *outer;
19520 for (outer = loop->parent;
19521 outer->flags && outer->flags < outermost;
19522 outer = outer->parent)
19523 continue;
19524 inform (outer->loc, "containing loop here");
19527 this_mask &= ~outermost;
19531 loop->mask = this_mask;
19532 mask_all |= this_mask;
19534 if (loop->child)
19536 loop->inner = oacc_loop_fixed_partitions (loop->child,
19537 outer_mask | this_mask);
19538 mask_all |= loop->inner;
19541 if (loop->sibling)
19542 mask_all |= oacc_loop_fixed_partitions (loop->sibling, outer_mask);
19544 return mask_all;
19547 /* Walk the OpenACC loop hierarchy to assign auto-partitioned loops.
19548 OUTER_MASK is the partitioning this loop is contained within.
19549 Return the cumulative partitioning used by this loop, siblings and
19550 children. */
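/* Sketch of the assignment: for two nested loops whose flags carry both
   OLF_AUTO and OLF_INDEPENDENT inside an offload region with no explicit
   partitioning, the outer loop is given the outermost free level (gang,
   outer_mask + 1), and the inner loop then falls through to the
   innermost-available rule and receives vector; worker is left unused.  */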
19552 static unsigned
19553 oacc_loop_auto_partitions (oacc_loop *loop, unsigned outer_mask)
19555 bool assign = (loop->flags & OLF_AUTO) && (loop->flags & OLF_INDEPENDENT);
19556 bool noisy = true;
19558 #ifdef ACCEL_COMPILER
19559 /* When device_type is supported, we want the device compiler to be
19560 noisy, if the loop parameters are device_type-specific. */
19561 noisy = false;
19562 #endif
19564 if (assign && outer_mask < GOMP_DIM_MASK (GOMP_DIM_MAX - 1))
19566 /* Allocate the outermost loop at the outermost available
19567 level. */
19568 unsigned this_mask = outer_mask + 1;
19570 if (!(this_mask & loop->inner))
19571 loop->mask = this_mask;
19574 if (loop->child)
19576 unsigned child_mask = outer_mask | loop->mask;
19578 if (loop->mask || assign)
19579 child_mask |= GOMP_DIM_MASK (GOMP_DIM_MAX);
19581 loop->inner = oacc_loop_auto_partitions (loop->child, child_mask);
19584 if (assign && !loop->mask)
19586 /* Allocate the loop at the innermost available level. */
19587 unsigned this_mask = 0;
19589 /* Determine the outermost partitioning used within this loop. */
19590 this_mask = loop->inner | GOMP_DIM_MASK (GOMP_DIM_MAX);
19591 this_mask = least_bit_hwi (this_mask);
19593 /* Pick the partitioning just inside that one. */
19594 this_mask >>= 1;
19596 /* And avoid picking one use by an outer loop. */
19597 this_mask &= ~outer_mask;
19599 if (!this_mask && noisy)
19600 warning_at (loop->loc, 0,
19601 "insufficient partitioning available to parallelize loop");
19603 loop->mask = this_mask;
19606 if (assign && dump_file)
19607 fprintf (dump_file, "Auto loop %s:%d assigned %d\n",
19608 LOCATION_FILE (loop->loc), LOCATION_LINE (loop->loc),
19609 loop->mask);
19611 unsigned inner_mask = 0;
19613 if (loop->sibling)
19614 inner_mask |= oacc_loop_auto_partitions (loop->sibling, outer_mask);
19616 inner_mask |= loop->inner | loop->mask;
19618 return inner_mask;
19621 /* Walk the OpenACC loop hierarchy to check and assign partitioning
19622 axes. Return mask of partitioning. */
19624 static unsigned
19625 oacc_loop_partition (oacc_loop *loop, unsigned outer_mask)
19627 unsigned mask_all = oacc_loop_fixed_partitions (loop, outer_mask);
19629 if (mask_all & GOMP_DIM_MASK (GOMP_DIM_MAX))
19631 mask_all ^= GOMP_DIM_MASK (GOMP_DIM_MAX);
19632 mask_all |= oacc_loop_auto_partitions (loop, outer_mask);
19634 return mask_all;
19637 /* Default fork/join early expander. Delete the function calls if
19638 there is no RTL expander. */
19640 bool
19641 default_goacc_fork_join (gcall *ARG_UNUSED (call),
19642 const int *ARG_UNUSED (dims), bool is_fork)
19644 if (is_fork)
19645 return targetm.have_oacc_fork ();
19646 else
19647 return targetm.have_oacc_join ();
19650 /* Default goacc.reduction early expander.
19652 LHS-opt = IFN_REDUCTION (KIND, RES_PTR, VAR, LEVEL, OP, OFFSET)
19653 If RES_PTR is not integer-zerop:
19654 SETUP - emit 'LHS = *RES_PTR', LHS = NULL
19655 TEARDOWN - emit '*RES_PTR = VAR'
19656 If LHS is not NULL
19657 emit 'LHS = VAR' */
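/* So a SETUP call such as "v.1 = GOACC_REDUCTION (SETUP, &res, v, ...)"
   with a non-null receiver expands here to "v.1 = *&res", the matching
   TEARDOWN expands to "*&res = v", and every other case (including a null
   RES_PTR) degenerates to "lhs = var", or to nothing when there is no
   lhs.  */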
19659 void
19660 default_goacc_reduction (gcall *call)
19662 unsigned code = (unsigned)TREE_INT_CST_LOW (gimple_call_arg (call, 0));
19663 gimple_stmt_iterator gsi = gsi_for_stmt (call);
19664 tree lhs = gimple_call_lhs (call);
19665 tree var = gimple_call_arg (call, 2);
19666 gimple_seq seq = NULL;
19668 if (code == IFN_GOACC_REDUCTION_SETUP
19669 || code == IFN_GOACC_REDUCTION_TEARDOWN)
19671 /* Setup and Teardown need to copy from/to the receiver object,
19672 if there is one. */
19673 tree ref_to_res = gimple_call_arg (call, 1);
19675 if (!integer_zerop (ref_to_res))
19677 tree dst = build_simple_mem_ref (ref_to_res);
19678 tree src = var;
19680 if (code == IFN_GOACC_REDUCTION_SETUP)
19682 src = dst;
19683 dst = lhs;
19684 lhs = NULL;
19686 gimple_seq_add_stmt (&seq, gimple_build_assign (dst, src));
19690 /* Copy VAR to LHS, if there is an LHS. */
19691 if (lhs)
19692 gimple_seq_add_stmt (&seq, gimple_build_assign (lhs, var));
19694 gsi_replace_with_seq (&gsi, seq, true);
19697 /* Main entry point for oacc transformations which run on the device
19698 compiler after LTO, so we know what the target device is at this
19699 point (including the host fallback). */
19701 static unsigned int
19702 execute_oacc_device_lower ()
19704 tree attrs = get_oacc_fn_attrib (current_function_decl);
19706 if (!attrs)
19707 /* Not an offloaded function. */
19708 return 0;
19710 /* Parse the default dim argument exactly once. */
19711 if ((const void *)flag_openacc_dims != &flag_openacc_dims)
19713 oacc_parse_default_dims (flag_openacc_dims);
19714 flag_openacc_dims = (char *)&flag_openacc_dims;
19717 /* Discover, partition and process the loops. */
19718 oacc_loop *loops = oacc_loop_discovery ();
19719 int fn_level = oacc_fn_attrib_level (attrs);
19721 if (dump_file)
19722 fprintf (dump_file, oacc_fn_attrib_kernels_p (attrs)
19723 ? "Function is kernels offload\n"
19724 : fn_level < 0 ? "Function is parallel offload\n"
19725 : "Function is routine level %d\n", fn_level);
19727 unsigned outer_mask = fn_level >= 0 ? GOMP_DIM_MASK (fn_level) - 1 : 0;
19728 unsigned used_mask = oacc_loop_partition (loops, outer_mask);
19729 int dims[GOMP_DIM_MAX];
19731 oacc_validate_dims (current_function_decl, attrs, dims, fn_level, used_mask);
19733 if (dump_file)
19735 const char *comma = "Compute dimensions [";
19736 for (int ix = 0; ix != GOMP_DIM_MAX; ix++, comma = ", ")
19737 fprintf (dump_file, "%s%d", comma, dims[ix]);
19738 fprintf (dump_file, "]\n");
19741 oacc_loop_process (loops);
19742 if (dump_file)
19744 fprintf (dump_file, "OpenACC loops\n");
19745 dump_oacc_loop (dump_file, loops, 0);
19746 fprintf (dump_file, "\n");
19749 /* Offloaded targets may introduce new basic blocks, which require
19750 dominance information to update SSA. */
19751 calculate_dominance_info (CDI_DOMINATORS);
19753 /* Now lower internal loop functions to target-specific code
19754 sequences. */
19755 basic_block bb;
19756 FOR_ALL_BB_FN (bb, cfun)
19757 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
19759 gimple *stmt = gsi_stmt (gsi);
19760 if (!is_gimple_call (stmt))
19762 gsi_next (&gsi);
19763 continue;
19766 gcall *call = as_a <gcall *> (stmt);
19767 if (!gimple_call_internal_p (call))
19769 gsi_next (&gsi);
19770 continue;
19773 /* Rewind to allow rescan. */
19774 gsi_prev (&gsi);
19775 bool rescan = false, remove = false;
19776 enum internal_fn ifn_code = gimple_call_internal_fn (call);
19778 switch (ifn_code)
19780 default: break;
19782 case IFN_GOACC_LOOP:
19783 oacc_xform_loop (call);
19784 rescan = true;
19785 break;
19787 case IFN_GOACC_REDUCTION:
19788 /* Mark the function for SSA renaming. */
19789 mark_virtual_operands_for_renaming (cfun);
19791 /* If the level is -1, this ended up being an unused
19792 axis. Handle as a default. */
19793 if (integer_minus_onep (gimple_call_arg (call, 3)))
19794 default_goacc_reduction (call);
19795 else
19796 targetm.goacc.reduction (call);
19797 rescan = true;
19798 break;
19800 case IFN_UNIQUE:
19802 enum ifn_unique_kind kind
19803 = ((enum ifn_unique_kind)
19804 TREE_INT_CST_LOW (gimple_call_arg (call, 0)));
19806 switch (kind)
19808 default:
19809 gcc_unreachable ();
19811 case IFN_UNIQUE_OACC_FORK:
19812 case IFN_UNIQUE_OACC_JOIN:
19813 if (integer_minus_onep (gimple_call_arg (call, 2)))
19814 remove = true;
19815 else if (!targetm.goacc.fork_join
19816 (call, dims, kind == IFN_UNIQUE_OACC_FORK))
19817 remove = true;
19818 break;
19820 case IFN_UNIQUE_OACC_HEAD_MARK:
19821 case IFN_UNIQUE_OACC_TAIL_MARK:
19822 remove = true;
19823 break;
19825 break;
19829 if (gsi_end_p (gsi))
19830 /* We rewound past the beginning of the BB. */
19831 gsi = gsi_start_bb (bb);
19832 else
19833 /* Undo the rewind. */
19834 gsi_next (&gsi);
19836 if (remove)
19838 if (gimple_vdef (call))
19839 replace_uses_by (gimple_vdef (call), gimple_vuse (call));
19840 if (gimple_call_lhs (call))
19842 /* Propagate the data dependency var. */
19843 gimple *ass = gimple_build_assign (gimple_call_lhs (call),
19844 gimple_call_arg (call, 1));
19845 gsi_replace (&gsi, ass, false);
19847 else
19848 gsi_remove (&gsi, true);
19850 else if (!rescan)
19851 /* If not rescanning, advance over the call. */
19852 gsi_next (&gsi);
19855 free_oacc_loop (loops);
19857 return 0;
19860 /* Default launch dimension validator. Force everything to 1. A
19861 backend that wants to provide larger dimensions must override this
19862 hook. */
19864 bool
19865 default_goacc_validate_dims (tree ARG_UNUSED (decl), int *dims,
19866 int ARG_UNUSED (fn_level))
19868 bool changed = false;
19870 for (unsigned ix = 0; ix != GOMP_DIM_MAX; ix++)
19872 if (dims[ix] != 1)
19874 dims[ix] = 1;
19875 changed = true;
19879 return changed;
19882 /* Default dimension bound is unknown on accelerator and 1 on host. */
19885 default_goacc_dim_limit (int ARG_UNUSED (axis))
19887 #ifdef ACCEL_COMPILER
19888 return 0;
19889 #else
19890 return 1;
19891 #endif
19894 namespace {
19896 const pass_data pass_data_oacc_device_lower =
19898 GIMPLE_PASS, /* type */
19899 "oaccdevlow", /* name */
19900 OPTGROUP_NONE, /* optinfo_flags */
19901 TV_NONE, /* tv_id */
19902 PROP_cfg, /* properties_required */
19903 0 /* Possibly PROP_gimple_eomp. */, /* properties_provided */
19904 0, /* properties_destroyed */
19905 0, /* todo_flags_start */
19906 TODO_update_ssa | TODO_cleanup_cfg, /* todo_flags_finish */
19909 class pass_oacc_device_lower : public gimple_opt_pass
19911 public:
19912 pass_oacc_device_lower (gcc::context *ctxt)
19913 : gimple_opt_pass (pass_data_oacc_device_lower, ctxt)
19916 /* opt_pass methods: */
19917 virtual bool gate (function *) { return flag_openacc; };
19919 virtual unsigned int execute (function *)
19921 return execute_oacc_device_lower ();
19924 }; // class pass_oacc_device_lower
19926 } // anon namespace
19928 gimple_opt_pass *
19929 make_pass_oacc_device_lower (gcc::context *ctxt)
19931 return new pass_oacc_device_lower (ctxt);
19934 /* "omp declare target link" handling pass. */
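/* Illustrative use (user source, not this file):

       extern int big[1 << 20];
       #pragma omp declare target link (big)

   On the accelerator such a variable has a DECL_VALUE_EXPR referring to a
   pointer that is filled in when the data is mapped; this pass finds
   statements that still mention the variable directly and regimplifies
   their operands so the value expression is substituted.  */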
19936 namespace {
19938 const pass_data pass_data_omp_target_link =
19940 GIMPLE_PASS, /* type */
19941 "omptargetlink", /* name */
19942 OPTGROUP_NONE, /* optinfo_flags */
19943 TV_NONE, /* tv_id */
19944 PROP_ssa, /* properties_required */
19945 0, /* properties_provided */
19946 0, /* properties_destroyed */
19947 0, /* todo_flags_start */
19948 TODO_update_ssa, /* todo_flags_finish */
19951 class pass_omp_target_link : public gimple_opt_pass
19953 public:
19954 pass_omp_target_link (gcc::context *ctxt)
19955 : gimple_opt_pass (pass_data_omp_target_link, ctxt)
19958 /* opt_pass methods: */
19959 virtual bool gate (function *fun)
19961 #ifdef ACCEL_COMPILER
19962 tree attrs = DECL_ATTRIBUTES (fun->decl);
19963 return lookup_attribute ("omp declare target", attrs)
19964 || lookup_attribute ("omp target entrypoint", attrs);
19965 #else
19966 (void) fun;
19967 return false;
19968 #endif
19971 virtual unsigned execute (function *);
19974 /* Callback for walk_gimple_stmt used to scan for link var operands. */
19976 static tree
19977 find_link_var_op (tree *tp, int *walk_subtrees, void *)
19979 tree t = *tp;
19981 if (VAR_P (t) && DECL_HAS_VALUE_EXPR_P (t)
19982 && lookup_attribute ("omp declare target link", DECL_ATTRIBUTES (t)))
19984 *walk_subtrees = 0;
19985 return t;
19988 return NULL_TREE;
19991 unsigned
19992 pass_omp_target_link::execute (function *fun)
19994 basic_block bb;
19995 FOR_EACH_BB_FN (bb, fun)
19997 gimple_stmt_iterator gsi;
19998 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
19999 if (walk_gimple_stmt (&gsi, NULL, find_link_var_op, NULL))
20000 gimple_regimplify_operands (gsi_stmt (gsi), &gsi);
20003 return 0;
20006 } // anon namespace
20008 gimple_opt_pass *
20009 make_pass_omp_target_link (gcc::context *ctxt)
20011 return new pass_omp_target_link (ctxt);
20014 #include "gt-omp-low.h"